// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "allocation.h"
#include "list.h"
#include "log.h"

namespace v8 {
namespace internal {

class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area. The page size is deliberately chosen to be 8K bytes.
// The first word of a page is an opaque page header that has the
// address of the next page and its ownership information. The second word may
// have the allocation top address of this page. Heap objects are aligned to
// the pointer size.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than 8K.
//
// A card marking write barrier is used to keep track of intergenerational
// references. Old space pages are divided into regions of Page::kRegionSize
// size. Each region has a corresponding dirty bit in the page header which is
// set if the region might contain pointers to new space. For details about
// dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
// method body.
//
// During scavenges and mark-sweep collections we iterate intergenerational
// pointers without decoding heap object maps, so if the page belongs to old
// pointer space or large object space it is essential to guarantee that
// the page does not contain any garbage pointers to new space: every pointer-
// aligned word which satisfies the Heap::InNewSpace() predicate must be a
// pointer to a live heap object in new space. Thus objects in old pointer
// and large object spaces should have a special layout (e.g. no bare integer
// fields). This requirement does not apply to map space, which is iterated in
// a special fashion. However, we still require pointer fields of dead maps to
// be cleaned.
//
// To enable lazy cleaning of old space pages we use the notion of an
// allocation watermark. Every pointer under the watermark is considered to be
// well formed. The page allocation watermark is not necessarily equal to the
// page allocation top, but all live objects on the page should reside under
// the allocation watermark. During a scavenge the allocation watermark might
// be bumped and invalid pointers might appear below it. To avoid following
// them we store a valid watermark into a special field in the page header and
// set the page's WATERMARK_INVALIDATED flag. For details see comments in the
// Page::SetAllocationWatermark() method body.
//

// Some assertion macros used in debug mode.

#define ASSERT_PAGE_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_MAP_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size) \
  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset) \
  ASSERT((Page::kObjectStartOffset <= offset) \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index) \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;

// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
// address is always aligned to the 8K page size.
//
// Each page starts with a header of Page::kPageHeaderSize size which contains
// bookkeeping data.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[
  //
  // Note that this function only works for addresses in normal paged
  // spaces and addresses in the first 8K of large object pages (i.e.,
  // the start of large objects but not necessarily derived pointers
  // within them).
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    ASSERT_PAGE_OFFSET(p->Offset(top));
    return p;
  }

  // Returns the start address of this page.
  Address address() { return reinterpret_cast<Address>(this); }

  // Checks whether this is a valid page address.
  bool is_valid() { return address() != NULL; }

  // Returns the next page of this page.
  inline Page* next_page();

  // Return the end of allocation in this page. Undefined for unused pages.
  inline Address AllocationTop();

  // Return the allocation watermark for the page.
  // For old space pages it is guaranteed that the area under the watermark
  // does not contain any garbage pointers to new space.
  inline Address AllocationWatermark();

  // Return the allocation watermark offset from the beginning of the page.
  inline uint32_t AllocationWatermarkOffset();

  inline void SetAllocationWatermark(Address allocation_watermark);

  inline void SetCachedAllocationWatermark(Address allocation_watermark);
  inline Address CachedAllocationWatermark();

  // Returns the start address of the object area in this page.
  Address ObjectAreaStart() { return address() + kObjectStartOffset; }

  // Returns the end address (exclusive) of the object area in this page.
  Address ObjectAreaEnd() { return address() + Page::kPageSize; }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // True if this page was in use before the current compaction started.
  // The result is valid only for pages owned by paged spaces and
  // only after PagedSpace::PrepareForMarkCompact was called.
  inline bool WasInUseBeforeMC();

  inline void SetWasInUseBeforeMC(bool was_in_use);

  // True if this page is a large object page.
  inline bool IsLargeObjectPage();

  inline void SetIsLargeObjectPage(bool is_large_object_page);

  inline bool IsPageExecutable();

  inline void SetIsPageExecutable(bool is_page_executable);

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    ASSERT_PAGE_OFFSET(offset);
    return offset;
  }

  // Returns the address for a given offset into this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------
  // Card marking support

  static const uint32_t kAllRegionsCleanMarks = 0x0;
  static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;

  inline uint32_t GetRegionMarks();
  inline void SetRegionMarks(uint32_t dirty);

  inline uint32_t GetRegionMaskForAddress(Address addr);
  inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
  inline int GetRegionNumberForAddress(Address addr);

  inline void MarkRegionDirty(Address addr);
  inline bool IsRegionDirty(Address addr);

  inline void ClearRegionMarks(Address start,
                               Address end,
                               bool reaches_limit);

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
                                     kIntSize + kPointerSize + kPointerSize;
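  // That is one word for each of the header fields declared at the end of
  // this class: opaque_header, flags_, dirty_regions_, mc_page_index,
  // mc_first_forwarded and heap_.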

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both.
  static const int kObjectStartOffset =
      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));

  // Object area size in bytes.
  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page.
  static const int kMaxHeapObjectSize = kObjectAreaSize;

  static const int kDirtyFlagOffset = 2 * kPointerSize;
  static const int kRegionSizeLog2 = 8;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);

  STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
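  // Rough sketch of the region bookkeeping implied by the constants above
  // (illustrative only; the authoritative encoding is in the inline
  // Page::GetRegionNumberForAddress() declared earlier): a page is covered by
  // kPageSize / kRegionSize == kBitsPerInt regions, each owning one bit of
  // dirty_regions_, and an address maps to roughly
  //   region = (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2.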

  enum PageFlag {
    IS_NORMAL_PAGE = 0,
    WAS_IN_USE_BEFORE_MC,

    // Page allocation watermark was bumped by preallocation during scavenge.
    // The correct watermark can be retrieved by the
    // CachedAllocationWatermark() method.
    WATERMARK_INVALIDATED,
    IS_EXECUTABLE,
    NUM_PAGE_FLAGS  // Must be last
  };
  static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;

  // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
  // scavenge we just invalidate the watermark on each old space page after
  // processing it. Then we flip the meaning of the WATERMARK_INVALIDATED
  // flag at the beginning of the next scavenge, and each page becomes marked
  // as having a valid watermark.
  //
  // The following invariant must hold for pages in old pointer and map spaces:
  // If a page is in use then it is marked as having an invalid watermark at
  // the beginning and at the end of any GC.
  //
  // This invariant guarantees that after flipping the flag meaning at the
  // beginning of a scavenge all pages in use will be marked as having a valid
  // watermark.
  static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);

  // Returns true if the page allocation watermark was not altered during
  // scavenge.
  inline bool IsWatermarkValid();

  inline void InvalidateWatermark(bool value);

  inline bool GetPageFlag(PageFlag flag);
  inline void SetPageFlag(PageFlag flag, bool value);
  inline void ClearPageFlags();

  inline void ClearGCFields();

  static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
  static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
  static const uint32_t kAllocationWatermarkOffsetMask =
      ((1 << kAllocationWatermarkOffsetBits) - 1) <<
          kAllocationWatermarkOffsetShift;

  static const uint32_t kFlagsMask =
      ((1 << kAllocationWatermarkOffsetShift) - 1);

  STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
               kAllocationWatermarkOffsetBits);

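  // Sketch of how the watermark is packed into flags_ (illustrative only; the
  // actual accessors are the inline AllocationWatermarkOffset() and
  // SetAllocationWatermark() declared above): the low
  // kAllocationWatermarkOffsetShift bits hold the page flags, and the
  // watermark offset is recovered roughly as
  //   (flags_ & kAllocationWatermarkOffsetMask)
  //       >> kAllocationWatermarkOffsetShift.
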
  //---------------------------------------------------------------------------
  // Page header description.
  //
  // If a page is not in the large object space, the first word,
  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
  // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
  // or [next_page_start, next_page_end[. It cannot point to a valid address
  // in the current page. If a page is in the large object space, the first
  // word *may* (if the page start and large object chunk start are the
  // same) contain the address of the next large object chunk.
  intptr_t opaque_header;

  // If the page is not in the large object space, the low-order bit of the
  // second word is set. If the page is in the large object space, the
  // second word *may* (if the page start and large object chunk start are
  // the same) contain the large object chunk size. In either case, the
  // low-order bit for large object pages will be cleared.
  // For normal pages this word is used to store the page flags and
  // the offset of the allocation top.
  intptr_t flags_;

  // This field contains dirty marks for regions covering the page. Only dirty
  // regions might contain intergenerational references.
  // Only 32 dirty marks are supported, so for large object pages several
  // regions might be mapped to a single dirty mark.
  uint32_t dirty_regions_;

  // The index of the page in its owner space.
  int mc_page_index;

  // During mark-compact collections this field contains the forwarding address
  // of the first live object in this page.
  // During scavenge collections this field is used to store the allocation
  // watermark if it is altered during scavenge.
  Address mc_first_forwarded;

  Heap* heap_;
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap), id_(id), executable_(executable) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

  // After calling this we can allocate a certain number of bytes using only
  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
  // without using freelists or causing a GC. This is used by partial
  // snapshots. It returns true if space was reserved or false if a GC is
  // needed. For paged spaces the space requested must include the space wasted
  // at the end of each page when allocating linearly.
  virtual bool ReserveSpace(int bytes) = 0;

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool Setup(const size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  bool exists() { return code_range_ != NULL; }
  bool contains(Address address) {
    if (code_range_ == NULL) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
                                          size_t* allocated);
  void FreeRawMemory(void* buf, size_t length);

 private:
  CodeRange();

  // The reserved range of virtual memory that all code objects are put in.
  VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {}
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {}

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
  void GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);

  friend class Isolate;

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};

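// A minimal usage sketch of the interface above (hypothetical caller code,
// not part of this header):
//
//   CodeRange* code_range = ...;  // obtained from the owning isolate
//   if (code_range->exists() && code_range->contains(pc)) {
//     // pc lies inside the reserved executable range.
//   }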

// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
// space). A paged chunk consists of pages. Pages in a chunk have contiguous
// addresses and are linked as a list.
//
// The allocator keeps an initial chunk which is used for the new space. The
// leftover regions of the initial chunk are used for the initial chunks of
// old space and map space if they are big enough to hold at least one page.
// The allocator assumes that there is one old space and one map space; each
// is expanded by allocating kPagesPerChunk pages at a time, except for the
// last expansion (before running out of space). The first chunk may contain
// fewer than kPagesPerChunk pages as well.
//
// The memory allocator also allocates chunks for the large object space, but
// they are managed by the space itself. The new space does not expand.
//
// The fact that pages for paged spaces are allocated and deallocated in chunks
// induces a constraint on the order of pages in a linked list. We say that
// pages are linked in the chunk-order if and only if every two consecutive
// pages from the same chunk are consecutive in the linked list.
//


class MemoryAllocator {
 public:
  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool Setup(intptr_t max_capacity, intptr_t capacity_executable);

  // Deletes valid chunks.
  void TearDown();

  // Reserves an initial address range of virtual memory to be split between
  // the two new space semispaces, the old space, and the map space. The
  // memory is not yet committed or assigned to spaces and split into pages.
  // The initial chunk is unmapped when the memory allocator is torn down.
  // This function should only be called when there is not already a reserved
  // initial chunk (initial_chunk_ should be NULL). It returns the start
  // address of the initial chunk if successful, with the side effect of
  // setting the initial chunk, or else NULL if unsuccessful and leaves the
  // initial chunk NULL.
  void* ReserveInitialChunk(const size_t requested);

  // Commits pages from an as-yet-unmanaged block of virtual memory into a
  // paged space. The block should be part of the initial chunk reserved via
  // a call to ReserveInitialChunk. The number of pages is always returned in
  // the output parameter num_pages. This function assumes that the start
  // address is non-null and that it is big enough to hold at least one
  // page-aligned page. The call always succeeds, and num_pages is always
  // greater than zero.
  Page* CommitPages(Address start, size_t size, PagedSpace* owner,
                    int* num_pages);

  // Commit a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  void ZapBlock(Address start, size_t size);

  // Attempts to allocate the requested (non-zero) number of pages from the
  // OS. Fewer pages might be allocated than requested. If it fails to
  // allocate memory for the OS or cannot allocate a single page, this
  // function returns an invalid page pointer (NULL). The caller must check
  // whether the returned page is valid (by calling Page::is_valid()). It is
  // guaranteed that allocated pages have contiguous addresses. The actual
  // number of allocated pages is returned in the output parameter
  // allocated_pages. If the PagedSpace owner is executable and there is
  // a code range, the pages are allocated from the code range.
  Page* AllocatePages(int requested_pages, int* allocated_pages,
                      PagedSpace* owner);

  // Frees pages from a given page and after. Requires pages to be
  // linked in chunk-order (see comment for class).
  // If 'p' is the first page of a chunk, pages from 'p' are freed
  // and this function returns an invalid page pointer.
  // Otherwise, the function searches a page after 'p' that is
  // the first page of a chunk. Pages after the found page
  // are freed and the function returns 'p'.
  Page* FreePages(Page* p);

  // Frees all pages owned by given space.
  void FreeAllPages(PagedSpace* space);

  // Allocates and frees raw memory of certain size.
  // These are just thin wrappers around OS::Allocate and OS::Free,
  // but keep track of allocated bytes as part of heap.
  // If the flag is EXECUTABLE and a code range exists, the requested
  // memory is allocated from the code range. If a code range exists
  // and the freed memory is in it, the code range manages the freed memory.
  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
                                          size_t* allocated,
                                          Executability executable);
  void FreeRawMemory(void* buf,
                     size_t length,
                     Executability executable);
  void PerformAllocationCallback(ObjectSpace space,
                                 AllocationAction action,
                                 size_t size);

  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                   ObjectSpace space,
                                   AllocationAction action);
  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);

  // Returns the maximum available bytes of heaps.
  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated spaces in bytes.
  intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns allocated executable spaces in bytes.
  intptr_t SizeExecutable() { return size_executable_; }

  // Returns maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
  }

  // Links two pages.
  inline void SetNextPage(Page* prev, Page* next);

  // Returns the next page of a given page.
  inline Page* GetNextPage(Page* p);

  // Checks whether a page belongs to a space.
  inline bool IsPageInSpace(Page* p, PagedSpace* space);

  // Returns the space that owns the given page.
  inline PagedSpace* PageOwner(Page* page);

  // Finds the first/last page in the same chunk as a given page.
  Page* FindFirstPageInSameChunk(Page* p);
  Page* FindLastPageInSameChunk(Page* p);

  // Relinks the list of pages owned by a space to make it chunk-ordered.
  // Returns the new first and last pages of the space.
  // Also returns the last page in the relinked list which has the
  // WasInUseBeforeMC flag set.
  void RelinkPageListInChunkOrder(PagedSpace* space,
                                  Page** first_page,
                                  Page** last_page,
                                  Page** last_page_in_use);

#ifdef DEBUG
  // Reports statistic info of the space.
  void ReportStatistics();
#endif

  // Due to encoding limitation, we can only have 8K chunks.
  static const int kMaxNofChunks = 1 << kPageSizeBits;
  // If a chunk has at least 16 pages, the maximum heap size is about
  // 8K * 8K * 16 = 1G bytes.
#ifdef V8_TARGET_ARCH_X64
  static const int kPagesPerChunk = 32;
  // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
  static const int kChunkTableLevels = 4;
  static const int kChunkTableBitsPerLevel = 12;
#else
  static const int kPagesPerChunk = 16;
  // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
  static const int kChunkTableLevels = 2;
  static const int kChunkTableBitsPerLevel = 8;
#endif

 private:
  MemoryAllocator();

  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;

  // Maximum space size in bytes.
  intptr_t capacity_;
  // Maximum subset of capacity_ that can be executable.
  intptr_t capacity_executable_;

  // Allocated space size in bytes.
  intptr_t size_;

  // Allocated executable space size in bytes.
  intptr_t size_executable_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {
    }
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };
  // A list of callbacks that are triggered when memory is allocated or freed.
  List<MemoryAllocationCallbackRegistration>
      memory_allocation_callbacks_;

  // The initial chunk of virtual memory.
  VirtualMemory* initial_chunk_;

  // Allocated chunk info: chunk start address, chunk size, and owning space.
  class ChunkInfo BASE_EMBEDDED {
   public:
    ChunkInfo() : address_(NULL),
                  size_(0),
                  owner_(NULL),
                  executable_(NOT_EXECUTABLE),
                  owner_identity_(FIRST_SPACE) {}
    inline void init(Address a, size_t s, PagedSpace* o);
    Address address() { return address_; }
    size_t size() { return size_; }
    PagedSpace* owner() { return owner_; }
    // We save executability of the owner to allow using it
    // when collecting stats after the owner has been destroyed.
    Executability executable() const { return executable_; }
    AllocationSpace owner_identity() const { return owner_identity_; }

   private:
    Address address_;
    size_t size_;
    PagedSpace* owner_;
    Executability executable_;
    AllocationSpace owner_identity_;
  };

  // chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
  List<ChunkInfo> chunks_;
  List<int> free_chunk_ids_;
  int max_nof_chunks_;
  int top_;

  // Push/pop a free chunk id onto/from the stack.
  void Push(int free_chunk_id);
  int Pop();
  bool OutOfChunkIds() { return top_ == 0; }

  // Frees a chunk.
  void DeleteChunk(int chunk_id);

  // Basic check whether a chunk id is in the valid range.
  inline bool IsValidChunkId(int chunk_id);

  // Checks whether a chunk id identifies an allocated chunk.
  inline bool IsValidChunk(int chunk_id);

  // Returns the chunk id that a page belongs to.
  inline int GetChunkId(Page* p);

  // True if the address lies in the initial chunk.
  inline bool InInitialChunk(Address address);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  Page* RelinkPagesInChunk(int chunk_id,
                           Address chunk_start,
                           size_t chunk_size,
                           Page* prev,
                           Page** last_page_in_use);

  friend class Isolate;

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
};
762
763
764// -----------------------------------------------------------------------------
765// Interface for heap object iterator to be implemented by all object space
766// object iterators.
767//
Leon Clarked91b9f72010-01-27 17:25:45 +0000768// NOTE: The space specific object iterators also implements the own next()
769// method which is used to avoid using virtual functions
Steve Blocka7e24c12009-10-30 11:49:00 +0000770// iterating a specific space.
771
772class ObjectIterator : public Malloced {
773 public:
774 virtual ~ObjectIterator() { }
775
Steve Blocka7e24c12009-10-30 11:49:00 +0000776 virtual HeapObject* next_object() = 0;
777};
778
779
780// -----------------------------------------------------------------------------
781// Heap object iterator in new/old/map spaces.
782//
783// A HeapObjectIterator iterates objects from a given address to the
784// top of a space. The given address must be below the current
785// allocation pointer (space top). There are some caveats.
786//
787// (1) If the space top changes upward during iteration (because of
788// allocating new objects), the iterator does not iterate objects
789// above the original space top. The caller must create a new
790// iterator starting from the old top in order to visit these new
791// objects.
792//
793// (2) If new objects are allocated below the original allocation top
794// (e.g., free-list allocation in paged spaces), the new objects
795// may or may not be iterated depending on their position with
796// respect to the current point of iteration.
797//
798// (3) The space top should not change downward during iteration,
799// otherwise the iterator will return not-necessarily-valid
800// objects.
801
802class HeapObjectIterator: public ObjectIterator {
803 public:
804 // Creates a new object iterator in a given space. If a start
805 // address is not given, the iterator starts from the space bottom.
806 // If the size function is not given, the iterator calls the default
807 // Object::Size().
808 explicit HeapObjectIterator(PagedSpace* space);
809 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
810 HeapObjectIterator(PagedSpace* space, Address start);
811 HeapObjectIterator(PagedSpace* space,
812 Address start,
813 HeapObjectCallback size_func);
Kristian Monsen80d68ea2010-09-08 11:05:35 +0100814 HeapObjectIterator(Page* page, HeapObjectCallback size_func);
Steve Blocka7e24c12009-10-30 11:49:00 +0000815
Leon Clarked91b9f72010-01-27 17:25:45 +0000816 inline HeapObject* next() {
817 return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
818 }
Steve Blocka7e24c12009-10-30 11:49:00 +0000819
820 // implementation of ObjectIterator.
Steve Blocka7e24c12009-10-30 11:49:00 +0000821 virtual HeapObject* next_object() { return next(); }
822
823 private:
824 Address cur_addr_; // current iteration point
825 Address end_addr_; // end iteration point
826 Address cur_limit_; // current page limit
827 HeapObjectCallback size_func_; // size function
828 Page* end_page_; // caches the page of the end address
829
Leon Clarked91b9f72010-01-27 17:25:45 +0000830 HeapObject* FromCurrentPage() {
831 ASSERT(cur_addr_ < cur_limit_);
832
833 HeapObject* obj = HeapObject::FromAddress(cur_addr_);
834 int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
835 ASSERT_OBJECT_SIZE(obj_size);
836
837 cur_addr_ += obj_size;
838 ASSERT(cur_addr_ <= cur_limit_);
839
840 return obj;
841 }
842
843 // Slow path of next, goes into the next page.
844 HeapObject* FromNextPage();
Steve Blocka7e24c12009-10-30 11:49:00 +0000845
846 // Initializes fields.
847 void Initialize(Address start, Address end, HeapObjectCallback size_func);
848
849#ifdef DEBUG
850 // Verifies whether fields have valid values.
851 void Verify();
852#endif
853};
854
855
856// -----------------------------------------------------------------------------
857// A PageIterator iterates the pages in a paged space.
858//
859// The PageIterator class provides three modes for iterating pages in a space:
860// PAGES_IN_USE iterates pages containing allocated objects.
861// PAGES_USED_BY_MC iterates pages that hold relocated objects during a
862// mark-compact collection.
863// ALL_PAGES iterates all pages in the space.
864//
865// There are some caveats.
866//
867// (1) If the space expands during iteration, new pages will not be
868// returned by the iterator in any mode.
869//
870// (2) If new objects are allocated during iteration, they will appear
871// in pages returned by the iterator. Allocation may cause the
872// allocation pointer or MC allocation pointer in the last page to
873// change between constructing the iterator and iterating the last
874// page.
875//
876// (3) The space should not shrink during iteration, otherwise the
877// iterator will return deallocated pages.
878
879class PageIterator BASE_EMBEDDED {
880 public:
881 enum Mode {
882 PAGES_IN_USE,
883 PAGES_USED_BY_MC,
884 ALL_PAGES
885 };
886
887 PageIterator(PagedSpace* space, Mode mode);
888
889 inline bool has_next();
890 inline Page* next();
891
892 private:
893 PagedSpace* space_;
894 Page* prev_page_; // Previous page returned.
895 Page* stop_page_; // Page to stop at (last page returned by the iterator).
896};
897
898
899// -----------------------------------------------------------------------------
900// A space has a list of pages. The next page can be accessed via
901// Page::next_page() call. The next page of the last page is an
902// invalid page pointer. A space can expand and shrink dynamically.
903
904// An abstraction of allocation and relocation pointers in a page-structured
905// space.
906class AllocationInfo {
907 public:
908 Address top; // current allocation top
909 Address limit; // current allocation limit
910
911#ifdef DEBUG
912 bool VerifyPagedAllocation() {
913 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
914 && (top <= limit);
915 }
916#endif
917};
918
919
920// An abstraction of the accounting statistics of a page-structured space.
921// The 'capacity' of a space is the number of object-area bytes (ie, not
922// including page bookkeeping structures) currently in the space. The 'size'
923// of a space is the number of allocated bytes, the 'waste' in the space is
924// the number of bytes that are not allocated and not available to
925// allocation without reorganizing the space via a GC (eg, small blocks due
926// to internal fragmentation, top of page areas in map space), and the bytes
927// 'available' is the number of unallocated bytes that are not waste. The
928// capacity is the sum of size, waste, and available.
929//
930// The stats are only set by functions that ensure they stay balanced. These
931// functions increase or decrease one of the non-capacity stats in
932// conjunction with capacity, or else they always balance increases and
933// decreases to the non-capacity stats.
934class AllocationStats BASE_EMBEDDED {
935 public:
936 AllocationStats() { Clear(); }
937
938 // Zero out all the allocation statistics (ie, no capacity).
939 void Clear() {
940 capacity_ = 0;
941 available_ = 0;
942 size_ = 0;
943 waste_ = 0;
944 }
945
946 // Reset the allocation statistics (ie, available = capacity with no
947 // wasted or allocated bytes).
948 void Reset() {
949 available_ = capacity_;
950 size_ = 0;
951 waste_ = 0;
952 }
953
954 // Accessors for the allocation statistics.
Ben Murdochf87a2032010-10-22 12:50:53 +0100955 intptr_t Capacity() { return capacity_; }
956 intptr_t Available() { return available_; }
957 intptr_t Size() { return size_; }
958 intptr_t Waste() { return waste_; }
Steve Blocka7e24c12009-10-30 11:49:00 +0000959
960 // Grow the space by adding available bytes.
961 void ExpandSpace(int size_in_bytes) {
962 capacity_ += size_in_bytes;
963 available_ += size_in_bytes;
964 }
965
966 // Shrink the space by removing available bytes.
967 void ShrinkSpace(int size_in_bytes) {
968 capacity_ -= size_in_bytes;
969 available_ -= size_in_bytes;
970 }
971
972 // Allocate from available bytes (available -> size).
Ben Murdochf87a2032010-10-22 12:50:53 +0100973 void AllocateBytes(intptr_t size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000974 available_ -= size_in_bytes;
975 size_ += size_in_bytes;
976 }
977
978 // Free allocated bytes, making them available (size -> available).
Ben Murdochf87a2032010-10-22 12:50:53 +0100979 void DeallocateBytes(intptr_t size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000980 size_ -= size_in_bytes;
981 available_ += size_in_bytes;
982 }
983
984 // Waste free bytes (available -> waste).
985 void WasteBytes(int size_in_bytes) {
986 available_ -= size_in_bytes;
987 waste_ += size_in_bytes;
988 }
989
990 // Consider the wasted bytes to be allocated, as they contain filler
991 // objects (waste -> size).
Ben Murdochf87a2032010-10-22 12:50:53 +0100992 void FillWastedBytes(intptr_t size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000993 waste_ -= size_in_bytes;
994 size_ += size_in_bytes;
995 }
996
997 private:
Ben Murdochf87a2032010-10-22 12:50:53 +0100998 intptr_t capacity_;
999 intptr_t available_;
1000 intptr_t size_;
1001 intptr_t waste_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001002};
1003
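// For example (illustrative numbers only): after ExpandSpace(8192) followed by
// AllocateBytes(128) and WasteBytes(32), Capacity() == 8192, Size() == 128,
// Waste() == 32 and Available() == 8032, so capacity == size + waste +
// available still holds, as required by the comment above.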
1004
1005class PagedSpace : public Space {
1006 public:
1007 // Creates a space with a maximum capacity, and an id.
Steve Block44f0eee2011-05-26 01:26:41 +01001008 PagedSpace(Heap* heap,
1009 intptr_t max_capacity,
Ben Murdochf87a2032010-10-22 12:50:53 +01001010 AllocationSpace id,
1011 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00001012
1013 virtual ~PagedSpace() {}
1014
1015 // Set up the space using the given address range of virtual memory (from
1016 // the memory allocator's initial chunk) if possible. If the block of
1017 // addresses is not big enough to contain a single page-aligned page, a
1018 // fresh chunk will be allocated.
1019 bool Setup(Address start, size_t size);
1020
1021 // Returns true if the space has been successfully set up and not
1022 // subsequently torn down.
1023 bool HasBeenSetup();
1024
1025 // Cleans up the space, frees all pages in this space except those belonging
1026 // to the initial chunk, uncommits addresses in the initial chunk.
1027 void TearDown();
1028
1029 // Checks whether an object/address is in this space.
1030 inline bool Contains(Address a);
1031 bool Contains(HeapObject* o) { return Contains(o->address()); }
Ben Murdochb0fe1622011-05-05 13:52:32 +01001032 // Never crashes even if a is not a valid pointer.
1033 inline bool SafeContains(Address a);
Steve Blocka7e24c12009-10-30 11:49:00 +00001034
1035 // Given an address occupied by a live object, return that object if it is
1036 // in this space, or Failure::Exception() if it is not. The implementation
  // iterates over objects in the page containing the address, the cost is
  // linear in the number of objects in the page.  It may be slow.
  MUST_USE_RESULT MaybeObject* FindObject(Address addr);

  // Checks whether page is currently in use by this space.
  bool IsUsed(Page* page);

  void MarkAllPagesClean();

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // The top of allocation in a page in this space. Undefined if page is unused.
  Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top()
        : PageAllocationLimit(page);
  }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) = 0;

  void FlushTopPageWatermark() {
    AllocationTopPage()->SetCachedAllocationWatermark(top());
    AllocationTopPage()->InvalidateWatermark(true);
  }

  // Current capacity without growing (Size() + Available() + Waste()).
  intptr_t Capacity() { return accounting_stats_.Capacity(); }

  // Total amount of memory committed for this space.  For paged
  // spaces this equals the capacity.
  intptr_t CommittedMemory() { return Capacity(); }

  // Available bytes without growing.
  intptr_t Available() { return accounting_stats_.Available(); }

  // Allocated bytes in this space.
  virtual intptr_t Size() { return accounting_stats_.Size(); }

  // Wasted bytes due to fragmentation and not recoverable until the
  // next GC of this space.
  intptr_t Waste() { return accounting_stats_.Waste(); }

  // Returns the address of the first object in this space.
  Address bottom() { return first_page_->ObjectAreaStart(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top; }

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);

  virtual bool ReserveSpace(int bytes);

  // Used by ReserveSpace.
  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;

  // Free all pages in range from prev (exclusive) to last (inclusive).
  // Freed pages are moved to the end of page list.
  void FreePages(Page* prev, Page* last);

  // Deallocates a block.
  virtual void DeallocateBlock(Address start,
                               int size_in_bytes,
                               bool add_to_freelist) = 0;

  // Set space allocation info.
  void SetTop(Address top) {
    allocation_info_.top = top;
    allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
  }
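
  // Example (sketch, not from the original sources): SetTop() recomputes the
  // allocation limit from the page containing the new top, so a caller only
  // supplies the address.  Assuming 'space' is a concrete PagedSpace and
  // 'new_top' lies inside one of its pages:
  //
  //   space->SetTop(new_top);
  //   ASSERT(space->top() == new_top);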

  // ---------------------------------------------------------------------------
  // Mark-compact collection support functions

  // Set the relocation point to the beginning of the space.
  void MCResetRelocationInfo();

  // Writes relocation info to the top page.
  void MCWriteRelocationInfoToPage() {
    TopPageOf(mc_forwarding_info_)->
        SetAllocationWatermark(mc_forwarding_info_.top);
  }

  // Computes the offset of a given address in this space to the beginning
  // of the space.
  int MCSpaceOffsetForAddress(Address addr);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo() = 0;

  // Releases half of unused pages.
  void Shrink();

  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
  bool EnsureCapacity(int capacity);

#ifdef DEBUG
  // Print meta info and objects in this space.
  virtual void Print();

  // Verify integrity of this space.
  virtual void Verify(ObjectVisitor* visitor);

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject* obj) {}

  // Report code object related statistics
  void CollectCodeStatistics();
  static void ReportCodeStatistics();
  static void ResetCodeStatistics();
#endif

  // Returns the page of the allocation pointer.
  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }

  void RelinkPageListInChunkOrder(bool deallocate_blocks);

 protected:
  // Maximum capacity of this space.
  intptr_t max_capacity_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // The first page in this space.
  Page* first_page_;

  // The last page in this space.  Initially set in Setup, updated in
  // Expand and Shrink.
  Page* last_page_;

  // True if pages owned by this space are linked in chunk-order.
  // See comment for class MemoryAllocator for definition of chunk-order.
  bool page_list_is_chunk_ordered_;

  // Normal allocation information.
  AllocationInfo allocation_info_;

  // Relocation information during mark-compact collections.
  AllocationInfo mc_forwarding_info_;

  // Bytes of each page that cannot be allocated.  Possibly non-zero
  // for pages in spaces with only fixed-size objects.  Always zero
  // for pages in spaces with variable sized objects (those pages are
  // padded with free-list nodes).
  int page_extra_;

  // Sets allocation pointer to a page bottom.
  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);

  // Returns the top page specified by an allocation info structure.
  static Page* TopPageOf(AllocationInfo alloc_info) {
    return Page::FromAllocationTop(alloc_info.limit);
  }

  int CountPagesToTop() {
    Page* p = Page::FromAllocationTop(allocation_info_.top);
    PageIterator it(this, PageIterator::ALL_PAGES);
    int counter = 1;
    while (it.has_next()) {
      if (it.next() == p) return counter;
      counter++;
    }
    UNREACHABLE();
    return -1;
  }

  // Expands the space by allocating a fixed number of pages.  Returns false
  // if it cannot allocate the requested number of pages from the OS.  Newly
  // allocated pages are appended to last_page_.
  bool Expand(Page* last_page);

  // Generic fast case allocation function that tries linear allocation in
  // the top page of 'alloc_info'.  Returns NULL on failure.
  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
                                      int size_in_bytes);

  // During normal allocation or deserialization, roll to the next page in
  // the space (there is assumed to be one) and allocate there.  This
  // function is space-dependent.
  virtual HeapObject* AllocateInNextPage(Page* current_page,
                                         int size_in_bytes) = 0;

  // Slow path of AllocateRaw.  This function is space-dependent.
  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;

  // Slow path of MCAllocateRaw.
  MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);

#ifdef DEBUG
  // Returns the number of total pages in this space.
  int CountTotalPages();
#endif
 private:

  // Returns a pointer to the page of the relocation pointer.
  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }

  friend class PageIterator;
};


class NumberAndSizeInfo BASE_EMBEDDED {
 public:
  NumberAndSizeInfo() : number_(0), bytes_(0) {}

  int number() const { return number_; }
  void increment_number(int num) { number_ += num; }

  int bytes() const { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }

  void clear() {
    number_ = 0;
    bytes_ = 0;
  }

 private:
  int number_;
  int bytes_;
};


// HistogramInfo class for recording a single "bar" of a histogram.  This
// class is used for collecting statistics to print to the log file.
class HistogramInfo: public NumberAndSizeInfo {
 public:
  HistogramInfo() : NumberAndSizeInfo() {}

  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

 private:
  const char* name_;
};


// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
// A semispace is a contiguous chunk of memory. The mark-compact collector
// uses the memory in the from space as a marking stack when tracing live
// objects.

class SemiSpace : public Space {
 public:
  // Constructor.
  explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
    start_ = NULL;
    age_mark_ = NULL;
  }

  // Sets up the semispace using the given chunk.
  bool Setup(Address start, int initial_capacity, int maximum_capacity);

  // Tear down the space.  Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetup() { return start_ != NULL; }

  // Grow the size of the semispace by committing extra virtual memory.
  // Assumes that the caller has checked that the semispace has not reached
  // its maximum capacity (and thus there is space available in the reserved
  // address range to grow).
  bool Grow();

  // Grow the semispace to the new capacity.  The new capacity
  // requested must be larger than the current capacity.
  bool GrowTo(int new_capacity);

  // Shrinks the semispace to the new capacity.  The new capacity
  // requested must be more than the amount of used memory in the
  // semispace and less than the current capacity.
  bool ShrinkTo(int new_capacity);

  // Returns the start address of the space.
  Address low() { return start_; }
  // Returns one past the end address of the space.
  Address high() { return low() + capacity_; }

  // Age mark accessors.
  Address age_mark() { return age_mark_; }
  void set_age_mark(Address mark) { age_mark_ = mark; }

  // True if the address is in the address range of this semispace (not
  // necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
           == reinterpret_cast<uintptr_t>(start_);
  }
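
  // Example (sketch, not from the original sources): containment is a single
  // mask compare.  With an illustrative 2 MB semispace starting at 0x3f800000,
  // address_mask_ == ~(2 MB - 1), so for a == 0x3f812345:
  //   (a & address_mask_) == 0x3f800000 == start_, i.e. Contains(a) is true.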

  // True if the object is a heap object in the address range of this
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Object* o) {
    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
  }

  // The offset of an address from the beginning of the space.
  int SpaceOffsetForAddress(Address addr) {
    return static_cast<int>(addr - low());
  }

  // If we don't have these here then SemiSpace will be abstract.  However
  // they should never be called.
  virtual intptr_t Size() {
    UNREACHABLE();
    return 0;
  }

  virtual bool ReserveSpace(int bytes) {
    UNREACHABLE();
    return false;
  }

  bool is_committed() { return committed_; }
  bool Commit();
  bool Uncommit();

#ifdef DEBUG
  virtual void Print();
  virtual void Verify();
#endif

  // Returns the current capacity of the semi space.
  int Capacity() { return capacity_; }

  // Returns the maximum capacity of the semi space.
  int MaximumCapacity() { return maximum_capacity_; }

  // Returns the initial capacity of the semi space.
  int InitialCapacity() { return initial_capacity_; }

 private:
  // The current and maximum capacity of the space.
  int capacity_;
  int maximum_capacity_;
  int initial_capacity_;

  // The start address of the space.
  Address start_;
  // Used to govern object promotion during mark-compact collection.
  Address age_mark_;

  // Masks and comparison values to test for containment in this semispace.
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  bool committed_;

 public:
  TRACK_MEMORY("SemiSpace")
};


// A SemiSpaceIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space.  It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace.  New objects allocated after the
// iterator is created are not iterated.
class SemiSpaceIterator : public ObjectIterator {
 public:
  // Create an iterator over the objects in the given space.  If no start
  // address is given, the iterator starts from the bottom of the space.  If
  // no size function is given, the iterator calls Object::Size().
  explicit SemiSpaceIterator(NewSpace* space);
  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
  SemiSpaceIterator(NewSpace* space, Address start);

  HeapObject* next() {
    if (current_ == limit_) return NULL;

    HeapObject* object = HeapObject::FromAddress(current_);
    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);

    current_ += size;
    return object;
  }

  // Implementation of the ObjectIterator functions.
  virtual HeapObject* next_object() { return next(); }

 private:
  void Initialize(NewSpace* space, Address start, Address end,
                  HeapObjectCallback size_func);

  // The semispace.
  SemiSpace* space_;
  // The current iteration point.
  Address current_;
  // The end of iteration.
  Address limit_;
  // The callback function.
  HeapObjectCallback size_func_;
};
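
// Example usage (sketch, not from the original sources): walk every object
// currently in the active semispace.  Assumes a Heap* 'heap' exposing a
// new_space() accessor, as elsewhere in the heap code.
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
//     // ... inspect 'object'; avoid allocating while iterating ...
//   }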


// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces.  It simply
// forwards most functions to the appropriate semispace.

class NewSpace : public Space {
 public:
  // Constructor.
  explicit NewSpace(Heap* heap)
    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
      to_space_(heap),
      from_space_(heap) {}

  // Sets up the new space using the given chunk.
  bool Setup(Address start, int size);

  // Tears down the space.  Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetup() {
    return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
  }

  // Flip the pair of spaces.
  void Flip();

  // Grow the capacity of the semispaces.  Assumes that they are not at
  // their maximum capacity.
  void Grow();

  // Shrink the capacity of the semispaces.
  void Shrink();

  // True if the address or object lies in the address range of either
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
        == reinterpret_cast<uintptr_t>(start_);
  }
  bool Contains(Object* o) {
    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
  }

  // Return the allocated bytes in the active semispace.
  virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
  // The same, but returning an int.  We have to have the one that returns
  // intptr_t because it is inherited, but if we know we are dealing with the
  // new space, which can't get as big as the other spaces then this is useful:
  int SizeAsInt() { return static_cast<int>(Size()); }

  // Return the current capacity of a semispace.
  intptr_t Capacity() {
    ASSERT(to_space_.Capacity() == from_space_.Capacity());
    return to_space_.Capacity();
  }

  // Return the total amount of memory committed for new space.
  intptr_t CommittedMemory() {
    if (from_space_.is_committed()) return 2 * Capacity();
    return Capacity();
  }

  // Return the available bytes without growing in the active semispace.
  intptr_t Available() { return Capacity() - Size(); }

  // Return the maximum capacity of a semispace.
  int MaximumCapacity() {
    ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
    return to_space_.MaximumCapacity();
  }

  // Returns the initial capacity of a semispace.
  int InitialCapacity() {
    ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
    return to_space_.InitialCapacity();
  }

  // Return the address of the allocation pointer in the active semispace.
  Address top() { return allocation_info_.top; }
  // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.low(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The start address of the space and a bit mask.  Anding an address in the
  // new space with the mask will result in the start address.
  Address start() { return start_; }
  uintptr_t mask() { return address_mask_; }
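
  // Example (sketch, not from the original sources): masking recovers the
  // space start from any new-space address, which is how new-space pointers
  // can be recognized cheaply.  With an illustrative 4 MB new space at
  // 0x28000000:
  //   mask() == ~(4 MB - 1), and 0x28123456 & mask() == 0x28000000 == start()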

  // The allocation top and limit addresses.
  Address* allocation_top_address() { return &allocation_info_.top; }
  Address* allocation_limit_address() { return &allocation_info_.limit; }

  MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &allocation_info_);
  }

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
  }

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();
  // Reset the relocation pointer to the bottom of the inactive semispace in
  // preparation for mark-compact collection.
  void MCResetRelocationInfo();
  // Update the allocation pointer in the active semispace after a
  // mark-compact collection.
  void MCCommitRelocationInfo();

  // Get the extent of the inactive semispace (for use as a marking stack).
  Address FromSpaceLow() { return from_space_.low(); }
  Address FromSpaceHigh() { return from_space_.high(); }

  // Get the extent of the active semispace (to sweep newly copied objects
  // during a scavenge collection).
  Address ToSpaceLow() { return to_space_.low(); }
  Address ToSpaceHigh() { return to_space_.high(); }

  // Offsets from the beginning of the semispaces.
  int ToSpaceOffsetForAddress(Address a) {
    return to_space_.SpaceOffsetForAddress(a);
  }
  int FromSpaceOffsetForAddress(Address a) {
    return from_space_.SpaceOffsetForAddress(a);
  }

  // True if the object is a heap object in the address range of the
  // respective semispace (not necessarily below the allocation pointer of the
  // semispace).
  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }

  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }

  virtual bool ReserveSpace(int bytes);

  // Resizes a sequential string which must be the most recent thing that was
  // allocated in new space.
  template <typename StringType>
  inline void ShrinkStringAtAllocationBoundary(String* string, int len);

#ifdef DEBUG
  // Verify the active semispace.
  virtual void Verify();
  // Print the active semispace.
  virtual void Print() { to_space_.Print(); }
#endif

  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object.  Note that we don't
  // record every single allocation, but only those that happen in the
  // to space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

 private:
  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;

  // Start address and bit mask for containment testing.
  Address start_;
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;
  AllocationInfo mc_forwarding_info_;

  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;

  // Implementation of AllocateRaw and MCAllocateRaw.
  MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
      int size_in_bytes,
      AllocationInfo* alloc_info);

  friend class SemiSpaceIterator;

 public:
  TRACK_MEMORY("NewSpace")
};


// -----------------------------------------------------------------------------
// Free lists for old object spaces
//
// Free-list nodes are free blocks in the heap.  They look like heap objects
// (free-list node pointers have the heap object tag, and they have a map like
// a heap object).  They have a size and a next pointer.  The next pointer is
// the raw address of the next free list node (or NULL).
class FreeListNode: public HeapObject {
 public:
  // Obtain a free-list node from a raw address.  This is not a cast because
  // it does not check nor require that the first word at the address is a map
  // pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  static inline bool IsFreeListNode(HeapObject* object);

  // Set the size in bytes, which can be read with HeapObject::Size().  This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // functions.
  void set_size(Heap* heap, int size_in_bytes);

  // Accessors for the next field.
  inline Address next(Heap* heap);
  inline void set_next(Heap* heap, Address next);

 private:
  static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};
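
// Example (sketch, not from the original sources): turning a freed block into
// a free-list node so that heap iteration can step over it.  'heap', 'start',
// 'size_in_bytes' and 'old_head' are assumed to come from the owning space.
//
//   FreeListNode* node = FreeListNode::FromAddress(start);
//   node->set_size(heap, size_in_bytes);  // block now looks like a HeapObject
//   node->set_next(heap, old_head);       // 'old_head' is an Address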


// The free list for the old space.
class OldSpaceFreeList BASE_EMBEDDED {
 public:
  OldSpaceFreeList(Heap* heap, AllocationSpace owner);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  intptr_t available() { return available_; }

  // Place a node on the free list.  The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list.  The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block.  Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed.  The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);
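
  // Example (sketch, not from the original sources): returning a block to the
  // list and charging the reported waste to the space's accounting stats, as
  // OldSpace::Free() below does:
  //
  //   int wasted = free_list_.Free(start, size_in_bytes);
  //   accounting_stats_.WasteBytes(wasted);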

  // Allocate a block of size 'size_in_bytes' from the free list.  The block
  // is uninitialized.  A failure is returned if no block is available.  The
  // number of bytes lost to fragmentation is returned in the output parameter
  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
  MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);

  void MarkNodes();

 private:
  // The size range of blocks, in bytes. (Smaller allocations are allowed, but
  // will always result in waste.)
  static const int kMinBlockSize = 2 * kPointerSize;
  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;

  Heap* heap_;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  // Total available bytes in all blocks on this free list.
  int available_;

  // Blocks are put on exact free lists in an array, indexed by size in words.
  // The available sizes are kept in an increasingly ordered list. Entries
  // corresponding to sizes < kMinBlockSize always have an empty free list
  // (but index kHead is used for the head of the size list).
  struct SizeNode {
    // Address of the head FreeListNode of the implied block size or NULL.
    Address head_node_;
    // Size (words) of the next larger available size if head_node_ != NULL.
    int next_size_;
  };
  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
  SizeNode free_[kFreeListsLength];

  // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
  static const int kHead = kMinBlockSize / kPointerSize - 1;
  static const int kEnd = kMaxInt;

  // We keep a "finger" in the size list to speed up a common pattern:
  // repeated requests for the same or increasing sizes.
  int finger_;

  // Starting from *prev, find and return the smallest size >= index (words),
  // or kEnd. Update *prev to be the largest size < index, or kHead.
  int FindSize(int index, int* prev) {
    int cur = free_[*prev].next_size_;
    while (cur < index) {
      *prev = cur;
      cur = free_[cur].next_size_;
    }
    return cur;
  }

  // Remove an existing element from the size list.
  void RemoveSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur == index);
    free_[prev].next_size_ = free_[cur].next_size_;
    finger_ = prev;
  }

  // Insert a new element into the size list.
  void InsertSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur != index);
    free_[prev].next_size_ = index;
    free_[index].next_size_ = cur;
  }
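
  // Example (sketch, not from the original sources): finding a block of at
  // least 10 words when the size list currently holds {4, 8, 16, 32}.
  // Starting with prev == kHead, FindSize(10, &prev) walks 4 -> 8 -> 16,
  // returns 16 and leaves prev == 8; the allocator can then take a 16-word
  // block and reinsert any unused remainder.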

  // The size list is not updated during a sequence of calls to Free, but is
  // rebuilt before the next allocation.
  void RebuildSizeList();
  bool needs_rebuild_;

#ifdef DEBUG
  // Does this free list contain a free block located at the address of 'node'?
  bool Contains(FreeListNode* node);
#endif

  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
};


// The free list for the map space.
class FixedSizeFreeList BASE_EMBEDDED {
 public:
  FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  intptr_t available() { return available_; }

  // Place a node on the free list.  The block starting at 'start' (assumed to
  // have size object_size_) is placed on the free list.  Bookkeeping
  // information will be written to the block, i.e., its contents will be
  // destroyed.  The start address should be word aligned.
  void Free(Address start);

  // Allocate a fixed sized block from the free list.  The block is
  // uninitialized.  A failure is returned if no block is available.
  MUST_USE_RESULT MaybeObject* Allocate();

  void MarkNodes();

 private:

  Heap* heap_;

  // Available bytes on the free list.
  intptr_t available_;

  // The head of the free list.
  Address head_;

  // The tail of the free list.
  Address tail_;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  // The size of the objects in this space.
  int object_size_;

  DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
};


// -----------------------------------------------------------------------------
// Old object space (excluding map objects)

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object with a given maximum capacity.
  // The constructor does not allocate pages from OS.
  OldSpace(Heap* heap,
           intptr_t max_capacity,
           AllocationSpace id,
           Executability executable)
      : PagedSpace(heap, max_capacity, id, executable),
        free_list_(heap, id) {
    page_extra_ = 0;
  }

  // The bytes available on the free list (i.e., not above the linear
  // allocation pointer).
  intptr_t AvailableFree() { return free_list_.available(); }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) {
    return page->ObjectAreaEnd();
  }

  // Give a block of memory to the space's free list.  It might be added to
  // the free list or accounted as waste.
  // If add_to_freelist is false then just accounting stats are updated and
  // no attempt to add area to free list is made.
  void Free(Address start, int size_in_bytes, bool add_to_freelist) {
    accounting_stats_.DeallocateBytes(size_in_bytes);

    if (add_to_freelist) {
      int wasted_bytes = free_list_.Free(start, size_in_bytes);
      accounting_stats_.WasteBytes(wasted_bytes);
    }
  }

  virtual void DeallocateBlock(Address start,
                               int size_in_bytes,
                               bool add_to_freelist);

  // Prepare for full garbage collection.  Resets the relocation pointer and
  // clears the free list.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo();

  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);

  void MarkFreeListNodes() { free_list_.MarkNodes(); }

#ifdef DEBUG
  // Reports statistics for the space
  void ReportStatistics();
#endif

 protected:
  // Virtual function in the superclass.  Slow path of AllocateRaw.
  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass.  Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

 private:
  // The space's free list.
  OldSpaceFreeList free_list_;

 public:
  TRACK_MEMORY("OldSpace")
};


// -----------------------------------------------------------------------------
// Old space for objects of a fixed size

class FixedSpace : public PagedSpace {
 public:
  FixedSpace(Heap* heap,
             intptr_t max_capacity,
             AllocationSpace id,
             int object_size_in_bytes,
             const char* name)
      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
        object_size_in_bytes_(object_size_in_bytes),
        name_(name),
        free_list_(heap, id, object_size_in_bytes) {
    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
  }
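
  // Example (sketch, not from the original sources): page_extra_ is the tail
  // of each page that can never hold an object of this size.  With an
  // illustrative 8000-byte object area and 96-byte objects:
  //   8000 % 96 == 32, so 32 bytes at the end of every page are excluded from
  //   the allocation limit (see PageAllocationLimit() below).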

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) {
    return page->ObjectAreaEnd() - page_extra_;
  }

  int object_size_in_bytes() { return object_size_in_bytes_; }

  // Give a fixed sized block of memory to the space's free list.
  // If add_to_freelist is false then just accounting stats are updated and
  // no attempt to add area to free list is made.
  void Free(Address start, bool add_to_freelist) {
    if (add_to_freelist) {
      free_list_.Free(start);
    }
    accounting_stats_.DeallocateBytes(object_size_in_bytes_);
  }

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo();

  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);

  virtual void DeallocateBlock(Address start,
                               int size_in_bytes,
                               bool add_to_freelist);

  void MarkFreeListNodes() { free_list_.MarkNodes(); }

#ifdef DEBUG
  // Reports statistic info of the space
  void ReportStatistics();
#endif

 protected:
  // Virtual function in the superclass.  Slow path of AllocateRaw.
  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass.  Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

  void ResetFreeList() {
    free_list_.Reset();
  }

 private:
  // The size of objects in this space.
  int object_size_in_bytes_;

  // The name of this space.
  const char* name_;

  // The space's free list.
  FixedSizeFreeList free_list_;
};


// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public FixedSpace {
 public:
  // Creates a map space object with a maximum capacity.
  MapSpace(Heap* heap,
           intptr_t max_capacity,
           int max_map_space_pages,
           AllocationSpace id)
      : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
        max_map_space_pages_(max_map_space_pages) {
    ASSERT(max_map_space_pages < kMaxMapPageIndex);
  }

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Given an index, returns the page address.
  Address PageAddress(int page_index) { return page_addresses_[page_index]; }

  static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;

  // Are map pointers encodable into map word?
  bool MapPointersEncodable() {
    if (!FLAG_use_big_map_space) {
      ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
      return true;
    }
    return CountPagesToTop() <= max_map_space_pages_;
  }

  // Should be called after forced sweep to find out if map space needs
  // compaction.
  bool NeedsCompaction(int live_maps) {
    return !MapPointersEncodable() && live_maps <= CompactionThreshold();
  }

  Address TopAfterCompaction(int live_maps) {
    ASSERT(NeedsCompaction(live_maps));

    int pages_left = live_maps / kMapsPerPage;
    PageIterator it(this, PageIterator::ALL_PAGES);
    while (pages_left-- > 0) {
      ASSERT(it.has_next());
      it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
    }
    ASSERT(it.has_next());
    Page* top_page = it.next();
    top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
    ASSERT(top_page->is_valid());

    int offset = live_maps % kMapsPerPage * Map::kSize;
    Address top = top_page->ObjectAreaStart() + offset;
    ASSERT(top < top_page->ObjectAreaEnd());
    ASSERT(Contains(top));

    return top;
  }
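
  // Example (sketch, not from the original sources): with an illustrative
  // kMapsPerPage of 80 and live_maps == 250, compaction keeps 250 / 80 == 3
  // full pages of maps and the returned top lands at offset
  // (250 % 80) * Map::kSize == 10 * Map::kSize into the fourth page.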

  void FinishCompaction(Address new_top, int live_maps) {
    Page* top_page = Page::FromAddress(new_top);
    ASSERT(top_page->is_valid());

    SetAllocationInfo(&allocation_info_, top_page);
    allocation_info_.top = new_top;

    int new_size = live_maps * Map::kSize;
    accounting_stats_.DeallocateBytes(accounting_stats_.Size());
    accounting_stats_.AllocateBytes(new_size);

    // Flush allocation watermarks.
    for (Page* p = first_page_; p != top_page; p = p->next_page()) {
      p->SetAllocationWatermark(p->AllocationTop());
    }
    top_page->SetAllocationWatermark(new_top);

#ifdef DEBUG
    if (FLAG_enable_slow_asserts) {
      intptr_t actual_size = 0;
      for (Page* p = first_page_; p != top_page; p = p->next_page())
        actual_size += kMapsPerPage * Map::kSize;
      actual_size += (new_top - top_page->ObjectAreaStart());
      ASSERT(accounting_stats_.Size() == actual_size);
    }
#endif

    Shrink();
    ResetFreeList();
  }

 protected:
#ifdef DEBUG
  virtual void VerifyObject(HeapObject* obj);
#endif

 private:
  static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;

  // Do map space compaction if there is a page gap.
  int CompactionThreshold() {
    return kMapsPerPage * (max_map_space_pages_ - 1);
  }

  const int max_map_space_pages_;

  // An array of page start addresses in a map space.
  Address page_addresses_[kMaxMapPageIndex];

 public:
  TRACK_MEMORY("MapSpace")
};


// -----------------------------------------------------------------------------
// Old space for all global object property cell objects

class CellSpace : public FixedSpace {
 public:
  // Creates a property cell space object with a maximum capacity.
  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
      : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
  {}

 protected:
#ifdef DEBUG
  virtual void VerifyObject(HeapObject* obj);
#endif

 public:
  TRACK_MEMORY("CellSpace")
};


// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space.  A large object is allocated from the OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at Page::kObjectStartOffset into a page.
// Large objects do not move during garbage collections.

// A LargeObjectChunk holds exactly one large object page with exactly one
// large object.
class LargeObjectChunk {
 public:
  // Allocates a new LargeObjectChunk that contains a large object page
  // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
  // object) bytes after the object area start of that page.
  static LargeObjectChunk* New(int size_in_bytes, Executability executable);

  // Free the memory associated with the chunk.
  inline void Free(Executability executable);

  // Interpret a raw address as a large object chunk.
  static LargeObjectChunk* FromAddress(Address address) {
    return reinterpret_cast<LargeObjectChunk*>(address);
  }

  // Returns the address of this chunk.
  Address address() { return reinterpret_cast<Address>(this); }

  // Accessors for the fields of the chunk.
  LargeObjectChunk* next() { return next_; }
  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
  size_t size() { return size_ & ~Page::kPageFlagMask; }

  // Compute the start address in the chunk.
  inline Address GetStartAddress();

  // Returns the object in this chunk.
  HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }

  // Given a requested size returns the physical size of a chunk to be
  // allocated.
  static int ChunkSizeFor(int size_in_bytes);

  // Given a chunk size, returns the object size it can accommodate.  Used by
  // LargeObjectSpace::Available.
  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }
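
  // Example (sketch, not from the original sources): ObjectSizeFor() undoes
  // the padding added when a chunk is allocated.  With an illustrative 8 KB
  // page size and a 256-byte object start offset, a 128 KB chunk can hold an
  // object of at most 131072 - 8192 - 256 == 122624 bytes.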

 private:
  // A pointer to the next large object chunk in the space or NULL.
  LargeObjectChunk* next_;

  // The total size of this chunk.
  size_t size_;

 public:
  TRACK_MEMORY("LargeObjectChunk")
};


class LargeObjectSpace : public Space {
 public:
  LargeObjectSpace(Heap* heap, AllocationSpace id);
  virtual ~LargeObjectSpace() {}

  // Initializes internal data structures.
  bool Setup();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  // Allocates a (non-FixedArray, non-Code) large object.
  MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
  // Allocates a large Code object.
  MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
  // Allocates a large FixedArray.
  MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);

  // Available bytes for objects in this space.
  inline intptr_t Available();

  virtual intptr_t Size() {
    return size_;
  }

  virtual intptr_t SizeOfObjects() {
    return objects_size_;
  }

  int PageCount() {
    return page_count_;
  }

  // Finds an object for a given address, returns Failure::Exception()
  // if it is not found.  The function iterates through all objects in this
  // space, so it may be slow.
  MaybeObject* FindObject(Address a);

  // Finds a large object page containing the given pc, returns NULL
  // if such a page doesn't exist.
  LargeObjectChunk* FindChunkContainingPc(Address pc);

  // Iterates objects covered by dirty regions.
  void IterateDirtyRegions(ObjectSlotCallback func);

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_chunk_ == NULL; }

  // See the comments for ReserveSpace in the Space class.  This has to be
  // called after ReserveSpace has been called on the paged spaces, since they
  // may use some memory, leaving less for large objects.
  virtual bool ReserveSpace(int bytes);

#ifdef DEBUG
  virtual void Verify();
  virtual void Print();
  void ReportStatistics();
  void CollectCodeStatistics();
#endif
  // Checks whether an address is in the object area in this space.  It
  // iterates all objects in the space.  May be slow.
  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }

 private:
  // The head of the linked list of large object chunks.
  LargeObjectChunk* first_chunk_;
  intptr_t size_;  // allocated bytes
  int page_count_;  // number of chunks
  intptr_t objects_size_;  // size of objects

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
                                                   int object_size,
                                                   Executability executable);

  friend class LargeObjectIterator;

 public:
  TRACK_MEMORY("LargeObjectSpace")
};


class LargeObjectIterator: public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);
  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);

  HeapObject* next();

  // Implementation of ObjectIterator.
  virtual HeapObject* next_object() { return next(); }

 private:
  LargeObjectChunk* current_;
  HeapObjectCallback size_func_;
};


#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif


} }  // namespace v8::internal

#endif  // V8_SPACES_H_