// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "allocation.h"
#include "list.h"
#include "log.h"

namespace v8 {
namespace internal {

class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces each consist of a list of pages. A page has a page header and an
// object area. The page size is deliberately chosen to be 8K bytes.
// The first word of a page is an opaque page header that holds the address of
// the next page and its ownership information. The second word may hold the
// allocation top address of this page. Heap objects are aligned to the
// pointer size.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in the large object
// space may be larger than 8K.
//
// A card marking write barrier is used to keep track of intergenerational
// references. Old space pages are divided into regions of Page::kRegionSize
// bytes. Each region has a corresponding dirty bit in the page header which is
// set if the region might contain pointers to new space. For details about
// the dirty bit encoding see the comments in the
// Page::GetRegionNumberForAddress() method body.
//
// During scavenges and mark-sweep collections we iterate intergenerational
// pointers without decoding heap object maps, so if a page belongs to the old
// pointer space or the large object space it is essential to guarantee that
// the page does not contain any garbage pointers to new space: every
// pointer-aligned word which satisfies the Heap::InNewSpace() predicate must
// be a pointer to a live heap object in new space. Thus objects in the old
// pointer and large object spaces should have a special layout (e.g. no bare
// integer fields). This requirement does not apply to the map space, which is
// iterated in a special fashion. However, we still require pointer fields of
// dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we use the notion of an
// allocation watermark. Every pointer below the watermark is considered to be
// well formed. The page allocation watermark is not necessarily equal to the
// page allocation top, but all live objects on the page should reside below
// the allocation watermark. During a scavenge the allocation watermark might
// be bumped and invalid pointers might appear below it. To avoid following
// them we store a valid watermark in a special field in the page header and
// set the page's WATERMARK_INVALIDATED flag. For details see the comments in
// the Page::SetAllocationWatermark() method body.
//

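// Illustrative sketch (not part of the original source): the write barrier
// described above conceptually records a store of a new space pointer into an
// old space slot as
//
//   if (heap->InNewSpace(value)) {
//     Page::FromAddress(slot)->MarkRegionDirty(slot);
//   }
//
// so that the dirty region is rescanned for pointers to new space during the
// next scavenge.
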
// Some assertion macros used in the debugging mode.

#define ASSERT_PAGE_ALIGNED(address)                                           \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address)                                         \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_MAP_ALIGNED(address)                                            \
  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size)                                               \
  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset)                                             \
  ASSERT((Page::kObjectStartOffset <= offset)                                  \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index)                                           \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;

// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
// address is always aligned to the 8K page size.
//
// Each page starts with a header of size Page::kPageHeaderSize which contains
// bookkeeping data.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page {
Steve Blocka7e24c12009-10-30 11:49:00 +0000133 public:
134 // Returns the page containing a given address. The address ranges
135 // from [page_addr .. page_addr + kPageSize[
Ben Murdoch85b71792012-04-11 18:30:58 +0100136 //
137 // Note that this function only works for addresses in normal paged
138 // spaces and addresses in the first 8K of large object pages (i.e.,
139 // the start of large objects but not necessarily derived pointers
140 // within them).
Steve Blocka7e24c12009-10-30 11:49:00 +0000141 INLINE(static Page* FromAddress(Address a)) {
142 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
143 }
144
  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
149 INLINE(static Page* FromAllocationTop(Address top)) {
150 Page* p = FromAddress(top - kPointerSize);
Ben Murdoch85b71792012-04-11 18:30:58 +0100151 ASSERT_PAGE_OFFSET(p->Offset(top));
Steve Blocka7e24c12009-10-30 11:49:00 +0000152 return p;
153 }
154
Ben Murdoch85b71792012-04-11 18:30:58 +0100155 // Returns the start address of this page.
156 Address address() { return reinterpret_cast<Address>(this); }
157
158 // Checks whether this is a valid page address.
159 bool is_valid() { return address() != NULL; }
160
161 // Returns the next page of this page.
Steve Blocka7e24c12009-10-30 11:49:00 +0000162 inline Page* next_page();
Ben Murdoch85b71792012-04-11 18:30:58 +0100163
164 // Return the end of allocation in this page. Undefined for unused pages.
165 inline Address AllocationTop();
166
167 // Return the allocation watermark for the page.
168 // For old space pages it is guaranteed that the area under the watermark
169 // does not contain any garbage pointers to new space.
170 inline Address AllocationWatermark();
171
172 // Return the allocation watermark offset from the beginning of the page.
173 inline uint32_t AllocationWatermarkOffset();
174
175 inline void SetAllocationWatermark(Address allocation_watermark);
176
177 inline void SetCachedAllocationWatermark(Address allocation_watermark);
178 inline Address CachedAllocationWatermark();
179
180 // Returns the start address of the object area in this page.
181 Address ObjectAreaStart() { return address() + kObjectStartOffset; }
182
183 // Returns the end address (exclusive) of the object area in this page.
184 Address ObjectAreaEnd() { return address() + Page::kPageSize; }
Steve Blocka7e24c12009-10-30 11:49:00 +0000185
Steve Blocka7e24c12009-10-30 11:49:00 +0000186 // Checks whether an address is page aligned.
187 static bool IsAlignedToPageSize(Address a) {
188 return 0 == (OffsetFrom(a) & kPageAlignmentMask);
189 }
190
Ben Murdoch85b71792012-04-11 18:30:58 +0100191 // True if this page was in use before current compaction started.
192 // Result is valid only for pages owned by paged spaces and
193 // only after PagedSpace::PrepareForMarkCompact was called.
194 inline bool WasInUseBeforeMC();
195
196 inline void SetWasInUseBeforeMC(bool was_in_use);
197
198 // True if this page is a large object page.
199 inline bool IsLargeObjectPage();
200
201 inline void SetIsLargeObjectPage(bool is_large_object_page);
202
203 inline Executability PageExecutability();
204
205 inline void SetPageExecutability(Executability executable);
206
Steve Blocka7e24c12009-10-30 11:49:00 +0000207 // Returns the offset of a given address to this page.
208 INLINE(int Offset(Address a)) {
Steve Blockd0582a62009-12-15 09:54:21 +0000209 int offset = static_cast<int>(a - address());
Ben Murdoch85b71792012-04-11 18:30:58 +0100210 ASSERT_PAGE_OFFSET(offset);
Steve Blocka7e24c12009-10-30 11:49:00 +0000211 return offset;
212 }
213
  // Returns the address for a given offset into this page.
215 Address OffsetToAddress(int offset) {
216 ASSERT_PAGE_OFFSET(offset);
217 return address() + offset;
218 }
219
220 // ---------------------------------------------------------------------
Ben Murdoch85b71792012-04-11 18:30:58 +0100221 // Card marking support
222
223 static const uint32_t kAllRegionsCleanMarks = 0x0;
224 static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
225
226 inline uint32_t GetRegionMarks();
227 inline void SetRegionMarks(uint32_t dirty);
228
229 inline uint32_t GetRegionMaskForAddress(Address addr);
230 inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
231 inline int GetRegionNumberForAddress(Address addr);
232
233 inline void MarkRegionDirty(Address addr);
234 inline bool IsRegionDirty(Address addr);
235
236 inline void ClearRegionMarks(Address start,
237 Address end,
238 bool reaches_limit);
Steve Blocka7e24c12009-10-30 11:49:00 +0000239
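  // Illustrative example (not part of the original source): with
  // kRegionSizeLog2 == 8 every region covers 256 bytes, so an 8K page has
  // exactly 32 regions, one per bit of the 32-bit dirty marks word.
  // Conceptually, marking the region of an address dirty amounts to
  //
  //   int region = GetRegionNumberForAddress(addr);        // 0..31
  //   SetRegionMarks(GetRegionMarks() | (1 << region));    // MarkRegionDirty
  //
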
Steve Blocka7e24c12009-10-30 11:49:00 +0000240 // Page size in bytes. This must be a multiple of the OS page size.
241 static const int kPageSize = 1 << kPageSizeBits;
242
243 // Page size mask.
244 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
245
Ben Murdoch85b71792012-04-11 18:30:58 +0100246 static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
247 kIntSize + kPointerSize + kPointerSize;
248
249 // The start offset of the object area in a page. Aligned to both maps and
250 // code alignment to be suitable for both.
251 static const int kObjectStartOffset =
252 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));
253
254 // Object area size in bytes.
255 static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
256
257 // Maximum object size that fits in a page.
258 static const int kMaxHeapObjectSize = kObjectAreaSize;
259
260 static const int kDirtyFlagOffset = 2 * kPointerSize;
261 static const int kRegionSizeLog2 = 8;
262 static const int kRegionSize = 1 << kRegionSizeLog2;
263 static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
264
265 STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
266
267 enum PageFlag {
268 IS_NORMAL_PAGE = 0,
269 WAS_IN_USE_BEFORE_MC,
270
    // Page allocation watermark was bumped by preallocation during scavenge.
    // The correct watermark can be retrieved by the CachedAllocationWatermark()
    // method.
273 WATERMARK_INVALIDATED,
274 IS_EXECUTABLE,
275 NUM_PAGE_FLAGS // Must be last
276 };
277 static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
278
279 // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
280 // scavenge we just invalidate the watermark on each old space page after
281 // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED
282 // flag at the beginning of the next scavenge and each page becomes marked as
283 // having a valid watermark.
284 //
  // The following invariant must hold for pages in old pointer and map spaces:
  // if a page is in use then it is marked as having an invalid watermark at
  // the beginning and at the end of any GC.
288 //
289 // This invariant guarantees that after flipping flag meaning at the
290 // beginning of scavenge all pages in use will be marked as having valid
291 // watermark.
292 static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
293
294 // Returns true if the page allocation watermark was not altered during
295 // scavenge.
296 inline bool IsWatermarkValid();
297
298 inline void InvalidateWatermark(bool value);
299
300 inline bool GetPageFlag(PageFlag flag);
301 inline void SetPageFlag(PageFlag flag, bool value);
302 inline void ClearPageFlags();
303
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100304 inline void ClearGCFields();
305
Ben Murdoch85b71792012-04-11 18:30:58 +0100306 static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
307 static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
308 static const uint32_t kAllocationWatermarkOffsetMask =
309 ((1 << kAllocationWatermarkOffsetBits) - 1) <<
310 kAllocationWatermarkOffsetShift;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100311
Ben Murdoch85b71792012-04-11 18:30:58 +0100312 static const uint32_t kFlagsMask =
313 ((1 << kAllocationWatermarkOffsetShift) - 1);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100314
Ben Murdoch85b71792012-04-11 18:30:58 +0100315 STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
316 kAllocationWatermarkOffsetBits);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100317
Ben Murdoch85b71792012-04-11 18:30:58 +0100318 //---------------------------------------------------------------------------
319 // Page header description.
320 //
321 // If a page is not in the large object space, the first word,
322 // opaque_header, encodes the next page address (aligned to kPageSize 8K)
323 // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
324 // opaque_header. The value range of the opaque_header is [0..kPageSize[,
325 // or [next_page_start, next_page_end[. It cannot point to a valid address
326 // in the current page. If a page is in the large object space, the first
327 // word *may* (if the page start and large object chunk start are the
328 // same) contain the address of the next large object chunk.
329 intptr_t opaque_header;
Steve Blocka7e24c12009-10-30 11:49:00 +0000330
Ben Murdoch85b71792012-04-11 18:30:58 +0100331 // If the page is not in the large object space, the low-order bit of the
332 // second word is set. If the page is in the large object space, the
333 // second word *may* (if the page start and large object chunk start are
334 // the same) contain the large object chunk size. In either case, the
335 // low-order bit for large object pages will be cleared.
336 // For normal pages this word is used to store page flags and
337 // offset of allocation top.
338 intptr_t flags_;
Steve Blocka7e24c12009-10-30 11:49:00 +0000339
Ben Murdoch85b71792012-04-11 18:30:58 +0100340 // This field contains dirty marks for regions covering the page. Only dirty
341 // regions might contain intergenerational references.
342 // Only 32 dirty marks are supported so for large object pages several regions
343 // might be mapped to a single dirty mark.
344 uint32_t dirty_regions_;
Steve Blocka7e24c12009-10-30 11:49:00 +0000345
Ben Murdoch85b71792012-04-11 18:30:58 +0100346 // The index of the page in its owner space.
347 int mc_page_index;
348
349 // During mark-compact collections this field contains the forwarding address
350 // of the first live object in this page.
351 // During scavenge collection this field is used to store allocation watermark
352 // if it is altered during scavenge.
353 Address mc_first_forwarded;
354
355 Heap* heap_;
Steve Blocka7e24c12009-10-30 11:49:00 +0000356};
357
358
359// ----------------------------------------------------------------------------
360// Space is the abstract superclass for all allocation spaces.
361class Space : public Malloced {
362 public:
Steve Block44f0eee2011-05-26 01:26:41 +0100363 Space(Heap* heap, AllocationSpace id, Executability executable)
364 : heap_(heap), id_(id), executable_(executable) {}
Steve Blocka7e24c12009-10-30 11:49:00 +0000365
366 virtual ~Space() {}
367
Steve Block44f0eee2011-05-26 01:26:41 +0100368 Heap* heap() const { return heap_; }
369
Steve Blocka7e24c12009-10-30 11:49:00 +0000370 // Does the space need executable memory?
371 Executability executable() { return executable_; }
372
373 // Identity used in error reporting.
374 AllocationSpace identity() { return id_; }
375
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -0800376 // Returns allocated size.
Ben Murdochf87a2032010-10-22 12:50:53 +0100377 virtual intptr_t Size() = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +0000378
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -0800379 // Returns size of objects. Can differ from the allocated size
380 // (e.g. see LargeObjectSpace).
381 virtual intptr_t SizeOfObjects() { return Size(); }
382
Steve Blocka7e24c12009-10-30 11:49:00 +0000383#ifdef DEBUG
384 virtual void Print() = 0;
385#endif
386
  // After calling this we can allocate a certain number of bytes using only
  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
  // without using freelists or causing a GC. This is used by partial
  // snapshots. It returns true if space was reserved or false if a GC is
  // needed. For paged spaces the space requested must include the space wasted
  // at the end of each page when allocating linearly.
  virtual bool ReserveSpace(int bytes) = 0;
394
Steve Blocka7e24c12009-10-30 11:49:00 +0000395 private:
Steve Block44f0eee2011-05-26 01:26:41 +0100396 Heap* heap_;
Steve Blocka7e24c12009-10-30 11:49:00 +0000397 AllocationSpace id_;
398 Executability executable_;
399};
400
401
402// ----------------------------------------------------------------------------
403// All heap objects containing executable code (code objects) must be allocated
404// from a 2 GB range of memory, so that they can call each other using 32-bit
405// displacements. This happens automatically on 32-bit platforms, where 32-bit
406// displacements cover the entire 4GB virtual address space. On 64-bit
407// platforms, we support this using the CodeRange object, which reserves and
408// manages a range of virtual memory.
Steve Block44f0eee2011-05-26 01:26:41 +0100409class CodeRange {
Steve Blocka7e24c12009-10-30 11:49:00 +0000410 public:
Ben Murdoch69a99ed2011-11-30 16:03:39 +0000411 explicit CodeRange(Isolate* isolate);
412 ~CodeRange() { TearDown(); }
413
Steve Blocka7e24c12009-10-30 11:49:00 +0000414 // Reserves a range of virtual memory, but does not commit any of it.
415 // Can only be called once, at heap initialization time.
416 // Returns false on failure.
Ben Murdoch85b71792012-04-11 18:30:58 +0100417 bool Setup(const size_t requested_size);
Steve Blocka7e24c12009-10-30 11:49:00 +0000418
419 // Frees the range of virtual memory, and frees the data structures used to
420 // manage it.
Steve Block44f0eee2011-05-26 01:26:41 +0100421 void TearDown();
Steve Blocka7e24c12009-10-30 11:49:00 +0000422
Ben Murdoch69a99ed2011-11-30 16:03:39 +0000423 bool exists() { return this != NULL && code_range_ != NULL; }
Steve Block44f0eee2011-05-26 01:26:41 +0100424 bool contains(Address address) {
Ben Murdoch69a99ed2011-11-30 16:03:39 +0000425 if (this == NULL || code_range_ == NULL) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +0000426 Address start = static_cast<Address>(code_range_->address());
427 return start <= address && address < start + code_range_->size();
428 }
429
430 // Allocates a chunk of memory from the large-object portion of
431 // the code range. On platforms with no separate code range, should
432 // not be called.
Ben Murdoch85b71792012-04-11 18:30:58 +0100433 MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
434 size_t* allocated);
435 void FreeRawMemory(void* buf, size_t length);
Steve Blocka7e24c12009-10-30 11:49:00 +0000436
437 private:
Ben Murdoch69a99ed2011-11-30 16:03:39 +0000438 Isolate* isolate_;
Steve Block44f0eee2011-05-26 01:26:41 +0100439
Steve Blocka7e24c12009-10-30 11:49:00 +0000440 // The reserved range of virtual memory that all code objects are put in.
Steve Block44f0eee2011-05-26 01:26:41 +0100441 VirtualMemory* code_range_;
Steve Blocka7e24c12009-10-30 11:49:00 +0000442 // Plain old data class, just a struct plus a constructor.
443 class FreeBlock {
444 public:
445 FreeBlock(Address start_arg, size_t size_arg)
Ben Murdoch85b71792012-04-11 18:30:58 +0100446 : start(start_arg), size(size_arg) {}
Steve Blocka7e24c12009-10-30 11:49:00 +0000447 FreeBlock(void* start_arg, size_t size_arg)
Ben Murdoch85b71792012-04-11 18:30:58 +0100448 : start(static_cast<Address>(start_arg)), size(size_arg) {}
Steve Blocka7e24c12009-10-30 11:49:00 +0000449
450 Address start;
451 size_t size;
452 };
453
454 // Freed blocks of memory are added to the free list. When the allocation
455 // list is exhausted, the free list is sorted and merged to make the new
456 // allocation list.
Steve Block44f0eee2011-05-26 01:26:41 +0100457 List<FreeBlock> free_list_;
Steve Blocka7e24c12009-10-30 11:49:00 +0000458 // Memory is allocated from the free blocks on the allocation list.
459 // The block at current_allocation_block_index_ is the current block.
Steve Block44f0eee2011-05-26 01:26:41 +0100460 List<FreeBlock> allocation_list_;
461 int current_allocation_block_index_;
Steve Blocka7e24c12009-10-30 11:49:00 +0000462
463 // Finds a block on the allocation list that contains at least the
464 // requested amount of memory. If none is found, sorts and merges
465 // the existing free memory blocks, and searches again.
466 // If none can be found, terminates V8 with FatalProcessOutOfMemory.
Steve Block44f0eee2011-05-26 01:26:41 +0100467 void GetNextAllocationBlock(size_t requested);
Steve Blocka7e24c12009-10-30 11:49:00 +0000468 // Compares the start addresses of two free blocks.
469 static int CompareFreeBlockAddress(const FreeBlock* left,
470 const FreeBlock* right);
Steve Block44f0eee2011-05-26 01:26:41 +0100471
Steve Block44f0eee2011-05-26 01:26:41 +0100472 DISALLOW_COPY_AND_ASSIGN(CodeRange);
Steve Blocka7e24c12009-10-30 11:49:00 +0000473};
474
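// Illustrative usage sketch (not from the original source), assuming a
// 64-bit build where a dedicated code range has been set up and a
// hypothetical accessor for it:
//
//   CodeRange* code_range = isolate->code_range();
//   if (code_range->exists()) {
//     size_t allocated = 0;
//     void* chunk = code_range->AllocateRawMemory(Page::kPageSize, &allocated);
//     ...
//     code_range->FreeRawMemory(chunk, allocated);
//   }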
475
476// ----------------------------------------------------------------------------
477// A space acquires chunks of memory from the operating system. The memory
Ben Murdoch85b71792012-04-11 18:30:58 +0100478// allocator manages chunks for the paged heap spaces (old space and map
479// space). A paged chunk consists of pages. Pages in a chunk have contiguous
480// addresses and are linked as a list.
Steve Blocka7e24c12009-10-30 11:49:00 +0000481//
Ben Murdoch85b71792012-04-11 18:30:58 +0100482// The allocator keeps an initial chunk which is used for the new space. The
483// leftover regions of the initial chunk are used for the initial chunks of
484// old space and map space if they are big enough to hold at least one page.
485// The allocator assumes that there is one old space and one map space, each
486// expands the space by allocating kPagesPerChunk pages except the last
487// expansion (before running out of space). The first chunk may contain fewer
488// than kPagesPerChunk pages as well.
Steve Blocka7e24c12009-10-30 11:49:00 +0000489//
Ben Murdoch85b71792012-04-11 18:30:58 +0100490// The memory allocator also allocates chunks for the large object space, but
491// they are managed by the space itself. The new space does not expand.
492//
493// The fact that pages for paged spaces are allocated and deallocated in chunks
494// induces a constraint on the order of pages in a linked lists. We say that
495// pages are linked in the chunk-order if and only if every two consecutive
496// pages from the same chunk are consecutive in the linked list.
497//
498
499
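// Illustrative sketch (not from the original source) of how a paged space
// grows: the owning space asks the allocator for a chunk of pages and links
// the result into its page list, e.g.
//
//   int allocated_pages = 0;
//   Page* first = heap()->isolate()->memory_allocator()->AllocatePages(
//       MemoryAllocator::kPagesPerChunk, &allocated_pages, this);
//   if (first->is_valid()) { /* link the new pages into the space */ }
//
// (The accessor chain above is assumed for illustration only.)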
Steve Block44f0eee2011-05-26 01:26:41 +0100500class MemoryAllocator {
Steve Blocka7e24c12009-10-30 11:49:00 +0000501 public:
Ben Murdoch69a99ed2011-11-30 16:03:39 +0000502 explicit MemoryAllocator(Isolate* isolate);
503
Steve Blocka7e24c12009-10-30 11:49:00 +0000504 // Initializes its internal bookkeeping structures.
Russell Brenner90bac252010-11-18 13:33:46 -0800505 // Max capacity of the total space and executable memory limit.
Ben Murdoch85b71792012-04-11 18:30:58 +0100506 bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
Steve Blocka7e24c12009-10-30 11:49:00 +0000507
Ben Murdoch85b71792012-04-11 18:30:58 +0100508 // Deletes valid chunks.
Steve Block44f0eee2011-05-26 01:26:41 +0100509 void TearDown();
Steve Blocka7e24c12009-10-30 11:49:00 +0000510
  // Reserves an initial address range of virtual memory to be split between
  // the two new space semispaces, the old space, and the map space. The
  // memory is not yet committed or assigned to spaces and split into pages.
  // The initial chunk is unmapped when the memory allocator is torn down.
  // This function should only be called when there is not already a reserved
  // initial chunk (initial_chunk_ should be NULL). It returns the start
  // address of the initial chunk if successful, with the side effect of
  // setting the initial chunk, or else NULL if unsuccessful, leaving the
  // initial chunk NULL.
  void* ReserveInitialChunk(const size_t requested);
Steve Blocka7e24c12009-10-30 11:49:00 +0000521
Ben Murdoch85b71792012-04-11 18:30:58 +0100522 // Commits pages from an as-yet-unmanaged block of virtual memory into a
523 // paged space. The block should be part of the initial chunk reserved via
524 // a call to ReserveInitialChunk. The number of pages is always returned in
525 // the output parameter num_pages. This function assumes that the start
526 // address is non-null and that it is big enough to hold at least one
527 // page-aligned page. The call always succeeds, and num_pages is always
528 // greater than zero.
529 Page* CommitPages(Address start, size_t size, PagedSpace* owner,
530 int* num_pages);
Steve Blocka7e24c12009-10-30 11:49:00 +0000531
Ben Murdoch85b71792012-04-11 18:30:58 +0100532 // Commit a contiguous block of memory from the initial chunk. Assumes that
533 // the address is not NULL, the size is greater than zero, and that the
534 // block is contained in the initial chunk. Returns true if it succeeded
535 // and false otherwise.
536 bool CommitBlock(Address start, size_t size, Executability executable);
537
538 // Uncommit a contiguous block of memory [start..(start+size)[.
539 // start is not NULL, the size is greater than zero, and the
540 // block is contained in the initial chunk. Returns true if it succeeded
541 // and false otherwise.
542 bool UncommitBlock(Address start, size_t size);
543
544 // Zaps a contiguous block of memory [start..(start+size)[ thus
545 // filling it up with a recognizable non-NULL bit pattern.
546 void ZapBlock(Address start, size_t size);
547
548 // Attempts to allocate the requested (non-zero) number of pages from the
  // OS. Fewer pages might be allocated than requested. If it fails to
  // allocate memory from the OS or cannot allocate a single page, this
551 // function returns an invalid page pointer (NULL). The caller must check
552 // whether the returned page is valid (by calling Page::is_valid()). It is
553 // guaranteed that allocated pages have contiguous addresses. The actual
554 // number of allocated pages is returned in the output parameter
555 // allocated_pages. If the PagedSpace owner is executable and there is
556 // a code range, the pages are allocated from the code range.
557 Page* AllocatePages(int requested_pages, int* allocated_pages,
558 PagedSpace* owner);
559
560 // Frees pages from a given page and after. Requires pages to be
561 // linked in chunk-order (see comment for class).
562 // If 'p' is the first page of a chunk, pages from 'p' are freed
563 // and this function returns an invalid page pointer.
564 // Otherwise, the function searches a page after 'p' that is
565 // the first page of a chunk. Pages after the found page
566 // are freed and the function returns 'p'.
567 Page* FreePages(Page* p);
568
569 // Frees all pages owned by given space.
570 void FreeAllPages(PagedSpace* space);
571
572 // Allocates and frees raw memory of certain size.
573 // These are just thin wrappers around OS::Allocate and OS::Free,
574 // but keep track of allocated bytes as part of heap.
575 // If the flag is EXECUTABLE and a code range exists, the requested
576 // memory is allocated from the code range. If a code range exists
577 // and the freed memory is in it, the code range manages the freed memory.
578 MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
579 size_t* allocated,
580 Executability executable);
581 void FreeRawMemory(void* buf,
582 size_t length,
583 Executability executable);
584 void PerformAllocationCallback(ObjectSpace space,
585 AllocationAction action,
586 size_t size);
587
588 void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
589 ObjectSpace space,
590 AllocationAction action);
591 void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
592 bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
Steve Blocka7e24c12009-10-30 11:49:00 +0000593
594 // Returns the maximum available bytes of heaps.
Steve Block44f0eee2011-05-26 01:26:41 +0100595 intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
Steve Blocka7e24c12009-10-30 11:49:00 +0000596
597 // Returns allocated spaces in bytes.
Steve Block44f0eee2011-05-26 01:26:41 +0100598 intptr_t Size() { return size_; }
Steve Blocka7e24c12009-10-30 11:49:00 +0000599
Russell Brenner90bac252010-11-18 13:33:46 -0800600 // Returns the maximum available executable bytes of heaps.
Steve Block44f0eee2011-05-26 01:26:41 +0100601 intptr_t AvailableExecutable() {
Russell Brenner90bac252010-11-18 13:33:46 -0800602 if (capacity_executable_ < size_executable_) return 0;
603 return capacity_executable_ - size_executable_;
604 }
605
Steve Block791712a2010-08-27 10:21:07 +0100606 // Returns allocated executable spaces in bytes.
Steve Block44f0eee2011-05-26 01:26:41 +0100607 intptr_t SizeExecutable() { return size_executable_; }
Steve Block791712a2010-08-27 10:21:07 +0100608
Steve Blocka7e24c12009-10-30 11:49:00 +0000609 // Returns maximum available bytes that the old space can have.
Steve Block44f0eee2011-05-26 01:26:41 +0100610 intptr_t MaxAvailable() {
Ben Murdoch85b71792012-04-11 18:30:58 +0100611 return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
Steve Blocka7e24c12009-10-30 11:49:00 +0000612 }
613
Ben Murdoch85b71792012-04-11 18:30:58 +0100614 // Links two pages.
615 inline void SetNextPage(Page* prev, Page* next);
616
617 // Returns the next page of a given page.
618 inline Page* GetNextPage(Page* p);
619
620 // Checks whether a page belongs to a space.
621 inline bool IsPageInSpace(Page* p, PagedSpace* space);
622
623 // Returns the space that owns the given page.
624 inline PagedSpace* PageOwner(Page* page);
625
626 // Finds the first/last page in the same chunk as a given page.
627 Page* FindFirstPageInSameChunk(Page* p);
628 Page* FindLastPageInSameChunk(Page* p);
629
630 // Relinks list of pages owned by space to make it chunk-ordered.
631 // Returns new first and last pages of space.
  // Also returns the last page in the relinked list which has the
  // WasInUseBeforeMC flag set.
634 void RelinkPageListInChunkOrder(PagedSpace* space,
635 Page** first_page,
636 Page** last_page,
637 Page** last_page_in_use);
638
Steve Blocka7e24c12009-10-30 11:49:00 +0000639#ifdef DEBUG
640 // Reports statistic info of the space.
Steve Block44f0eee2011-05-26 01:26:41 +0100641 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +0000642#endif
643
  // Due to an encoding limitation, we can only have 8K chunks.
  static const int kMaxNofChunks = 1 << kPageSizeBits;
  // If a chunk has at least 16 pages, the maximum heap size is about
  // 8K chunks * 16 pages * 8K bytes = 1G bytes.
648#ifdef V8_TARGET_ARCH_X64
649 static const int kPagesPerChunk = 32;
650 // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
651 static const int kChunkTableLevels = 4;
652 static const int kChunkTableBitsPerLevel = 12;
653#else
654 static const int kPagesPerChunk = 16;
655 // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
656 static const int kChunkTableLevels = 2;
657 static const int kChunkTableBitsPerLevel = 8;
658#endif
Steve Blocka7e24c12009-10-30 11:49:00 +0000659
660 private:
Ben Murdoch85b71792012-04-11 18:30:58 +0100661 static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
662
Ben Murdoch69a99ed2011-11-30 16:03:39 +0000663 Isolate* isolate_;
664
Steve Blocka7e24c12009-10-30 11:49:00 +0000665 // Maximum space size in bytes.
Ben Murdoch85b71792012-04-11 18:30:58 +0100666 intptr_t capacity_;
Russell Brenner90bac252010-11-18 13:33:46 -0800667 // Maximum subset of capacity_ that can be executable
Ben Murdoch85b71792012-04-11 18:30:58 +0100668 intptr_t capacity_executable_;
Ben Murdochb0fe1622011-05-05 13:52:32 +0100669
Steve Blocka7e24c12009-10-30 11:49:00 +0000670 // Allocated space size in bytes.
Ben Murdoch85b71792012-04-11 18:30:58 +0100671 intptr_t size_;
672
Steve Block791712a2010-08-27 10:21:07 +0100673 // Allocated executable space size in bytes.
Ben Murdoch85b71792012-04-11 18:30:58 +0100674 intptr_t size_executable_;
Steve Blocka7e24c12009-10-30 11:49:00 +0000675
Iain Merrick9ac36c92010-09-13 15:29:50 +0100676 struct MemoryAllocationCallbackRegistration {
677 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
678 ObjectSpace space,
679 AllocationAction action)
680 : callback(callback), space(space), action(action) {
681 }
682 MemoryAllocationCallback callback;
683 ObjectSpace space;
684 AllocationAction action;
685 };
686 // A List of callback that are triggered when memory is allocated or free'd
Steve Block44f0eee2011-05-26 01:26:41 +0100687 List<MemoryAllocationCallbackRegistration>
Iain Merrick9ac36c92010-09-13 15:29:50 +0100688 memory_allocation_callbacks_;
689
Ben Murdoch85b71792012-04-11 18:30:58 +0100690 // The initial chunk of virtual memory.
691 VirtualMemory* initial_chunk_;
692
693 // Allocated chunk info: chunk start address, chunk size, and owning space.
694 class ChunkInfo BASE_EMBEDDED {
695 public:
696 ChunkInfo() : address_(NULL),
697 size_(0),
698 owner_(NULL),
699 executable_(NOT_EXECUTABLE),
700 owner_identity_(FIRST_SPACE) {}
701 inline void init(Address a, size_t s, PagedSpace* o);
702 Address address() { return address_; }
703 size_t size() { return size_; }
704 PagedSpace* owner() { return owner_; }
705 // We save executability of the owner to allow using it
706 // when collecting stats after the owner has been destroyed.
707 Executability executable() const { return executable_; }
708 AllocationSpace owner_identity() const { return owner_identity_; }
709
710 private:
711 Address address_;
712 size_t size_;
713 PagedSpace* owner_;
714 Executability executable_;
715 AllocationSpace owner_identity_;
716 };
717
718 // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
719 List<ChunkInfo> chunks_;
720 List<int> free_chunk_ids_;
721 int max_nof_chunks_;
722 int top_;
723
724 // Push/pop a free chunk id onto/from the stack.
725 void Push(int free_chunk_id);
726 int Pop();
727 bool OutOfChunkIds() { return top_ == 0; }
728
729 // Frees a chunk.
730 void DeleteChunk(int chunk_id);
731
732 // Basic check whether a chunk id is in the valid range.
733 inline bool IsValidChunkId(int chunk_id);
734
735 // Checks whether a chunk id identifies an allocated chunk.
736 inline bool IsValidChunk(int chunk_id);
737
738 // Returns the chunk id that a page belongs to.
739 inline int GetChunkId(Page* p);
740
741 // True if the address lies in the initial chunk.
742 inline bool InInitialChunk(Address address);
743
Steve Blocka7e24c12009-10-30 11:49:00 +0000744 // Initializes pages in a chunk. Returns the first page address.
745 // This function and GetChunkId() are provided for the mark-compact
746 // collector to rebuild page headers in the from space, which is
747 // used as a marking stack and its page headers are destroyed.
Steve Block44f0eee2011-05-26 01:26:41 +0100748 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
749 PagedSpace* owner);
Steve Block6ded16b2010-05-10 14:33:55 +0100750
Ben Murdoch85b71792012-04-11 18:30:58 +0100751 Page* RelinkPagesInChunk(int chunk_id,
752 Address chunk_start,
753 size_t chunk_size,
754 Page* prev,
755 Page** last_page_in_use);
756
757 DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
Steve Blocka7e24c12009-10-30 11:49:00 +0000758};
759
760
761// -----------------------------------------------------------------------------
762// Interface for heap object iterator to be implemented by all object space
763// object iterators.
764//
// NOTE: The space-specific object iterators also implement their own next()
// method, which is used to avoid virtual function calls when iterating over
// a specific space.
768
769class ObjectIterator : public Malloced {
770 public:
771 virtual ~ObjectIterator() { }
772
Steve Blocka7e24c12009-10-30 11:49:00 +0000773 virtual HeapObject* next_object() = 0;
774};
775
776
777// -----------------------------------------------------------------------------
778// Heap object iterator in new/old/map spaces.
779//
Ben Murdoch85b71792012-04-11 18:30:58 +0100780// A HeapObjectIterator iterates objects from a given address to the
781// top of a space. The given address must be below the current
782// allocation pointer (space top). There are some caveats.
Steve Blocka7e24c12009-10-30 11:49:00 +0000783//
Ben Murdoch85b71792012-04-11 18:30:58 +0100784// (1) If the space top changes upward during iteration (because of
785// allocating new objects), the iterator does not iterate objects
786// above the original space top. The caller must create a new
787// iterator starting from the old top in order to visit these new
788// objects.
789//
790// (2) If new objects are allocated below the original allocation top
791// (e.g., free-list allocation in paged spaces), the new objects
792// may or may not be iterated depending on their position with
793// respect to the current point of iteration.
794//
795// (3) The space top should not change downward during iteration,
796// otherwise the iterator will return not-necessarily-valid
797// objects.
798
Steve Blocka7e24c12009-10-30 11:49:00 +0000799class HeapObjectIterator: public ObjectIterator {
800 public:
Ben Murdoch85b71792012-04-11 18:30:58 +0100801 // Creates a new object iterator in a given space. If a start
802 // address is not given, the iterator starts from the space bottom.
Steve Blocka7e24c12009-10-30 11:49:00 +0000803 // If the size function is not given, the iterator calls the default
804 // Object::Size().
805 explicit HeapObjectIterator(PagedSpace* space);
806 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
Ben Murdoch85b71792012-04-11 18:30:58 +0100807 HeapObjectIterator(PagedSpace* space, Address start);
808 HeapObjectIterator(PagedSpace* space,
809 Address start,
810 HeapObjectCallback size_func);
Kristian Monsen80d68ea2010-09-08 11:05:35 +0100811 HeapObjectIterator(Page* page, HeapObjectCallback size_func);
Steve Blocka7e24c12009-10-30 11:49:00 +0000812
Ben Murdoch85b71792012-04-11 18:30:58 +0100813 inline HeapObject* next() {
814 return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
Leon Clarked91b9f72010-01-27 17:25:45 +0000815 }
Steve Blocka7e24c12009-10-30 11:49:00 +0000816
Ben Murdoch85b71792012-04-11 18:30:58 +0100817 // implementation of ObjectIterator.
818 virtual HeapObject* next_object() { return next(); }
Steve Blocka7e24c12009-10-30 11:49:00 +0000819
820 private:
Ben Murdoch85b71792012-04-11 18:30:58 +0100821 Address cur_addr_; // current iteration point
822 Address end_addr_; // end iteration point
823 Address cur_limit_; // current page limit
824 HeapObjectCallback size_func_; // size function
825 Page* end_page_; // caches the page of the end address
Steve Blocka7e24c12009-10-30 11:49:00 +0000826
Ben Murdoch85b71792012-04-11 18:30:58 +0100827 HeapObject* FromCurrentPage() {
828 ASSERT(cur_addr_ < cur_limit_);
Leon Clarked91b9f72010-01-27 17:25:45 +0000829
Ben Murdoch85b71792012-04-11 18:30:58 +0100830 HeapObject* obj = HeapObject::FromAddress(cur_addr_);
831 int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
832 ASSERT_OBJECT_SIZE(obj_size);
Leon Clarked91b9f72010-01-27 17:25:45 +0000833
Ben Murdoch85b71792012-04-11 18:30:58 +0100834 cur_addr_ += obj_size;
835 ASSERT(cur_addr_ <= cur_limit_);
836
837 return obj;
838 }
839
840 // Slow path of next, goes into the next page.
841 HeapObject* FromNextPage();
Steve Blocka7e24c12009-10-30 11:49:00 +0000842
843 // Initializes fields.
Ben Murdoch85b71792012-04-11 18:30:58 +0100844 void Initialize(Address start, Address end, HeapObjectCallback size_func);
845
846#ifdef DEBUG
847 // Verifies whether fields have valid values.
848 void Verify();
849#endif
Steve Blocka7e24c12009-10-30 11:49:00 +0000850};
851
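// Illustrative usage (not part of the original source), following the common
// pattern for these iterators:
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... visit obj ...
//   }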
852
853// -----------------------------------------------------------------------------
854// A PageIterator iterates the pages in a paged space.
Ben Murdoch85b71792012-04-11 18:30:58 +0100855//
856// The PageIterator class provides three modes for iterating pages in a space:
857// PAGES_IN_USE iterates pages containing allocated objects.
858// PAGES_USED_BY_MC iterates pages that hold relocated objects during a
859// mark-compact collection.
860// ALL_PAGES iterates all pages in the space.
861//
862// There are some caveats.
863//
864// (1) If the space expands during iteration, new pages will not be
865// returned by the iterator in any mode.
866//
867// (2) If new objects are allocated during iteration, they will appear
868// in pages returned by the iterator. Allocation may cause the
869// allocation pointer or MC allocation pointer in the last page to
870// change between constructing the iterator and iterating the last
871// page.
872//
873// (3) The space should not shrink during iteration, otherwise the
874// iterator will return deallocated pages.
Steve Blocka7e24c12009-10-30 11:49:00 +0000875
876class PageIterator BASE_EMBEDDED {
877 public:
Ben Murdoch85b71792012-04-11 18:30:58 +0100878 enum Mode {
879 PAGES_IN_USE,
880 PAGES_USED_BY_MC,
881 ALL_PAGES
882 };
883
884 PageIterator(PagedSpace* space, Mode mode);
Steve Blocka7e24c12009-10-30 11:49:00 +0000885
886 inline bool has_next();
887 inline Page* next();
888
889 private:
890 PagedSpace* space_;
891 Page* prev_page_; // Previous page returned.
Ben Murdoch85b71792012-04-11 18:30:58 +0100892 Page* stop_page_; // Page to stop at (last page returned by the iterator).
Steve Blocka7e24c12009-10-30 11:49:00 +0000893};
894
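// Illustrative usage (not part of the original source):
//
//   PageIterator it(space, PageIterator::PAGES_IN_USE);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... process p ...
//   }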
895
896// -----------------------------------------------------------------------------
Ben Murdoch85b71792012-04-11 18:30:58 +0100897// A space has a list of pages. The next page can be accessed via
898// Page::next_page() call. The next page of the last page is an
899// invalid page pointer. A space can expand and shrink dynamically.
Steve Blocka7e24c12009-10-30 11:49:00 +0000900
901// An abstraction of allocation and relocation pointers in a page-structured
902// space.
903class AllocationInfo {
904 public:
Ben Murdoch85b71792012-04-11 18:30:58 +0100905 Address top; // current allocation top
906 Address limit; // current allocation limit
Steve Blocka7e24c12009-10-30 11:49:00 +0000907
908#ifdef DEBUG
909 bool VerifyPagedAllocation() {
910 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
911 && (top <= limit);
912 }
913#endif
914};
915
916
917// An abstraction of the accounting statistics of a page-structured space.
Ben Murdoch85b71792012-04-11 18:30:58 +0100918// The 'capacity' of a space is the number of object-area bytes (ie, not
Steve Blocka7e24c12009-10-30 11:49:00 +0000919// including page bookkeeping structures) currently in the space. The 'size'
920// of a space is the number of allocated bytes, the 'waste' in the space is
921// the number of bytes that are not allocated and not available to
Ben Murdoch85b71792012-04-11 18:30:58 +0100922// allocation without reorganizing the space via a GC (eg, small blocks due
Steve Blocka7e24c12009-10-30 11:49:00 +0000923// to internal fragmentation, top of page areas in map space), and the bytes
924// 'available' is the number of unallocated bytes that are not waste. The
925// capacity is the sum of size, waste, and available.
926//
927// The stats are only set by functions that ensure they stay balanced. These
928// functions increase or decrease one of the non-capacity stats in
929// conjunction with capacity, or else they always balance increases and
930// decreases to the non-capacity stats.
931class AllocationStats BASE_EMBEDDED {
932 public:
933 AllocationStats() { Clear(); }
934
Ben Murdoch85b71792012-04-11 18:30:58 +0100935 // Zero out all the allocation statistics (ie, no capacity).
Steve Blocka7e24c12009-10-30 11:49:00 +0000936 void Clear() {
937 capacity_ = 0;
Ben Murdoch85b71792012-04-11 18:30:58 +0100938 available_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +0000939 size_ = 0;
940 waste_ = 0;
941 }
942
Ben Murdoch85b71792012-04-11 18:30:58 +0100943 // Reset the allocation statistics (ie, available = capacity with no
Steve Blocka7e24c12009-10-30 11:49:00 +0000944 // wasted or allocated bytes).
945 void Reset() {
Ben Murdoch85b71792012-04-11 18:30:58 +0100946 available_ = capacity_;
Steve Blocka7e24c12009-10-30 11:49:00 +0000947 size_ = 0;
948 waste_ = 0;
949 }
950
951 // Accessors for the allocation statistics.
Ben Murdochf87a2032010-10-22 12:50:53 +0100952 intptr_t Capacity() { return capacity_; }
Ben Murdoch85b71792012-04-11 18:30:58 +0100953 intptr_t Available() { return available_; }
Ben Murdochf87a2032010-10-22 12:50:53 +0100954 intptr_t Size() { return size_; }
955 intptr_t Waste() { return waste_; }
Steve Blocka7e24c12009-10-30 11:49:00 +0000956
Ben Murdoch85b71792012-04-11 18:30:58 +0100957 // Grow the space by adding available bytes.
Steve Blocka7e24c12009-10-30 11:49:00 +0000958 void ExpandSpace(int size_in_bytes) {
959 capacity_ += size_in_bytes;
Ben Murdoch85b71792012-04-11 18:30:58 +0100960 available_ += size_in_bytes;
Steve Blocka7e24c12009-10-30 11:49:00 +0000961 }
962
Ben Murdoch85b71792012-04-11 18:30:58 +0100963 // Shrink the space by removing available bytes.
Steve Blocka7e24c12009-10-30 11:49:00 +0000964 void ShrinkSpace(int size_in_bytes) {
965 capacity_ -= size_in_bytes;
Ben Murdoch85b71792012-04-11 18:30:58 +0100966 available_ -= size_in_bytes;
Steve Blocka7e24c12009-10-30 11:49:00 +0000967 }
968
969 // Allocate from available bytes (available -> size).
Ben Murdochf87a2032010-10-22 12:50:53 +0100970 void AllocateBytes(intptr_t size_in_bytes) {
Ben Murdoch85b71792012-04-11 18:30:58 +0100971 available_ -= size_in_bytes;
Steve Blocka7e24c12009-10-30 11:49:00 +0000972 size_ += size_in_bytes;
973 }
974
975 // Free allocated bytes, making them available (size -> available).
Ben Murdochf87a2032010-10-22 12:50:53 +0100976 void DeallocateBytes(intptr_t size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000977 size_ -= size_in_bytes;
Ben Murdoch85b71792012-04-11 18:30:58 +0100978 available_ += size_in_bytes;
Steve Blocka7e24c12009-10-30 11:49:00 +0000979 }
980
981 // Waste free bytes (available -> waste).
982 void WasteBytes(int size_in_bytes) {
Ben Murdoch85b71792012-04-11 18:30:58 +0100983 available_ -= size_in_bytes;
Steve Blocka7e24c12009-10-30 11:49:00 +0000984 waste_ += size_in_bytes;
Ben Murdoch85b71792012-04-11 18:30:58 +0100985 }
986
987 // Consider the wasted bytes to be allocated, as they contain filler
988 // objects (waste -> size).
989 void FillWastedBytes(intptr_t size_in_bytes) {
990 waste_ -= size_in_bytes;
991 size_ += size_in_bytes;
Steve Blocka7e24c12009-10-30 11:49:00 +0000992 }
993
994 private:
Ben Murdochf87a2032010-10-22 12:50:53 +0100995 intptr_t capacity_;
Ben Murdoch85b71792012-04-11 18:30:58 +0100996 intptr_t available_;
Ben Murdochf87a2032010-10-22 12:50:53 +0100997 intptr_t size_;
998 intptr_t waste_;
Steve Blocka7e24c12009-10-30 11:49:00 +0000999};
1000
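// Illustrative example (not from the original source): the stats always keep
// capacity == size + waste + available.  Expanding by one page and then
// allocating from it looks like
//
//   AllocationStats stats;
//   stats.ExpandSpace(Page::kObjectAreaSize);  // capacity and available grow
//   stats.AllocateBytes(128);                  // available -> size
//   stats.WasteBytes(16);                      // available -> waste
//   // stats.Capacity() == stats.Size() + stats.Waste() + stats.Available()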
1001
1002class PagedSpace : public Space {
1003 public:
1004 // Creates a space with a maximum capacity, and an id.
Steve Block44f0eee2011-05-26 01:26:41 +01001005 PagedSpace(Heap* heap,
1006 intptr_t max_capacity,
Ben Murdochf87a2032010-10-22 12:50:53 +01001007 AllocationSpace id,
1008 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00001009
1010 virtual ~PagedSpace() {}
1011
1012 // Set up the space using the given address range of virtual memory (from
1013 // the memory allocator's initial chunk) if possible. If the block of
1014 // addresses is not big enough to contain a single page-aligned page, a
1015 // fresh chunk will be allocated.
Ben Murdoch85b71792012-04-11 18:30:58 +01001016 bool Setup(Address start, size_t size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001017
1018 // Returns true if the space has been successfully set up and not
1019 // subsequently torn down.
Ben Murdoch85b71792012-04-11 18:30:58 +01001020 bool HasBeenSetup();
Steve Blocka7e24c12009-10-30 11:49:00 +00001021
1022 // Cleans up the space, frees all pages in this space except those belonging
1023 // to the initial chunk, uncommits addresses in the initial chunk.
1024 void TearDown();
1025
1026 // Checks whether an object/address is in this space.
1027 inline bool Contains(Address a);
1028 bool Contains(HeapObject* o) { return Contains(o->address()); }
Ben Murdoch85b71792012-04-11 18:30:58 +01001029 // Never crashes even if a is not a valid pointer.
1030 inline bool SafeContains(Address a);
Steve Blocka7e24c12009-10-30 11:49:00 +00001031
1032 // Given an address occupied by a live object, return that object if it is
1033 // in this space, or Failure::Exception() if it is not. The implementation
1034 // iterates over objects in the page containing the address, the cost is
1035 // linear in the number of objects in the page. It may be slow.
John Reck59135872010-11-02 12:39:01 -07001036 MUST_USE_RESULT MaybeObject* FindObject(Address addr);
Steve Blocka7e24c12009-10-30 11:49:00 +00001037
Ben Murdoch85b71792012-04-11 18:30:58 +01001038 // Checks whether page is currently in use by this space.
1039 bool IsUsed(Page* page);
Steve Blocka7e24c12009-10-30 11:49:00 +00001040
Ben Murdoch85b71792012-04-11 18:30:58 +01001041 void MarkAllPagesClean();
1042
1043 // Prepares for a mark-compact GC.
1044 virtual void PrepareForMarkCompact(bool will_compact);
1045
1046 // The top of allocation in a page in this space. Undefined if page is unused.
1047 Address PageAllocationTop(Page* page) {
1048 return page == TopPageOf(allocation_info_) ? top()
1049 : PageAllocationLimit(page);
1050 }
1051
1052 // The limit of allocation for a page in this space.
1053 virtual Address PageAllocationLimit(Page* page) = 0;
1054
1055 void FlushTopPageWatermark() {
1056 AllocationTopPage()->SetCachedAllocationWatermark(top());
1057 AllocationTopPage()->InvalidateWatermark(true);
1058 }
1059
1060 // Current capacity without growing (Size() + Available() + Waste()).
Ben Murdochf87a2032010-10-22 12:50:53 +01001061 intptr_t Capacity() { return accounting_stats_.Capacity(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001062
Steve Block3ce2e202009-11-05 08:53:23 +00001063 // Total amount of memory committed for this space. For paged
1064 // spaces this equals the capacity.
Ben Murdochf87a2032010-10-22 12:50:53 +01001065 intptr_t CommittedMemory() { return Capacity(); }
Steve Block3ce2e202009-11-05 08:53:23 +00001066
Ben Murdoch85b71792012-04-11 18:30:58 +01001067 // Available bytes without growing.
1068 intptr_t Available() { return accounting_stats_.Available(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001069
Ben Murdoch85b71792012-04-11 18:30:58 +01001070 // Allocated bytes in this space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001071 virtual intptr_t Size() { return accounting_stats_.Size(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001072
Ben Murdoch85b71792012-04-11 18:30:58 +01001073 // Wasted bytes due to fragmentation and not recoverable until the
1074 // next GC of this space.
1075 intptr_t Waste() { return accounting_stats_.Waste(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001076
Ben Murdoch85b71792012-04-11 18:30:58 +01001077 // Returns the address of the first object in this space.
1078 Address bottom() { return first_page_->ObjectAreaStart(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001079
1080 // Returns the allocation pointer in this space.
Ben Murdochc7cc0282012-03-05 14:35:55 +00001081 Address top() { return allocation_info_.top; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001082
1083 // Allocate the requested number of bytes in the space if possible, return a
1084 // failure object if not.
John Reck59135872010-11-02 12:39:01 -07001085 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001086
Ben Murdoch85b71792012-04-11 18:30:58 +01001087 // Allocate the requested number of bytes for relocation during mark-compact
1088 // collection.
1089 MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
1090
Leon Clarkee46be812010-01-19 14:06:41 +00001091 virtual bool ReserveSpace(int bytes);
1092
Ben Murdoch85b71792012-04-11 18:30:58 +01001093 // Used by ReserveSpace.
1094 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
1095
1096 // Free all pages in range from prev (exclusive) to last (inclusive).
1097 // Freed pages are moved to the end of page list.
1098 void FreePages(Page* prev, Page* last);
1099
1100 // Deallocates a block.
1101 virtual void DeallocateBlock(Address start,
1102 int size_in_bytes,
1103 bool add_to_freelist) = 0;
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001104
Steve Block6ded16b2010-05-10 14:33:55 +01001105 // Set space allocation info.
Ben Murdoch85b71792012-04-11 18:30:58 +01001106 void SetTop(Address top) {
Steve Block6ded16b2010-05-10 14:33:55 +01001107 allocation_info_.top = top;
Ben Murdoch85b71792012-04-11 18:30:58 +01001108 allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
Steve Block6ded16b2010-05-10 14:33:55 +01001109 }
1110
Ben Murdoch85b71792012-04-11 18:30:58 +01001111 // ---------------------------------------------------------------------------
1112 // Mark-compact collection support functions
1113
1114 // Set the relocation point to the beginning of the space.
1115 void MCResetRelocationInfo();
1116
1117 // Writes relocation info to the top page.
1118 void MCWriteRelocationInfoToPage() {
1119 TopPageOf(mc_forwarding_info_)->
1120 SetAllocationWatermark(mc_forwarding_info_.top);
Steve Blocka7e24c12009-10-30 11:49:00 +00001121 }
1122
Ben Murdoch85b71792012-04-11 18:30:58 +01001123  // Computes the offset of a given address in this space from the beginning
1124 // of the space.
1125 int MCSpaceOffsetForAddress(Address addr);
Steve Blocka7e24c12009-10-30 11:49:00 +00001126
Ben Murdoch85b71792012-04-11 18:30:58 +01001127 // Updates the allocation pointer to the relocation top after a mark-compact
1128 // collection.
1129 virtual void MCCommitRelocationInfo() = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001130
Ben Murdoch85b71792012-04-11 18:30:58 +01001131 // Releases half of unused pages.
1132 void Shrink();
Steve Blocka7e24c12009-10-30 11:49:00 +00001133
Ben Murdoch85b71792012-04-11 18:30:58 +01001134 // Ensures that the capacity is at least 'capacity'. Returns false on failure.
1135 bool EnsureCapacity(int capacity);
Steve Blocka7e24c12009-10-30 11:49:00 +00001136
Steve Blocka7e24c12009-10-30 11:49:00 +00001137#ifdef DEBUG
1138 // Print meta info and objects in this space.
1139 virtual void Print();
1140
1141 // Verify integrity of this space.
1142 virtual void Verify(ObjectVisitor* visitor);
1143
1144 // Overridden by subclasses to verify space-specific object
1145 // properties (e.g., only maps or free-list nodes are in map space).
1146 virtual void VerifyObject(HeapObject* obj) {}
1147
1148 // Report code object related statistics
1149 void CollectCodeStatistics();
1150 static void ReportCodeStatistics();
1151 static void ResetCodeStatistics();
1152#endif
1153
Ben Murdoch85b71792012-04-11 18:30:58 +01001154 // Returns the page of the allocation pointer.
1155 Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
Steve Block6ded16b2010-05-10 14:33:55 +01001156
Ben Murdoch85b71792012-04-11 18:30:58 +01001157 void RelinkPageListInChunkOrder(bool deallocate_blocks);
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001158
Steve Blocka7e24c12009-10-30 11:49:00 +00001159 protected:
1160 // Maximum capacity of this space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001161 intptr_t max_capacity_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001162
1163 // Accounting information for this space.
1164 AllocationStats accounting_stats_;
1165
Ben Murdoch85b71792012-04-11 18:30:58 +01001166 // The first page in this space.
1167 Page* first_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001168
Ben Murdoch85b71792012-04-11 18:30:58 +01001169 // The last page in this space. Initially set in Setup, updated in
1170 // Expand and Shrink.
1171 Page* last_page_;
1172
1173 // True if pages owned by this space are linked in chunk-order.
1174 // See comment for class MemoryAllocator for definition of chunk-order.
1175 bool page_list_is_chunk_ordered_;
Steve Block6ded16b2010-05-10 14:33:55 +01001176
Steve Blocka7e24c12009-10-30 11:49:00 +00001177 // Normal allocation information.
1178 AllocationInfo allocation_info_;
1179
Ben Murdoch85b71792012-04-11 18:30:58 +01001180 // Relocation information during mark-compact collections.
1181 AllocationInfo mc_forwarding_info_;
1182
Steve Blocka7e24c12009-10-30 11:49:00 +00001183 // Bytes of each page that cannot be allocated. Possibly non-zero
1184 // for pages in spaces with only fixed-size objects. Always zero
1185 // for pages in spaces with variable sized objects (those pages are
1186 // padded with free-list nodes).
1187 int page_extra_;
1188
Ben Murdoch85b71792012-04-11 18:30:58 +01001189 // Sets allocation pointer to a page bottom.
1190 static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
Steve Blocka7e24c12009-10-30 11:49:00 +00001191
Ben Murdoch85b71792012-04-11 18:30:58 +01001192 // Returns the top page specified by an allocation info structure.
1193 static Page* TopPageOf(AllocationInfo alloc_info) {
1194 return Page::FromAllocationTop(alloc_info.limit);
1195 }
Leon Clarked91b9f72010-01-27 17:25:45 +00001196
Ben Murdoch85b71792012-04-11 18:30:58 +01001197 int CountPagesToTop() {
1198 Page* p = Page::FromAllocationTop(allocation_info_.top);
1199 PageIterator it(this, PageIterator::ALL_PAGES);
1200 int counter = 1;
1201 while (it.has_next()) {
1202 if (it.next() == p) return counter;
1203 counter++;
1204 }
1205 UNREACHABLE();
1206 return -1;
1207 }
Ben Murdochc7cc0282012-03-05 14:35:55 +00001208
Steve Blocka7e24c12009-10-30 11:49:00 +00001209 // Expands the space by allocating a fixed number of pages. Returns false if
Ben Murdoch85b71792012-04-11 18:30:58 +01001210  // it cannot allocate the requested number of pages from the OS. Newly
1211  // allocated pages are appended after last_page.
1212 bool Expand(Page* last_page);
Steve Blocka7e24c12009-10-30 11:49:00 +00001213
Ben Murdoch85b71792012-04-11 18:30:58 +01001214 // Generic fast case allocation function that tries linear allocation in
1215 // the top page of 'alloc_info'. Returns NULL on failure.
1216 inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
1217 int size_in_bytes);
1218
1219 // During normal allocation or deserialization, roll to the next page in
1220 // the space (there is assumed to be one) and allocate there. This
1221 // function is space-dependent.
1222 virtual HeapObject* AllocateInNextPage(Page* current_page,
1223 int size_in_bytes) = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001224
1225 // Slow path of AllocateRaw. This function is space-dependent.
Ben Murdoch85b71792012-04-11 18:30:58 +01001226 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
1227
1228 // Slow path of MCAllocateRaw.
1229 MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
1230
1231#ifdef DEBUG
1232 // Returns the number of total pages in this space.
1233 int CountTotalPages();
1234#endif
1235
1236 private:
1237 // Returns a pointer to the page of the relocation pointer.
1238 Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001239
Steve Blocka7e24c12009-10-30 11:49:00 +00001240 friend class PageIterator;
1241};
1242
1243
Steve Blocka7e24c12009-10-30 11:49:00 +00001244class NumberAndSizeInfo BASE_EMBEDDED {
1245 public:
1246 NumberAndSizeInfo() : number_(0), bytes_(0) {}
1247
1248 int number() const { return number_; }
1249 void increment_number(int num) { number_ += num; }
1250
1251 int bytes() const { return bytes_; }
1252 void increment_bytes(int size) { bytes_ += size; }
1253
1254 void clear() {
1255 number_ = 0;
1256 bytes_ = 0;
1257 }
1258
1259 private:
1260 int number_;
1261 int bytes_;
1262};
1263
1264
1265// HistogramInfo class for recording a single "bar" of a histogram. This
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001266// class is used for collecting statistics to print to the log file.
Steve Blocka7e24c12009-10-30 11:49:00 +00001267class HistogramInfo: public NumberAndSizeInfo {
1268 public:
1269 HistogramInfo() : NumberAndSizeInfo() {}
1270
1271 const char* name() { return name_; }
1272 void set_name(const char* name) { name_ = name; }
1273
1274 private:
1275 const char* name_;
1276};
Steve Blocka7e24c12009-10-30 11:49:00 +00001277
1278
1279// -----------------------------------------------------------------------------
1280// SemiSpace in young generation
1281//
Ben Murdoch85b71792012-04-11 18:30:58 +01001282// A semispace is a contiguous chunk of memory. The mark-compact collector
1283// uses the memory in the from space as a marking stack when tracing live
1284// objects.
Steve Blocka7e24c12009-10-30 11:49:00 +00001285
1286class SemiSpace : public Space {
1287 public:
1288 // Constructor.
Ben Murdoch85b71792012-04-11 18:30:58 +01001289 explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
1290 start_ = NULL;
1291 age_mark_ = NULL;
1292 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001293
1294 // Sets up the semispace using the given chunk.
Ben Murdoch85b71792012-04-11 18:30:58 +01001295 bool Setup(Address start, int initial_capacity, int maximum_capacity);
Steve Blocka7e24c12009-10-30 11:49:00 +00001296
1297 // Tear down the space. Heap memory was not allocated by the space, so it
1298 // is not deallocated here.
1299 void TearDown();
1300
1301 // True if the space has been set up but not torn down.
Ben Murdoch85b71792012-04-11 18:30:58 +01001302 bool HasBeenSetup() { return start_ != NULL; }
1303
1304 // Grow the size of the semispace by committing extra virtual memory.
1305 // Assumes that the caller has checked that the semispace has not reached
1306 // its maximum capacity (and thus there is space available in the reserved
1307 // address range to grow).
1308 bool Grow();
Steve Blocka7e24c12009-10-30 11:49:00 +00001309
Steve Blocka7e24c12009-10-30 11:49:00 +00001310 // Grow the semispace to the new capacity. The new capacity
Ben Murdoch85b71792012-04-11 18:30:58 +01001311 // requested must be larger than the current capacity.
Steve Blocka7e24c12009-10-30 11:49:00 +00001312 bool GrowTo(int new_capacity);
1313
1314 // Shrinks the semispace to the new capacity. The new capacity
1315 // requested must be more than the amount of used memory in the
1316 // semispace and less than the current capacity.
1317 bool ShrinkTo(int new_capacity);
1318
Ben Murdoch85b71792012-04-11 18:30:58 +01001319 // Returns the start address of the space.
1320 Address low() { return start_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001321 // Returns one past the end address of the space.
Ben Murdoch85b71792012-04-11 18:30:58 +01001322 Address high() { return low() + capacity_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001323
1324 // Age mark accessors.
1325 Address age_mark() { return age_mark_; }
Ben Murdoch85b71792012-04-11 18:30:58 +01001326 void set_age_mark(Address mark) { age_mark_ = mark; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001327
1328 // True if the address is in the address range of this semispace (not
1329 // necessarily below the allocation pointer).
1330 bool Contains(Address a) {
1331 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1332 == reinterpret_cast<uintptr_t>(start_);
1333 }
1334
1335 // True if the object is a heap object in the address range of this
1336 // semispace (not necessarily below the allocation pointer).
1337 bool Contains(Object* o) {
1338 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1339 }
1340
Ben Murdoch85b71792012-04-11 18:30:58 +01001341 // The offset of an address from the beginning of the space.
1342 int SpaceOffsetForAddress(Address addr) {
1343 return static_cast<int>(addr - low());
1344 }
1345
Leon Clarkee46be812010-01-19 14:06:41 +00001346  // If we don't have these here then SemiSpace will be abstract. However,
1347 // they should never be called.
Ben Murdochf87a2032010-10-22 12:50:53 +01001348 virtual intptr_t Size() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001349 UNREACHABLE();
1350 return 0;
1351 }
1352
Leon Clarkee46be812010-01-19 14:06:41 +00001353 virtual bool ReserveSpace(int bytes) {
1354 UNREACHABLE();
1355 return false;
1356 }
1357
Steve Blocka7e24c12009-10-30 11:49:00 +00001358 bool is_committed() { return committed_; }
1359 bool Commit();
1360 bool Uncommit();
1361
1362#ifdef DEBUG
1363 virtual void Print();
1364 virtual void Verify();
1365#endif
1366
1367 // Returns the current capacity of the semi space.
1368 int Capacity() { return capacity_; }
1369
1370 // Returns the maximum capacity of the semi space.
1371 int MaximumCapacity() { return maximum_capacity_; }
1372
1373 // Returns the initial capacity of the semi space.
1374 int InitialCapacity() { return initial_capacity_; }
1375
1376 private:
1377 // The current and maximum capacity of the space.
1378 int capacity_;
1379 int maximum_capacity_;
1380 int initial_capacity_;
1381
1382 // The start address of the space.
1383 Address start_;
1384 // Used to govern object promotion during mark-compact collection.
1385 Address age_mark_;
1386
1387 // Masks and comparison values to test for containment in this semispace.
1388 uintptr_t address_mask_;
1389 uintptr_t object_mask_;
1390 uintptr_t object_expected_;
1391
1392 bool committed_;
1393
1394 public:
1395 TRACK_MEMORY("SemiSpace")
1396};
1397
1398
1399// A SemiSpaceIterator is an ObjectIterator that iterates over the active
1400// semispace of the heap's new space. It iterates over the objects in the
1401// semispace from a given start address (defaulting to the bottom of the
1402// semispace) to the top of the semispace. New objects allocated after the
1403// iterator is created are not iterated.
1404class SemiSpaceIterator : public ObjectIterator {
1405 public:
1406 // Create an iterator over the objects in the given space. If no start
1407 // address is given, the iterator starts from the bottom of the space. If
1408 // no size function is given, the iterator calls Object::Size().
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001409 explicit SemiSpaceIterator(NewSpace* space);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001410 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001411 SemiSpaceIterator(NewSpace* space, Address start);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001412
Ben Murdoch85b71792012-04-11 18:30:58 +01001413 HeapObject* next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00001414 if (current_ == limit_) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001415
1416 HeapObject* object = HeapObject::FromAddress(current_);
1417 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
1418
1419 current_ += size;
1420 return object;
1421 }
1422
1423 // Implementation of the ObjectIterator functions.
Ben Murdoch85b71792012-04-11 18:30:58 +01001424 virtual HeapObject* next_object() { return next(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001425
1426 private:
Ben Murdoch85b71792012-04-11 18:30:58 +01001427 void Initialize(NewSpace* space, Address start, Address end,
Steve Blocka7e24c12009-10-30 11:49:00 +00001428 HeapObjectCallback size_func);
1429
Ben Murdoch85b71792012-04-11 18:30:58 +01001430 // The semispace.
1431 SemiSpace* space_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001432 // The current iteration point.
1433 Address current_;
1434 // The end of iteration.
1435 Address limit_;
1436 // The callback function.
1437 HeapObjectCallback size_func_;
1438};
1439
1440
1441// -----------------------------------------------------------------------------
1442// The young generation space.
1443//
1444// The new space consists of a contiguous pair of semispaces. It simply
1445// forwards most functions to the appropriate semispace.
1446
1447class NewSpace : public Space {
1448 public:
1449 // Constructor.
Steve Block44f0eee2011-05-26 01:26:41 +01001450 explicit NewSpace(Heap* heap)
1451 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
Ben Murdoch85b71792012-04-11 18:30:58 +01001452 to_space_(heap),
1453 from_space_(heap) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00001454
1455 // Sets up the new space using the given chunk.
Ben Murdoch85b71792012-04-11 18:30:58 +01001456 bool Setup(Address start, int size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001457
1458 // Tears down the space. Heap memory was not allocated by the space, so it
1459 // is not deallocated here.
1460 void TearDown();
1461
1462 // True if the space has been set up but not torn down.
Ben Murdoch85b71792012-04-11 18:30:58 +01001463 bool HasBeenSetup() {
1464 return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
Steve Blocka7e24c12009-10-30 11:49:00 +00001465 }
1466
1467 // Flip the pair of spaces.
1468 void Flip();
1469
1470 // Grow the capacity of the semispaces. Assumes that they are not at
1471 // their maximum capacity.
1472 void Grow();
1473
1474 // Shrink the capacity of the semispaces.
1475 void Shrink();
1476
1477 // True if the address or object lies in the address range of either
1478 // semispace (not necessarily below the allocation pointer).
1479 bool Contains(Address a) {
1480 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1481 == reinterpret_cast<uintptr_t>(start_);
1482 }
1483 bool Contains(Object* o) {
Ben Murdoch85b71792012-04-11 18:30:58 +01001484 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001485 }
1486
1487 // Return the allocated bytes in the active semispace.
Ben Murdoch85b71792012-04-11 18:30:58 +01001488 virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
Ben Murdochf87a2032010-10-22 12:50:53 +01001489 // The same, but returning an int. We have to have the one that returns
1490 // intptr_t because it is inherited, but if we know we are dealing with the
1491  // new space, which can't get as big as the other spaces, then this is useful:
1492 int SizeAsInt() { return static_cast<int>(Size()); }
Steve Block3ce2e202009-11-05 08:53:23 +00001493
Steve Blocka7e24c12009-10-30 11:49:00 +00001494 // Return the current capacity of a semispace.
Ben Murdochf87a2032010-10-22 12:50:53 +01001495 intptr_t Capacity() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001496 ASSERT(to_space_.Capacity() == from_space_.Capacity());
1497 return to_space_.Capacity();
1498 }
Steve Block3ce2e202009-11-05 08:53:23 +00001499
1500 // Return the total amount of memory committed for new space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001501 intptr_t CommittedMemory() {
Steve Block3ce2e202009-11-05 08:53:23 +00001502 if (from_space_.is_committed()) return 2 * Capacity();
1503 return Capacity();
1504 }
1505
Ben Murdoch85b71792012-04-11 18:30:58 +01001506 // Return the available bytes without growing in the active semispace.
1507 intptr_t Available() { return Capacity() - Size(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001508
1509 // Return the maximum capacity of a semispace.
1510 int MaximumCapacity() {
1511 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
1512 return to_space_.MaximumCapacity();
1513 }
1514
1515 // Returns the initial capacity of a semispace.
1516 int InitialCapacity() {
1517 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
1518 return to_space_.InitialCapacity();
1519 }
1520
1521 // Return the address of the allocation pointer in the active semispace.
Ben Murdoch85b71792012-04-11 18:30:58 +01001522 Address top() { return allocation_info_.top; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001523 // Return the address of the first object in the active semispace.
Ben Murdoch85b71792012-04-11 18:30:58 +01001524 Address bottom() { return to_space_.low(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001525
1526 // Get the age mark of the inactive semispace.
1527 Address age_mark() { return from_space_.age_mark(); }
1528 // Set the age mark in the active semispace.
1529 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
1530
1531 // The start address of the space and a bit mask. Anding an address in the
1532 // new space with the mask will result in the start address.
1533 Address start() { return start_; }
1534 uintptr_t mask() { return address_mask_; }
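  // Worked example of the mask (a sketch with made-up numbers): if the
  // reserved new-space range were 8 MB, aligned to its size, and
  // start_ == 0x20000000, then address_mask_ == ~uintptr_t(0x7FFFFF), so for
  // any address a inside the space
  //   (reinterpret_cast<uintptr_t>(a) & address_mask_) == 0x20000000,
  // which is exactly the test Contains(Address) performs above.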
1535
1536 // The allocation top and limit addresses.
1537 Address* allocation_top_address() { return &allocation_info_.top; }
1538 Address* allocation_limit_address() { return &allocation_info_.limit; }
1539
Ben Murdoch85b71792012-04-11 18:30:58 +01001540 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
1541 return AllocateRawInternal(size_in_bytes, &allocation_info_);
1542 }
1543
1544 // Allocate the requested number of bytes for relocation during mark-compact
1545 // collection.
1546 MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
1547 return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
1548 }
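  // Typical call pattern (a sketch; `heap`, `size` and the surrounding caller
  // are hypothetical, and the retry-on-failure logic real callers add is
  // omitted):
  //   Object* obj;
  //   { MaybeObject* maybe = heap->new_space()->AllocateRaw(size);
  //     if (!maybe->ToObject(&obj)) return maybe;  // propagate the failure
  //   }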
Steve Blocka7e24c12009-10-30 11:49:00 +00001549
1550 // Reset the allocation pointer to the beginning of the active semispace.
1551 void ResetAllocationInfo();
Ben Murdoch85b71792012-04-11 18:30:58 +01001552  // Reset the relocation pointer to the bottom of the inactive semispace in
1553 // preparation for mark-compact collection.
1554 void MCResetRelocationInfo();
1555 // Update the allocation pointer in the active semispace after a
1556 // mark-compact collection.
1557 void MCCommitRelocationInfo();
Steve Blocka7e24c12009-10-30 11:49:00 +00001558
Ben Murdoch85b71792012-04-11 18:30:58 +01001559 // Get the extent of the inactive semispace (for use as a marking stack).
1560 Address FromSpaceLow() { return from_space_.low(); }
1561 Address FromSpaceHigh() { return from_space_.high(); }
1562
1563 // Get the extent of the active semispace (to sweep newly copied objects
1564 // during a scavenge collection).
1565 Address ToSpaceLow() { return to_space_.low(); }
1566 Address ToSpaceHigh() { return to_space_.high(); }
1567
1568 // Offsets from the beginning of the semispaces.
1569 int ToSpaceOffsetForAddress(Address a) {
1570 return to_space_.SpaceOffsetForAddress(a);
Steve Blocka7e24c12009-10-30 11:49:00 +00001571 }
Ben Murdoch85b71792012-04-11 18:30:58 +01001572 int FromSpaceOffsetForAddress(Address a) {
1573 return from_space_.SpaceOffsetForAddress(a);
Steve Blocka7e24c12009-10-30 11:49:00 +00001574 }
1575
1576 // True if the object is a heap object in the address range of the
1577 // respective semispace (not necessarily below the allocation pointer of the
1578 // semispace).
Ben Murdoch85b71792012-04-11 18:30:58 +01001579 bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
1580 bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001581
Ben Murdoch85b71792012-04-11 18:30:58 +01001582 bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
1583 bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001584
Leon Clarkee46be812010-01-19 14:06:41 +00001585 virtual bool ReserveSpace(int bytes);
1586
Ben Murdochb0fe1622011-05-05 13:52:32 +01001587 // Resizes a sequential string which must be the most recent thing that was
1588 // allocated in new space.
1589 template <typename StringType>
1590 inline void ShrinkStringAtAllocationBoundary(String* string, int len);
1591
Steve Blocka7e24c12009-10-30 11:49:00 +00001592#ifdef DEBUG
1593 // Verify the active semispace.
1594 virtual void Verify();
1595 // Print the active semispace.
1596 virtual void Print() { to_space_.Print(); }
1597#endif
1598
Steve Blocka7e24c12009-10-30 11:49:00 +00001599 // Iterates the active semispace to collect statistics.
1600 void CollectStatistics();
1601 // Reports previously collected statistics of the active semispace.
1602 void ReportStatistics();
1603 // Clears previously collected statistics.
1604 void ClearHistograms();
1605
1606 // Record the allocation or promotion of a heap object. Note that we don't
1607 // record every single allocation, but only those that happen in the
1608 // to space during a scavenge GC.
1609 void RecordAllocation(HeapObject* obj);
1610 void RecordPromotion(HeapObject* obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00001611
1612  // Return whether the operation succeeded.
1613 bool CommitFromSpaceIfNeeded() {
1614 if (from_space_.is_committed()) return true;
1615 return from_space_.Commit();
1616 }
1617
1618 bool UncommitFromSpace() {
1619 if (!from_space_.is_committed()) return true;
1620 return from_space_.Uncommit();
1621 }
1622
1623 private:
1624 // The semispaces.
1625 SemiSpace to_space_;
1626 SemiSpace from_space_;
1627
1628 // Start address and bit mask for containment testing.
1629 Address start_;
1630 uintptr_t address_mask_;
1631 uintptr_t object_mask_;
1632 uintptr_t object_expected_;
1633
1634 // Allocation pointer and limit for normal allocation and allocation during
1635 // mark-compact collection.
1636 AllocationInfo allocation_info_;
Ben Murdoch85b71792012-04-11 18:30:58 +01001637 AllocationInfo mc_forwarding_info_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001638
Steve Blocka7e24c12009-10-30 11:49:00 +00001639 HistogramInfo* allocated_histogram_;
1640 HistogramInfo* promoted_histogram_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001641
Ben Murdoch85b71792012-04-11 18:30:58 +01001642 // Implementation of AllocateRaw and MCAllocateRaw.
1643 MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
1644 int size_in_bytes,
1645 AllocationInfo* alloc_info);
Steve Blocka7e24c12009-10-30 11:49:00 +00001646
1647 friend class SemiSpaceIterator;
1648
1649 public:
1650 TRACK_MEMORY("NewSpace")
1651};
1652
1653
1654// -----------------------------------------------------------------------------
Ben Murdoch85b71792012-04-11 18:30:58 +01001655// Free lists for old object spaces
1656//
1657// Free-list nodes are free blocks in the heap. They look like heap objects
1658// (free-list node pointers have the heap object tag, and they have a map like
1659// a heap object). They have a size and a next pointer. The next pointer is
1660// the raw address of the next free list node (or NULL).
1661class FreeListNode: public HeapObject {
1662 public:
1663 // Obtain a free-list node from a raw address. This is not a cast because
1664 // it does not check nor require that the first word at the address is a map
1665 // pointer.
1666 static FreeListNode* FromAddress(Address address) {
1667 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1668 }
1669
1670 static inline bool IsFreeListNode(HeapObject* object);
1671
1672 // Set the size in bytes, which can be read with HeapObject::Size(). This
1673 // function also writes a map to the first word of the block so that it
1674 // looks like a heap object to the garbage collector and heap iteration
1675 // functions.
1676 void set_size(Heap* heap, int size_in_bytes);
1677
1678 // Accessors for the next field.
1679 inline Address next(Heap* heap);
1680 inline void set_next(Heap* heap, Address next);
1681
1682 private:
1683 static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
1684
1685 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1686};
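// Sketch of how a freed block becomes a traversable free-list node (variable
// names are illustrative; the calls are the accessors declared above):
//   FreeListNode* node = FreeListNode::FromAddress(start);
//   node->set_size(heap, size_in_bytes);  // writes a map, so the block reads as a heap object
//   node->set_next(heap, old_head);       // old_head: Address of the previous list head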
1687
1688
1689// The free list for the old space.
1690class OldSpaceFreeList BASE_EMBEDDED {
1691 public:
1692 OldSpaceFreeList(Heap* heap, AllocationSpace owner);
1693
1694 // Clear the free list.
1695 void Reset();
1696
1697 // Return the number of bytes available on the free list.
1698 intptr_t available() { return available_; }
1699
1700 // Place a node on the free list. The block of size 'size_in_bytes'
1701 // starting at 'start' is placed on the free list. The return value is the
1702 // number of bytes that have been lost due to internal fragmentation by
1703 // freeing the block. Bookkeeping information will be written to the block,
1704 // ie, its contents will be destroyed. The start address should be word
1705  // i.e., its contents will be destroyed. The start address should be word
1706 int Free(Address start, int size_in_bytes);
1707
1708 // Allocate a block of size 'size_in_bytes' from the free list. The block
1709  // is uninitialized. A failure is returned if no block is available. The
1710 // number of bytes lost to fragmentation is returned in the output parameter
1711 // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
1712 MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
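  // Sketch of the intended calling convention (the owning space is the real
  // caller; the variable names here are illustrative):
  //   int wasted_bytes = 0;
  //   Object* block;
  //   MaybeObject* maybe = free_list.Allocate(size_in_bytes, &wasted_bytes);
  //   if (maybe->ToObject(&block)) {
  //     // bytes lost to fragmentation are reported via wasted_bytes
  //   }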
1713
1714 void MarkNodes();
1715
1716 private:
1717 // The size range of blocks, in bytes. (Smaller allocations are allowed, but
1718 // will always result in waste.)
1719 static const int kMinBlockSize = 2 * kPointerSize;
1720 static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1721
1722 Heap* heap_;
1723
1724 // The identity of the owning space, for building allocation Failure
1725 // objects.
1726 AllocationSpace owner_;
1727
1728 // Total available bytes in all blocks on this free list.
1729 int available_;
1730
1731 // Blocks are put on exact free lists in an array, indexed by size in words.
1732 // The available sizes are kept in an increasingly ordered list. Entries
1733 // corresponding to sizes < kMinBlockSize always have an empty free list
1734 // (but index kHead is used for the head of the size list).
1735 struct SizeNode {
1736 // Address of the head FreeListNode of the implied block size or NULL.
1737 Address head_node_;
1738 // Size (words) of the next larger available size if head_node_ != NULL.
1739 int next_size_;
1740 };
1741 static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
1742 SizeNode free_[kFreeListsLength];
1743
1744 // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
1745 static const int kHead = kMinBlockSize / kPointerSize - 1;
1746 static const int kEnd = kMaxInt;
1747
1748 // We keep a "finger" in the size list to speed up a common pattern:
1749 // repeated requests for the same or increasing sizes.
1750 int finger_;
1751
1752 // Starting from *prev, find and return the smallest size >= index (words),
1753 // or kEnd. Update *prev to be the largest size < index, or kHead.
1754 int FindSize(int index, int* prev) {
1755 int cur = free_[*prev].next_size_;
1756 while (cur < index) {
1757 *prev = cur;
1758 cur = free_[cur].next_size_;
1759 }
1760 return cur;
1761 }
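  // Example trace (sizes are in words and purely illustrative): if the only
  // non-empty lists are for sizes 4, 10 and 30, then starting with
  // *prev == kHead, FindSize(8, &prev) skips 4 (too small), returns 10 and
  // leaves *prev == 4; a follow-up FindSize(12, &prev) continues from there
  // and returns 30.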
1762
1763 // Remove an existing element from the size list.
1764 void RemoveSize(int index) {
1765 int prev = kHead;
1766 int cur = FindSize(index, &prev);
1767 ASSERT(cur == index);
1768 free_[prev].next_size_ = free_[cur].next_size_;
1769 finger_ = prev;
1770 }
1771
1772 // Insert a new element into the size list.
1773 void InsertSize(int index) {
1774 int prev = kHead;
1775 int cur = FindSize(index, &prev);
1776 ASSERT(cur != index);
1777 free_[prev].next_size_ = index;
1778 free_[index].next_size_ = cur;
1779 }
1780
1781 // The size list is not updated during a sequence of calls to Free, but is
1782 // rebuilt before the next allocation.
1783 void RebuildSizeList();
1784 bool needs_rebuild_;
1785
1786#ifdef DEBUG
1787 // Does this free list contain a free block located at the address of 'node'?
1788 bool Contains(FreeListNode* node);
1789#endif
1790
1791 DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
1792};
1793
1794
1795// The free list for the map space.
1796class FixedSizeFreeList BASE_EMBEDDED {
1797 public:
1798 FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);
1799
1800 // Clear the free list.
1801 void Reset();
1802
1803 // Return the number of bytes available on the free list.
1804 intptr_t available() { return available_; }
1805
1806 // Place a node on the free list. The block starting at 'start' (assumed to
1807 // have size object_size_) is placed on the free list. Bookkeeping
1808  // information will be written to the block, i.e., its contents will be
1809 // destroyed. The start address should be word aligned.
1810 void Free(Address start);
1811
1812  // Allocate a fixed sized block from the free list. The block is uninitialized.
1813 // A failure is returned if no block is available.
1814 MUST_USE_RESULT MaybeObject* Allocate();
1815
1816 void MarkNodes();
1817
1818 private:
1819 Heap* heap_;
1820
1821 // Available bytes on the free list.
1822 intptr_t available_;
1823
1824 // The head of the free list.
1825 Address head_;
1826
1827 // The tail of the free list.
1828 Address tail_;
1829
1830 // The identity of the owning space, for building allocation Failure
1831 // objects.
1832 AllocationSpace owner_;
1833
1834 // The size of the objects in this space.
1835 int object_size_;
1836
1837 DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
1838};
1839
1840
1841// -----------------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +00001842// Old object space (excluding map objects)
1843
1844class OldSpace : public PagedSpace {
1845 public:
1846 // Creates an old space object with a given maximum capacity.
1847 // The constructor does not allocate pages from OS.
Steve Block44f0eee2011-05-26 01:26:41 +01001848 OldSpace(Heap* heap,
1849 intptr_t max_capacity,
1850 AllocationSpace id,
1851 Executability executable)
Ben Murdoch85b71792012-04-11 18:30:58 +01001852 : PagedSpace(heap, max_capacity, id, executable),
1853 free_list_(heap, id) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001854 page_extra_ = 0;
1855 }
1856
Ben Murdoch85b71792012-04-11 18:30:58 +01001857  // The bytes available on the free list (i.e., not above the linear allocation
1858 // pointer).
1859 intptr_t AvailableFree() { return free_list_.available(); }
1860
Steve Block6ded16b2010-05-10 14:33:55 +01001861 // The limit of allocation for a page in this space.
1862 virtual Address PageAllocationLimit(Page* page) {
Ben Murdoch85b71792012-04-11 18:30:58 +01001863 return page->ObjectAreaEnd();
Steve Blocka7e24c12009-10-30 11:49:00 +00001864 }
1865
Ben Murdoch85b71792012-04-11 18:30:58 +01001866 // Give a block of memory to the space's free list. It might be added to
1867 // the free list or accounted as waste.
1868 // If add_to_freelist is false then just accounting stats are updated and
1869 // no attempt to add area to free list is made.
1870 void Free(Address start, int size_in_bytes, bool add_to_freelist) {
1871 accounting_stats_.DeallocateBytes(size_in_bytes);
1872
1873 if (add_to_freelist) {
1874 int wasted_bytes = free_list_.Free(start, size_in_bytes);
1875 accounting_stats_.WasteBytes(wasted_bytes);
1876 }
1877 }
1878
1879 virtual void DeallocateBlock(Address start,
1880 int size_in_bytes,
1881 bool add_to_freelist);
1882
1883 // Prepare for full garbage collection. Resets the relocation pointer and
1884 // clears the free list.
1885 virtual void PrepareForMarkCompact(bool will_compact);
1886
1887 // Updates the allocation pointer to the relocation top after a mark-compact
1888 // collection.
1889 virtual void MCCommitRelocationInfo();
1890
1891 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1892
1893 void MarkFreeListNodes() { free_list_.MarkNodes(); }
1894
1895#ifdef DEBUG
1896 // Reports statistics for the space
1897 void ReportStatistics();
1898#endif
1899
1900 protected:
1901 // Virtual function in the superclass. Slow path of AllocateRaw.
1902 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
1903
1904 // Virtual function in the superclass. Allocate linearly at the start of
1905 // the page after current_page (there is assumed to be one).
1906 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1907
1908 private:
1909 // The space's free list.
1910 OldSpaceFreeList free_list_;
1911
Steve Blocka7e24c12009-10-30 11:49:00 +00001912 public:
1913 TRACK_MEMORY("OldSpace")
1914};
1915
1916
1917// -----------------------------------------------------------------------------
1918// Old space for objects of a fixed size
1919
1920class FixedSpace : public PagedSpace {
1921 public:
Steve Block44f0eee2011-05-26 01:26:41 +01001922 FixedSpace(Heap* heap,
1923 intptr_t max_capacity,
Steve Blocka7e24c12009-10-30 11:49:00 +00001924 AllocationSpace id,
1925 int object_size_in_bytes,
1926 const char* name)
Steve Block44f0eee2011-05-26 01:26:41 +01001927 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
Steve Blocka7e24c12009-10-30 11:49:00 +00001928 object_size_in_bytes_(object_size_in_bytes),
Ben Murdoch85b71792012-04-11 18:30:58 +01001929 name_(name),
1930 free_list_(heap, id, object_size_in_bytes) {
1931 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
Steve Blocka7e24c12009-10-30 11:49:00 +00001932 }
1933
Steve Block6ded16b2010-05-10 14:33:55 +01001934 // The limit of allocation for a page in this space.
1935 virtual Address PageAllocationLimit(Page* page) {
Ben Murdoch85b71792012-04-11 18:30:58 +01001936 return page->ObjectAreaEnd() - page_extra_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001937 }
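  // Worked example (the numbers are illustrative): if Page::kObjectAreaSize
  // were 8000 bytes and objects were 48 bytes each, page_extra_ would be
  // 8000 % 48 == 32, so the last 32 bytes of each page are never handed out
  // and the per-page allocation limit above stops 32 bytes short of
  // ObjectAreaEnd().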
1938
1939 int object_size_in_bytes() { return object_size_in_bytes_; }
1940
Ben Murdoch85b71792012-04-11 18:30:58 +01001941 // Give a fixed sized block of memory to the space's free list.
1942 // If add_to_freelist is false then just accounting stats are updated and
1943 // no attempt to add area to free list is made.
1944 void Free(Address start, bool add_to_freelist) {
1945 if (add_to_freelist) {
1946 free_list_.Free(start);
1947 }
1948 accounting_stats_.DeallocateBytes(object_size_in_bytes_);
1949 }
1950
Steve Blocka7e24c12009-10-30 11:49:00 +00001951 // Prepares for a mark-compact GC.
Ben Murdoch85b71792012-04-11 18:30:58 +01001952 virtual void PrepareForMarkCompact(bool will_compact);
1953
1954 // Updates the allocation pointer to the relocation top after a mark-compact
1955 // collection.
1956 virtual void MCCommitRelocationInfo();
1957
1958 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1959
1960 virtual void DeallocateBlock(Address start,
1961 int size_in_bytes,
1962 bool add_to_freelist);
1963
1964 void MarkFreeListNodes() { free_list_.MarkNodes(); }
1965
1966#ifdef DEBUG
1967 // Reports statistic info of the space
1968 void ReportStatistics();
1969#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00001970
1971 protected:
Ben Murdoch85b71792012-04-11 18:30:58 +01001972 // Virtual function in the superclass. Slow path of AllocateRaw.
1973 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
1974
1975 // Virtual function in the superclass. Allocate linearly at the start of
1976 // the page after current_page (there is assumed to be one).
1977 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1978
Leon Clarkee46be812010-01-19 14:06:41 +00001979 void ResetFreeList() {
1980 free_list_.Reset();
1981 }
1982
Steve Blocka7e24c12009-10-30 11:49:00 +00001983 private:
1984 // The size of objects in this space.
1985 int object_size_in_bytes_;
1986
1987 // The name of this space.
1988 const char* name_;
Ben Murdoch85b71792012-04-11 18:30:58 +01001989
1990 // The space's free list.
1991 FixedSizeFreeList free_list_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001992};
1993
1994
1995// -----------------------------------------------------------------------------
1996// Old space for all map objects
1997
1998class MapSpace : public FixedSpace {
1999 public:
2000 // Creates a map space object with a maximum capacity.
Ben Murdoch85b71792012-04-11 18:30:58 +01002001 MapSpace(Heap* heap,
2002 intptr_t max_capacity,
2003 int max_map_space_pages,
2004 AllocationSpace id)
Steve Block44f0eee2011-05-26 01:26:41 +01002005 : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
Ben Murdoch85b71792012-04-11 18:30:58 +01002006 max_map_space_pages_(max_map_space_pages) {
2007 ASSERT(max_map_space_pages < kMaxMapPageIndex);
Leon Clarked91b9f72010-01-27 17:25:45 +00002008 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002009
Ben Murdoch85b71792012-04-11 18:30:58 +01002010 // Prepares for a mark-compact GC.
2011 virtual void PrepareForMarkCompact(bool will_compact);
Steve Blocka7e24c12009-10-30 11:49:00 +00002012
Ben Murdoch85b71792012-04-11 18:30:58 +01002013 // Given an index, returns the page address.
2014 Address PageAddress(int page_index) { return page_addresses_[page_index]; }
2015
2016 static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
2017
2018 // Are map pointers encodable into map word?
2019 bool MapPointersEncodable() {
2020 if (!FLAG_use_big_map_space) {
2021 ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
2022 return true;
Leon Clarkee46be812010-01-19 14:06:41 +00002023 }
Ben Murdoch85b71792012-04-11 18:30:58 +01002024 return CountPagesToTop() <= max_map_space_pages_;
2025 }
2026
2027  // Should be called after a forced sweep to find out if the map space needs
2028 // compaction.
2029 bool NeedsCompaction(int live_maps) {
2030 return !MapPointersEncodable() && live_maps <= CompactionThreshold();
2031 }
2032
2033 Address TopAfterCompaction(int live_maps) {
2034 ASSERT(NeedsCompaction(live_maps));
2035
2036 int pages_left = live_maps / kMapsPerPage;
2037 PageIterator it(this, PageIterator::ALL_PAGES);
2038 while (pages_left-- > 0) {
2039 ASSERT(it.has_next());
2040 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
2041 }
2042 ASSERT(it.has_next());
2043 Page* top_page = it.next();
2044 top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
2045 ASSERT(top_page->is_valid());
2046
2047 int offset = live_maps % kMapsPerPage * Map::kSize;
2048 Address top = top_page->ObjectAreaStart() + offset;
2049 ASSERT(top < top_page->ObjectAreaEnd());
2050 ASSERT(Contains(top));
2051
2052 return top;
2053 }
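  // Arithmetic sketch (the kMapsPerPage value is illustrative): with
  // kMapsPerPage == 90 and live_maps == 200, two full pages are walked
  // (200 / 90 == 2) and the returned top lies (200 % 90) * Map::kSize ==
  // 20 * Map::kSize bytes into the third page.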
2054
2055 void FinishCompaction(Address new_top, int live_maps) {
2056 Page* top_page = Page::FromAddress(new_top);
2057 ASSERT(top_page->is_valid());
2058
2059 SetAllocationInfo(&allocation_info_, top_page);
2060 allocation_info_.top = new_top;
2061
2062 int new_size = live_maps * Map::kSize;
2063 accounting_stats_.DeallocateBytes(accounting_stats_.Size());
2064 accounting_stats_.AllocateBytes(new_size);
2065
2066 // Flush allocation watermarks.
2067 for (Page* p = first_page_; p != top_page; p = p->next_page()) {
2068 p->SetAllocationWatermark(p->AllocationTop());
2069 }
2070 top_page->SetAllocationWatermark(new_top);
2071
2072#ifdef DEBUG
2073 if (FLAG_enable_slow_asserts) {
2074 intptr_t actual_size = 0;
2075 for (Page* p = first_page_; p != top_page; p = p->next_page())
2076 actual_size += kMapsPerPage * Map::kSize;
2077 actual_size += (new_top - top_page->ObjectAreaStart());
2078 ASSERT(accounting_stats_.Size() == actual_size);
2079 }
2080#endif
2081
2082 Shrink();
2083 ResetFreeList();
Leon Clarkee46be812010-01-19 14:06:41 +00002084 }
2085
Steve Blocka7e24c12009-10-30 11:49:00 +00002086 protected:
2087#ifdef DEBUG
2088 virtual void VerifyObject(HeapObject* obj);
2089#endif
2090
2091 private:
Ben Murdoch85b71792012-04-11 18:30:58 +01002092 static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
Leon Clarkee46be812010-01-19 14:06:41 +00002093
2094 // Do map space compaction if there is a page gap.
Leon Clarked91b9f72010-01-27 17:25:45 +00002095 int CompactionThreshold() {
2096 return kMapsPerPage * (max_map_space_pages_ - 1);
2097 }
2098
2099 const int max_map_space_pages_;
Leon Clarkee46be812010-01-19 14:06:41 +00002100
Ben Murdoch85b71792012-04-11 18:30:58 +01002101  // An array of page start addresses in the map space.
2102 Address page_addresses_[kMaxMapPageIndex];
2103
Steve Blocka7e24c12009-10-30 11:49:00 +00002104 public:
2105 TRACK_MEMORY("MapSpace")
2106};
2107
2108
2109// -----------------------------------------------------------------------------
2110// Old space for all global object property cell objects
2111
2112class CellSpace : public FixedSpace {
2113 public:
2114 // Creates a property cell space object with a maximum capacity.
Steve Block44f0eee2011-05-26 01:26:41 +01002115 CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2116 : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
2117 {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002118
2119 protected:
2120#ifdef DEBUG
2121 virtual void VerifyObject(HeapObject* obj);
2122#endif
2123
2124 public:
2125 TRACK_MEMORY("CellSpace")
2126};
2127
2128
2129// -----------------------------------------------------------------------------
2130// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2131// the large object space. A large object is allocated from the OS heap with
2132// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2133// A large object always starts at Page::kObjectStartOffset within its page.
2134// Large objects do not move during garbage collections.
2135
Ben Murdoch85b71792012-04-11 18:30:58 +01002136// A LargeObjectChunk holds exactly one large object page with exactly one
2137// large object.
2138class LargeObjectChunk {
Steve Blocka7e24c12009-10-30 11:49:00 +00002139 public:
Ben Murdoch85b71792012-04-11 18:30:58 +01002140 // Allocates a new LargeObjectChunk that contains a large object page
2141 // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
2142 // object) bytes after the object area start of that page.
2143 static LargeObjectChunk* New(int size_in_bytes, Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002144
Ben Murdoch85b71792012-04-11 18:30:58 +01002145 // Free the memory associated with the chunk.
2146 void Free(Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002147
Ben Murdoch85b71792012-04-11 18:30:58 +01002148 // Interpret a raw address as a large object chunk.
2149 static LargeObjectChunk* FromAddress(Address address) {
2150 return reinterpret_cast<LargeObjectChunk*>(address);
2151 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002152
Ben Murdoch85b71792012-04-11 18:30:58 +01002153 // Returns the address of this chunk.
2154 Address address() { return reinterpret_cast<Address>(this); }
2155
2156 Page* GetPage() {
2157 return Page::FromAddress(RoundUp(address(), Page::kPageSize));
2158 }
2159
2160 // Accessors for the fields of the chunk.
2161 LargeObjectChunk* next() { return next_; }
2162 void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
2163 size_t size() { return size_ & ~Page::kPageFlagMask; }
2164
2165 // Compute the start address in the chunk.
2166 Address GetStartAddress() { return GetPage()->ObjectAreaStart(); }
2167
2168 // Returns the object in this chunk.
2169 HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
2170
2171 // Given a requested size returns the physical size of a chunk to be
2172  // Given a requested size, returns the physical size of a chunk to be
2173 static int ChunkSizeFor(int size_in_bytes);
2174
2175 // Given a chunk size, returns the object size it can accommodate. Used by
2176 // LargeObjectSpace::Available.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002177 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2178 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2179 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2180 }
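  // Example (a sketch; the concrete constants live in Page): a chunk of
  // Page::kPageSize + Page::kObjectStartOffset + N bytes accommodates an
  // object of exactly N bytes, while a chunk no larger than
  // Page::kPageSize + Page::kObjectStartOffset accommodates none.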
2181
Ben Murdoch85b71792012-04-11 18:30:58 +01002182 private:
2183 // A pointer to the next large object chunk in the space or NULL.
2184 LargeObjectChunk* next_;
2185
2186 // The total size of this chunk.
2187 size_t size_;
2188
2189 public:
2190 TRACK_MEMORY("LargeObjectChunk")
2191};
2192
2193
2194class LargeObjectSpace : public Space {
2195 public:
2196 LargeObjectSpace(Heap* heap, AllocationSpace id);
2197 virtual ~LargeObjectSpace() {}
2198
2199 // Initializes internal data structures.
2200 bool Setup();
2201
2202 // Releases internal resources, frees objects in this space.
2203 void TearDown();
2204
2205 // Allocates a (non-FixedArray, non-Code) large object.
2206 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
2207 // Allocates a large Code object.
2208 MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
2209 // Allocates a large FixedArray.
2210 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002211
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002212 // Available bytes for objects in this space.
Steve Block44f0eee2011-05-26 01:26:41 +01002213 inline intptr_t Available();
Steve Blocka7e24c12009-10-30 11:49:00 +00002214
Ben Murdochf87a2032010-10-22 12:50:53 +01002215 virtual intptr_t Size() {
Steve Blocka7e24c12009-10-30 11:49:00 +00002216 return size_;
2217 }
2218
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002219 virtual intptr_t SizeOfObjects() {
2220 return objects_size_;
2221 }
2222
Steve Blocka7e24c12009-10-30 11:49:00 +00002223 int PageCount() {
2224 return page_count_;
2225 }
2226
2227 // Finds an object for a given address, returns Failure::Exception()
2228  // Finds an object for a given address; returns Failure::Exception()
2229  // if it is not found. The function iterates through all objects in this
2230  // space and may be slow.
Steve Blocka7e24c12009-10-30 11:49:00 +00002231
Ben Murdoch85b71792012-04-11 18:30:58 +01002232  // Finds a large object page containing the given pc; returns NULL
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002233 // if such a page doesn't exist.
Ben Murdoch85b71792012-04-11 18:30:58 +01002234 LargeObjectChunk* FindChunkContainingPc(Address pc);
2235
2236 // Iterates objects covered by dirty regions.
2237 void IterateDirtyRegions(ObjectSlotCallback func);
Steve Blocka7e24c12009-10-30 11:49:00 +00002238
2239 // Frees unmarked objects.
2240 void FreeUnmarkedObjects();
2241
2242 // Checks whether a heap object is in this space; O(1).
2243 bool Contains(HeapObject* obj);
2244
2245 // Checks whether the space is empty.
Ben Murdoch85b71792012-04-11 18:30:58 +01002246 bool IsEmpty() { return first_chunk_ == NULL; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002247
Leon Clarkee46be812010-01-19 14:06:41 +00002248 // See the comments for ReserveSpace in the Space class. This has to be
2249 // called after ReserveSpace has been called on the paged spaces, since they
2250 // may use some memory, leaving less for large objects.
2251 virtual bool ReserveSpace(int bytes);
2252
Steve Blocka7e24c12009-10-30 11:49:00 +00002253#ifdef DEBUG
2254 virtual void Verify();
2255 virtual void Print();
2256 void ReportStatistics();
2257 void CollectCodeStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002258#endif
2259 // Checks whether an address is in the object area in this space. It
2260 // iterates all objects in the space. May be slow.
2261 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2262
2263 private:
2264 // The head of the linked list of large object chunks.
Ben Murdoch85b71792012-04-11 18:30:58 +01002265 LargeObjectChunk* first_chunk_;
Ben Murdochf87a2032010-10-22 12:50:53 +01002266 intptr_t size_; // allocated bytes
Steve Blocka7e24c12009-10-30 11:49:00 +00002267 int page_count_; // number of chunks
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002268 intptr_t objects_size_; // size of objects
Ben Murdoch85b71792012-04-11 18:30:58 +01002269
2270 // Shared implementation of AllocateRaw, AllocateRawCode and
2271 // AllocateRawFixedArray.
2272 MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
2273 int object_size,
2274 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002275
Steve Blocka7e24c12009-10-30 11:49:00 +00002276 friend class LargeObjectIterator;
2277
2278 public:
2279 TRACK_MEMORY("LargeObjectSpace")
2280};
2281
2282
2283class LargeObjectIterator: public ObjectIterator {
2284 public:
2285 explicit LargeObjectIterator(LargeObjectSpace* space);
2286 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2287
Ben Murdoch85b71792012-04-11 18:30:58 +01002288 HeapObject* next();
Steve Blocka7e24c12009-10-30 11:49:00 +00002289
2290 // implementation of ObjectIterator.
Ben Murdoch85b71792012-04-11 18:30:58 +01002291 virtual HeapObject* next_object() { return next(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002292
2293 private:
Ben Murdoch85b71792012-04-11 18:30:58 +01002294 LargeObjectChunk* current_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002295 HeapObjectCallback size_func_;
2296};
2297
2298
Steve Block44f0eee2011-05-26 01:26:41 +01002299#ifdef DEBUG
2300struct CommentStatistic {
2301 const char* comment;
2302 int size;
2303 int count;
2304 void Clear() {
2305 comment = NULL;
2306 size = 0;
2307 count = 0;
2308 }
2309 // Must be small, since an iteration is used for lookup.
2310 static const int kMaxComments = 64;
2311};
2312#endif
2313
2314
Steve Blocka7e24c12009-10-30 11:49:00 +00002315} } // namespace v8::internal
2316
2317#endif // V8_SPACES_H_