// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "list-inl.h"
#include "log.h"

namespace v8 {
namespace internal {

class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area. The page size is deliberately chosen as 8K bytes.
// The first word of a page is an opaque page header that has the
// address of the next page and its ownership information. The second word may
// have the allocation top address of this page. Heap objects are aligned to
// the pointer size.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than 8K.
//
// A card marking write barrier is used to keep track of intergenerational
// references. Old space pages are divided into regions of Page::kRegionSize
// size. Each region has a corresponding dirty bit in the page header which is
// set if the region might contain pointers to new space. For details about
// the dirty bits encoding see the comments in the
// Page::GetRegionNumberForAddress() method body.
//
// During scavenges and mark-sweep collections we iterate intergenerational
// pointers without decoding heap object maps, so if a page belongs to old
// pointer space or large object space it is essential to guarantee that
// the page does not contain any garbage pointers to new space: every pointer
// aligned word which satisfies the Heap::InNewSpace() predicate must be a
// pointer to a live heap object in new space. Thus objects in old pointer
// and large object spaces should have a special layout (e.g. no bare integer
// fields). This requirement does not apply to map space, which is iterated in
// a special fashion. However we still require pointer fields of dead maps to
// be cleaned.
//
// To enable lazy cleaning of old space pages we use a notion of allocation
// watermark. Every pointer under the watermark is considered to be well
// formed. The page allocation watermark is not necessarily equal to the page
// allocation top, but all live objects on a page should reside under the
// allocation watermark. During a scavenge the allocation watermark might be
// bumped and invalid pointers might appear below it. To avoid following them
// we store a valid watermark into a special field in the page header and set
// the page's WATERMARK_INVALIDATED flag. For details see the comments in the
// Page::SetAllocationWatermark() method body.
//
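// Illustrative sketch (not part of this header): with 8K pages and 256-byte
// regions (Page::kRegionSizeLog2 == 8) a page has exactly 32 regions, so all
// dirty bits fit into one 32-bit word. A region dirty bit can be located
// roughly like this:
//   int region = (OffsetFrom(addr) & Page::kPageAlignmentMask) >>
//                Page::kRegionSizeLog2;          // region index, 0..31
//   uint32_t mask = 1 << region;                 // bit for this region
//   // a write barrier would then OR this mask into the page's dirty word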

// Some assertion macros used in the debugging mode.

#define ASSERT_PAGE_ALIGNED(address)                                           \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address)                                         \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_MAP_ALIGNED(address)                                            \
  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size)                                               \
  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset)                                             \
  ASSERT((Page::kObjectStartOffset <= offset)                                  \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index)                                           \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;

// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
// address is always aligned to the 8K page size.
//
// Each page starts with a header of Page::kPageHeaderSize size which contains
// bookkeeping data.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[.
  //
  // Note that this function only works for addresses in normal paged
  // spaces and addresses in the first 8K of large object pages (i.e.,
  // the start of large objects but not necessarily derived pointers
  // within them).
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    ASSERT_PAGE_OFFSET(p->Offset(top));
    return p;
  }

  // Returns the start address of this page.
  Address address() { return reinterpret_cast<Address>(this); }

  // Checks whether this is a valid page address.
  bool is_valid() { return address() != NULL; }

  // Returns the next page of this page.
  inline Page* next_page();

  // Return the end of allocation in this page. Undefined for unused pages.
  inline Address AllocationTop();

  // Return the allocation watermark for the page.
  // For old space pages it is guaranteed that the area under the watermark
  // does not contain any garbage pointers to new space.
  inline Address AllocationWatermark();

  // Return the allocation watermark offset from the beginning of the page.
  inline uint32_t AllocationWatermarkOffset();

  inline void SetAllocationWatermark(Address allocation_watermark);

  inline void SetCachedAllocationWatermark(Address allocation_watermark);
  inline Address CachedAllocationWatermark();

  // Returns the start address of the object area in this page.
  Address ObjectAreaStart() { return address() + kObjectStartOffset; }

  // Returns the end address (exclusive) of the object area in this page.
  Address ObjectAreaEnd() { return address() + Page::kPageSize; }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // True if this page was in use before the current compaction started.
  // The result is valid only for pages owned by paged spaces and
  // only after PagedSpace::PrepareForMarkCompact was called.
  inline bool WasInUseBeforeMC();

  inline void SetWasInUseBeforeMC(bool was_in_use);

  // True if this page is a large object page.
  inline bool IsLargeObjectPage();

  inline void SetIsLargeObjectPage(bool is_large_object_page);

  inline bool IsPageExecutable();

  inline void SetIsPageExecutable(bool is_page_executable);

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    ASSERT_PAGE_OFFSET(offset);
    return offset;
  }

  // Returns the address for a given offset to this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }
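
  // Illustrative sketch (not part of the interface): FromAddress(), Offset()
  // and OffsetToAddress() are consistent with each other for any address
  // inside the object area of a page, e.g.
  //   Page* p = Page::FromAddress(addr);           // mask off low page bits
  //   int offset = p->Offset(addr);                // distance from page start
  //   ASSERT(p->OffsetToAddress(offset) == addr);  // round-trips exactly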

  // ---------------------------------------------------------------------
  // Card marking support

  static const uint32_t kAllRegionsCleanMarks = 0x0;
  static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;

  inline uint32_t GetRegionMarks();
  inline void SetRegionMarks(uint32_t dirty);

  inline uint32_t GetRegionMaskForAddress(Address addr);
  inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
  inline int GetRegionNumberForAddress(Address addr);

  inline void MarkRegionDirty(Address addr);
  inline bool IsRegionDirty(Address addr);

  inline void ClearRegionMarks(Address start,
                               Address end,
                               bool reaches_limit);

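  // Illustrative sketch (not part of the interface): after storing a pointer
  // to a new space object into an old space object, a write barrier would
  // typically mark the covering region dirty, e.g.
  //   Page* page = Page::FromAddress(slot_address);
  //   page->MarkRegionDirty(slot_address);
  //   ASSERT(page->IsRegionDirty(slot_address));
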
  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
                                     kIntSize + kPointerSize + kPointerSize;

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both.
  static const int kObjectStartOffset =
      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));

  // Object area size in bytes.
  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page.
  static const int kMaxHeapObjectSize = kObjectAreaSize;
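
  // Worked example (illustrative only, assuming 8K pages and a 32-bit build
  // where the header rounds up to 32 bytes under the alignment above):
  //   kPageSize          = 1 << kPageSizeBits = 8192 bytes
  //   kObjectStartOffset = 32 bytes
  //   kObjectAreaSize    = 8192 - 32 = 8160 bytes
  // so kMaxHeapObjectSize is slightly smaller than a full page; anything
  // larger is allocated in the large object space.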
258
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100259 static const int kDirtyFlagOffset = 2 * kPointerSize;
260 static const int kRegionSizeLog2 = 8;
261 static const int kRegionSize = 1 << kRegionSizeLog2;
262 static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
263
264 STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
265
Steve Block6ded16b2010-05-10 14:33:55 +0100266 enum PageFlag {
Steve Block791712a2010-08-27 10:21:07 +0100267 IS_NORMAL_PAGE = 0,
268 WAS_IN_USE_BEFORE_MC,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100269
270 // Page allocation watermark was bumped by preallocation during scavenge.
271 // Correct watermark can be retrieved by CachedAllocationWatermark() method
Steve Block791712a2010-08-27 10:21:07 +0100272 WATERMARK_INVALIDATED,
273 IS_EXECUTABLE,
274 NUM_PAGE_FLAGS // Must be last
Steve Block6ded16b2010-05-10 14:33:55 +0100275 };
Steve Block791712a2010-08-27 10:21:07 +0100276 static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
Steve Block6ded16b2010-05-10 14:33:55 +0100277
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100278 // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
279 // scavenge we just invalidate the watermark on each old space page after
280 // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED
281 // flag at the beginning of the next scavenge and each page becomes marked as
282 // having a valid watermark.
283 //
284 // The following invariant must hold for pages in old pointer and map spaces:
285 // If page is in use then page is marked as having invalid watermark at
286 // the beginning and at the end of any GC.
287 //
288 // This invariant guarantees that after flipping flag meaning at the
289 // beginning of scavenge all pages in use will be marked as having valid
290 // watermark.
Steve Block44f0eee2011-05-26 01:26:41 +0100291 static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100292
293 // Returns true if the page allocation watermark was not altered during
294 // scavenge.
295 inline bool IsWatermarkValid();
296
297 inline void InvalidateWatermark(bool value);
298
Steve Block6ded16b2010-05-10 14:33:55 +0100299 inline bool GetPageFlag(PageFlag flag);
300 inline void SetPageFlag(PageFlag flag, bool value);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100301 inline void ClearPageFlags();
302
303 inline void ClearGCFields();
304
Steve Block791712a2010-08-27 10:21:07 +0100305 static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100306 static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
307 static const uint32_t kAllocationWatermarkOffsetMask =
308 ((1 << kAllocationWatermarkOffsetBits) - 1) <<
309 kAllocationWatermarkOffsetShift;
310
311 static const uint32_t kFlagsMask =
312 ((1 << kAllocationWatermarkOffsetShift) - 1);
313
314 STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
315 kAllocationWatermarkOffsetBits);
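
  // Illustrative sketch (not part of the interface): flags_ packs the page
  // flags into its low kAllocationWatermarkOffsetShift bits and the allocation
  // watermark offset above them, so the offset can be recovered as
  //   uint32_t offset = (flags & kAllocationWatermarkOffsetMask) >>
  //                     kAllocationWatermarkOffsetShift;
  // which is the decoding AllocationWatermarkOffset() is expected to perform.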

  //---------------------------------------------------------------------------
  // Page header description.
  //
  // If a page is not in the large object space, the first word,
  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
  // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
  // or [next_page_start, next_page_end[. It cannot point to a valid address
  // in the current page. If a page is in the large object space, the first
  // word *may* (if the page start and large object chunk start are the
  // same) contain the address of the next large object chunk.
  intptr_t opaque_header;

  // If the page is not in the large object space, the low-order bit of the
  // second word is set. If the page is in the large object space, the
  // second word *may* (if the page start and large object chunk start are
  // the same) contain the large object chunk size. In either case, the
  // low-order bit for large object pages will be cleared.
  // For normal pages this word is used to store the page flags and
  // the offset of the allocation top.
  intptr_t flags_;

  // This field contains dirty marks for regions covering the page. Only dirty
  // regions might contain intergenerational references.
  // Only 32 dirty marks are supported, so for large object pages several
  // regions might be mapped to a single dirty mark.
  uint32_t dirty_regions_;

  // The index of the page in its owner space.
  int mc_page_index;

  // During mark-compact collections this field contains the forwarding address
  // of the first live object in this page.
  // During scavenge collection this field is used to store the allocation
  // watermark if it is altered during scavenge.
  Address mc_first_forwarded;

  Heap* heap_;
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap), id_(id), executable_(executable) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  virtual void Protect() = 0;
  virtual void Unprotect() = 0;
#endif

#ifdef DEBUG
  virtual void Print() = 0;
#endif

  // After calling this we can allocate a certain number of bytes using only
  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
  // without using freelists or causing a GC. This is used by partial
  // snapshots. It returns true if space was reserved or false if a GC is
  // needed. For paged spaces the space requested must include the space wasted
  // at the end of each page when allocating linearly.
  virtual bool ReserveSpace(int bytes) = 0;

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool Setup(const size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  bool exists() { return code_range_ != NULL; }
  bool contains(Address address) {
    if (code_range_ == NULL) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
                                          size_t* allocated);
  void FreeRawMemory(void* buf, size_t length);

 private:
  CodeRange();

  // The reserved range of virtual memory that all code objects are put in.
  VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {}
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {}

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
  void GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);

  friend class Isolate;

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};
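
// Illustrative usage sketch (hypothetical sizes and accessor, not part of
// this header): on 64-bit platforms the heap sets up a code range once and
// then carves executable chunks out of it, roughly:
//   CodeRange* code_range = isolate->code_range();
//   if (code_range->Setup(512 * MB)) {
//     size_t allocated = 0;
//     void* base = code_range->AllocateRawMemory(Page::kPageSize, &allocated);
//     // ... use the memory for code objects ...
//     code_range->FreeRawMemory(base, allocated);
//   }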


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
// space). A paged chunk consists of pages. Pages in a chunk have contiguous
// addresses and are linked as a list.
//
// The allocator keeps an initial chunk which is used for the new space. The
// leftover regions of the initial chunk are used for the initial chunks of
// old space and map space if they are big enough to hold at least one page.
// The allocator assumes that there is one old space and one map space; each
// expands the space by allocating kPagesPerChunk pages, except possibly the
// last expansion (before running out of space). The first chunk may contain
// fewer than kPagesPerChunk pages as well.
//
// The memory allocator also allocates chunks for the large object space, but
// they are managed by the space itself. The new space does not expand.
//
// The fact that pages for paged spaces are allocated and deallocated in chunks
// induces a constraint on the order of pages in a linked list. We say that
// pages are linked in chunk-order if and only if every two consecutive
// pages from the same chunk are consecutive in the linked list.
//


class MemoryAllocator {
 public:
  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool Setup(intptr_t max_capacity, intptr_t capacity_executable);

  // Deletes valid chunks.
  void TearDown();

  // Reserves an initial address range of virtual memory to be split between
  // the two new space semispaces, the old space, and the map space. The
  // memory is not yet committed or assigned to spaces and split into pages.
  // The initial chunk is unmapped when the memory allocator is torn down.
  // This function should only be called when there is not already a reserved
  // initial chunk (initial_chunk_ should be NULL). It returns the start
  // address of the initial chunk if successful, with the side effect of
  // setting the initial chunk, or else NULL if unsuccessful and leaves the
  // initial chunk NULL.
  void* ReserveInitialChunk(const size_t requested);

  // Commits pages from an as-yet-unmanaged block of virtual memory into a
  // paged space. The block should be part of the initial chunk reserved via
  // a call to ReserveInitialChunk. The number of pages is always returned in
  // the output parameter num_pages. This function assumes that the start
  // address is non-null and that it is big enough to hold at least one
  // page-aligned page. The call always succeeds, and num_pages is always
  // greater than zero.
  Page* CommitPages(Address start, size_t size, PagedSpace* owner,
                    int* num_pages);

  // Commit a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  void ZapBlock(Address start, size_t size);

  // Attempts to allocate the requested (non-zero) number of pages from the
  // OS. Fewer pages might be allocated than requested. If it fails to
  // allocate memory from the OS or cannot allocate a single page, this
  // function returns an invalid page pointer (NULL). The caller must check
  // whether the returned page is valid (by calling Page::is_valid()). It is
  // guaranteed that allocated pages have contiguous addresses. The actual
  // number of allocated pages is returned in the output parameter
  // allocated_pages. If the PagedSpace owner is executable and there is
  // a code range, the pages are allocated from the code range.
  Page* AllocatePages(int requested_pages, int* allocated_pages,
                      PagedSpace* owner);

  // Frees pages from a given page and after. Requires pages to be
  // linked in chunk-order (see comment for class).
  // If 'p' is the first page of a chunk, pages from 'p' are freed
  // and this function returns an invalid page pointer.
  // Otherwise, the function searches for a page after 'p' that is
  // the first page of a chunk. Pages after the found page
  // are freed and the function returns 'p'.
  Page* FreePages(Page* p);

  // Frees all pages owned by the given space.
  void FreeAllPages(PagedSpace* space);

  // Allocates and frees raw memory of a certain size.
  // These are just thin wrappers around OS::Allocate and OS::Free,
  // but keep track of allocated bytes as part of the heap.
  // If the flag is EXECUTABLE and a code range exists, the requested
  // memory is allocated from the code range. If a code range exists
  // and the freed memory is in it, the code range manages the freed memory.
  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
                                          size_t* allocated,
                                          Executability executable);
  void FreeRawMemory(void* buf,
                     size_t length,
                     Executability executable);
  void PerformAllocationCallback(ObjectSpace space,
                                 AllocationAction action,
                                 size_t size);

  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                   ObjectSpace space,
                                   AllocationAction action);
  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);

  // Returns the maximum available bytes of heaps.
  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated spaces in bytes.
  intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns allocated executable space in bytes.
  intptr_t SizeExecutable() { return size_executable_; }

  // Returns the maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
  }
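
  // Worked example (illustrative only): with 8K pages and roughly 8160 bytes
  // of object area per page (see Page::kObjectAreaSize), 16 MB of Available()
  // memory corresponds to
  //   (16 MB / 8192) * 8160 = 2048 * 8160 bytes, i.e. about 15.9 MB,
  // so MaxAvailable() discounts the per-page header overhead.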

  // Links two pages.
  inline void SetNextPage(Page* prev, Page* next);

  // Returns the next page of a given page.
  inline Page* GetNextPage(Page* p);

  // Checks whether a page belongs to a space.
  inline bool IsPageInSpace(Page* p, PagedSpace* space);

  // Returns the space that owns the given page.
  inline PagedSpace* PageOwner(Page* page);

  // Finds the first/last page in the same chunk as a given page.
  Page* FindFirstPageInSameChunk(Page* p);
  Page* FindLastPageInSameChunk(Page* p);

  // Relinks the list of pages owned by the space to make it chunk-ordered.
  // Returns the new first and last pages of the space.
  // Also returns the last page in the relinked list which has the
  // WasInUseBeforeMC flag set.
  void RelinkPageListInChunkOrder(PagedSpace* space,
                                  Page** first_page,
                                  Page** last_page,
                                  Page** last_page_in_use);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect a block of memory by marking it read-only/writable.
  inline void Protect(Address start, size_t size);
  inline void Unprotect(Address start, size_t size,
                        Executability executable);

  // Protect/unprotect a chunk given a page in the chunk.
  inline void ProtectChunkFromPage(Page* page);
  inline void UnprotectChunkFromPage(Page* page);
#endif

#ifdef DEBUG
  // Reports statistic info of the space.
  void ReportStatistics();
#endif

  // Due to an encoding limitation, we can only have 8K chunks.
  static const int kMaxNofChunks = 1 << kPageSizeBits;
  // If a chunk has at least 16 pages, the maximum heap size is about
  // 8K * 8K * 16 = 1G bytes.
#ifdef V8_TARGET_ARCH_X64
  static const int kPagesPerChunk = 32;
  // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
  static const int kPagesPerChunkLog2 = 5;
  static const int kChunkTableLevels = 4;
  static const int kChunkTableBitsPerLevel = 12;
#else
  static const int kPagesPerChunk = 16;
  // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
  static const int kPagesPerChunkLog2 = 4;
  static const int kChunkTableLevels = 2;
  static const int kChunkTableBitsPerLevel = 8;
#endif
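
  // Worked example (illustrative only): on 32-bit targets a chunk is
  //   kPagesPerChunk * Page::kPageSize = 16 * 8K = 128K bytes,
  // and with at most kMaxNofChunks = 8K chunks the paged heap tops out at
  //   8K * 128K = 1G bytes, which matches the comment above.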

 private:
  MemoryAllocator();

  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
  static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;

  // Maximum space size in bytes.
  intptr_t capacity_;
  // Maximum subset of capacity_ that can be executable.
  intptr_t capacity_executable_;

  // Allocated space size in bytes.
  intptr_t size_;

  // Allocated executable space size in bytes.
  intptr_t size_executable_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {
    }
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };
  // A list of callbacks that are triggered when memory is allocated or freed.
  List<MemoryAllocationCallbackRegistration>
      memory_allocation_callbacks_;

  // The initial chunk of virtual memory.
  VirtualMemory* initial_chunk_;

  // Allocated chunk info: chunk start address, chunk size, and owning space.
  class ChunkInfo BASE_EMBEDDED {
   public:
    ChunkInfo() : address_(NULL),
                  size_(0),
                  owner_(NULL),
                  executable_(NOT_EXECUTABLE),
                  owner_identity_(FIRST_SPACE) {}
    inline void init(Address a, size_t s, PagedSpace* o);
    Address address() { return address_; }
    size_t size() { return size_; }
    PagedSpace* owner() { return owner_; }
    // We save the executability of the owner to allow using it
    // when collecting stats after the owner has been destroyed.
    Executability executable() const { return executable_; }
    AllocationSpace owner_identity() const { return owner_identity_; }

   private:
    Address address_;
    size_t size_;
    PagedSpace* owner_;
    Executability executable_;
    AllocationSpace owner_identity_;
  };

  // chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
  List<ChunkInfo> chunks_;
  List<int> free_chunk_ids_;
  int max_nof_chunks_;
  int top_;

  // Push/pop a free chunk id onto/from the stack.
  void Push(int free_chunk_id);
  int Pop();
  bool OutOfChunkIds() { return top_ == 0; }

  // Frees a chunk.
  void DeleteChunk(int chunk_id);

  // Basic check whether a chunk id is in the valid range.
  inline bool IsValidChunkId(int chunk_id);

  // Checks whether a chunk id identifies an allocated chunk.
  inline bool IsValidChunk(int chunk_id);

  // Returns the chunk id that a page belongs to.
  inline int GetChunkId(Page* p);

  // True if the address lies in the initial chunk.
  inline bool InInitialChunk(Address address);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  Page* RelinkPagesInChunk(int chunk_id,
                           Address chunk_start,
                           size_t chunk_size,
                           Page* prev,
                           Page** last_page_in_use);

  friend class Isolate;

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
// NOTE: The space specific object iterators also implement their own next()
// method which is used to avoid using virtual functions when
// iterating a specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from a given address to the
// top of a space. The given address must be below the current
// allocation pointer (space top). There are some caveats.
//
// (1) If the space top changes upward during iteration (because of
//     allocating new objects), the iterator does not iterate objects
//     above the original space top. The caller must create a new
//     iterator starting from the old top in order to visit these new
//     objects.
//
// (2) If new objects are allocated below the original allocation top
//     (e.g., free-list allocation in paged spaces), the new objects
//     may or may not be iterated depending on their position with
//     respect to the current point of iteration.
//
// (3) The space top should not change downward during iteration,
//     otherwise the iterator will return not-necessarily-valid
//     objects.

class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space. If a start
  // address is not given, the iterator starts from the space bottom.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(PagedSpace* space, Address start);
  HeapObjectIterator(PagedSpace* space,
                     Address start,
                     HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  inline HeapObject* next() {
    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
  }

  // Implementation of ObjectIterator.
  virtual HeapObject* next_object() { return next(); }

 private:
  Address cur_addr_;              // current iteration point
  Address end_addr_;              // end iteration point
  Address cur_limit_;             // current page limit
  HeapObjectCallback size_func_;  // size function
  Page* end_page_;                // caches the page of the end address

  HeapObject* FromCurrentPage() {
    ASSERT(cur_addr_ < cur_limit_);

    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    ASSERT_OBJECT_SIZE(obj_size);

    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_limit_);

    return obj;
  }

  // Slow path of next(), goes into the next page.
  HeapObject* FromNextPage();

  // Initializes fields.
  void Initialize(Address start, Address end, HeapObjectCallback size_func);

#ifdef DEBUG
  // Verifies whether fields have valid values.
  void Verify();
#endif
};
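
// Illustrative usage sketch (hypothetical caller, not part of this header):
//   HeapObjectIterator it(heap->old_pointer_space());
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // visit obj; objects allocated above the original top are not returned
//   }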


// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.
//
// The PageIterator class provides three modes for iterating pages in a space:
//   PAGES_IN_USE iterates pages containing allocated objects.
//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
//     mark-compact collection.
//   ALL_PAGES iterates all pages in the space.
//
// There are some caveats.
//
// (1) If the space expands during iteration, new pages will not be
//     returned by the iterator in any mode.
//
// (2) If new objects are allocated during iteration, they will appear
//     in pages returned by the iterator. Allocation may cause the
//     allocation pointer or MC allocation pointer in the last page to
//     change between constructing the iterator and iterating the last
//     page.
//
// (3) The space should not shrink during iteration, otherwise the
//     iterator will return deallocated pages.

class PageIterator BASE_EMBEDDED {
 public:
  enum Mode {
    PAGES_IN_USE,
    PAGES_USED_BY_MC,
    ALL_PAGES
  };

  PageIterator(PagedSpace* space, Mode mode);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
};
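
// Illustrative usage sketch (hypothetical caller, not part of this header):
//   PageIterator it(space, PageIterator::PAGES_IN_USE);
//   while (it.has_next()) {
//     Page* page = it.next();
//     // inspect page; pages added after construction are not returned
//   }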


// -----------------------------------------------------------------------------
// A space has a list of pages. The next page can be accessed via
// Page::next_page() call. The next page of the last page is an
// invalid page pointer. A space can expand and shrink dynamically.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  Address top;  // current allocation top
  Address limit;  // current allocation limit

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
        && (top <= limit);
  }
#endif
};
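
// Illustrative sketch (not part of this header): linear ("bump pointer")
// allocation against an AllocationInfo proceeds roughly as follows:
//   if (info.top + size_in_bytes <= info.limit) {
//     Address result = info.top;    // the new object starts at the old top
//     info.top += size_in_bytes;    // bump the allocation pointer
//     // result now addresses size_in_bytes of object area
//   } else {
//     // slow path: free-list allocation or a GC
//   }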


// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g., small blocks due
// to internal fragmentation, top of page areas in map space), and the bytes
// 'available' is the number of unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    available_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    available_ = capacity_;
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  intptr_t Capacity() { return capacity_; }
  intptr_t Available() { return available_; }
  intptr_t Size() { return size_; }
  intptr_t Waste() { return waste_; }

  // Grow the space by adding available bytes.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    available_ += size_in_bytes;
  }

  // Shrink the space by removing available bytes.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    available_ -= size_in_bytes;
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(intptr_t size_in_bytes) {
    available_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(intptr_t size_in_bytes) {
    size_ -= size_in_bytes;
    available_ += size_in_bytes;
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    available_ -= size_in_bytes;
    waste_ += size_in_bytes;
  }

  // Consider the wasted bytes to be allocated, as they contain filler
  // objects (waste -> size).
  void FillWastedBytes(intptr_t size_in_bytes) {
    waste_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

 private:
  intptr_t capacity_;
  intptr_t available_;
  intptr_t size_;
  intptr_t waste_;
};
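
// Worked example (illustrative only): the stats stay balanced so that
// Capacity() == Size() + Waste() + Available() after every operation, e.g.
//   AllocationStats stats;
//   stats.ExpandSpace(8 * KB);    // capacity = available = 8K
//   stats.AllocateBytes(3 * KB);  // size = 3K, available = 5K
//   stats.WasteBytes(1 * KB);     // waste = 1K, available = 4K
//   // capacity (8K) == size (3K) + waste (1K) + available (4K)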


class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(Heap* heap,
             intptr_t max_capacity,
             AllocationSpace id,
             Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool Setup(Address start, size_t size);

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetup();

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }
  // Never crashes even if a is not a valid pointer.
  inline bool SafeContains(Address a);

  // Given an address occupied by a live object, return that object if it is
  // in this space, or Failure::Exception() if it is not. The implementation
  // iterates over objects in the page containing the address; the cost is
  // linear in the number of objects in the page. It may be slow.
  MUST_USE_RESULT MaybeObject* FindObject(Address addr);

  // Checks whether a page is currently in use by this space.
  bool IsUsed(Page* page);

  void MarkAllPagesClean();

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // The top of allocation in a page in this space. Undefined if page is
  // unused.
  Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top()
        : PageAllocationLimit(page);
  }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) = 0;

  void FlushTopPageWatermark() {
    AllocationTopPage()->SetCachedAllocationWatermark(top());
    AllocationTopPage()->InvalidateWatermark(true);
  }

  // Current capacity without growing (Size() + Available() + Waste()).
  intptr_t Capacity() { return accounting_stats_.Capacity(); }

  // Total amount of memory committed for this space. For paged
  // spaces this equals the capacity.
  intptr_t CommittedMemory() { return Capacity(); }

  // Available bytes without growing.
  intptr_t Available() { return accounting_stats_.Available(); }

  // Allocated bytes in this space.
  virtual intptr_t Size() { return accounting_stats_.Size(); }

  // Wasted bytes due to fragmentation and not recoverable until the
  // next GC of this space.
  intptr_t Waste() { return accounting_stats_.Waste(); }

  // Returns the address of the first object in this space.
  Address bottom() { return first_page_->ObjectAreaStart(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top; }

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001112
Leon Clarkee46be812010-01-19 14:06:41 +00001113 virtual bool ReserveSpace(int bytes);
1114
1115 // Used by ReserveSpace.
1116 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001117
Steve Block6ded16b2010-05-10 14:33:55 +01001118  // Free all pages in the range from prev (exclusive) to last (inclusive).
1119  // Freed pages are moved to the end of the page list.
1120 void FreePages(Page* prev, Page* last);
1121
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001122 // Deallocates a block.
1123 virtual void DeallocateBlock(Address start,
1124 int size_in_bytes,
1125 bool add_to_freelist) = 0;
1126
Steve Block6ded16b2010-05-10 14:33:55 +01001127 // Set space allocation info.
1128 void SetTop(Address top) {
1129 allocation_info_.top = top;
1130 allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
1131 }
1132
Steve Blocka7e24c12009-10-30 11:49:00 +00001133 // ---------------------------------------------------------------------------
1134 // Mark-compact collection support functions
1135
1136 // Set the relocation point to the beginning of the space.
1137 void MCResetRelocationInfo();
1138
1139 // Writes relocation info to the top page.
1140 void MCWriteRelocationInfoToPage() {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001141 TopPageOf(mc_forwarding_info_)->
1142 SetAllocationWatermark(mc_forwarding_info_.top);
Steve Blocka7e24c12009-10-30 11:49:00 +00001143 }
1144
1145 // Computes the offset of a given address in this space to the beginning
1146 // of the space.
1147 int MCSpaceOffsetForAddress(Address addr);
1148
1149 // Updates the allocation pointer to the relocation top after a mark-compact
1150 // collection.
1151 virtual void MCCommitRelocationInfo() = 0;
1152
1153 // Releases half of unused pages.
1154 void Shrink();
1155
1156 // Ensures that the capacity is at least 'capacity'. Returns false on failure.
1157 bool EnsureCapacity(int capacity);
1158
1159#ifdef ENABLE_HEAP_PROTECTION
1160 // Protect/unprotect the space by marking it read-only/writable.
1161 void Protect();
1162 void Unprotect();
1163#endif
1164
1165#ifdef DEBUG
1166 // Print meta info and objects in this space.
1167 virtual void Print();
1168
1169 // Verify integrity of this space.
1170 virtual void Verify(ObjectVisitor* visitor);
1171
1172 // Overridden by subclasses to verify space-specific object
1173 // properties (e.g., only maps or free-list nodes are in map space).
1174 virtual void VerifyObject(HeapObject* obj) {}
1175
1176 // Report code object related statistics
1177 void CollectCodeStatistics();
1178 static void ReportCodeStatistics();
1179 static void ResetCodeStatistics();
1180#endif
1181
Steve Block6ded16b2010-05-10 14:33:55 +01001182 // Returns the page of the allocation pointer.
1183 Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
1184
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001185 void RelinkPageListInChunkOrder(bool deallocate_blocks);
1186
Steve Blocka7e24c12009-10-30 11:49:00 +00001187 protected:
1188 // Maximum capacity of this space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001189 intptr_t max_capacity_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001190
1191 // Accounting information for this space.
1192 AllocationStats accounting_stats_;
1193
1194 // The first page in this space.
1195 Page* first_page_;
1196
1197 // The last page in this space. Initially set in Setup, updated in
1198 // Expand and Shrink.
1199 Page* last_page_;
1200
Steve Block6ded16b2010-05-10 14:33:55 +01001201 // True if pages owned by this space are linked in chunk-order.
1202 // See comment for class MemoryAllocator for definition of chunk-order.
1203 bool page_list_is_chunk_ordered_;
1204
Steve Blocka7e24c12009-10-30 11:49:00 +00001205 // Normal allocation information.
1206 AllocationInfo allocation_info_;
1207
1208 // Relocation information during mark-compact collections.
1209 AllocationInfo mc_forwarding_info_;
1210
1211 // Bytes of each page that cannot be allocated. Possibly non-zero
1212 // for pages in spaces with only fixed-size objects. Always zero
1213 // for pages in spaces with variable sized objects (those pages are
1214 // padded with free-list nodes).
1215 int page_extra_;
1216
1217 // Sets allocation pointer to a page bottom.
1218 static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
1219
1220 // Returns the top page specified by an allocation info structure.
1221 static Page* TopPageOf(AllocationInfo alloc_info) {
1222 return Page::FromAllocationTop(alloc_info.limit);
1223 }
1224
Leon Clarked91b9f72010-01-27 17:25:45 +00001225 int CountPagesToTop() {
1226 Page* p = Page::FromAllocationTop(allocation_info_.top);
1227 PageIterator it(this, PageIterator::ALL_PAGES);
1228 int counter = 1;
1229 while (it.has_next()) {
1230 if (it.next() == p) return counter;
1231 counter++;
1232 }
1233 UNREACHABLE();
1234 return -1;
1235 }
1236
Steve Blocka7e24c12009-10-30 11:49:00 +00001237 // Expands the space by allocating a fixed number of pages. Returns false if
1238  // it cannot allocate the requested number of pages from the OS. Newly
1239  // allocated pages are appended to the last_page.
1240 bool Expand(Page* last_page);
1241
1242 // Generic fast case allocation function that tries linear allocation in
1243 // the top page of 'alloc_info'. Returns NULL on failure.
1244 inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
1245 int size_in_bytes);
1246
1247 // During normal allocation or deserialization, roll to the next page in
1248 // the space (there is assumed to be one) and allocate there. This
1249 // function is space-dependent.
1250 virtual HeapObject* AllocateInNextPage(Page* current_page,
1251 int size_in_bytes) = 0;
1252
1253 // Slow path of AllocateRaw. This function is space-dependent.
John Reck59135872010-11-02 12:39:01 -07001254 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001255
1256 // Slow path of MCAllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001257 MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001258
1259#ifdef DEBUG
Leon Clarkee46be812010-01-19 14:06:41 +00001260 // Returns the number of total pages in this space.
1261 int CountTotalPages();
Steve Blocka7e24c12009-10-30 11:49:00 +00001262#endif
1263 private:
Steve Blocka7e24c12009-10-30 11:49:00 +00001264
1265 // Returns a pointer to the page of the relocation pointer.
1266 Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
1267
Steve Blocka7e24c12009-10-30 11:49:00 +00001268 friend class PageIterator;
1269};
1270
1271
1272#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1273class NumberAndSizeInfo BASE_EMBEDDED {
1274 public:
1275 NumberAndSizeInfo() : number_(0), bytes_(0) {}
1276
1277 int number() const { return number_; }
1278 void increment_number(int num) { number_ += num; }
1279
1280 int bytes() const { return bytes_; }
1281 void increment_bytes(int size) { bytes_ += size; }
1282
1283 void clear() {
1284 number_ = 0;
1285 bytes_ = 0;
1286 }
1287
1288 private:
1289 int number_;
1290 int bytes_;
1291};
1292
1293
1294// HistogramInfo class for recording a single "bar" of a histogram. This
1295// class is used for collecting statistics to print to stdout (when compiled
1296// with DEBUG) or to the log file (when compiled with
1297// ENABLE_LOGGING_AND_PROFILING).
1298class HistogramInfo: public NumberAndSizeInfo {
1299 public:
1300 HistogramInfo() : NumberAndSizeInfo() {}
1301
1302 const char* name() { return name_; }
1303 void set_name(const char* name) { name_ = name; }
1304
1305 private:
1306 const char* name_;
1307};
1308#endif
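// Example (minimal sketch): how statistics code fills in one histogram bar
// while walking a space. The bucket name and the object_size variable are
// illustrative, not taken from this header.
//
//   HistogramInfo info;
//   info.set_name("FIXED_ARRAY_TYPE");
//   info.increment_number(1);           // one more object in this bucket
//   info.increment_bytes(object_size);  // and its size in bytes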
1309
1310
1311// -----------------------------------------------------------------------------
1312// SemiSpace in young generation
1313//
1314// A semispace is a contiguous chunk of memory. The mark-compact collector
1315// uses the memory in the from space as a marking stack when tracing live
1316// objects.
1317
1318class SemiSpace : public Space {
1319 public:
1320 // Constructor.
Steve Block44f0eee2011-05-26 01:26:41 +01001321 explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001322 start_ = NULL;
1323 age_mark_ = NULL;
1324 }
1325
1326 // Sets up the semispace using the given chunk.
1327 bool Setup(Address start, int initial_capacity, int maximum_capacity);
1328
1329 // Tear down the space. Heap memory was not allocated by the space, so it
1330 // is not deallocated here.
1331 void TearDown();
1332
1333 // True if the space has been set up but not torn down.
1334 bool HasBeenSetup() { return start_ != NULL; }
1335
1336 // Grow the size of the semispace by committing extra virtual memory.
1337 // Assumes that the caller has checked that the semispace has not reached
1338 // its maximum capacity (and thus there is space available in the reserved
1339 // address range to grow).
1340 bool Grow();
1341
1342 // Grow the semispace to the new capacity. The new capacity
1343 // requested must be larger than the current capacity.
1344 bool GrowTo(int new_capacity);
1345
1346 // Shrinks the semispace to the new capacity. The new capacity
1347 // requested must be more than the amount of used memory in the
1348 // semispace and less than the current capacity.
1349 bool ShrinkTo(int new_capacity);
1350
1351 // Returns the start address of the space.
1352 Address low() { return start_; }
1353 // Returns one past the end address of the space.
1354 Address high() { return low() + capacity_; }
1355
1356 // Age mark accessors.
1357 Address age_mark() { return age_mark_; }
1358 void set_age_mark(Address mark) { age_mark_ = mark; }
1359
1360 // True if the address is in the address range of this semispace (not
1361 // necessarily below the allocation pointer).
1362 bool Contains(Address a) {
1363 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1364 == reinterpret_cast<uintptr_t>(start_);
1365 }
1366
1367 // True if the object is a heap object in the address range of this
1368 // semispace (not necessarily below the allocation pointer).
1369 bool Contains(Object* o) {
1370 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1371 }
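  // Worked example (sketch) of the containment tests above: assume a 32-bit
  // build where the semispace is reserved at start_ == 0x20000000 and aligned
  // to a power-of-two maximum capacity of 0x00800000 bytes, so that
  // address_mask_ == ~(0x00800000 - 1) == 0xff800000. Then
  //   0x20001234 & 0xff800000 == 0x20000000 == start_   (contained), while
  //   0x30001234 & 0xff800000 == 0x30000000 != start_   (not contained).
  // The numeric values are purely illustrative.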
1372
1373 // The offset of an address from the beginning of the space.
Steve Blockd0582a62009-12-15 09:54:21 +00001374 int SpaceOffsetForAddress(Address addr) {
1375 return static_cast<int>(addr - low());
1376 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001377
Leon Clarkee46be812010-01-19 14:06:41 +00001378 // If we don't have these here then SemiSpace will be abstract. However
1379 // they should never be called.
Ben Murdochf87a2032010-10-22 12:50:53 +01001380 virtual intptr_t Size() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001381 UNREACHABLE();
1382 return 0;
1383 }
1384
Leon Clarkee46be812010-01-19 14:06:41 +00001385 virtual bool ReserveSpace(int bytes) {
1386 UNREACHABLE();
1387 return false;
1388 }
1389
Steve Blocka7e24c12009-10-30 11:49:00 +00001390 bool is_committed() { return committed_; }
1391 bool Commit();
1392 bool Uncommit();
1393
Steve Block6ded16b2010-05-10 14:33:55 +01001394#ifdef ENABLE_HEAP_PROTECTION
1395 // Protect/unprotect the space by marking it read-only/writable.
1396 virtual void Protect() {}
1397 virtual void Unprotect() {}
1398#endif
1399
Steve Blocka7e24c12009-10-30 11:49:00 +00001400#ifdef DEBUG
1401 virtual void Print();
1402 virtual void Verify();
1403#endif
1404
1405 // Returns the current capacity of the semi space.
1406 int Capacity() { return capacity_; }
1407
1408 // Returns the maximum capacity of the semi space.
1409 int MaximumCapacity() { return maximum_capacity_; }
1410
1411 // Returns the initial capacity of the semi space.
1412 int InitialCapacity() { return initial_capacity_; }
1413
1414 private:
1415 // The current and maximum capacity of the space.
1416 int capacity_;
1417 int maximum_capacity_;
1418 int initial_capacity_;
1419
1420 // The start address of the space.
1421 Address start_;
1422 // Used to govern object promotion during mark-compact collection.
1423 Address age_mark_;
1424
1425 // Masks and comparison values to test for containment in this semispace.
1426 uintptr_t address_mask_;
1427 uintptr_t object_mask_;
1428 uintptr_t object_expected_;
1429
1430 bool committed_;
1431
1432 public:
1433 TRACK_MEMORY("SemiSpace")
1434};
1435
1436
1437// A SemiSpaceIterator is an ObjectIterator that iterates over the active
1438// semispace of the heap's new space. It iterates over the objects in the
1439// semispace from a given start address (defaulting to the bottom of the
1440// semispace) to the top of the semispace. New objects allocated after the
1441// iterator is created are not iterated.
1442class SemiSpaceIterator : public ObjectIterator {
1443 public:
1444 // Create an iterator over the objects in the given space. If no start
1445 // address is given, the iterator starts from the bottom of the space. If
1446 // no size function is given, the iterator calls Object::Size().
1447 explicit SemiSpaceIterator(NewSpace* space);
1448 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1449 SemiSpaceIterator(NewSpace* space, Address start);
1450
Steve Blocka7e24c12009-10-30 11:49:00 +00001451 HeapObject* next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00001452 if (current_ == limit_) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001453
1454 HeapObject* object = HeapObject::FromAddress(current_);
1455 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
1456
1457 current_ += size;
1458 return object;
1459 }
1460
1461 // Implementation of the ObjectIterator functions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001462 virtual HeapObject* next_object() { return next(); }
1463
1464 private:
1465 void Initialize(NewSpace* space, Address start, Address end,
1466 HeapObjectCallback size_func);
1467
1468 // The semispace.
1469 SemiSpace* space_;
1470 // The current iteration point.
1471 Address current_;
1472 // The end of iteration.
1473 Address limit_;
1474 // The callback function.
1475 HeapObjectCallback size_func_;
1476};
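// Example (minimal sketch): walking the objects currently in the active
// semispace with a SemiSpaceIterator. `new_space` is assumed to be an
// initialized NewSpace*; objects allocated after the iterator is created are
// not visited.
//
//   SemiSpaceIterator it(new_space);
//   for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
//     int size = object->Size();  // or supply a HeapObjectCallback instead
//     // ... use object and size ...
//   }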
1477
1478
1479// -----------------------------------------------------------------------------
1480// The young generation space.
1481//
1482// The new space consists of a contiguous pair of semispaces. It simply
1483// forwards most functions to the appropriate semispace.
1484
1485class NewSpace : public Space {
1486 public:
1487 // Constructor.
Steve Block44f0eee2011-05-26 01:26:41 +01001488 explicit NewSpace(Heap* heap)
1489 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
1490 to_space_(heap),
1491 from_space_(heap) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00001492
1493 // Sets up the new space using the given chunk.
1494 bool Setup(Address start, int size);
1495
1496 // Tears down the space. Heap memory was not allocated by the space, so it
1497 // is not deallocated here.
1498 void TearDown();
1499
1500 // True if the space has been set up but not torn down.
1501 bool HasBeenSetup() {
1502 return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
1503 }
1504
1505 // Flip the pair of spaces.
1506 void Flip();
1507
1508 // Grow the capacity of the semispaces. Assumes that they are not at
1509 // their maximum capacity.
1510 void Grow();
1511
1512 // Shrink the capacity of the semispaces.
1513 void Shrink();
1514
1515 // True if the address or object lies in the address range of either
1516 // semispace (not necessarily below the allocation pointer).
1517 bool Contains(Address a) {
1518 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1519 == reinterpret_cast<uintptr_t>(start_);
1520 }
1521 bool Contains(Object* o) {
1522 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1523 }
1524
1525 // Return the allocated bytes in the active semispace.
Ben Murdochf87a2032010-10-22 12:50:53 +01001526 virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
1527  // The same, but returning an int. We have to keep the version that returns
1528  // intptr_t because it is inherited, but when we know we are dealing with the
1529  // new space, which cannot get as big as the other spaces, this is useful:
1530 int SizeAsInt() { return static_cast<int>(Size()); }
Steve Block3ce2e202009-11-05 08:53:23 +00001531
Steve Blocka7e24c12009-10-30 11:49:00 +00001532 // Return the current capacity of a semispace.
Ben Murdochf87a2032010-10-22 12:50:53 +01001533 intptr_t Capacity() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001534 ASSERT(to_space_.Capacity() == from_space_.Capacity());
1535 return to_space_.Capacity();
1536 }
Steve Block3ce2e202009-11-05 08:53:23 +00001537
1538 // Return the total amount of memory committed for new space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001539 intptr_t CommittedMemory() {
Steve Block3ce2e202009-11-05 08:53:23 +00001540 if (from_space_.is_committed()) return 2 * Capacity();
1541 return Capacity();
1542 }
1543
Steve Blocka7e24c12009-10-30 11:49:00 +00001544 // Return the available bytes without growing in the active semispace.
Ben Murdochf87a2032010-10-22 12:50:53 +01001545 intptr_t Available() { return Capacity() - Size(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001546
1547 // Return the maximum capacity of a semispace.
1548 int MaximumCapacity() {
1549 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
1550 return to_space_.MaximumCapacity();
1551 }
1552
1553 // Returns the initial capacity of a semispace.
1554 int InitialCapacity() {
1555 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
1556 return to_space_.InitialCapacity();
1557 }
1558
1559 // Return the address of the allocation pointer in the active semispace.
1560 Address top() { return allocation_info_.top; }
1561 // Return the address of the first object in the active semispace.
1562 Address bottom() { return to_space_.low(); }
1563
1564 // Get the age mark of the inactive semispace.
1565 Address age_mark() { return from_space_.age_mark(); }
1566 // Set the age mark in the active semispace.
1567 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
1568
1569 // The start address of the space and a bit mask. Anding an address in the
1570 // new space with the mask will result in the start address.
1571 Address start() { return start_; }
1572 uintptr_t mask() { return address_mask_; }
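  // Example (sketch): the fast containment check that start() and mask() make
  // possible, e.g. for inlined new-space tests. This mirrors Contains(Address)
  // above and assumes the new space is aligned to its reserved size; the
  // variable names are illustrative.
  //
  //   bool in_new_space =
  //       (reinterpret_cast<uintptr_t>(addr) & new_space->mask())
  //       == reinterpret_cast<uintptr_t>(new_space->start());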
1573
1574 // The allocation top and limit addresses.
1575 Address* allocation_top_address() { return &allocation_info_.top; }
1576 Address* allocation_limit_address() { return &allocation_info_.limit; }
1577
John Reck59135872010-11-02 12:39:01 -07001578 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001579 return AllocateRawInternal(size_in_bytes, &allocation_info_);
1580 }
1581
1582 // Allocate the requested number of bytes for relocation during mark-compact
1583 // collection.
John Reck59135872010-11-02 12:39:01 -07001584 MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001585 return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
1586 }
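  // Example (sketch): the usual pattern for consuming the MaybeObject result
  // of AllocateRaw. ToObject() is assumed to be the MaybeObject accessor
  // declared in objects.h, and the enclosing function is assumed to return
  // MaybeObject*; the surrounding names are illustrative.
  //
  //   Object* result;
  //   { MaybeObject* maybe = new_space->AllocateRaw(size_in_bytes);
  //     if (!maybe->ToObject(&result)) return maybe;  // propagate the failure
  //   }
  //   HeapObject* object = HeapObject::cast(result);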
1587
1588 // Reset the allocation pointer to the beginning of the active semispace.
1589 void ResetAllocationInfo();
1590  // Reset the relocation pointer to the bottom of the inactive semispace in
1591 // preparation for mark-compact collection.
1592 void MCResetRelocationInfo();
1593 // Update the allocation pointer in the active semispace after a
1594 // mark-compact collection.
1595 void MCCommitRelocationInfo();
1596
1597 // Get the extent of the inactive semispace (for use as a marking stack).
1598 Address FromSpaceLow() { return from_space_.low(); }
1599 Address FromSpaceHigh() { return from_space_.high(); }
1600
1601 // Get the extent of the active semispace (to sweep newly copied objects
1602 // during a scavenge collection).
1603 Address ToSpaceLow() { return to_space_.low(); }
1604 Address ToSpaceHigh() { return to_space_.high(); }
1605
1606 // Offsets from the beginning of the semispaces.
1607 int ToSpaceOffsetForAddress(Address a) {
1608 return to_space_.SpaceOffsetForAddress(a);
1609 }
1610 int FromSpaceOffsetForAddress(Address a) {
1611 return from_space_.SpaceOffsetForAddress(a);
1612 }
1613
1614 // True if the object is a heap object in the address range of the
1615 // respective semispace (not necessarily below the allocation pointer of the
1616 // semispace).
1617 bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
1618 bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
1619
1620 bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
1621 bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
1622
Leon Clarkee46be812010-01-19 14:06:41 +00001623 virtual bool ReserveSpace(int bytes);
1624
Ben Murdochb0fe1622011-05-05 13:52:32 +01001625 // Resizes a sequential string which must be the most recent thing that was
1626 // allocated in new space.
1627 template <typename StringType>
1628 inline void ShrinkStringAtAllocationBoundary(String* string, int len);
1629
Steve Blocka7e24c12009-10-30 11:49:00 +00001630#ifdef ENABLE_HEAP_PROTECTION
1631 // Protect/unprotect the space by marking it read-only/writable.
1632 virtual void Protect();
1633 virtual void Unprotect();
1634#endif
1635
1636#ifdef DEBUG
1637 // Verify the active semispace.
1638 virtual void Verify();
1639 // Print the active semispace.
1640 virtual void Print() { to_space_.Print(); }
1641#endif
1642
1643#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1644 // Iterates the active semispace to collect statistics.
1645 void CollectStatistics();
1646 // Reports previously collected statistics of the active semispace.
1647 void ReportStatistics();
1648 // Clears previously collected statistics.
1649 void ClearHistograms();
1650
1651 // Record the allocation or promotion of a heap object. Note that we don't
1652 // record every single allocation, but only those that happen in the
1653 // to space during a scavenge GC.
1654 void RecordAllocation(HeapObject* obj);
1655 void RecordPromotion(HeapObject* obj);
1656#endif
1657
1658  // Return whether the operation succeeded.
1659 bool CommitFromSpaceIfNeeded() {
1660 if (from_space_.is_committed()) return true;
1661 return from_space_.Commit();
1662 }
1663
1664 bool UncommitFromSpace() {
1665 if (!from_space_.is_committed()) return true;
1666 return from_space_.Uncommit();
1667 }
1668
1669 private:
1670 // The semispaces.
1671 SemiSpace to_space_;
1672 SemiSpace from_space_;
1673
1674 // Start address and bit mask for containment testing.
1675 Address start_;
1676 uintptr_t address_mask_;
1677 uintptr_t object_mask_;
1678 uintptr_t object_expected_;
1679
1680 // Allocation pointer and limit for normal allocation and allocation during
1681 // mark-compact collection.
1682 AllocationInfo allocation_info_;
1683 AllocationInfo mc_forwarding_info_;
1684
1685#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1686 HistogramInfo* allocated_histogram_;
1687 HistogramInfo* promoted_histogram_;
1688#endif
1689
1690 // Implementation of AllocateRaw and MCAllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001691 MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
1692 int size_in_bytes,
1693 AllocationInfo* alloc_info);
Steve Blocka7e24c12009-10-30 11:49:00 +00001694
1695 friend class SemiSpaceIterator;
1696
1697 public:
1698 TRACK_MEMORY("NewSpace")
1699};
1700
1701
1702// -----------------------------------------------------------------------------
1703// Free lists for old object spaces
1704//
1705// Free-list nodes are free blocks in the heap. They look like heap objects
1706// (free-list node pointers have the heap object tag, and they have a map like
1707// a heap object). They have a size and a next pointer. The next pointer is
1708// the raw address of the next free list node (or NULL).
1709class FreeListNode: public HeapObject {
1710 public:
1711 // Obtain a free-list node from a raw address. This is not a cast because
1712 // it does not check nor require that the first word at the address is a map
1713 // pointer.
1714 static FreeListNode* FromAddress(Address address) {
1715 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1716 }
1717
Steve Block3ce2e202009-11-05 08:53:23 +00001718 static inline bool IsFreeListNode(HeapObject* object);
1719
Steve Blocka7e24c12009-10-30 11:49:00 +00001720 // Set the size in bytes, which can be read with HeapObject::Size(). This
1721 // function also writes a map to the first word of the block so that it
1722 // looks like a heap object to the garbage collector and heap iteration
1723 // functions.
Steve Block44f0eee2011-05-26 01:26:41 +01001724 void set_size(Heap* heap, int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001725
1726 // Accessors for the next field.
Steve Block44f0eee2011-05-26 01:26:41 +01001727 inline Address next(Heap* heap);
1728 inline void set_next(Heap* heap, Address next);
Steve Blocka7e24c12009-10-30 11:49:00 +00001729
1730 private:
1731 static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
1732
1733 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1734};
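// Example (minimal sketch): how a freed block is turned into a free-list node
// so that heap iteration still sees a valid object. `heap`, `start`,
// `size_in_bytes` and `old_head` are assumed to come from the owning space.
//
//   FreeListNode* node = FreeListNode::FromAddress(start);
//   node->set_size(heap, size_in_bytes);  // writes a map; the block now looks like a heap object
//   node->set_next(heap, old_head);       // raw address of the next free block, or NULL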
1735
1736
1737// The free list for the old space.
1738class OldSpaceFreeList BASE_EMBEDDED {
1739 public:
Steve Block44f0eee2011-05-26 01:26:41 +01001740 OldSpaceFreeList(Heap* heap, AllocationSpace owner);
Steve Blocka7e24c12009-10-30 11:49:00 +00001741
1742 // Clear the free list.
1743 void Reset();
1744
1745 // Return the number of bytes available on the free list.
Ben Murdochf87a2032010-10-22 12:50:53 +01001746 intptr_t available() { return available_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001747
1748 // Place a node on the free list. The block of size 'size_in_bytes'
1749 // starting at 'start' is placed on the free list. The return value is the
1750 // number of bytes that have been lost due to internal fragmentation by
1751 // freeing the block. Bookkeeping information will be written to the block,
1752 // ie, its contents will be destroyed. The start address should be word
1753  // i.e., its contents will be destroyed. The start address should be word
1754 int Free(Address start, int size_in_bytes);
1755
1756 // Allocate a block of size 'size_in_bytes' from the free list. The block
1757  // is uninitialized. A failure is returned if no block is available. The
1758 // number of bytes lost to fragmentation is returned in the output parameter
1759 // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
John Reck59135872010-11-02 12:39:01 -07001760 MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001761
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001762 void MarkNodes();
1763
Steve Blocka7e24c12009-10-30 11:49:00 +00001764 private:
1765 // The size range of blocks, in bytes. (Smaller allocations are allowed, but
1766 // will always result in waste.)
1767 static const int kMinBlockSize = 2 * kPointerSize;
1768 static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1769
Steve Block44f0eee2011-05-26 01:26:41 +01001770 Heap* heap_;
1771
Steve Blocka7e24c12009-10-30 11:49:00 +00001772 // The identity of the owning space, for building allocation Failure
1773 // objects.
1774 AllocationSpace owner_;
1775
1776 // Total available bytes in all blocks on this free list.
1777 int available_;
1778
1779 // Blocks are put on exact free lists in an array, indexed by size in words.
1780 // The available sizes are kept in an increasingly ordered list. Entries
1781 // corresponding to sizes < kMinBlockSize always have an empty free list
1782 // (but index kHead is used for the head of the size list).
1783 struct SizeNode {
1784 // Address of the head FreeListNode of the implied block size or NULL.
1785 Address head_node_;
1786 // Size (words) of the next larger available size if head_node_ != NULL.
1787 int next_size_;
1788 };
1789 static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
1790 SizeNode free_[kFreeListsLength];
1791
1792 // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
1793 static const int kHead = kMinBlockSize / kPointerSize - 1;
1794 static const int kEnd = kMaxInt;
1795
1796 // We keep a "finger" in the size list to speed up a common pattern:
1797 // repeated requests for the same or increasing sizes.
1798 int finger_;
1799
1800 // Starting from *prev, find and return the smallest size >= index (words),
1801 // or kEnd. Update *prev to be the largest size < index, or kHead.
1802 int FindSize(int index, int* prev) {
1803 int cur = free_[*prev].next_size_;
1804 while (cur < index) {
1805 *prev = cur;
1806 cur = free_[cur].next_size_;
1807 }
1808 return cur;
1809 }
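  // Worked example (sketch): suppose the free list currently holds blocks of
  // 4, 7 and 12 words, so the size list is kHead -> 4 -> 7 -> 12 -> kEnd.
  // FindSize(6, &prev) with *prev == kHead steps past 4 (since 4 < 6) and
  // stops at 7, returning 7 with *prev left at 4. The sizes used here are
  // purely illustrative.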
1810
1811 // Remove an existing element from the size list.
1812 void RemoveSize(int index) {
1813 int prev = kHead;
1814 int cur = FindSize(index, &prev);
1815 ASSERT(cur == index);
1816 free_[prev].next_size_ = free_[cur].next_size_;
1817 finger_ = prev;
1818 }
1819
1820 // Insert a new element into the size list.
1821 void InsertSize(int index) {
1822 int prev = kHead;
1823 int cur = FindSize(index, &prev);
1824 ASSERT(cur != index);
1825 free_[prev].next_size_ = index;
1826 free_[index].next_size_ = cur;
1827 }
1828
1829 // The size list is not updated during a sequence of calls to Free, but is
1830 // rebuilt before the next allocation.
1831 void RebuildSizeList();
1832 bool needs_rebuild_;
1833
1834#ifdef DEBUG
1835 // Does this free list contain a free block located at the address of 'node'?
1836 bool Contains(FreeListNode* node);
1837#endif
1838
1839 DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
1840};
1841
1842
1843// The free list for the map space.
1844class FixedSizeFreeList BASE_EMBEDDED {
1845 public:
Steve Block44f0eee2011-05-26 01:26:41 +01001846 FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001847
1848 // Clear the free list.
1849 void Reset();
1850
1851 // Return the number of bytes available on the free list.
Ben Murdochf87a2032010-10-22 12:50:53 +01001852 intptr_t available() { return available_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001853
1854 // Place a node on the free list. The block starting at 'start' (assumed to
1855 // have size object_size_) is placed on the free list. Bookkeeping
1856  // information will be written to the block, i.e., its contents will be
1857 // destroyed. The start address should be word aligned.
1858 void Free(Address start);
1859
1860  // Allocate a fixed-sized block from the free list. The block is uninitialized.
1861 // A failure is returned if no block is available.
John Reck59135872010-11-02 12:39:01 -07001862 MUST_USE_RESULT MaybeObject* Allocate();
Steve Blocka7e24c12009-10-30 11:49:00 +00001863
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001864 void MarkNodes();
1865
Steve Blocka7e24c12009-10-30 11:49:00 +00001866 private:
Steve Block44f0eee2011-05-26 01:26:41 +01001867
1868 Heap* heap_;
1869
Steve Blocka7e24c12009-10-30 11:49:00 +00001870 // Available bytes on the free list.
Ben Murdochf87a2032010-10-22 12:50:53 +01001871 intptr_t available_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001872
1873 // The head of the free list.
1874 Address head_;
1875
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001876 // The tail of the free list.
1877 Address tail_;
1878
Steve Blocka7e24c12009-10-30 11:49:00 +00001879 // The identity of the owning space, for building allocation Failure
1880 // objects.
1881 AllocationSpace owner_;
1882
1883 // The size of the objects in this space.
1884 int object_size_;
1885
1886 DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
1887};
1888
1889
1890// -----------------------------------------------------------------------------
1891// Old object space (excluding map objects)
1892
1893class OldSpace : public PagedSpace {
1894 public:
1895 // Creates an old space object with a given maximum capacity.
1896 // The constructor does not allocate pages from OS.
Steve Block44f0eee2011-05-26 01:26:41 +01001897 OldSpace(Heap* heap,
1898 intptr_t max_capacity,
1899 AllocationSpace id,
1900 Executability executable)
1901 : PagedSpace(heap, max_capacity, id, executable),
1902 free_list_(heap, id) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001903 page_extra_ = 0;
1904 }
1905
1906 // The bytes available on the free list (ie, not above the linear allocation
1907 // pointer).
Ben Murdochf87a2032010-10-22 12:50:53 +01001908 intptr_t AvailableFree() { return free_list_.available(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001909
Steve Block6ded16b2010-05-10 14:33:55 +01001910 // The limit of allocation for a page in this space.
1911 virtual Address PageAllocationLimit(Page* page) {
1912 return page->ObjectAreaEnd();
Steve Blocka7e24c12009-10-30 11:49:00 +00001913 }
1914
1915 // Give a block of memory to the space's free list. It might be added to
1916 // the free list or accounted as waste.
Steve Block6ded16b2010-05-10 14:33:55 +01001917  // If add_to_freelist is false, only the accounting stats are updated and
1918  // no attempt is made to add the area to the free list.
1919 void Free(Address start, int size_in_bytes, bool add_to_freelist) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001920 accounting_stats_.DeallocateBytes(size_in_bytes);
Steve Block6ded16b2010-05-10 14:33:55 +01001921
1922 if (add_to_freelist) {
1923 int wasted_bytes = free_list_.Free(start, size_in_bytes);
1924 accounting_stats_.WasteBytes(wasted_bytes);
1925 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001926 }
1927
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001928 virtual void DeallocateBlock(Address start,
1929 int size_in_bytes,
1930 bool add_to_freelist);
1931
Steve Blocka7e24c12009-10-30 11:49:00 +00001932 // Prepare for full garbage collection. Resets the relocation pointer and
1933 // clears the free list.
1934 virtual void PrepareForMarkCompact(bool will_compact);
1935
1936 // Updates the allocation pointer to the relocation top after a mark-compact
1937 // collection.
1938 virtual void MCCommitRelocationInfo();
1939
Leon Clarkee46be812010-01-19 14:06:41 +00001940 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1941
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001942 void MarkFreeListNodes() { free_list_.MarkNodes(); }
1943
Steve Blocka7e24c12009-10-30 11:49:00 +00001944#ifdef DEBUG
1945 // Reports statistics for the space
1946 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00001947#endif
1948
1949 protected:
1950 // Virtual function in the superclass. Slow path of AllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001951 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001952
1953 // Virtual function in the superclass. Allocate linearly at the start of
1954 // the page after current_page (there is assumed to be one).
1955 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1956
1957 private:
1958 // The space's free list.
1959 OldSpaceFreeList free_list_;
1960
1961 public:
1962 TRACK_MEMORY("OldSpace")
1963};
1964
1965
1966// -----------------------------------------------------------------------------
1967// Old space for objects of a fixed size
1968
1969class FixedSpace : public PagedSpace {
1970 public:
Steve Block44f0eee2011-05-26 01:26:41 +01001971 FixedSpace(Heap* heap,
1972 intptr_t max_capacity,
Steve Blocka7e24c12009-10-30 11:49:00 +00001973 AllocationSpace id,
1974 int object_size_in_bytes,
1975 const char* name)
Steve Block44f0eee2011-05-26 01:26:41 +01001976 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
Steve Blocka7e24c12009-10-30 11:49:00 +00001977 object_size_in_bytes_(object_size_in_bytes),
1978 name_(name),
Steve Block44f0eee2011-05-26 01:26:41 +01001979 free_list_(heap, id, object_size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001980 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
1981 }
1982
Steve Block6ded16b2010-05-10 14:33:55 +01001983 // The limit of allocation for a page in this space.
1984 virtual Address PageAllocationLimit(Page* page) {
1985 return page->ObjectAreaEnd() - page_extra_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001986 }
1987
1988 int object_size_in_bytes() { return object_size_in_bytes_; }
1989
1990 // Give a fixed sized block of memory to the space's free list.
Steve Block6ded16b2010-05-10 14:33:55 +01001991  // If add_to_freelist is false, only the accounting stats are updated and
1992  // no attempt is made to add the area to the free list.
1993 void Free(Address start, bool add_to_freelist) {
1994 if (add_to_freelist) {
1995 free_list_.Free(start);
1996 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001997 accounting_stats_.DeallocateBytes(object_size_in_bytes_);
1998 }
1999
2000 // Prepares for a mark-compact GC.
2001 virtual void PrepareForMarkCompact(bool will_compact);
2002
2003 // Updates the allocation pointer to the relocation top after a mark-compact
2004 // collection.
2005 virtual void MCCommitRelocationInfo();
2006
Leon Clarkee46be812010-01-19 14:06:41 +00002007 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
2008
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002009 virtual void DeallocateBlock(Address start,
2010 int size_in_bytes,
2011 bool add_to_freelist);
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002012
2013 void MarkFreeListNodes() { free_list_.MarkNodes(); }
2014
Steve Blocka7e24c12009-10-30 11:49:00 +00002015#ifdef DEBUG
2016  // Reports statistics for the space
2017 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002018#endif
2019
2020 protected:
2021 // Virtual function in the superclass. Slow path of AllocateRaw.
John Reck59135872010-11-02 12:39:01 -07002022 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002023
2024 // Virtual function in the superclass. Allocate linearly at the start of
2025 // the page after current_page (there is assumed to be one).
2026 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
2027
Leon Clarkee46be812010-01-19 14:06:41 +00002028 void ResetFreeList() {
2029 free_list_.Reset();
2030 }
2031
Steve Blocka7e24c12009-10-30 11:49:00 +00002032 private:
2033 // The size of objects in this space.
2034 int object_size_in_bytes_;
2035
2036 // The name of this space.
2037 const char* name_;
2038
2039 // The space's free list.
2040 FixedSizeFreeList free_list_;
2041};
2042
2043
2044// -----------------------------------------------------------------------------
2045// Old space for all map objects
2046
2047class MapSpace : public FixedSpace {
2048 public:
2049 // Creates a map space object with a maximum capacity.
Steve Block44f0eee2011-05-26 01:26:41 +01002050 MapSpace(Heap* heap,
2051 intptr_t max_capacity,
2052 int max_map_space_pages,
2053 AllocationSpace id)
2054 : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
Leon Clarked91b9f72010-01-27 17:25:45 +00002055 max_map_space_pages_(max_map_space_pages) {
2056 ASSERT(max_map_space_pages < kMaxMapPageIndex);
2057 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002058
2059 // Prepares for a mark-compact GC.
2060 virtual void PrepareForMarkCompact(bool will_compact);
2061
2062 // Given an index, returns the page address.
2063 Address PageAddress(int page_index) { return page_addresses_[page_index]; }
2064
Leon Clarked91b9f72010-01-27 17:25:45 +00002065 static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
Steve Blocka7e24c12009-10-30 11:49:00 +00002066
Leon Clarkee46be812010-01-19 14:06:41 +00002067 // Are map pointers encodable into map word?
2068 bool MapPointersEncodable() {
2069 if (!FLAG_use_big_map_space) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002070 ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
Leon Clarkee46be812010-01-19 14:06:41 +00002071 return true;
2072 }
Leon Clarked91b9f72010-01-27 17:25:45 +00002073 return CountPagesToTop() <= max_map_space_pages_;
Leon Clarkee46be812010-01-19 14:06:41 +00002074 }
2075
2076  // Should be called after a forced sweep to find out if the map space needs
2077 // compaction.
2078 bool NeedsCompaction(int live_maps) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002079 return !MapPointersEncodable() && live_maps <= CompactionThreshold();
Leon Clarkee46be812010-01-19 14:06:41 +00002080 }
2081
2082 Address TopAfterCompaction(int live_maps) {
2083 ASSERT(NeedsCompaction(live_maps));
2084
2085 int pages_left = live_maps / kMapsPerPage;
2086 PageIterator it(this, PageIterator::ALL_PAGES);
2087 while (pages_left-- > 0) {
2088 ASSERT(it.has_next());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002089 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
Leon Clarkee46be812010-01-19 14:06:41 +00002090 }
2091 ASSERT(it.has_next());
2092 Page* top_page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002093 top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
Leon Clarkee46be812010-01-19 14:06:41 +00002094 ASSERT(top_page->is_valid());
2095
2096 int offset = live_maps % kMapsPerPage * Map::kSize;
2097 Address top = top_page->ObjectAreaStart() + offset;
2098 ASSERT(top < top_page->ObjectAreaEnd());
2099 ASSERT(Contains(top));
2100
2101 return top;
2102 }
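  // Worked example (sketch) of the arithmetic above: if a page held
  // kMapsPerPage == 64 maps (an illustrative value) and live_maps == 150, the
  // compacted maps fill 150 / 64 == 2 full pages, and the new allocation top
  // lies at offset (150 % 64) * Map::kSize == 22 * Map::kSize into the third
  // page.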
2103
2104 void FinishCompaction(Address new_top, int live_maps) {
2105 Page* top_page = Page::FromAddress(new_top);
2106 ASSERT(top_page->is_valid());
2107
2108 SetAllocationInfo(&allocation_info_, top_page);
2109 allocation_info_.top = new_top;
2110
2111 int new_size = live_maps * Map::kSize;
2112 accounting_stats_.DeallocateBytes(accounting_stats_.Size());
2113 accounting_stats_.AllocateBytes(new_size);
2114
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002115 // Flush allocation watermarks.
2116 for (Page* p = first_page_; p != top_page; p = p->next_page()) {
2117 p->SetAllocationWatermark(p->AllocationTop());
2118 }
2119 top_page->SetAllocationWatermark(new_top);
2120
Leon Clarkee46be812010-01-19 14:06:41 +00002121#ifdef DEBUG
2122 if (FLAG_enable_slow_asserts) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002123 intptr_t actual_size = 0;
Leon Clarkee46be812010-01-19 14:06:41 +00002124 for (Page* p = first_page_; p != top_page; p = p->next_page())
2125 actual_size += kMapsPerPage * Map::kSize;
2126 actual_size += (new_top - top_page->ObjectAreaStart());
2127 ASSERT(accounting_stats_.Size() == actual_size);
2128 }
2129#endif
2130
2131 Shrink();
2132 ResetFreeList();
2133 }
2134
Steve Blocka7e24c12009-10-30 11:49:00 +00002135 protected:
2136#ifdef DEBUG
2137 virtual void VerifyObject(HeapObject* obj);
2138#endif
2139
2140 private:
Leon Clarkee46be812010-01-19 14:06:41 +00002141 static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
2142
2143 // Do map space compaction if there is a page gap.
Leon Clarked91b9f72010-01-27 17:25:45 +00002144 int CompactionThreshold() {
2145 return kMapsPerPage * (max_map_space_pages_ - 1);
2146 }
2147
2148 const int max_map_space_pages_;
Leon Clarkee46be812010-01-19 14:06:41 +00002149
Steve Blocka7e24c12009-10-30 11:49:00 +00002150 // An array of page start address in a map space.
Leon Clarked91b9f72010-01-27 17:25:45 +00002151 Address page_addresses_[kMaxMapPageIndex];
Steve Blocka7e24c12009-10-30 11:49:00 +00002152
2153 public:
2154 TRACK_MEMORY("MapSpace")
2155};
2156
2157
2158// -----------------------------------------------------------------------------
2159// Old space for all global object property cell objects
2160
2161class CellSpace : public FixedSpace {
2162 public:
2163 // Creates a property cell space object with a maximum capacity.
Steve Block44f0eee2011-05-26 01:26:41 +01002164 CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2165 : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
2166 {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002167
2168 protected:
2169#ifdef DEBUG
2170 virtual void VerifyObject(HeapObject* obj);
2171#endif
2172
2173 public:
2174 TRACK_MEMORY("CellSpace")
2175};
2176
2177
2178// -----------------------------------------------------------------------------
2179// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2180  // the large object space. A large object is allocated from the OS heap with
2181// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2182  // A large object always starts at offset Page::kObjectStartOffset within its page.
2183// Large objects do not move during garbage collections.
2184
2185// A LargeObjectChunk holds exactly one large object page with exactly one
2186// large object.
2187class LargeObjectChunk {
2188 public:
2189 // Allocates a new LargeObjectChunk that contains a large object page
2190 // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002191 // object) bytes after the object area start of that page.
Ben Murdochb0fe1622011-05-05 13:52:32 +01002192 static LargeObjectChunk* New(int size_in_bytes, Executability executable);
2193
2194 // Free the memory associated with the chunk.
2195 inline void Free(Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002196
2197 // Interpret a raw address as a large object chunk.
2198 static LargeObjectChunk* FromAddress(Address address) {
2199 return reinterpret_cast<LargeObjectChunk*>(address);
2200 }
2201
2202 // Returns the address of this chunk.
2203 Address address() { return reinterpret_cast<Address>(this); }
2204
2205 // Accessors for the fields of the chunk.
2206 LargeObjectChunk* next() { return next_; }
2207 void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
Steve Block791712a2010-08-27 10:21:07 +01002208 size_t size() { return size_ & ~Page::kPageFlagMask; }
Ben Murdochb0fe1622011-05-05 13:52:32 +01002209
2210 // Compute the start address in the chunk.
2211 inline Address GetStartAddress();
Steve Blocka7e24c12009-10-30 11:49:00 +00002212
2213 // Returns the object in this chunk.
Ben Murdochb0fe1622011-05-05 13:52:32 +01002214 HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002215
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002216  // Given a requested size, returns the physical size of a chunk to be
2217 // allocated.
Steve Blocka7e24c12009-10-30 11:49:00 +00002218 static int ChunkSizeFor(int size_in_bytes);
2219
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002220 // Given a chunk size, returns the object size it can accommodate. Used by
2221 // LargeObjectSpace::Available.
Ben Murdochf87a2032010-10-22 12:50:53 +01002222 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002223 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2224 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2225 }
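  // Worked example (sketch): with Page::kPageSize == 8K and an assumed
  // Page::kObjectStartOffset of 32 bytes, a 24K chunk can accommodate an
  // object of 24576 - 8192 - 32 == 16352 bytes, while any chunk of at most
  // 8K + 32 bytes accommodates no large object at all (ObjectSizeFor returns
  // 0). The offset value is illustrative.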
2226
2227 private:
2228 // A pointer to the next large object chunk in the space or NULL.
2229 LargeObjectChunk* next_;
2230
Ben Murdochb0fe1622011-05-05 13:52:32 +01002231 // The total size of this chunk.
Steve Blocka7e24c12009-10-30 11:49:00 +00002232 size_t size_;
2233
2234 public:
2235 TRACK_MEMORY("LargeObjectChunk")
2236};
2237
2238
2239class LargeObjectSpace : public Space {
2240 public:
Steve Block44f0eee2011-05-26 01:26:41 +01002241 LargeObjectSpace(Heap* heap, AllocationSpace id);
Steve Blocka7e24c12009-10-30 11:49:00 +00002242 virtual ~LargeObjectSpace() {}
2243
2244 // Initializes internal data structures.
2245 bool Setup();
2246
2247 // Releases internal resources, frees objects in this space.
2248 void TearDown();
2249
2250 // Allocates a (non-FixedArray, non-Code) large object.
John Reck59135872010-11-02 12:39:01 -07002251 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002252 // Allocates a large Code object.
John Reck59135872010-11-02 12:39:01 -07002253 MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002254 // Allocates a large FixedArray.
John Reck59135872010-11-02 12:39:01 -07002255 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002256
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002257 // Available bytes for objects in this space.
Steve Block44f0eee2011-05-26 01:26:41 +01002258 inline intptr_t Available();
Steve Blocka7e24c12009-10-30 11:49:00 +00002259
Ben Murdochf87a2032010-10-22 12:50:53 +01002260 virtual intptr_t Size() {
Steve Blocka7e24c12009-10-30 11:49:00 +00002261 return size_;
2262 }
2263
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002264 virtual intptr_t SizeOfObjects() {
2265 return objects_size_;
2266 }
2267
Steve Blocka7e24c12009-10-30 11:49:00 +00002268 int PageCount() {
2269 return page_count_;
2270 }
2271
2272  // Finds an object for a given address; returns Failure::Exception()
2273  // if it is not found. The function iterates through all objects in this
2274  // space and may be slow.
John Reck59135872010-11-02 12:39:01 -07002275 MaybeObject* FindObject(Address a);
Steve Blocka7e24c12009-10-30 11:49:00 +00002276
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002277  // Finds the large object page containing the given pc, or returns NULL
2278  // if no such page exists.
2279 LargeObjectChunk* FindChunkContainingPc(Address pc);
2280
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002281 // Iterates objects covered by dirty regions.
2282 void IterateDirtyRegions(ObjectSlotCallback func);
Steve Blocka7e24c12009-10-30 11:49:00 +00002283
2284 // Frees unmarked objects.
2285 void FreeUnmarkedObjects();
2286
2287 // Checks whether a heap object is in this space; O(1).
2288 bool Contains(HeapObject* obj);
2289
2290 // Checks whether the space is empty.
2291 bool IsEmpty() { return first_chunk_ == NULL; }
2292
Leon Clarkee46be812010-01-19 14:06:41 +00002293 // See the comments for ReserveSpace in the Space class. This has to be
2294 // called after ReserveSpace has been called on the paged spaces, since they
2295 // may use some memory, leaving less for large objects.
2296 virtual bool ReserveSpace(int bytes);
2297
Steve Blocka7e24c12009-10-30 11:49:00 +00002298#ifdef ENABLE_HEAP_PROTECTION
2299 // Protect/unprotect the space by marking it read-only/writable.
2300 void Protect();
2301 void Unprotect();
2302#endif
2303
2304#ifdef DEBUG
2305 virtual void Verify();
2306 virtual void Print();
2307 void ReportStatistics();
2308 void CollectCodeStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002309#endif
2310 // Checks whether an address is in the object area in this space. It
2311  // iterates over all objects in the space and may be slow.
2312 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2313
2314 private:
2315 // The head of the linked list of large object chunks.
2316 LargeObjectChunk* first_chunk_;
Ben Murdochf87a2032010-10-22 12:50:53 +01002317 intptr_t size_; // allocated bytes
Steve Blocka7e24c12009-10-30 11:49:00 +00002318 int page_count_; // number of chunks
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002319 intptr_t objects_size_; // size of objects
Steve Blocka7e24c12009-10-30 11:49:00 +00002320
2321 // Shared implementation of AllocateRaw, AllocateRawCode and
2322 // AllocateRawFixedArray.
John Reck59135872010-11-02 12:39:01 -07002323 MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
2324 int object_size,
2325 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002326
Steve Blocka7e24c12009-10-30 11:49:00 +00002327 friend class LargeObjectIterator;
2328
2329 public:
2330 TRACK_MEMORY("LargeObjectSpace")
2331};
2332
2333
2334class LargeObjectIterator: public ObjectIterator {
2335 public:
2336 explicit LargeObjectIterator(LargeObjectSpace* space);
2337 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2338
Steve Blocka7e24c12009-10-30 11:49:00 +00002339 HeapObject* next();
2340
2341 // implementation of ObjectIterator.
Steve Blocka7e24c12009-10-30 11:49:00 +00002342 virtual HeapObject* next_object() { return next(); }
2343
2344 private:
2345 LargeObjectChunk* current_;
2346 HeapObjectCallback size_func_;
2347};
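// Example (minimal sketch): iterating the large object space, e.g. when
// collecting statistics. `lo_space` is assumed to be an initialized
// LargeObjectSpace*, and next() is assumed to return NULL once iteration is
// exhausted, as for the other object iterators in this file.
//
//   LargeObjectIterator it(lo_space);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // each obj is the single large object of one LargeObjectChunk
//   }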
2348
2349
Steve Block44f0eee2011-05-26 01:26:41 +01002350#ifdef DEBUG
2351struct CommentStatistic {
2352 const char* comment;
2353 int size;
2354 int count;
2355 void Clear() {
2356 comment = NULL;
2357 size = 0;
2358 count = 0;
2359 }
2360 // Must be small, since an iteration is used for lookup.
2361 static const int kMaxComments = 64;
2362};
2363#endif
2364
2365
Steve Blocka7e24c12009-10-30 11:49:00 +00002366} } // namespace v8::internal
2367
2368#endif // V8_SPACES_H_