// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "list-inl.h"
#include "log.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area. The page size is deliberately chosen to be 8K bytes.
// The first word of a page is an opaque page header that has the
// address of the next page and its ownership information. The second word may
// have the allocation top address of this page. Heap objects are aligned to the
// pointer size.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than 8K.
//
// A card marking write barrier is used to keep track of intergenerational
// references. Old space pages are divided into regions of Page::kRegionSize
// size. Each region has a corresponding dirty bit in the page header, which is
// set if the region might contain pointers to new space. For details of the
// dirty bit encoding see the comments in the Page::GetRegionNumberForAddress()
// method body and the illustrative sketch at the end of this comment.
//
// During scavenges and mark-sweep collections we iterate intergenerational
// pointers without decoding heap object maps, so if the page belongs to old
// pointer space or large object space it is essential to guarantee that
// the page does not contain any garbage pointers to new space: every pointer-
// aligned word which satisfies the Heap::InNewSpace() predicate must be a
// pointer to a live heap object in new space. Thus objects in old pointer
// and large object spaces should have a special layout (e.g. no bare integer
// fields). This requirement does not apply to map space, which is iterated in
// a special fashion. However, we still require pointer fields of dead maps to
// be cleaned.
//
// To enable lazy cleaning of old space pages we use a notion of an allocation
// watermark. Every pointer under the watermark is considered to be well formed.
// The page allocation watermark is not necessarily equal to the page allocation
// top, but all live objects on the page should reside under the allocation
// watermark. During scavenge the allocation watermark might be bumped and
// invalid pointers might appear below it. To avoid following them we store a
// valid watermark into a special field in the page header and set the page's
// WATERMARK_INVALIDATED flag. For details see the comments in the
// Page::SetAllocationWatermark() method body.
//
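// A rough illustrative sketch of the card marking scheme (the authoritative
// details live in the Page method bodies below): with 8K pages and 256 byte
// regions a page covers exactly 32 regions, so the dirty marks of a page fit
// in a single 32-bit word. Conceptually, whenever a pointer to new space is
// stored into a slot, the write barrier does something like
//
//   Page* page = Page::FromAddress(slot_address);
//   page->MarkRegionDirty(slot_address);  // sets the bit selected by
//                                         // GetRegionNumberForAddress(slot_address)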

// Some assertion macros used in the debugging mode.

#define ASSERT_PAGE_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_MAP_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size) \
  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset) \
  ASSERT((Page::kObjectStartOffset <= offset) \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index) \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;

// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
// address is always aligned to the 8K page size.
//
// Each page starts with a header of Page::kPageHeaderSize size which contains
// bookkeeping data.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[
  //
  // Note that this function only works for addresses in normal paged
  // spaces and addresses in the first 8K of large object pages (i.e.,
  // the start of large objects but not necessarily derived pointers
  // within them).
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    ASSERT_PAGE_OFFSET(p->Offset(top));
    return p;
  }

  // Returns the start address of this page.
  Address address() { return reinterpret_cast<Address>(this); }

  // Checks whether this is a valid page address.
  bool is_valid() { return address() != NULL; }

  // Returns the next page of this page.
  inline Page* next_page();

  // Return the end of allocation in this page. Undefined for unused pages.
  inline Address AllocationTop();

  // Return the allocation watermark for the page.
  // For old space pages it is guaranteed that the area under the watermark
  // does not contain any garbage pointers to new space.
  inline Address AllocationWatermark();

  // Return the allocation watermark offset from the beginning of the page.
  inline uint32_t AllocationWatermarkOffset();

  inline void SetAllocationWatermark(Address allocation_watermark);

  inline void SetCachedAllocationWatermark(Address allocation_watermark);
  inline Address CachedAllocationWatermark();

  // Returns the start address of the object area in this page.
  Address ObjectAreaStart() { return address() + kObjectStartOffset; }

  // Returns the end address (exclusive) of the object area in this page.
  Address ObjectAreaEnd() { return address() + Page::kPageSize; }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // True if this page was in use before current compaction started.
  // Result is valid only for pages owned by paged spaces and
  // only after PagedSpace::PrepareForMarkCompact was called.
  inline bool WasInUseBeforeMC();

  inline void SetWasInUseBeforeMC(bool was_in_use);

  // True if this page is a large object page.
  inline bool IsLargeObjectPage();

  inline void SetIsLargeObjectPage(bool is_large_object_page);

  inline bool IsPageExecutable();

  inline void SetIsPageExecutable(bool is_page_executable);

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    ASSERT_PAGE_OFFSET(offset);
    return offset;
  }

  // Returns the address for a given offset in this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------
  // Card marking support

  static const uint32_t kAllRegionsCleanMarks = 0x0;
  static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;

  inline uint32_t GetRegionMarks();
  inline void SetRegionMarks(uint32_t dirty);

  inline uint32_t GetRegionMaskForAddress(Address addr);
  inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
  inline int GetRegionNumberForAddress(Address addr);

  inline void MarkRegionDirty(Address addr);
  inline bool IsRegionDirty(Address addr);

  inline void ClearRegionMarks(Address start,
                               Address end,
                               bool reaches_limit);

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
                                     kIntSize + kPointerSize;

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both.
  static const int kObjectStartOffset =
      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));

  // Object area size in bytes.
  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page.
  static const int kMaxHeapObjectSize = kObjectAreaSize;

  static const int kDirtyFlagOffset = 2 * kPointerSize;
  static const int kRegionSizeLog2 = 8;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);

  STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);

  enum PageFlag {
    IS_NORMAL_PAGE = 0,
    WAS_IN_USE_BEFORE_MC,

    // Page allocation watermark was bumped by preallocation during scavenge.
    // Correct watermark can be retrieved by CachedAllocationWatermark() method
    WATERMARK_INVALIDATED,
    IS_EXECUTABLE,
    NUM_PAGE_FLAGS  // Must be last
  };
  static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;

  // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
  // scavenge we just invalidate the watermark on each old space page after
  // processing it. Then we flip the meaning of the WATERMARK_INVALIDATED
  // flag at the beginning of the next scavenge and each page becomes marked as
  // having a valid watermark.
  //
  // The following invariant must hold for pages in old pointer and map spaces:
  //     If a page is in use then it is marked as having an invalid watermark
  //     at the beginning and at the end of any GC.
  //
  // This invariant guarantees that after flipping the flag meaning at the
  // beginning of a scavenge all pages in use will be marked as having a valid
  // watermark.
  static inline void FlipMeaningOfInvalidatedWatermarkFlag();

  // Returns true if the page allocation watermark was not altered during
  // scavenge.
  inline bool IsWatermarkValid();

  inline void InvalidateWatermark(bool value);

  inline bool GetPageFlag(PageFlag flag);
  inline void SetPageFlag(PageFlag flag, bool value);
  inline void ClearPageFlags();

  inline void ClearGCFields();

  static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
  static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
  static const uint32_t kAllocationWatermarkOffsetMask =
      ((1 << kAllocationWatermarkOffsetBits) - 1) <<
      kAllocationWatermarkOffsetShift;

  static const uint32_t kFlagsMask =
      ((1 << kAllocationWatermarkOffsetShift) - 1);

  STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
               kAllocationWatermarkOffsetBits);

  // This field contains the meaning of the WATERMARK_INVALIDATED flag.
  // Instead of clearing this flag from all pages we just flip
  // its meaning at the beginning of a scavenge.
  static intptr_t watermark_invalidated_mark_;

  //---------------------------------------------------------------------------
  // Page header description.
  //
  // If a page is not in the large object space, the first word,
  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
  // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
  // or [next_page_start, next_page_end[. It cannot point to a valid address
  // in the current page. If a page is in the large object space, the first
  // word *may* (if the page start and large object chunk start are the
  // same) contain the address of the next large object chunk.
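  //
  // A minimal illustrative sketch of the normal-page encoding (the
  // authoritative code is in MemoryAllocator::SetNextPage()/GetNextPage();
  // the exact packing shown here is an assumption for illustration only):
  // the next page address is page aligned, so its low kPageSizeBits bits are
  // zero and a chunk id in [0..kPageSize[ fits into them, roughly
  //
  //   opaque_header = OffsetFrom(next_page->address()) | chunk_id;
  //   next_page_address = opaque_header & ~kPageAlignmentMask;  // recover it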
  intptr_t opaque_header;

  // If the page is not in the large object space, the low-order bit of the
  // second word is set. If the page is in the large object space, the
  // second word *may* (if the page start and large object chunk start are
  // the same) contain the large object chunk size. In either case, the
  // low-order bit for large object pages will be cleared.
  // For normal pages this word is used to store page flags and
  // the offset of the allocation top.
  intptr_t flags_;

  // This field contains dirty marks for regions covering the page. Only dirty
  // regions might contain intergenerational references.
  // Only 32 dirty marks are supported, so for large object pages several
  // regions might be mapped to a single dirty mark.
  uint32_t dirty_regions_;

  // The index of the page in its owner space.
  int mc_page_index;

  // During mark-compact collections this field contains the forwarding address
  // of the first live object in this page.
  // During scavenge collection this field is used to store the allocation
  // watermark if it is altered during scavenge.
  Address mc_first_forwarded;
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(AllocationSpace id, Executability executable)
      : id_(id), executable_(executable) {}

  virtual ~Space() {}

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  virtual intptr_t Size() = 0;

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  virtual void Protect() = 0;
  virtual void Unprotect() = 0;
#endif

#ifdef DEBUG
  virtual void Print() = 0;
#endif

  // After calling this we can allocate a certain number of bytes using only
  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
  // without using freelists or causing a GC. This is used by partial
  // snapshots. It returns true if space was reserved or false if a GC is
  // needed. For paged spaces the space requested must include the space wasted
  // at the end of each page when allocating linearly.
  virtual bool ReserveSpace(int bytes) = 0;

 private:
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange : public AllStatic {
 public:
  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  static bool Setup(const size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  static void TearDown();

  static bool exists() { return code_range_ != NULL; }
  static bool contains(Address address) {
    if (code_range_ == NULL) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT static void* AllocateRawMemory(const size_t requested,
                                                 size_t* allocated);
  static void FreeRawMemory(void* buf, size_t length);

 private:
  // The reserved range of virtual memory that all code objects are put in.
  static VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {}
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {}

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  static List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  static List<FreeBlock> allocation_list_;
  static int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
  static void GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);
};


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
// space). A paged chunk consists of pages. Pages in a chunk have contiguous
// addresses and are linked as a list.
//
// The allocator keeps an initial chunk which is used for the new space. The
// leftover regions of the initial chunk are used for the initial chunks of
// old space and map space if they are big enough to hold at least one page.
// The allocator assumes that there is one old space and one map space; each
// expansion of a space allocates kPagesPerChunk pages, except possibly the
// last expansion (before running out of space). The first chunk may contain
// fewer than kPagesPerChunk pages as well.
//
// The memory allocator also allocates chunks for the large object space, but
// they are managed by the space itself. The new space does not expand.
//
// The fact that pages for paged spaces are allocated and deallocated in chunks
// induces a constraint on the order of pages in a linked list. We say that
// pages are linked in the chunk-order if and only if every two consecutive
// pages from the same chunk are consecutive in the linked list.
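// For example, if chunk A holds pages {A1, A2, A3} and chunk B holds pages
// {B1, B2}, the lists A1 A2 A3 B1 B2 and B1 B2 A1 A2 A3 are chunk-ordered,
// while A1 B1 A2 A3 B2 is not, because A1 and A2 are separated by a page from
// another chunk.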
//


class MemoryAllocator : public AllStatic {
 public:
  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space.
  static bool Setup(intptr_t max_capacity);

  // Deletes valid chunks.
  static void TearDown();

  // Reserves an initial address range of virtual memory to be split between
  // the two new space semispaces, the old space, and the map space. The
  // memory is not yet committed or assigned to spaces and split into pages.
  // The initial chunk is unmapped when the memory allocator is torn down.
  // This function should only be called when there is not already a reserved
  // initial chunk (initial_chunk_ should be NULL). It returns the start
  // address of the initial chunk if successful, with the side effect of
  // setting the initial chunk, or else NULL if unsuccessful and leaves the
  // initial chunk NULL.
  static void* ReserveInitialChunk(const size_t requested);

  // Commits pages from an as-yet-unmanaged block of virtual memory into a
  // paged space. The block should be part of the initial chunk reserved via
  // a call to ReserveInitialChunk. The number of pages is always returned in
  // the output parameter num_pages. This function assumes that the start
  // address is non-null and that it is big enough to hold at least one
  // page-aligned page. The call always succeeds, and num_pages is always
  // greater than zero.
  static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
                           int* num_pages);

  // Commit a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  static bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  static bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  static void ZapBlock(Address start, size_t size);

  // Attempts to allocate the requested (non-zero) number of pages from the
  // OS. Fewer pages might be allocated than requested. If it fails to
  // allocate memory from the OS or cannot allocate a single page, this
  // function returns an invalid page pointer (NULL). The caller must check
  // whether the returned page is valid (by calling Page::is_valid()). It is
  // guaranteed that allocated pages have contiguous addresses. The actual
  // number of allocated pages is returned in the output parameter
  // allocated_pages. If the PagedSpace owner is executable and there is
  // a code range, the pages are allocated from the code range.
  static Page* AllocatePages(int requested_pages, int* allocated_pages,
                             PagedSpace* owner);

  // Frees pages from a given page and after. Requires pages to be
  // linked in chunk-order (see comment for class).
  // If 'p' is the first page of a chunk, pages from 'p' are freed
  // and this function returns an invalid page pointer.
  // Otherwise, the function searches a page after 'p' that is
  // the first page of a chunk. Pages after the found page
  // are freed and the function returns 'p'.
  static Page* FreePages(Page* p);

  // Frees all pages owned by given space.
  static void FreeAllPages(PagedSpace* space);

  // Allocates and frees raw memory of certain size.
  // These are just thin wrappers around OS::Allocate and OS::Free,
  // but keep track of allocated bytes as part of heap.
  // If the flag is EXECUTABLE and a code range exists, the requested
  // memory is allocated from the code range. If a code range exists
  // and the freed memory is in it, the code range manages the freed memory.
  MUST_USE_RESULT static void* AllocateRawMemory(const size_t requested,
                                                 size_t* allocated,
                                                 Executability executable);
  static void FreeRawMemory(void* buf,
                            size_t length,
                            Executability executable);
  static void PerformAllocationCallback(ObjectSpace space,
                                        AllocationAction action,
                                        size_t size);

  static void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                          ObjectSpace space,
                                          AllocationAction action);
  static void RemoveMemoryAllocationCallback(
      MemoryAllocationCallback callback);
  static bool MemoryAllocationCallbackRegistered(
      MemoryAllocationCallback callback);

  // Returns the maximum available bytes of heaps.
  static intptr_t Available() {
    return capacity_ < size_ ? 0 : capacity_ - size_;
  }

  // Returns allocated spaces in bytes.
  static intptr_t Size() { return size_; }

  // Returns allocated executable spaces in bytes.
  static intptr_t SizeExecutable() { return size_executable_; }

  // Returns maximum available bytes that the old space can have.
  static intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
  }

  // Links two pages.
  static inline void SetNextPage(Page* prev, Page* next);

  // Returns the next page of a given page.
  static inline Page* GetNextPage(Page* p);

  // Checks whether a page belongs to a space.
  static inline bool IsPageInSpace(Page* p, PagedSpace* space);

  // Returns the space that owns the given page.
  static inline PagedSpace* PageOwner(Page* page);

  // Finds the first/last page in the same chunk as a given page.
  static Page* FindFirstPageInSameChunk(Page* p);
  static Page* FindLastPageInSameChunk(Page* p);

  // Relinks list of pages owned by space to make it chunk-ordered.
  // Returns new first and last pages of space.
  // Also returns the last page in the relinked list which has the
  // WasInUseBeforeMC flag set.
  static void RelinkPageListInChunkOrder(PagedSpace* space,
                                         Page** first_page,
                                         Page** last_page,
                                         Page** last_page_in_use);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect a block of memory by marking it read-only/writable.
  static inline void Protect(Address start, size_t size);
  static inline void Unprotect(Address start, size_t size,
                               Executability executable);

  // Protect/unprotect a chunk given a page in the chunk.
  static inline void ProtectChunkFromPage(Page* page);
  static inline void UnprotectChunkFromPage(Page* page);
#endif

#ifdef DEBUG
  // Reports statistic info of the space.
  static void ReportStatistics();
#endif

  // Due to encoding limitation, we can only have 8K chunks.
  static const int kMaxNofChunks = 1 << kPageSizeBits;
  // If a chunk has at least 16 pages, the maximum heap size is about
  // 8K * 8K * 16 = 1G bytes.
#ifdef V8_TARGET_ARCH_X64
  static const int kPagesPerChunk = 32;
#else
  static const int kPagesPerChunk = 16;
#endif
  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;

 private:
  // Maximum space size in bytes.
  static intptr_t capacity_;

  // Allocated space size in bytes.
  static intptr_t size_;
  // Allocated executable space size in bytes.
  static intptr_t size_executable_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {
    }
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };
  // A list of callbacks that are triggered when memory is allocated or freed.
  static List<MemoryAllocationCallbackRegistration>
      memory_allocation_callbacks_;

  // The initial chunk of virtual memory.
  static VirtualMemory* initial_chunk_;

  // Allocated chunk info: chunk start address, chunk size, and owning space.
  class ChunkInfo BASE_EMBEDDED {
   public:
    ChunkInfo() : address_(NULL),
                  size_(0),
                  owner_(NULL),
                  executable_(NOT_EXECUTABLE) {}
    inline void init(Address a, size_t s, PagedSpace* o);
    Address address() { return address_; }
    size_t size() { return size_; }
    PagedSpace* owner() { return owner_; }
    // We save executability of the owner to allow using it
    // when collecting stats after the owner has been destroyed.
    Executability executable() const { return executable_; }

   private:
    Address address_;
    size_t size_;
    PagedSpace* owner_;
    Executability executable_;
  };

  // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
  static List<ChunkInfo> chunks_;
  static List<int> free_chunk_ids_;
  static int max_nof_chunks_;
  static int top_;

  // Push/pop a free chunk id onto/from the stack.
  static void Push(int free_chunk_id);
  static int Pop();
  static bool OutOfChunkIds() { return top_ == 0; }

  // Frees a chunk.
  static void DeleteChunk(int chunk_id);

  // Basic check whether a chunk id is in the valid range.
  static inline bool IsValidChunkId(int chunk_id);

  // Checks whether a chunk id identifies an allocated chunk.
  static inline bool IsValidChunk(int chunk_id);

  // Returns the chunk id that a page belongs to.
  static inline int GetChunkId(Page* p);

  // True if the address lies in the initial chunk.
  static inline bool InInitialChunk(Address address);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                      PagedSpace* owner);

  static Page* RelinkPagesInChunk(int chunk_id,
                                  Address chunk_start,
                                  size_t chunk_size,
                                  Page* prev,
                                  Page** last_page_in_use);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own next()
// method, which is used to avoid virtual function calls when iterating a
// specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from a given address to the
// top of a space. The given address must be below the current
// allocation pointer (space top). There are some caveats.
//
// (1) If the space top changes upward during iteration (because of
//     allocating new objects), the iterator does not iterate objects
//     above the original space top. The caller must create a new
//     iterator starting from the old top in order to visit these new
//     objects.
//
// (2) If new objects are allocated below the original allocation top
//     (e.g., free-list allocation in paged spaces), the new objects
//     may or may not be iterated depending on their position with
//     respect to the current point of iteration.
//
// (3) The space top should not change downward during iteration,
//     otherwise the iterator will return not-necessarily-valid
//     objects.
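//
// Typical usage (an illustrative sketch only; it assumes a paged space
// accessor such as Heap::old_pointer_space() from heap.h and that none of the
// caveats above apply):
//
//   HeapObjectIterator it(Heap::old_pointer_space());
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... process obj ...
//   }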

class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space. If a start
  // address is not given, the iterator starts from the space bottom.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(PagedSpace* space, Address start);
  HeapObjectIterator(PagedSpace* space,
                     Address start,
                     HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  inline HeapObject* next() {
    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
  }

  // implementation of ObjectIterator.
  virtual HeapObject* next_object() { return next(); }

 private:
  Address cur_addr_;  // current iteration point
  Address end_addr_;  // end iteration point
  Address cur_limit_;  // current page limit
  HeapObjectCallback size_func_;  // size function
  Page* end_page_;  // caches the page of the end address

  HeapObject* FromCurrentPage() {
    ASSERT(cur_addr_ < cur_limit_);

    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    ASSERT_OBJECT_SIZE(obj_size);

    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_limit_);

    return obj;
  }

  // Slow path of next, goes into the next page.
  HeapObject* FromNextPage();

  // Initializes fields.
  void Initialize(Address start, Address end, HeapObjectCallback size_func);

#ifdef DEBUG
  // Verifies whether fields have valid values.
  void Verify();
#endif
};


// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.
//
// The PageIterator class provides three modes for iterating pages in a space:
//   PAGES_IN_USE iterates pages containing allocated objects.
//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
//                    mark-compact collection.
//   ALL_PAGES iterates all pages in the space.
//
// There are some caveats.
//
// (1) If the space expands during iteration, new pages will not be
//     returned by the iterator in any mode.
//
// (2) If new objects are allocated during iteration, they will appear
//     in pages returned by the iterator. Allocation may cause the
//     allocation pointer or MC allocation pointer in the last page to
//     change between constructing the iterator and iterating the last
//     page.
//
// (3) The space should not shrink during iteration, otherwise the
//     iterator will return deallocated pages.
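//
// Typical usage (an illustrative sketch only, subject to the caveats above):
//
//   PageIterator it(space, PageIterator::PAGES_IN_USE);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... process p ...
//   }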

class PageIterator BASE_EMBEDDED {
 public:
  enum Mode {
    PAGES_IN_USE,
    PAGES_USED_BY_MC,
    ALL_PAGES
  };

  PageIterator(PagedSpace* space, Mode mode);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
};


// -----------------------------------------------------------------------------
// A space has a list of pages. The next page can be accessed via
// Page::next_page() call. The next page of the last page is an
// invalid page pointer. A space can expand and shrink dynamically.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  Address top;  // current allocation top
  Address limit;  // current allocation limit

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
        && (top <= limit);
  }
#endif
};


// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (ie, not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (eg, small blocks due
// to internal fragmentation, top of page areas in map space), and the bytes
// 'available' is the number of unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
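//
// For example (illustrative only): after ExpandSpace(Page::kObjectAreaSize)
// the whole amount is 'available'; AllocateBytes(128) then moves 128 bytes
// from 'available' to 'size', and WasteBytes(32) moves another 32 bytes from
// 'available' to 'waste'. Throughout, capacity == size + waste + available.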
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (ie, no capacity).
  void Clear() {
    capacity_ = 0;
    available_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  // Reset the allocation statistics (ie, available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    available_ = capacity_;
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  intptr_t Capacity() { return capacity_; }
  intptr_t Available() { return available_; }
  intptr_t Size() { return size_; }
  intptr_t Waste() { return waste_; }

  // Grow the space by adding available bytes.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    available_ += size_in_bytes;
  }

  // Shrink the space by removing available bytes.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    available_ -= size_in_bytes;
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(intptr_t size_in_bytes) {
    available_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(intptr_t size_in_bytes) {
    size_ -= size_in_bytes;
    available_ += size_in_bytes;
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    available_ -= size_in_bytes;
    waste_ += size_in_bytes;
  }

  // Consider the wasted bytes to be allocated, as they contain filler
  // objects (waste -> size).
  void FillWastedBytes(intptr_t size_in_bytes) {
    waste_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

 private:
  intptr_t capacity_;
  intptr_t available_;
  intptr_t size_;
  intptr_t waste_;
};


class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(intptr_t max_capacity,
             AllocationSpace id,
             Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool Setup(Address start, size_t size);

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetup();

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }

  // Given an address occupied by a live object, return that object if it is
  // in this space, or Failure::Exception() if it is not. The implementation
  // iterates over objects in the page containing the address; the cost is
  // linear in the number of objects in the page. It may be slow.
  MUST_USE_RESULT MaybeObject* FindObject(Address addr);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001015
kasper.lund7276f142008-07-30 08:49:36 +00001016 // Checks whether page is currently in use by this space.
1017 bool IsUsed(Page* page);
1018
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001019 void MarkAllPagesClean();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001020
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001021 // Prepares for a mark-compact GC.
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001022 virtual void PrepareForMarkCompact(bool will_compact);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001023
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001024 // The top of allocation in a page in this space. Undefined if page is unused.
1025 Address PageAllocationTop(Page* page) {
1026 return page == TopPageOf(allocation_info_) ? top()
1027 : PageAllocationLimit(page);
1028 }
1029
1030 // The limit of allocation for a page in this space.
1031 virtual Address PageAllocationLimit(Page* page) = 0;
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001032
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001033 void FlushTopPageWatermark() {
1034 AllocationTopPage()->SetCachedAllocationWatermark(top());
1035 AllocationTopPage()->InvalidateWatermark(true);
1036 }
1037
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001038 // Current capacity without growing (Size() + Available() + Waste()).
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001039 intptr_t Capacity() { return accounting_stats_.Capacity(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001040
ager@chromium.org3811b432009-10-28 14:53:37 +00001041 // Total amount of memory committed for this space. For paged
1042 // spaces this equals the capacity.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001043 intptr_t CommittedMemory() { return Capacity(); }
ager@chromium.org3811b432009-10-28 14:53:37 +00001044
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001045 // Available bytes without growing.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001046 intptr_t Available() { return accounting_stats_.Available(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001047
1048 // Allocated bytes in this space.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001049 virtual intptr_t Size() { return accounting_stats_.Size(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001050
1051 // Wasted bytes due to fragmentation and not recoverable until the
1052 // next GC of this space.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001053 intptr_t Waste() { return accounting_stats_.Waste(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001054
1055 // Returns the address of the first object in this space.
1056 Address bottom() { return first_page_->ObjectAreaStart(); }
1057
1058 // Returns the allocation pointer in this space.
1059 Address top() { return allocation_info_.top; }
1060
kasper.lund7276f142008-07-30 08:49:36 +00001061 // Allocate the requested number of bytes in the space if possible, return a
1062 // failure object if not.
lrn@chromium.org303ada72010-10-27 09:33:13 +00001063 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001064
kasper.lund7276f142008-07-30 08:49:36 +00001065 // Allocate the requested number of bytes for relocation during mark-compact
1066 // collection.
lrn@chromium.org303ada72010-10-27 09:33:13 +00001067 MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
kasper.lund7276f142008-07-30 08:49:36 +00001068
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001069 virtual bool ReserveSpace(int bytes);
1070
1071 // Used by ReserveSpace.
1072 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
kasper.lund7276f142008-07-30 08:49:36 +00001073
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001074 // Frees all pages in the range from prev (exclusive) to last (inclusive).
1075 // Freed pages are moved to the end of the page list.
1076 void FreePages(Page* prev, Page* last);
1077
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00001078 // Deallocates a block.
1079 virtual void DeallocateBlock(Address start,
1080 int size_in_bytes,
1081 bool add_to_freelist) = 0;
1082
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001083 // Set space allocation info.
1084 void SetTop(Address top) {
1085 allocation_info_.top = top;
1086 allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
1087 }
1088
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001089 // ---------------------------------------------------------------------------
1090 // Mark-compact collection support functions
1091
1092 // Set the relocation point to the beginning of the space.
1093 void MCResetRelocationInfo();
1094
1095 // Writes relocation info to the top page.
1096 void MCWriteRelocationInfoToPage() {
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001097 TopPageOf(mc_forwarding_info_)->
1098 SetAllocationWatermark(mc_forwarding_info_.top);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001099 }
1100
1101 // Computes the offset of a given address in this space relative to the
1102 // beginning of the space.
1103 int MCSpaceOffsetForAddress(Address addr);
1104
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001105 // Updates the allocation pointer to the relocation top after a mark-compact
1106 // collection.
1107 virtual void MCCommitRelocationInfo() = 0;
1108
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001109 // Releases half of unused pages.
1110 void Shrink();
1111
1112 // Ensures that the capacity is at least 'capacity'. Returns false on failure.
1113 bool EnsureCapacity(int capacity);
1114
kasperl@chromium.orgf5aa8372009-03-24 14:47:14 +00001115#ifdef ENABLE_HEAP_PROTECTION
1116 // Protect/unprotect the space by marking it read-only/writable.
1117 void Protect();
1118 void Unprotect();
1119#endif
1120
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001121#ifdef DEBUG
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001122 // Print meta info and objects in this space.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001123 virtual void Print();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001124
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001125 // Verify integrity of this space.
1126 virtual void Verify(ObjectVisitor* visitor);
1127
1128 // Overridden by subclasses to verify space-specific object
1129 // properties (e.g., only maps or free-list nodes are in map space).
1130 virtual void VerifyObject(HeapObject* obj) {}
1131
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001132 // Report code object related statistics
1133 void CollectCodeStatistics();
1134 static void ReportCodeStatistics();
1135 static void ResetCodeStatistics();
1136#endif
1137
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001138 // Returns the page of the allocation pointer.
1139 Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
1140
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00001141 void RelinkPageListInChunkOrder(bool deallocate_blocks);
1142
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001143 protected:
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001144 // Maximum capacity of this space.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001145 intptr_t max_capacity_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001146
1147 // Accounting information for this space.
1148 AllocationStats accounting_stats_;
1149
1150 // The first page in this space.
1151 Page* first_page_;
1152
kasperl@chromium.orgb3284ad2009-05-18 06:12:45 +00001153 // The last page in this space. Initially set in Setup, updated in
1154 // Expand and Shrink.
1155 Page* last_page_;
1156
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001157 // True if pages owned by this space are linked in chunk-order.
1158 // See comment for class MemoryAllocator for definition of chunk-order.
1159 bool page_list_is_chunk_ordered_;
1160
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001161 // Normal allocation information.
1162 AllocationInfo allocation_info_;
1163
1164 // Relocation information during mark-compact collections.
1165 AllocationInfo mc_forwarding_info_;
1166
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001167 // Bytes of each page that cannot be allocated. Possibly non-zero
1168 // for pages in spaces with only fixed-size objects. Always zero
1169 // for pages in spaces with variable sized objects (those pages are
1170 // padded with free-list nodes).
1171 int page_extra_;
1172
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001173 // Sets allocation pointer to a page bottom.
1174 static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
1175
1176 // Returns the top page specified by an allocation info structure.
1177 static Page* TopPageOf(AllocationInfo alloc_info) {
1178 return Page::FromAllocationTop(alloc_info.limit);
1179 }
1180
kasperl@chromium.orgeac059f2010-01-25 11:02:06 +00001181 int CountPagesToTop() {
1182 Page* p = Page::FromAllocationTop(allocation_info_.top);
1183 PageIterator it(this, PageIterator::ALL_PAGES);
1184 int counter = 1;
1185 while (it.has_next()) {
1186 if (it.next() == p) return counter;
1187 counter++;
1188 }
1189 UNREACHABLE();
1190 return -1;
1191 }
1192
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001193 // Expands the space by allocating a fixed number of pages. Returns false if
1194 // it cannot allocate the requested number of pages from the OS. Newly
ager@chromium.org32912102009-01-16 10:38:43 +00001195 // allocated pages are appended after last_page_.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001196 bool Expand(Page* last_page);
1197
kasper.lund7276f142008-07-30 08:49:36 +00001198 // Generic fast case allocation function that tries linear allocation in
1199 // the top page of 'alloc_info'. Returns NULL on failure.
1200 inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
1201 int size_in_bytes);
1202
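  // In spirit, the fast path above is a bump-pointer allocation along these
  // lines (a sketch only; the actual inline definition is not in this header):
  //
  //   Address current_top = alloc_info->top;
  //   Address new_top = current_top + size_in_bytes;
  //   if (new_top > alloc_info->limit) return NULL;  // fall back to slow path
  //   alloc_info->top = new_top;
  //   return HeapObject::FromAddress(current_top);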
1203 // During normal allocation or deserialization, roll to the next page in
1204 // the space (there is assumed to be one) and allocate there. This
1205 // function is space-dependent.
1206 virtual HeapObject* AllocateInNextPage(Page* current_page,
1207 int size_in_bytes) = 0;
1208
1209 // Slow path of AllocateRaw. This function is space-dependent.
lrn@chromium.org303ada72010-10-27 09:33:13 +00001210 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
kasper.lund7276f142008-07-30 08:49:36 +00001211
1212 // Slow path of MCAllocateRaw.
lrn@chromium.org303ada72010-10-27 09:33:13 +00001213 MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
kasper.lund7276f142008-07-30 08:49:36 +00001214
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001215#ifdef DEBUG
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001216 // Returns the number of total pages in this space.
1217 int CountTotalPages();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001218#endif
1219 private:
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001220
1221 // Returns a pointer to the page of the relocation pointer.
1222 Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
1223
kasperl@chromium.orgf5aa8372009-03-24 14:47:14 +00001224 friend class PageIterator;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001225};
1226
1227
1228#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +00001229class NumberAndSizeInfo BASE_EMBEDDED {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001230 public:
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +00001231 NumberAndSizeInfo() : number_(0), bytes_(0) {}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001232
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +00001233 int number() const { return number_; }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001234 void increment_number(int num) { number_ += num; }
1235
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +00001236 int bytes() const { return bytes_; }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001237 void increment_bytes(int size) { bytes_ += size; }
1238
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001239 void clear() {
1240 number_ = 0;
1241 bytes_ = 0;
1242 }
1243
1244 private:
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001245 int number_;
1246 int bytes_;
1247};
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +00001248
1249
1250// HistogramInfo class for recording a single "bar" of a histogram. This
1251// class is used for collecting statistics to print to stdout (when compiled
1252// with DEBUG) or to the log file (when compiled with
1253// ENABLE_LOGGING_AND_PROFILING).
1254class HistogramInfo: public NumberAndSizeInfo {
1255 public:
1256 HistogramInfo() : NumberAndSizeInfo() {}
1257
1258 const char* name() { return name_; }
1259 void set_name(const char* name) { name_ = name; }
1260
1261 private:
1262 const char* name_;
1263};
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001264#endif
1265
1266
1267// -----------------------------------------------------------------------------
1268// SemiSpace in young generation
1269//
1270// A semispace is a contiguous chunk of memory. The mark-compact collector
1271// uses the memory in the from space as a marking stack when tracing live
1272// objects.
1273
kasper.lund7276f142008-07-30 08:49:36 +00001274class SemiSpace : public Space {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001275 public:
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001276 // Constructor.
1277 SemiSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {
1278 start_ = NULL;
1279 age_mark_ = NULL;
1280 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001281
1282 // Sets up the semispace using the given chunk.
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001283 bool Setup(Address start, int initial_capacity, int maximum_capacity);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001284
1285 // Tear down the space. Heap memory was not allocated by the space, so it
1286 // is not deallocated here.
1287 void TearDown();
1288
1289 // True if the space has been set up but not torn down.
1290 bool HasBeenSetup() { return start_ != NULL; }
1291
christian.plesner.hansen@gmail.com5a6af922009-08-12 14:20:51 +00001292 // Grow the size of the semispace by committing extra virtual memory.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001293 // Assumes that the caller has checked that the semispace has not reached
ager@chromium.org32912102009-01-16 10:38:43 +00001294 // its maximum capacity (and thus there is space available in the reserved
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001295 // address range to grow).
christian.plesner.hansen@gmail.com5a6af922009-08-12 14:20:51 +00001296 bool Grow();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001297
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001298 // Grow the semispace to the new capacity. The new capacity
1299 // requested must be larger than the current capacity.
1300 bool GrowTo(int new_capacity);
1301
1302 // Shrinks the semispace to the new capacity. The new capacity
1303 // requested must be more than the amount of used memory in the
1304 // semispace and less than the current capacity.
1305 bool ShrinkTo(int new_capacity);
1306
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001307 // Returns the start address of the space.
1308 Address low() { return start_; }
1309 // Returns one past the end address of the space.
1310 Address high() { return low() + capacity_; }
1311
1312 // Age mark accessors.
1313 Address age_mark() { return age_mark_; }
1314 void set_age_mark(Address mark) { age_mark_ = mark; }
1315
1316 // True if the address is in the address range of this semispace (not
1317 // necessarily below the allocation pointer).
1318 bool Contains(Address a) {
ager@chromium.org5ec48922009-05-05 07:25:34 +00001319 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1320 == reinterpret_cast<uintptr_t>(start_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001321 }
1322
1323 // True if the object is a heap object in the address range of this
1324 // semispace (not necessarily below the allocation pointer).
1325 bool Contains(Object* o) {
ager@chromium.org5ec48922009-05-05 07:25:34 +00001326 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001327 }
1328
ager@chromium.org32912102009-01-16 10:38:43 +00001329 // The offset of an address from the beginning of the space.
ager@chromium.orgc4c92722009-11-18 14:12:51 +00001330 int SpaceOffsetForAddress(Address addr) {
1331 return static_cast<int>(addr - low());
1332 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001333
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001334 // If we don't have these here then SemiSpace will be abstract. However
1335 // they should never be called.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001336 virtual intptr_t Size() {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001337 UNREACHABLE();
1338 return 0;
1339 }
1340
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001341 virtual bool ReserveSpace(int bytes) {
1342 UNREACHABLE();
1343 return false;
1344 }
1345
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001346 bool is_committed() { return committed_; }
1347 bool Commit();
1348 bool Uncommit();
1349
ager@chromium.org357bf652010-04-12 11:30:10 +00001350#ifdef ENABLE_HEAP_PROTECTION
1351 // Protect/unprotect the space by marking it read-only/writable.
1352 virtual void Protect() {}
1353 virtual void Unprotect() {}
1354#endif
1355
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001356#ifdef DEBUG
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001357 virtual void Print();
1358 virtual void Verify();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001359#endif
1360
christian.plesner.hansen@gmail.com5a6af922009-08-12 14:20:51 +00001361 // Returns the current capacity of the semispace.
1362 int Capacity() { return capacity_; }
1363
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001364 // Returns the maximum capacity of the semispace.
1365 int MaximumCapacity() { return maximum_capacity_; }
1366
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001367 // Returns the initial capacity of the semispace.
1368 int InitialCapacity() { return initial_capacity_; }
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001369
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001370 private:
1371 // The current and maximum capacity of the space.
1372 int capacity_;
1373 int maximum_capacity_;
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001374 int initial_capacity_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001375
1376 // The start address of the space.
1377 Address start_;
1378 // Used to govern object promotion during mark-compact collection.
1379 Address age_mark_;
1380
1381 // Masks and comparison values to test for containment in this semispace.
ager@chromium.org5ec48922009-05-05 07:25:34 +00001382 uintptr_t address_mask_;
1383 uintptr_t object_mask_;
1384 uintptr_t object_expected_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001385
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001386 bool committed_;
1387
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001388 public:
1389 TRACK_MEMORY("SemiSpace")
1390};
1391
1392
1393// A SemiSpaceIterator is an ObjectIterator that iterates over the active
1394// semispace of the heap's new space. It iterates over the objects in the
1395// semispace from a given start address (defaulting to the bottom of the
1396// semispace) to the top of the semispace. New objects allocated after the
1397// iterator is created are not iterated.
1398class SemiSpaceIterator : public ObjectIterator {
1399 public:
1400 // Create an iterator over the objects in the given space. If no start
1401 // address is given, the iterator starts from the bottom of the space. If
1402 // no size function is given, the iterator calls Object::Size().
1403 explicit SemiSpaceIterator(NewSpace* space);
1404 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1405 SemiSpaceIterator(NewSpace* space, Address start);
1406
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001407 HeapObject* next() {
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00001408 if (current_ == limit_) return NULL;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001409
1410 HeapObject* object = HeapObject::FromAddress(current_);
1411 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001412
1413 current_ += size;
1414 return object;
1415 }
1416
1417 // Implementation of the ObjectIterator functions.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001418 virtual HeapObject* next_object() { return next(); }
1419
1420 private:
1421 void Initialize(NewSpace* space, Address start, Address end,
1422 HeapObjectCallback size_func);
1423
1424 // The semispace.
1425 SemiSpace* space_;
1426 // The current iteration point.
1427 Address current_;
1428 // The end of iteration.
1429 Address limit_;
1430 // The callback function.
1431 HeapObjectCallback size_func_;
1432};
1433
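// A typical walk over the active semispace looks like this (an illustrative
// sketch; it assumes the usual Heap::new_space() accessor):
//
//   SemiSpaceIterator it(Heap::new_space());
//   for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
//     // ... visit object ...
//   }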
1434
1435// -----------------------------------------------------------------------------
1436// The young generation space.
1437//
1438// The new space consists of a contiguous pair of semispaces. It simply
1439// forwards most functions to the appropriate semispace.
1440
kasper.lund7276f142008-07-30 08:49:36 +00001441class NewSpace : public Space {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001442 public:
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001443 // Constructor.
1444 NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001445
1446 // Sets up the new space using the given chunk.
1447 bool Setup(Address start, int size);
1448
1449 // Tears down the space. Heap memory was not allocated by the space, so it
1450 // is not deallocated here.
1451 void TearDown();
1452
1453 // True if the space has been set up but not torn down.
1454 bool HasBeenSetup() {
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001455 return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001456 }
1457
1458 // Flip the pair of spaces.
1459 void Flip();
1460
christian.plesner.hansen@gmail.com5a6af922009-08-12 14:20:51 +00001461 // Grow the capacity of the semispaces. Assumes that they are not at
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001462 // their maximum capacity.
1463 void Grow();
1464
1465 // Shrink the capacity of the semispaces.
1466 void Shrink();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001467
1468 // True if the address or object lies in the address range of either
1469 // semispace (not necessarily below the allocation pointer).
1470 bool Contains(Address a) {
ager@chromium.org5ec48922009-05-05 07:25:34 +00001471 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1472 == reinterpret_cast<uintptr_t>(start_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001473 }
1474 bool Contains(Object* o) {
ager@chromium.org5ec48922009-05-05 07:25:34 +00001475 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001476 }
1477
1478 // Return the allocated bytes in the active semispace.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001479 virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
1480 // The same, but returning an int. We have to have the intptr_t version
1481 // because it is inherited, but when we know we are dealing with the new
1482 // space, which cannot get as big as the other spaces, this one is useful:
1483 int SizeAsInt() { return static_cast<int>(Size()); }
ager@chromium.org3811b432009-10-28 14:53:37 +00001484
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001485 // Return the current capacity of a semispace.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001486 intptr_t Capacity() {
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001487 ASSERT(to_space_.Capacity() == from_space_.Capacity());
1488 return to_space_.Capacity();
1489 }
ager@chromium.org3811b432009-10-28 14:53:37 +00001490
1491 // Return the total amount of memory committed for new space.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001492 intptr_t CommittedMemory() {
ager@chromium.org3811b432009-10-28 14:53:37 +00001493 if (from_space_.is_committed()) return 2 * Capacity();
1494 return Capacity();
1495 }
1496
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001497 // Return the available bytes without growing in the active semispace.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001498 intptr_t Available() { return Capacity() - Size(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001499
1500 // Return the maximum capacity of a semispace.
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001501 int MaximumCapacity() {
1502 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
1503 return to_space_.MaximumCapacity();
1504 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001505
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001506 // Returns the initial capacity of a semispace.
1507 int InitialCapacity() {
1508 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
1509 return to_space_.InitialCapacity();
1510 }
1511
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001512 // Return the address of the allocation pointer in the active semispace.
1513 Address top() { return allocation_info_.top; }
1514 // Return the address of the first object in the active semispace.
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001515 Address bottom() { return to_space_.low(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001516
1517 // Get the age mark of the inactive semispace.
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001518 Address age_mark() { return from_space_.age_mark(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001519 // Set the age mark in the active semispace.
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001520 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001521
1522 // The start address of the space and a bit mask. Anding an address in the
1523 // new space with the mask will result in the start address.
1524 Address start() { return start_; }
sgjesse@chromium.orgb9d7da12009-08-05 08:38:10 +00001525 uintptr_t mask() { return address_mask_; }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001526
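  // Worked example with made-up numbers: for a 4 MB new space reservation
  // aligned to 4 MB and starting at 0x2000000, address_mask_ would be
  // ~(4 MB - 1) == 0xffc00000, so for any address a in new space
  // (a & 0xffc00000) == 0x2000000, which is exactly what Contains(Address)
  // tests against start_.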
1527 // The allocation top and limit addresses.
1528 Address* allocation_top_address() { return &allocation_info_.top; }
1529 Address* allocation_limit_address() { return &allocation_info_.limit; }
1530
lrn@chromium.org303ada72010-10-27 09:33:13 +00001531 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001532 return AllocateRawInternal(size_in_bytes, &allocation_info_);
1533 }
1534
1535 // Allocate the requested number of bytes for relocation during mark-compact
1536 // collection.
lrn@chromium.org303ada72010-10-27 09:33:13 +00001537 MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001538 return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
1539 }
1540
1541 // Reset the allocation pointer to the beginning of the active semispace.
1542 void ResetAllocationInfo();
1543 // Reset the relocation pointer to the bottom of the inactive semispace in
1544 // preparation for mark-compact collection.
1545 void MCResetRelocationInfo();
1546 // Update the allocation pointer in the active semispace after a
1547 // mark-compact collection.
1548 void MCCommitRelocationInfo();
1549
1550 // Get the extent of the inactive semispace (for use as a marking stack).
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001551 Address FromSpaceLow() { return from_space_.low(); }
1552 Address FromSpaceHigh() { return from_space_.high(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001553
1554 // Get the extent of the active semispace (to sweep newly copied objects
1555 // during a scavenge collection).
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001556 Address ToSpaceLow() { return to_space_.low(); }
1557 Address ToSpaceHigh() { return to_space_.high(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001558
1559 // Offsets from the beginning of the semispaces.
1560 int ToSpaceOffsetForAddress(Address a) {
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001561 return to_space_.SpaceOffsetForAddress(a);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001562 }
1563 int FromSpaceOffsetForAddress(Address a) {
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001564 return from_space_.SpaceOffsetForAddress(a);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001565 }
1566
1567 // True if the object is a heap object in the address range of the
1568 // respective semispace (not necessarily below the allocation pointer of the
1569 // semispace).
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001570 bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
1571 bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001572
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001573 bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
1574 bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001575
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001576 virtual bool ReserveSpace(int bytes);
1577
kasperl@chromium.orgf5aa8372009-03-24 14:47:14 +00001578#ifdef ENABLE_HEAP_PROTECTION
1579 // Protect/unprotect the space by marking it read-only/writable.
1580 virtual void Protect();
1581 virtual void Unprotect();
1582#endif
1583
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001584#ifdef DEBUG
1585 // Verify the active semispace.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001586 virtual void Verify();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001587 // Print the active semispace.
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001588 virtual void Print() { to_space_.Print(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001589#endif
1590
1591#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1592 // Iterates the active semispace to collect statistics.
1593 void CollectStatistics();
1594 // Reports previously collected statistics of the active semispace.
1595 void ReportStatistics();
1596 // Clears previously collected statistics.
1597 void ClearHistograms();
1598
1599 // Record the allocation or promotion of a heap object. Note that we don't
1600 // record every single allocation, but only those that happen in the
1601 // to space during a scavenge GC.
1602 void RecordAllocation(HeapObject* obj);
1603 void RecordPromotion(HeapObject* obj);
1604#endif
1605
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001606 // Return whether the operation succeeded.
1607 bool CommitFromSpaceIfNeeded() {
1608 if (from_space_.is_committed()) return true;
1609 return from_space_.Commit();
1610 }
1611
1612 bool UncommitFromSpace() {
1613 if (!from_space_.is_committed()) return true;
1614 return from_space_.Uncommit();
1615 }
1616
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001617 private:
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001618 // The semispaces.
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001619 SemiSpace to_space_;
1620 SemiSpace from_space_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001621
1622 // Start address and bit mask for containment testing.
1623 Address start_;
ager@chromium.org9085a012009-05-11 19:22:57 +00001624 uintptr_t address_mask_;
1625 uintptr_t object_mask_;
1626 uintptr_t object_expected_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001627
1628 // Allocation pointer and limit for normal allocation and allocation during
1629 // mark-compact collection.
1630 AllocationInfo allocation_info_;
1631 AllocationInfo mc_forwarding_info_;
1632
1633#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1634 HistogramInfo* allocated_histogram_;
1635 HistogramInfo* promoted_histogram_;
1636#endif
1637
1638 // Implementation of AllocateRaw and MCAllocateRaw.
lrn@chromium.org303ada72010-10-27 09:33:13 +00001639 MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
1640 int size_in_bytes,
1641 AllocationInfo* alloc_info);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001642
1643 friend class SemiSpaceIterator;
1644
1645 public:
1646 TRACK_MEMORY("NewSpace")
1647};
1648
1649
1650// -----------------------------------------------------------------------------
1651// Free lists for old object spaces
1652//
1653// Free-list nodes are free blocks in the heap. They look like heap objects
1654// (free-list node pointers have the heap object tag, and they have a map like
1655// a heap object). They have a size and a next pointer. The next pointer is
1656// the raw address of the next free list node (or NULL).
1657class FreeListNode: public HeapObject {
1658 public:
1659 // Obtain a free-list node from a raw address. This is not a cast because
1660 // it does not check nor require that the first word at the address is a map
1661 // pointer.
1662 static FreeListNode* FromAddress(Address address) {
1663 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1664 }
1665
ager@chromium.org3811b432009-10-28 14:53:37 +00001666 static inline bool IsFreeListNode(HeapObject* object);
1667
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001668 // Set the size in bytes, which can be read with HeapObject::Size(). This
1669 // function also writes a map to the first word of the block so that it
1670 // looks like a heap object to the garbage collector and heap iteration
1671 // functions.
1672 void set_size(int size_in_bytes);
1673
1674 // Accessors for the next field.
1675 inline Address next();
1676 inline void set_next(Address next);
1677
1678 private:
kasperl@chromium.org2abc4502009-07-02 07:00:29 +00001679 static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001680
1681 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1682};
1683
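// How a free block is typically turned into a free-list node (an illustrative
// sketch; 'block_start', 'block_size' and 'old_head' are assumed names, and in
// practice the owning free list performs these steps when a block is freed):
//
//   FreeListNode* node = FreeListNode::FromAddress(block_start);
//   node->set_size(block_size);  // the block now looks like a heap object
//   node->set_next(old_head);    // link it ahead of the previous head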
1684
1685// The free list for the old space.
1686class OldSpaceFreeList BASE_EMBEDDED {
1687 public:
1688 explicit OldSpaceFreeList(AllocationSpace owner);
1689
1690 // Clear the free list.
1691 void Reset();
1692
1693 // Return the number of bytes available on the free list.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001694 intptr_t available() { return available_; }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001695
1696 // Place a node on the free list. The block of size 'size_in_bytes'
1697 // starting at 'start' is placed on the free list. The return value is the
1698 // number of bytes that have been lost due to internal fragmentation by
1699 // freeing the block. Bookkeeping information will be written to the block,
1700 // i.e., its contents will be destroyed. The start address should be word
1701 // aligned, and the size should be a non-zero multiple of the word size.
1702 int Free(Address start, int size_in_bytes);
1703
1704 // Allocate a block of size 'size_in_bytes' from the free list. The block
1705 // is uninitialized. A failure is returned if no block is available. The
1706 // number of bytes lost to fragmentation is returned in the output parameter
1707 // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
lrn@chromium.org303ada72010-10-27 09:33:13 +00001708 MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001709
1710 private:
1711 // The size range of blocks, in bytes. (Smaller allocations are allowed, but
1712 // will always result in waste.)
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001713 static const int kMinBlockSize = 2 * kPointerSize;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001714 static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1715
1716 // The identity of the owning space, for building allocation Failure
1717 // objects.
1718 AllocationSpace owner_;
1719
1720 // Total available bytes in all blocks on this free list.
1721 int available_;
1722
1723 // Blocks are put on exact free lists in an array, indexed by size in words.
1724 // The available sizes are kept in an increasingly ordered list. Entries
1725 // corresponding to sizes < kMinBlockSize always have an empty free list
1726 // (but index kHead is used for the head of the size list).
1727 struct SizeNode {
1728 // Address of the head FreeListNode of the implied block size or NULL.
1729 Address head_node_;
1730 // Size (words) of the next larger available size if head_node_ != NULL.
1731 int next_size_;
1732 };
1733 static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
1734 SizeNode free_[kFreeListsLength];
1735
1736 // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
1737 static const int kHead = kMinBlockSize / kPointerSize - 1;
1738 static const int kEnd = kMaxInt;
1739
1740 // We keep a "finger" in the size list to speed up a common pattern:
1741 // repeated requests for the same or increasing sizes.
1742 int finger_;
1743
1744 // Starting from *prev, find and return the smallest size >= index (words),
1745 // or kEnd. Update *prev to be the largest size < index, or kHead.
1746 int FindSize(int index, int* prev) {
1747 int cur = free_[*prev].next_size_;
1748 while (cur < index) {
1749 *prev = cur;
1750 cur = free_[cur].next_size_;
1751 }
1752 return cur;
1753 }
1754
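  // Worked example: once the size list has been rebuilt after freeing blocks
  // of 4 and 7 words, free_[kHead].next_size_ == 4, free_[4].next_size_ == 7
  // and free_[7].next_size_ == kEnd. FindSize(5, &prev) with *prev == kHead
  // then walks kHead -> 4, returns 7, and leaves *prev == 4.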
1755 // Remove an existing element from the size list.
1756 void RemoveSize(int index) {
1757 int prev = kHead;
1758 int cur = FindSize(index, &prev);
1759 ASSERT(cur == index);
1760 free_[prev].next_size_ = free_[cur].next_size_;
1761 finger_ = prev;
1762 }
1763
1764 // Insert a new element into the size list.
1765 void InsertSize(int index) {
1766 int prev = kHead;
1767 int cur = FindSize(index, &prev);
1768 ASSERT(cur != index);
1769 free_[prev].next_size_ = index;
1770 free_[index].next_size_ = cur;
1771 }
1772
1773 // The size list is not updated during a sequence of calls to Free, but is
1774 // rebuilt before the next allocation.
1775 void RebuildSizeList();
1776 bool needs_rebuild_;
1777
kasper.lund7276f142008-07-30 08:49:36 +00001778#ifdef DEBUG
1779 // Does this free list contain a free block located at the address of 'node'?
1780 bool Contains(FreeListNode* node);
1781#endif
1782
mads.s.ager@gmail.com9a4089a2008-09-01 08:55:01 +00001783 DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001784};
1785
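// Illustrative use of the free list (a sketch only; in practice the owning
// OldSpace drives these calls, and 'block_start'/'block_size' are assumed):
//
//   OldSpaceFreeList free_list(OLD_POINTER_SPACE);
//   int wasted = free_list.Free(block_start, block_size);
//   Object* result;
//   int wasted_bytes;
//   { MaybeObject* maybe = free_list.Allocate(block_size, &wasted_bytes);
//     if (!maybe->ToObject(&result)) { /* no suitable block available */ }
//   }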
1786
1787// The free list for the map space.
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001788class FixedSizeFreeList BASE_EMBEDDED {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001789 public:
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001790 FixedSizeFreeList(AllocationSpace owner, int object_size);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001791
1792 // Clear the free list.
1793 void Reset();
1794
1795 // Return the number of bytes available on the free list.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001796 intptr_t available() { return available_; }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001797
1798 // Place a node on the free list. The block starting at 'start' (assumed to
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001799 // have size object_size_) is placed on the free list. Bookkeeping
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001800 // information will be written to the block, i.e., its contents will be
1801 // destroyed. The start address should be word aligned.
1802 void Free(Address start);
1803
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001804 // Allocate a fixed-size block from the free list. The block is uninitialized.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001805 // A failure is returned if no block is available.
lrn@chromium.org303ada72010-10-27 09:33:13 +00001806 MUST_USE_RESULT MaybeObject* Allocate();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001807
1808 private:
1809 // Available bytes on the free list.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001810 intptr_t available_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001811
1812 // The head of the free list.
1813 Address head_;
1814
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001815 // The tail of the free list.
1816 Address tail_;
1817
kasper.lund7276f142008-07-30 08:49:36 +00001818 // The identity of the owning space, for building allocation Failure
1819 // objects.
1820 AllocationSpace owner_;
1821
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001822 // The size of the objects in this space.
1823 int object_size_;
1824
1825 DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001826};
1827
1828
1829// -----------------------------------------------------------------------------
1830// Old object space (excluding map objects)
1831
1832class OldSpace : public PagedSpace {
1833 public:
1834 // Creates an old space object with a given maximum capacity.
1835 // The constructor does not allocate pages from OS.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001836 explicit OldSpace(intptr_t max_capacity,
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001837 AllocationSpace id,
1838 Executability executable)
kasper.lund7276f142008-07-30 08:49:36 +00001839 : PagedSpace(max_capacity, id, executable), free_list_(id) {
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001840 page_extra_ = 0;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001841 }
1842
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001843 // The bytes available on the free list (ie, not above the linear allocation
1844 // pointer).
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001845 intptr_t AvailableFree() { return free_list_.available(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001846
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001847 // The limit of allocation for a page in this space.
1848 virtual Address PageAllocationLimit(Page* page) {
1849 return page->ObjectAreaEnd();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001850 }
1851
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001852 // Give a block of memory to the space's free list. It might be added to
1853 // the free list or accounted as waste.
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001854 // If add_to_freelist is false, only the accounting stats are updated and
1855 // no attempt is made to add the area to the free list.
1856 void Free(Address start, int size_in_bytes, bool add_to_freelist) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001857 accounting_stats_.DeallocateBytes(size_in_bytes);
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001858
1859 if (add_to_freelist) {
1860 int wasted_bytes = free_list_.Free(start, size_in_bytes);
1861 accounting_stats_.WasteBytes(wasted_bytes);
1862 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001863 }
1864
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00001865 virtual void DeallocateBlock(Address start,
1866 int size_in_bytes,
1867 bool add_to_freelist);
1868
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001869 // Prepare for full garbage collection. Resets the relocation pointer and
1870 // clears the free list.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001871 virtual void PrepareForMarkCompact(bool will_compact);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001872
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001873 // Updates the allocation pointer to the relocation top after a mark-compact
1874 // collection.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001875 virtual void MCCommitRelocationInfo();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001876
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001877 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1878
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001879#ifdef DEBUG
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001880 // Reports statistics for the space
1881 void ReportStatistics();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001882#endif
1883
kasper.lund7276f142008-07-30 08:49:36 +00001884 protected:
1885 // Virtual function in the superclass. Slow path of AllocateRaw.
lrn@chromium.org303ada72010-10-27 09:33:13 +00001886 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
kasper.lund7276f142008-07-30 08:49:36 +00001887
1888 // Virtual function in the superclass. Allocate linearly at the start of
1889 // the page after current_page (there is assumed to be one).
1890 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1891
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001892 private:
1893 // The space's free list.
1894 OldSpaceFreeList free_list_;
1895
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001896 public:
1897 TRACK_MEMORY("OldSpace")
1898};
1899
1900
1901// -----------------------------------------------------------------------------
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001902// Old space for objects of a fixed size
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001903
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001904class FixedSpace : public PagedSpace {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001905 public:
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001906 FixedSpace(intptr_t max_capacity,
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001907 AllocationSpace id,
1908 int object_size_in_bytes,
1909 const char* name)
1910 : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
1911 object_size_in_bytes_(object_size_in_bytes),
1912 name_(name),
1913 free_list_(id, object_size_in_bytes) {
1914 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
1915 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001916
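  // Worked example with made-up numbers: if the page object area were 8,000
  // bytes and object_size_in_bytes were 96, page_extra_ would be
  // 8000 % 96 == 32, i.e. the last 32 bytes of every page can never hold an
  // object of this space.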
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001917 // The limit of allocation for a page in this space.
1918 virtual Address PageAllocationLimit(Page* page) {
1919 return page->ObjectAreaEnd() - page_extra_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001920 }
1921
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001922 int object_size_in_bytes() { return object_size_in_bytes_; }
1923
1924 // Give a fixed sized block of memory to the space's free list.
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001925 // If add_to_freelist is false, only the accounting stats are updated and
1926 // no attempt is made to add the area to the free list.
1927 void Free(Address start, bool add_to_freelist) {
1928 if (add_to_freelist) {
1929 free_list_.Free(start);
1930 }
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001931 accounting_stats_.DeallocateBytes(object_size_in_bytes_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001932 }
1933
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001934 // Prepares for a mark-compact GC.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001935 virtual void PrepareForMarkCompact(bool will_compact);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001936
1937 // Updates the allocation pointer to the relocation top after a mark-compact
1938 // collection.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001939 virtual void MCCommitRelocationInfo();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001940
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001941 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1942
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00001943 virtual void DeallocateBlock(Address start,
1944 int size_in_bytes,
1945 bool add_to_freelist);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001946#ifdef DEBUG
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001947 // Reports statistic info of the space
1948 void ReportStatistics();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001949#endif
1950
kasper.lund7276f142008-07-30 08:49:36 +00001951 protected:
1952 // Virtual function in the superclass. Slow path of AllocateRaw.
lrn@chromium.org303ada72010-10-27 09:33:13 +00001953 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
kasper.lund7276f142008-07-30 08:49:36 +00001954
1955 // Virtual function in the superclass. Allocate linearly at the start of
1956 // the page after current_page (there is assumed to be one).
1957 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1958
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001959 void ResetFreeList() {
1960 free_list_.Reset();
1961 }
1962
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001963 private:
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001964 // The size of objects in this space.
1965 int object_size_in_bytes_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001966
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001967 // The name of this space.
1968 const char* name_;
1969
1970 // The space's free list.
1971 FixedSizeFreeList free_list_;
1972};
1973
1974
1975// -----------------------------------------------------------------------------
1976// Old space for all map objects
1977
1978class MapSpace : public FixedSpace {
1979 public:
1980 // Creates a map space object with a maximum capacity.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001981 MapSpace(intptr_t max_capacity, int max_map_space_pages, AllocationSpace id)
kasperl@chromium.orgeac059f2010-01-25 11:02:06 +00001982 : FixedSpace(max_capacity, id, Map::kSize, "map"),
1983 max_map_space_pages_(max_map_space_pages) {
1984 ASSERT(max_map_space_pages < kMaxMapPageIndex);
1985 }
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001986
1987 // Prepares for a mark-compact GC.
1988 virtual void PrepareForMarkCompact(bool will_compact);
1989
1990 // Given an index, returns the page address.
1991 Address PageAddress(int page_index) { return page_addresses_[page_index]; }
1992
kasperl@chromium.orgeac059f2010-01-25 11:02:06 +00001993 static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001994
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001995 // Are map pointers encodable into map word?
1996 bool MapPointersEncodable() {
1997 if (!FLAG_use_big_map_space) {
kasperl@chromium.orgeac059f2010-01-25 11:02:06 +00001998 ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001999 return true;
2000 }
kasperl@chromium.orgeac059f2010-01-25 11:02:06 +00002001 return CountPagesToTop() <= max_map_space_pages_;
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002002 }
2003
2004 // Should be called after forced sweep to find out if map space needs
2005 // compaction.
2006 bool NeedsCompaction(int live_maps) {
kasperl@chromium.orgeac059f2010-01-25 11:02:06 +00002007 return !MapPointersEncodable() && live_maps <= CompactionThreshold();
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002008 }
2009
2010 Address TopAfterCompaction(int live_maps) {
2011 ASSERT(NeedsCompaction(live_maps));
2012
2013 int pages_left = live_maps / kMapsPerPage;
2014 PageIterator it(this, PageIterator::ALL_PAGES);
2015 while (pages_left-- > 0) {
2016 ASSERT(it.has_next());
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002017 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002018 }
2019 ASSERT(it.has_next());
2020 Page* top_page = it.next();
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002021 top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002022 ASSERT(top_page->is_valid());
2023
2024 int offset = live_maps % kMapsPerPage * Map::kSize;
2025 Address top = top_page->ObjectAreaStart() + offset;
2026 ASSERT(top < top_page->ObjectAreaEnd());
2027 ASSERT(Contains(top));
2028
2029 return top;
2030 }
2031
2032 void FinishCompaction(Address new_top, int live_maps) {
2033 Page* top_page = Page::FromAddress(new_top);
2034 ASSERT(top_page->is_valid());
2035
2036 SetAllocationInfo(&allocation_info_, top_page);
2037 allocation_info_.top = new_top;
2038
2039 int new_size = live_maps * Map::kSize;
2040 accounting_stats_.DeallocateBytes(accounting_stats_.Size());
2041 accounting_stats_.AllocateBytes(new_size);
2042
2043#ifdef DEBUG
2044 if (FLAG_enable_slow_asserts) {
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00002045 intptr_t actual_size = 0;
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002046 for (Page* p = first_page_; p != top_page; p = p->next_page())
2047 actual_size += kMapsPerPage * Map::kSize;
2048 actual_size += (new_top - top_page->ObjectAreaStart());
2049 ASSERT(accounting_stats_.Size() == actual_size);
2050 }
2051#endif
2052
2053 Shrink();
2054 ResetFreeList();
2055 }
2056
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002057 protected:
2058#ifdef DEBUG
2059 virtual void VerifyObject(HeapObject* obj);
2060#endif
2061
2062 private:
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002063 static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
2064
2065 // Only compact the map space if doing so frees at least one full page.
kasperl@chromium.orgeac059f2010-01-25 11:02:06 +00002066 int CompactionThreshold() {
2067 return kMapsPerPage * (max_map_space_pages_ - 1);
2068 }
2069
2070 const int max_map_space_pages_;
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002071
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002072 // An array of page start address in a map space.
kasperl@chromium.orgeac059f2010-01-25 11:02:06 +00002073 Address page_addresses_[kMaxMapPageIndex];
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002074
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002075 public:
2076 TRACK_MEMORY("MapSpace")
2077};
2078
2079
2080// -----------------------------------------------------------------------------
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002081// Old space for all global object property cell objects
2082
2083class CellSpace : public FixedSpace {
2084 public:
2085 // Creates a property cell space object with a maximum capacity.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00002086 CellSpace(intptr_t max_capacity, AllocationSpace id)
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002087 : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
2088
2089 protected:
2090#ifdef DEBUG
2091 virtual void VerifyObject(HeapObject* obj);
2092#endif
2093
2094 public:
ager@chromium.org4af710e2009-09-15 12:20:11 +00002095 TRACK_MEMORY("CellSpace")
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002096};
2097
2098
2099// -----------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002100// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2101 // the large object space. A large object is allocated from the OS heap with
2102// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2103 // A large object always starts at offset Page::kObjectStartOffset into a page.
2104// Large objects do not move during garbage collections.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002105
2106// A LargeObjectChunk holds exactly one large object page with exactly one
2107// large object.
2108class LargeObjectChunk {
2109 public:
2110 // Allocates a new LargeObjectChunk that contains a large object page
2111 // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002112 // object) bytes after the object area start of that page.
2113 // The allocated chunk size is set in the output parameter chunk_size.
kasper.lund7276f142008-07-30 08:49:36 +00002114 static LargeObjectChunk* New(int size_in_bytes,
2115 size_t* chunk_size,
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002116 Executability executable);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002117
2118 // Interpret a raw address as a large object chunk.
2119 static LargeObjectChunk* FromAddress(Address address) {
2120 return reinterpret_cast<LargeObjectChunk*>(address);
2121 }
2122
2123 // Returns the address of this chunk.
2124 Address address() { return reinterpret_cast<Address>(this); }
2125
2126 // Accessors for the fields of the chunk.
2127 LargeObjectChunk* next() { return next_; }
2128 void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
2129
erik.corry@gmail.com145eff52010-08-23 11:36:18 +00002130 size_t size() { return size_ & ~Page::kPageFlagMask; }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002131 void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }
2132
2133 // Returns the object in this chunk.
2134 inline HeapObject* GetObject();
2135
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002136 // Given a requested size, returns the physical size of a chunk to be
2137 // allocated.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002138 static int ChunkSizeFor(int size_in_bytes);
2139
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002140 // Given a chunk size, returns the object size it can accommodate. Used by
2141 // LargeObjectSpace::Available.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00002142 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002143 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2144 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2145 }
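  // Worked example with illustrative numbers: with 8K pages and a 256-byte
  // object start offset, ObjectSizeFor of a 64 KB chunk would be
  // 65536 - 8192 - 256 == 57088 bytes.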
2146
2147 private:
2148 // A pointer to the next large object chunk in the space or NULL.
2149 LargeObjectChunk* next_;
2150
2151 // The size of this chunk.
2152 size_t size_;
2153
2154 public:
2155 TRACK_MEMORY("LargeObjectChunk")
2156};
2157
2158
kasper.lund7276f142008-07-30 08:49:36 +00002159class LargeObjectSpace : public Space {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002160 public:
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002161 explicit LargeObjectSpace(AllocationSpace id);
2162 virtual ~LargeObjectSpace() {}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002163
2164 // Initializes internal data structures.
2165 bool Setup();
2166
2167 // Releases internal resources, frees objects in this space.
2168 void TearDown();
2169
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002170 // Allocates a (non-FixedArray, non-Code) large object.
lrn@chromium.org303ada72010-10-27 09:33:13 +00002171 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002172 // Allocates a large Code object.
lrn@chromium.org303ada72010-10-27 09:33:13 +00002173 MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002174 // Allocates a large FixedArray.
lrn@chromium.org303ada72010-10-27 09:33:13 +00002175 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002176
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002177 // Available bytes for objects in this space.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00002178 intptr_t Available() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002179 return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
2180 }
2181
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00002182 virtual intptr_t Size() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002183 return size_;
2184 }
2185
2186 int PageCount() {
2187 return page_count_;
2188 }
2189
2190 // Finds an object for a given address, returns Failure::Exception()
2191 // if it is not found. The function iterates through all objects in this
2192 // space and may be slow.
lrn@chromium.org303ada72010-10-27 09:33:13 +00002193 MaybeObject* FindObject(Address a);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002194
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00002195 // Finds a large object page containing the given pc, returns NULL
2196 // if such a page doesn't exist.
2197 LargeObjectChunk* FindChunkContainingPc(Address pc);
2198
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002199 // Iterates objects covered by dirty regions.
2200 void IterateDirtyRegions(ObjectSlotCallback func);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002201
2202 // Frees unmarked objects.
2203 void FreeUnmarkedObjects();
2204
2205 // Checks whether a heap object is in this space; O(1).
2206 bool Contains(HeapObject* obj);
2207
2208 // Checks whether the space is empty.
2209 bool IsEmpty() { return first_chunk_ == NULL; }
2210
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002211 // See the comments for ReserveSpace in the Space class. This has to be
2212 // called after ReserveSpace has been called on the paged spaces, since they
2213 // may use some memory, leaving less for large objects.
2214 virtual bool ReserveSpace(int bytes);
2215
kasperl@chromium.orgf5aa8372009-03-24 14:47:14 +00002216#ifdef ENABLE_HEAP_PROTECTION
2217 // Protect/unprotect the space by marking it read-only/writable.
2218 void Protect();
2219 void Unprotect();
2220#endif
2221
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002222#ifdef DEBUG
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002223 virtual void Verify();
2224 virtual void Print();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002225 void ReportStatistics();
2226 void CollectCodeStatistics();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002227#endif
2228 // Checks whether an address is in the object area in this space. It
2229 // iterates all objects in the space. May be slow.
2230 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2231
2232 private:
2233 // The head of the linked list of large object chunks.
2234 LargeObjectChunk* first_chunk_;
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00002235 intptr_t size_; // allocated bytes
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002236 int page_count_; // number of chunks
2237
2238
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002239 // Shared implementation of AllocateRaw, AllocateRawCode and
2240 // AllocateRawFixedArray.
lrn@chromium.org303ada72010-10-27 09:33:13 +00002241 MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
2242 int object_size,
2243 Executability executable);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002244
kasperl@chromium.orgf5aa8372009-03-24 14:47:14 +00002245 friend class LargeObjectIterator;
2246
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002247 public:
2248 TRACK_MEMORY("LargeObjectSpace")
2249};
2250
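// A minimal sketch of large-object allocation (illustrative only; it assumes
// the usual Heap::lo_space() accessor and a byte_size larger than
// Page::kMaxHeapObjectSize):
//
//   Object* result;
//   { MaybeObject* maybe = Heap::lo_space()->AllocateRawFixedArray(byte_size);
//     if (!maybe->ToObject(&result)) return maybe;  // allocation failed
//   }
//   // On success, 'result' is raw (uninitialized) space; the caller is
//   // expected to install a map before the next GC can run.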
2251
2252class LargeObjectIterator: public ObjectIterator {
2253 public:
2254 explicit LargeObjectIterator(LargeObjectSpace* space);
2255 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2256
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002257 HeapObject* next();
2258
2259 // implementation of ObjectIterator.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002260 virtual HeapObject* next_object() { return next(); }
2261
2262 private:
2263 LargeObjectChunk* current_;
2264 HeapObjectCallback size_func_;
2265};
2266
2267
2268} } // namespace v8::internal
2269
2270#endif // V8_SPACES_H_