// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "allocation.h"
#include "hashmap.h"
#include "list.h"
#include "log.h"

namespace v8 {
namespace internal {

class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxNonCodeHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of
// intergenerational references. See store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old pointer space or large object
// space it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old pointer and large object spaces should have
// a special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However, we
// still require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one
// page has a special garbage section, and if the top and limit are equal
// then there is no special garbage section.

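// Illustrative sketch (not part of V8): the general shape of Cheney's copying
// algorithm mentioned above, on a generic pair of semispaces. Objects
// reachable from the roots are copied to to-space and a single scan pointer
// chases the copies, forwarding the pointers they contain. Every name below
// (Object, Evacuate, CopyToToSpace, pointer_fields_begin, ...) is invented
// for the example; this is not V8's scavenger.
#if 0
Object* Evacuate(Object* obj) {
  if (obj->IsForwarded()) return obj->forwarding_address();
  Object* copy = CopyToToSpace(obj);  // Bump-allocate a copy in to-space.
  obj->set_forwarding_address(copy);  // Leave a forwarding pointer behind.
  return copy;
}

void Scavenge(ObjectList* roots) {
  Address scan = to_space_top();            // Nothing has been copied yet.
  for (int i = 0; i < roots->length(); i++) {
    roots->at(i) = Evacuate(roots->at(i));  // Copy objects reachable from roots.
  }
  while (scan < to_space_top()) {           // Scan the copies just made.
    Object* obj = ObjectAt(scan);
    for (Object** slot = obj->pointer_fields_begin();
         slot != obj->pointer_fields_end();
         slot++) {
      *slot = Evacuate(*slot);              // Forward every pointer field.
    }
    scan += obj->Size();
  }
  // From-space now contains only garbage; the semispaces can be flipped.
}
#endif
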
// Some assertion macros used in the debugging mode.

#define ASSERT_PAGE_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_MAP_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size) \
  ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset) \
  ASSERT((Page::kObjectStartOffset <= offset) \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index) \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;
class Space;
class FreeList;
class MemoryChunk;

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask, bool data_only)
      : cell_(cell), mask_(mask), data_only_(data_only) { }

  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  inline bool data_only() { return data_only_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1, data_only_);
    } else {
      return MarkBit(cell_, new_mask, data_only_);
    }
  }

 private:
  CellType* cell_;
  CellType mask_;
  // This boolean indicates that the object is in a data-only space with no
  // pointers. This enables some optimizations when marking.
  // It is expected that this field is inlined and turned into control flow
  // at the place where the MarkBit object is created.
  bool data_only_;
};
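
// Illustrative sketch (not part of V8): how a MarkBit over a raw cell array
// behaves. The cell/mask pair addresses a single bit; Next() advances to the
// following bit and rolls over into the next 32-bit cell when the mask
// overflows. The local names below are made up for the example.
#if 0
void MarkBitSketch() {
  MarkBit::CellType cells[2] = { 0, 0 };
  MarkBit bit(&cells[0], 1u << 31, false);  // Last bit of the first cell.
  bit.Set();                                // cells[0] == 0x80000000.
  MarkBit next = bit.Next();                // Mask overflows: cells[1], mask 1.
  next.Set();                               // cells[1] == 0x00000001.
  ASSERT(bit.Get() && next.Get());
  bit.Clear();                              // cells[0] back to 0.
}
#endif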


// Bitmap is a sequence of cells, each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength =
    (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
    (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() {
    return CellsForLength(kLength);
  }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) {
    return reinterpret_cast<Address>(this);
  }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask, data_only);
  }

  static inline void Clear(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n",
               seq_start,
               seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) return false;
    }
    return true;
  }
};
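
// Illustrative sketch (not part of V8): the index arithmetic used above. A
// mark bit index is the pointer-aligned word offset within the page;
// IndexToCell() drops the low five bits to find the 32-bit cell, and those
// same low bits select the bit inside the cell. The numbers assume 4-byte
// pointers (kPointerSizeLog2 == 2); the helper name is hypothetical.
#if 0
void BitmapIndexSketch(Bitmap* bitmap) {
  uint32_t offset = 0x2A8;                       // Byte offset inside a page.
  uint32_t index = offset >> kPointerSizeLog2;   // 0x2A8 / 4 == 170 words.
  uint32_t cell = Bitmap::IndexToCell(index);    // 170 / 32 == 5.
  uint32_t bit = index & Bitmap::kBitIndexMask;  // 170 % 32 == 10.
  MarkBit mark_bit = bitmap->MarkBitFromIndex(index);
  ASSERT(mark_bit.cell() == bitmap->cells() + cell);
  ASSERT(mark_bit.mask() == 1u << bit);
}
#endif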


class SkipList;
class SlotsBuffer;

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }

  // Only works for addresses in pointer spaces, not data or code spaces.
  static inline MemoryChunk* FromAnyPointerAddress(Address addr);

  Address address() { return reinterpret_cast<Address>(this); }

  bool is_valid() { return address() != NULL; }

  MemoryChunk* next_chunk() const { return next_chunk_; }
  MemoryChunk* prev_chunk() const { return prev_chunk_; }

  void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
        kFailureTag) {
      return reinterpret_cast<Space*>(owner_ - kFailureTag);
    } else {
      return NULL;
    }
  }

  void set_owner(Space* space) {
    ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kFailureTag;
    ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
           kFailureTag);
  }

  VirtualMemory* reserved_memory() {
    return &reservation_;
  }

  void InitializeReservedMemory() {
    reservation_.Reset();
  }

  void set_reserved_memory(VirtualMemory* reservation) {
    ASSERT_NOT_NULL(reservation);
    reservation_.TakeControl(reservation);
  }

  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
  void initialize_scan_on_scavenge(bool scan) {
    if (scan) {
      SetFlag(SCAN_ON_SCAVENGE);
    } else {
      ClearFlag(SCAN_ON_SCAVENGE);
    }
  }
  inline void set_scan_on_scavenge(bool scan);

  int store_buffer_counter() { return store_buffer_counter_; }
  void set_store_buffer_counter(int counter) {
    store_buffer_counter_ = counter;
  }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether addr can be a limit of addresses in this page.
  // It's a limit if it's in the page, or if it's just after the
  // last byte of the page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    ABOUT_TO_BE_FREED,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    SCAN_ON_SCAVENGE,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    CONTAINS_ONLY_DATA,
    EVACUATION_CANDIDATE,
    RESCAN_ON_EVACUATION,

    // Pages swept precisely can be iterated, hitting only the live objects,
    // whereas those swept conservatively cannot be iterated over. Both flags
    // indicate that the marking bits have been cleared by the sweeper;
    // otherwise the marking bits are still intact.
    WAS_SWEPT_PRECISELY,
    WAS_SWEPT_CONSERVATIVELY,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };


  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask =
      1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) |
      (1 << RESCAN_ON_EVACUATION) |
      (1 << IN_FROM_SPACE) |
      (1 << IN_TO_SPACE);


  void SetFlag(int flag) {
    flags_ |= static_cast<uintptr_t>(1) << flag;
  }

  void ClearFlag(int flag) {
    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
  }

  void SetFlagTo(int flag, bool value) {
    if (value) {
      SetFlag(flag);
    } else {
      ClearFlag(flag);
    }
  }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask are set to
  // the value in "flags"; the rest retain the current value in flags_.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }

  // Manage live byte count (count of bytes known to be live,
  // because they are marked black).
  void ResetLiveBytes() {
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n",
             static_cast<void*>(this), live_byte_count_);
    }
    live_byte_count_ = 0;
  }
  void IncrementLiveBytes(int by) {
    if (FLAG_gc_verbose) {
      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
             static_cast<void*>(this), live_byte_count_,
             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
             live_byte_count_ + by);
    }
    live_byte_count_ += by;
    ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
  }
  int LiveBytes() {
    ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
    return live_byte_count_;
  }

  static void IncrementLiveBytesFromGC(Address address, int by) {
    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
  }

  static void IncrementLiveBytesFromMutator(Address address, int by);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = kPointerSize + kPointerSize;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
      kPointerSize + kPointerSize +
      kPointerSize + kPointerSize + kPointerSize + kIntSize;

  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;

  static const size_t kHeaderSize =
      kSlotsBufferOffset + kPointerSize + kPointerSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset = kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);

  size_t size() const { return size_; }

  void set_size(size_t size) {
    size_ = size;
  }

  void SetArea(Address area_start, Address area_end) {
    area_start_ = area_start;
    area_end_ = area_end;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool ContainsOnlyData() {
    return IsFlagSet(CONTAINS_ONLY_DATA);
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() {
    return IsFlagSet(IN_TO_SPACE);
  }

  bool InFromSpace() {
    return IsFlagSet(IN_FROM_SPACE);
  }

  // ---------------------------------------------------------------------
  // Markbits support

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  void PrintMarkbits() { markbits()->Print(); }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
    const intptr_t offset =
        reinterpret_cast<intptr_t>(addr) & kAlignmentMask;

    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }

  void InsertAfter(MemoryChunk* other);
  void Unlink();

  inline Heap* heap() { return heap_; }

  static const int kFlagsOffset = kPointerSize * 3;

  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  inline SkipList* skip_list() {
    return skip_list_;
  }

  inline void set_skip_list(SkipList* skip_list) {
    skip_list_ = skip_list;
  }

  inline SlotsBuffer* slots_buffer() {
    return slots_buffer_;
  }

  inline SlotsBuffer** slots_buffer_address() {
    return &slots_buffer_;
  }

  void MarkEvacuationCandidate() {
    ASSERT(slots_buffer_ == NULL);
    SetFlag(EVACUATION_CANDIDATE);
  }

  void ClearEvacuationCandidate() {
    ASSERT(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() {
    return static_cast<int>(area_end() - area_start());
  }

 protected:
  MemoryChunk* next_chunk_;
  MemoryChunk* prev_chunk_;
  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  VirtualMemory reservation_;
  // The identity of the owning space. This is tagged as a failure pointer,
  // but no failure can be in an object, so this can be distinguished from
  // any entry in a fixed array.
  Address owner_;
  Heap* heap_;
  // Used by the store buffer to keep track of which pages to mark scan-on-
  // scavenge.
  int store_buffer_counter_;
  // Count of bytes marked black on page.
  int live_byte_count_;
  SlotsBuffer* slots_buffer_;
  SkipList* skip_list_;

  static MemoryChunk* Initialize(Heap* heap,
                                 Address base,
                                 size_t size,
                                 Address area_start,
                                 Address area_end,
                                 Executability executable,
                                 Space* owner);

  friend class MemoryAllocator;
};


STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
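
// Illustrative sketch (not part of V8): the alignment trick behind
// MemoryChunk::FromAddress(). Because every chunk starts on a kAlignment
// boundary, masking off the low bits of any interior address yields the
// chunk header. Plain integers are used here, and 20 page-size bits (1 MB
// chunks) are assumed purely for the example.
#if 0
uintptr_t ChunkBaseSketch(uintptr_t interior_address) {
  const uintptr_t kExampleAlignment = static_cast<uintptr_t>(1) << 20;
  const uintptr_t kExampleAlignmentMask = kExampleAlignment - 1;
  return interior_address & ~kExampleAlignmentMask;  // Start of the chunk.
}
// ChunkBaseSketch(0x40123456) == 0x40100000, the header of that chunk.
#endif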


// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1 MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[. This only works if the object
  // is in fact in a page. See also MemoryChunk::FromAddress() and
  // FromAnyPointerAddress().
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    return p;
  }

  // Returns the next page in the chain of pages owned by a space.
  inline Page* next_page();
  inline Page* prev_page();
  inline void set_next_page(Page* page);
  inline void set_prev_page(Page* page);

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset into this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Object area size in bytes.
  static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page.
  static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  inline void ClearGCFields();

  static inline Page* Initialize(Heap* heap,
                                 MemoryChunk* chunk,
                                 Executability executable,
                                 PagedSpace* owner);

  void InitializeAsAnchor(PagedSpace* owner);

  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }

  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }

  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }

#ifdef DEBUG
  void Print();
#endif  // DEBUG

  friend class MemoryAllocator;
};


STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
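
// Illustrative sketch (not part of V8): why FromAllocationTop() subtracts
// kPointerSize before masking. An allocation top may equal the exclusive
// upper bound of its page; masking that address directly would name the
// *next* page, so backing up by one pointer first keeps the result on the
// right page. Plain integers and a 1 MB page are assumed for the example.
#if 0
uintptr_t PageOfAllocationTopSketch(uintptr_t top) {
  const uintptr_t kExamplePageSize = static_cast<uintptr_t>(1) << 20;
  const uintptr_t kExampleMask = kExamplePageSize - 1;
  return (top - sizeof(void*)) & ~kExampleMask;
}
// top == 0x40200000 (end of the page starting at 0x40100000):
//   masking top directly      -> 0x40200000 (the wrong page)
//   PageOfAllocationTopSketch -> 0x40100000 (the correct page)
#endif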


class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() {
    return HeapObject::FromAddress(area_start());
  }

  inline LargePage* next_page() const {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) {
    set_next_chunk(page);
  }
 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};

STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);

// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap), id_(id), executable_(executable) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

  // After calling this we can allocate a certain number of bytes using only
  // linear allocation (with a LinearAllocationScope and an
  // AlwaysAllocateScope) without using freelists or causing a GC. This is
  // used by partial snapshots. It returns true if space was reserved or
  // false if a GC is needed. For paged spaces the space requested must
  // include the space wasted at the end of each page when allocating
  // linearly.
  virtual bool ReserveSpace(int bytes) = 0;

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4 GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() { TearDown(); }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(const size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  bool exists() { return this != NULL && code_range_ != NULL; }
  bool contains(Address address) {
    if (this == NULL || code_range_ == NULL) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
                                            size_t* allocated);
  void FreeRawMemory(Address buf, size_t length);

 private:
  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
  void GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};
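
// Illustrative sketch (not part of V8): the constraint behind CodeRange. A
// pc-relative call encodes the distance between caller and callee as a
// signed 32-bit displacement, so any two code addresses that may call each
// other must lie within +/-2 GB of one another. Keeping all code objects
// inside one reserved 2 GB range guarantees this on 64-bit platforms.
#if 0
bool FitsInRel32Sketch(uintptr_t from, uintptr_t to) {
  int64_t delta = static_cast<int64_t>(to) - static_cast<int64_t>(from);
  return delta >= -2147483648LL && delta <= 2147483647LL;
}
// Any two addresses inside a single 2 GB reservation satisfy this check.
#endif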


class SkipList {
 public:
  SkipList() {
    Clear();
  }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) {
    return starts_[RegionNumber(addr)];
  }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == NULL) {
      list = new SkipList();
      page->set_skip_list(list);
    }

    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};
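
// Illustrative sketch (not part of V8): the region bookkeeping above. A page
// is split into 8 KB regions (kRegionSizeLog2 == 13) and starts_[i] remembers
// the lowest object start that touches region i, so a scan of region i can
// begin at an object boundary instead of decoding from the page start. The
// numbers below assume 8-byte pointers and are only for the example.
#if 0
int SkipListRegionsSketch() {
  // An object of 0x5000 bytes starting 0x1F00 bytes into a page covers
  // regions 0 (0x1F00 >> 13) through 3 ((0x1F00 + 0x5000 - 8) >> 13), so
  // SkipList::AddObject() lowers starts_[0..3] to the object's address.
  int start_region = 0x1F00 >> 13;               // == 0
  int end_region = (0x1F00 + 0x5000 - 8) >> 13;  // == 3
  return end_region - start_region + 1;          // 4 regions touched.
}
#endif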


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and
// large pages for large object space.
//
// Each space has to manage its own pages.
//
class MemoryAllocator {
 public:
  explicit MemoryAllocator(Isolate* isolate);

  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

  void TearDown();

  Page* AllocatePage(
      intptr_t size, PagedSpace* owner, Executability executable);

  LargePage* AllocateLargePage(
      intptr_t object_size, Space* owner, Executability executable);

  void Free(MemoryChunk* chunk);

  // Returns the maximum available bytes of heaps.
  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated space in bytes.
  intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns allocated executable space in bytes.
  intptr_t SizeExecutable() { return size_executable_; }

  // Returns maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
  }

#ifdef DEBUG
  // Reports statistic info of the space.
  void ReportStatistics();
#endif

  MemoryChunk* AllocateChunk(intptr_t body_size,
                             Executability executable,
                             Space* space);

  Address ReserveAlignedMemory(size_t requested,
                               size_t alignment,
                               VirtualMemory* controller);
  Address AllocateAlignedMemory(size_t requested,
                                size_t alignment,
                                Executability executable,
                                VirtualMemory* controller);

  void FreeMemory(VirtualMemory* reservation, Executability executable);
  void FreeMemory(Address addr, size_t size, Executability executable);

  // Commit a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  void ZapBlock(Address start, size_t size);

  void PerformAllocationCallback(ObjectSpace space,
                                 AllocationAction action,
                                 size_t size);

  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                   ObjectSpace space,
                                   AllocationAction action);

  void RemoveMemoryAllocationCallback(
      MemoryAllocationCallback callback);

  bool MemoryAllocationCallbackRegistered(
      MemoryAllocationCallback callback);

  static int CodePageGuardStartOffset();

  static int CodePageGuardSize();

  static int CodePageAreaStartOffset();

  static int CodePageAreaEndOffset();

  static int CodePageAreaSize() {
    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
  }

  MUST_USE_RESULT static bool CommitCodePage(VirtualMemory* vm,
                                             Address start,
                                             size_t size);

 private:
  Isolate* isolate_;

  // Maximum space size in bytes.
  size_t capacity_;
  // Maximum subset of capacity_ that can be executable.
  size_t capacity_executable_;

  // Allocated space size in bytes.
  size_t size_;
  // Allocated executable space size in bytes.
  size_t size_executable_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {
    }
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };

  // A list of callbacks that are triggered when memory is allocated or freed.
  List<MemoryAllocationCallbackRegistration>
      memory_allocation_callbacks_;

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own next()
// method, which is used to avoid using virtual functions when iterating a
// specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from the bottom of the given space
// to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns NULL when the iteration has ended.
  inline HeapObject* Next() {
    do {
      HeapObject* next_obj = FromCurrentPage();
      if (next_obj != NULL) return next_obj;
    } while (AdvanceToNextPage());
    return NULL;
  }

  virtual HeapObject* next_object() {
    return Next();
  }

 private:
  enum PageMode { kOnePageOnly, kAllPagesInSpace };

  Address cur_addr_;              // Current iteration point.
  Address cur_end_;               // End iteration point.
  HeapObjectCallback size_func_;  // Size function or NULL.
  PagedSpace* space_;
  PageMode page_mode_;

  // Fast (inlined) path of next().
  inline HeapObject* FromCurrentPage();

  // Slow path of next(), goes into the next page. Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  // Initializes fields.
  inline void Initialize(PagedSpace* owner,
                         Address start,
                         Address end,
                         PageMode mode,
                         HeapObjectCallback size_func);
};
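
// Illustrative sketch (not part of V8 itself): the intended use of
// HeapObjectIterator. Objects are handed out one at a time until Next()
// returns NULL; allocation during the walk may or may not be observed, as
// noted above. The helper name and the use of HeapObject::Size() (declared
// elsewhere, in objects.h) are only an example.
#if 0
intptr_t SumObjectSizesSketch(PagedSpace* space) {
  HeapObjectIterator it(space);
  intptr_t total = 0;
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    total += obj->Size();  // Default size function, as described above.
  }
  return total;
}
#endif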


// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.

class PageIterator BASE_EMBEDDED {
 public:
  explicit inline PageIterator(PagedSpace* space);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  // Next page that will be returned. Cached here so that we can use this
  // iterator for operations that deallocate pages.
  Page* next_page_;
};


// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  AllocationInfo() : top(NULL), limit(NULL) {
  }

  Address top;    // Current allocation top.
  Address limit;  // Current allocation limit.

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
        && (top <= limit);
  }
#endif
};
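
// Illustrative sketch (not part of V8): how a top/limit pair supports linear
// (bump-pointer) allocation. An allocation succeeds by advancing top as long
// as the request fits below limit; otherwise the caller must fall back to the
// free list or trigger a GC. The helper below is only an example of the
// technique, not the allocator's actual fast path.
#if 0
Address BumpAllocateSketch(AllocationInfo* info, int size_in_bytes) {
  Address result = info->top;
  if (info->limit - info->top < size_in_bytes) return NULL;  // Does not fit.
  info->top += size_in_bytes;  // Bump the top past the new object.
  return result;
}
#endif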
1202
1203
1204// An abstraction of the accounting statistics of a page-structured space.
ulan@chromium.org2efb9002012-01-19 15:36:35 +00001205// The 'capacity' of a space is the number of object-area bytes (i.e., not
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001206// including page bookkeeping structures) currently in the space. The 'size'
1207// of a space is the number of allocated bytes, the 'waste' in the space is
1208// the number of bytes that are not allocated and not available to
ulan@chromium.org2efb9002012-01-19 15:36:35 +00001209// allocation without reorganizing the space via a GC (e.g. small blocks due
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001210// to internal fragmentation, top of page areas in map space), and the bytes
1211// 'available' is the number of unallocated bytes that are not waste. The
1212// capacity is the sum of size, waste, and available.
1213//
1214// The stats are only set by functions that ensure they stay balanced. These
1215// functions increase or decrease one of the non-capacity stats in
1216// conjunction with capacity, or else they always balance increases and
1217// decreases to the non-capacity stats.
1218class AllocationStats BASE_EMBEDDED {
1219 public:
1220 AllocationStats() { Clear(); }
1221
ulan@chromium.org2efb9002012-01-19 15:36:35 +00001222 // Zero out all the allocation statistics (i.e., no capacity).
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001223 void Clear() {
1224 capacity_ = 0;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001225 size_ = 0;
1226 waste_ = 0;
1227 }
1228
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001229 void ClearSizeWaste() {
1230 size_ = capacity_;
1231 waste_ = 0;
1232 }
1233
ulan@chromium.org2efb9002012-01-19 15:36:35 +00001234 // Reset the allocation statistics (i.e., available = capacity with no
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001235 // wasted or allocated bytes).
1236 void Reset() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001237 size_ = 0;
1238 waste_ = 0;
1239 }
1240
1241 // Accessors for the allocation statistics.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001242 intptr_t Capacity() { return capacity_; }
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001243 intptr_t Size() { return size_; }
1244 intptr_t Waste() { return waste_; }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001245
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001246 // Grow the space by adding available bytes. They are initially marked as
1247 // being in use (part of the size), but will normally be immediately freed,
1248 // putting them on the free list and removing them from size_.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001249 void ExpandSpace(int size_in_bytes) {
1250 capacity_ += size_in_bytes;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001251 size_ += size_in_bytes;
1252 ASSERT(size_ >= 0);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001253 }
1254
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001255 // Shrink the space by removing available bytes. Since shrinking is done
1256 // during sweeping, bytes have been marked as being in use (part of the size)
1257 // and are hereby freed.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001258 void ShrinkSpace(int size_in_bytes) {
1259 capacity_ -= size_in_bytes;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001260 size_ -= size_in_bytes;
1261 ASSERT(size_ >= 0);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001262 }
1263
1264 // Allocate from available bytes (available -> size).
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001265 void AllocateBytes(intptr_t size_in_bytes) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001266 size_ += size_in_bytes;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001267 ASSERT(size_ >= 0);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001268 }
1269
1270 // Free allocated bytes, making them available (size -> available).
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001271 void DeallocateBytes(intptr_t size_in_bytes) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001272 size_ -= size_in_bytes;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001273 ASSERT(size_ >= 0);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001274 }
1275
1276 // Waste free bytes (available -> waste).
1277 void WasteBytes(int size_in_bytes) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001278 size_ -= size_in_bytes;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001279 waste_ += size_in_bytes;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001280 ASSERT(size_ >= 0);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001281 }
1282
1283 private:
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001284 intptr_t capacity_;
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001285 intptr_t size_;
1286 intptr_t waste_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001287};
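
// Illustrative sketch (not part of the original header): how the stats stay
// balanced. The 'available' count is never stored; it is implied as
// Capacity() - Size() - Waste().
//
//   AllocationStats stats;
//   stats.ExpandSpace(4096);      // capacity = size = 4096, waste = 0
//   stats.DeallocateBytes(4096);  // size = 0, so 4096 bytes are available
//   stats.AllocateBytes(64);      // size = 64, 4032 bytes remain available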
1288
1289
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001290// -----------------------------------------------------------------------------
1291// Free lists for old object spaces
1292//
1293// Free-list nodes are free blocks in the heap. They look like heap objects
1294// (free-list node pointers have the heap object tag, and they have a map like
1295// a heap object). They have a size and a next pointer. The next pointer is
1296// the raw address of the next free list node (or NULL).
1297class FreeListNode: public HeapObject {
1298 public:
1299 // Obtain a free-list node from a raw address. This is not a cast because
1300 // it does not check nor require that the first word at the address is a map
1301 // pointer.
1302 static FreeListNode* FromAddress(Address address) {
1303 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1304 }
1305
1306 static inline bool IsFreeListNode(HeapObject* object);
1307
1308 // Set the size in bytes, which can be read with HeapObject::Size(). This
1309 // function also writes a map to the first word of the block so that it
1310 // looks like a heap object to the garbage collector and heap iteration
1311 // functions.
1312 void set_size(Heap* heap, int size_in_bytes);
1313
1314 // Accessors for the next field.
1315 inline FreeListNode* next();
1316 inline FreeListNode** next_address();
1317 inline void set_next(FreeListNode* next);
1318
1319 inline void Zap();
1320
1321 private:
1322 static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
1323
1324 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1325};
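
// Illustrative sketch (not part of the original header): turning a freed
// block into a free-list node. 'heap', 'start' and 'size_in_bytes' are
// hypothetical values provided by the caller.
//
//   FreeListNode* node = FreeListNode::FromAddress(start);
//   node->set_size(heap, size_in_bytes);   // writes the map and the size
//   node->set_next(NULL);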
1326
1327
1328// The free list for the old space. The free list is organized in such a way
1329// as to encourage objects allocated around the same time to be near each
1330// other. The normal way to allocate is intended to be by bumping a 'top'
1331// pointer until it hits a 'limit' pointer. When the limit is hit we need to
1332// find a new space to allocate from. This is done with the free list, which
1333// is divided up into rough categories to cut down on waste. Having finer
1334// categories would scatter allocation more.
1335
1336// The old space free list is organized in categories.
1337// 1-31 words: Such small free areas are discarded for efficiency reasons.
1338// They can be reclaimed by the compactor. However the distance between top
1339// and limit may be this small.
1340// 32-255 words: There is a list of spaces this large. It is used for top and
1341// limit when the object we need to allocate is 1-31 words in size. These
1342// spaces are called small.
1343// 256-2047 words: There is a list of spaces this large. It is used for top and
1344// limit when the object we need to allocate is 32-255 words in size. These
1345// spaces are called medium.
 1346// 2048-16383 words: There is a list of spaces this large. It is used for top
1347// and limit when the object we need to allocate is 256-2047 words in size.
 1348// These spaces are called large.
1349// At least 16384 words. This list is for objects of 2048 words or larger.
1350// Empty pages are added to this list. These spaces are called huge.
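// As an illustrative example of the categories above: freeing a 100-word
// block links it into the small list (32-255 word blocks), whereas an
// allocation request for a 100-word object skips that list and is satisfied
// from the medium list (256-2047 word blocks), so the chosen block is large
// enough to also serve as the next top/limit area.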
1351class FreeList BASE_EMBEDDED {
1352 public:
1353 explicit FreeList(PagedSpace* owner);
1354
1355 // Clear the free list.
1356 void Reset();
1357
1358 // Return the number of bytes available on the free list.
1359 intptr_t available() { return available_; }
1360
1361 // Place a node on the free list. The block of size 'size_in_bytes'
1362 // starting at 'start' is placed on the free list. The return value is the
1363 // number of bytes that have been lost due to internal fragmentation by
1364 // freeing the block. Bookkeeping information will be written to the block,
ulan@chromium.org2efb9002012-01-19 15:36:35 +00001365 // i.e., its contents will be destroyed. The start address should be word
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001366 // aligned, and the size should be a non-zero multiple of the word size.
1367 int Free(Address start, int size_in_bytes);
1368
1369 // Allocate a block of size 'size_in_bytes' from the free list. The block
 1370 // is uninitialized. A failure is returned if no block is available.
 1371 // The size should be a non-zero multiple of the word size.
1373 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1374
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001375#ifdef DEBUG
1376 void Zap();
1377 static intptr_t SumFreeList(FreeListNode* node);
1378 static int FreeListLength(FreeListNode* cur);
1379 intptr_t SumFreeLists();
1380 bool IsVeryLong();
1381#endif
1382
danno@chromium.org2c456792011-11-11 12:00:53 +00001383 struct SizeStats {
1384 intptr_t Total() {
1385 return small_size_ + medium_size_ + large_size_ + huge_size_;
1386 }
1387
1388 intptr_t small_size_;
1389 intptr_t medium_size_;
1390 intptr_t large_size_;
1391 intptr_t huge_size_;
1392 };
1393
1394 void CountFreeListItems(Page* p, SizeStats* sizes);
1395
1396 intptr_t EvictFreeListItems(Page* p);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001397
1398 private:
1399 // The size range of blocks, in bytes.
1400 static const int kMinBlockSize = 3 * kPointerSize;
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001401 static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001402
1403 FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
1404
1405 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
1406
1407 PagedSpace* owner_;
1408 Heap* heap_;
1409
1410 // Total available bytes in all blocks on this free list.
1411 int available_;
1412
1413 static const int kSmallListMin = 0x20 * kPointerSize;
1414 static const int kSmallListMax = 0xff * kPointerSize;
1415 static const int kMediumListMax = 0x7ff * kPointerSize;
1416 static const int kLargeListMax = 0x3fff * kPointerSize;
1417 static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
1418 static const int kMediumAllocationMax = kSmallListMax;
1419 static const int kLargeAllocationMax = kMediumListMax;
1420 FreeListNode* small_list_;
1421 FreeListNode* medium_list_;
1422 FreeListNode* large_list_;
1423 FreeListNode* huge_list_;
1424
1425 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1426};
1427
1428
kasper.lund7276f142008-07-30 08:49:36 +00001429class PagedSpace : public Space {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001430 public:
1431 // Creates a space with a maximum capacity, and an id.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001432 PagedSpace(Heap* heap,
1433 intptr_t max_capacity,
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001434 AllocationSpace id,
1435 Executability executable);
kasper.lund7276f142008-07-30 08:49:36 +00001436
1437 virtual ~PagedSpace() {}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001438
 1439 // Set up the space. No pages are allocated here; pages are allocated on
 1440 // demand as objects are allocated, up to the space's maximum capacity.
 1441 // Returns true on success.
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00001443 bool SetUp();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001444
1445 // Returns true if the space has been successfully set up and not
1446 // subsequently torn down.
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00001447 bool HasBeenSetUp();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001448
 1449 // Cleans up the space: frees all pages in this space and clears the
 1450 // accounting stats.
1451 void TearDown();
1452
1453 // Checks whether an object/address is in this space.
1454 inline bool Contains(Address a);
1455 bool Contains(HeapObject* o) { return Contains(o->address()); }
1456
kasper.lund7276f142008-07-30 08:49:36 +00001457 // Given an address occupied by a live object, return that object if it is
1458 // in this space, or Failure::Exception() if it is not. The implementation
1459 // iterates over objects in the page containing the address, the cost is
1460 // linear in the number of objects in the page. It may be slow.
lrn@chromium.org303ada72010-10-27 09:33:13 +00001461 MUST_USE_RESULT MaybeObject* FindObject(Address addr);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001462
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001463 // Prepares for a mark-compact GC.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001464 virtual void PrepareForMarkCompact();
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001465
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001466 // Current capacity without growing (Size() + Available()).
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001467 intptr_t Capacity() { return accounting_stats_.Capacity(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001468
ager@chromium.org3811b432009-10-28 14:53:37 +00001469 // Total amount of memory committed for this space. For paged
1470 // spaces this equals the capacity.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001471 intptr_t CommittedMemory() { return Capacity(); }
ager@chromium.org3811b432009-10-28 14:53:37 +00001472
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001473 // Sets the capacity, the available space and the wasted space to zero.
1474 // The stats are rebuilt during sweeping by adding each page to the
1475 // capacity and the size when it is encountered. As free spaces are
1476 // discovered during the sweeping they are subtracted from the size and added
1477 // to the available and wasted totals.
1478 void ClearStats() {
1479 accounting_stats_.ClearSizeWaste();
1480 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001481
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001482 // Available bytes without growing. These are the bytes on the free list.
1483 // The bytes in the linear allocation area are not included in this total
1484 // because updating the stats would slow down allocation. New pages are
1485 // immediately added to the free list so they show up here.
1486 intptr_t Available() { return free_list_.available(); }
1487
1488 // Allocated bytes in this space. Garbage bytes that were not found due to
1489 // lazy sweeping are counted as being allocated! The bytes in the current
1490 // linear allocation area (between top and limit) are also counted here.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001491 virtual intptr_t Size() { return accounting_stats_.Size(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001492
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00001493 // As Size(), but the bytes in lazily swept pages are estimated and the bytes
1494 // in the current linear allocation area are not included.
1495 virtual intptr_t SizeOfObjects() {
1496 ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0));
1497 return Size() - unswept_free_bytes_ - (limit() - top());
1498 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001499
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001500 // Wasted bytes in this space. These are just the bytes that were thrown away
1501 // due to being too small to use for allocation. They do not include the
1502 // free bytes that were not found at all due to lazy sweeping.
1503 virtual intptr_t Waste() { return accounting_stats_.Waste(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001504
1505 // Returns the allocation pointer in this space.
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00001506 Address top() { return allocation_info_.top; }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001507 Address limit() { return allocation_info_.limit; }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001508
kasper.lund7276f142008-07-30 08:49:36 +00001509 // Allocate the requested number of bytes in the space if possible, return a
1510 // failure object if not.
lrn@chromium.org303ada72010-10-27 09:33:13 +00001511 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001512
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001513 virtual bool ReserveSpace(int bytes);
1514
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001515 // Give a block of memory to the space's free list. It might be added to
 1516 // the free list or, if it is too small for any free-list category,
 1517 // accounted as waste. Returns the number of bytes that were actually
 1518 // made available for future allocation.
1519 int Free(Address start, int size_in_bytes) {
1520 int wasted = free_list_.Free(start, size_in_bytes);
1521 accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
1522 return size_in_bytes - wasted;
1523 }
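
// Illustrative example: per the free-list categories described earlier, a
// freed block smaller than 32 words is dropped by the free list and reported
// entirely as wasted, so Free() above returns 0 and nothing is deallocated
// from the accounting stats.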
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00001524
jkummerow@chromium.org28faa982012-04-13 09:58:30 +00001525 void ResetFreeList() {
1526 free_list_.Reset();
1527 }
1528
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001529 // Set space allocation info.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001530 void SetTop(Address top, Address limit) {
1531 ASSERT(top == limit ||
1532 Page::FromAddress(top) == Page::FromAddress(limit - 1));
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001533 allocation_info_.top = top;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001534 allocation_info_.limit = limit;
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001535 }
1536
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001537 void Allocate(int bytes) {
1538 accounting_stats_.AllocateBytes(bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001539 }
1540
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001541 void IncreaseCapacity(int size) {
1542 accounting_stats_.ExpandSpace(size);
1543 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001544
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001545 // Releases an unused page and shrinks the space.
1546 void ReleasePage(Page* page);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001547
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001548 // Releases all of the unused pages.
1549 void ReleaseAllUnusedPages();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001550
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001551 // The dummy page that anchors the linked list of pages.
1552 Page* anchor() { return &anchor_; }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001553
1554#ifdef DEBUG
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001555 // Print meta info and objects in this space.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001556 virtual void Print();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001557
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001558 // Verify integrity of this space.
1559 virtual void Verify(ObjectVisitor* visitor);
1560
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001561 // Reports statistics for the space
1562 void ReportStatistics();
1563
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001564 // Overridden by subclasses to verify space-specific object
1565 // properties (e.g., only maps or free-list nodes are in map space).
1566 virtual void VerifyObject(HeapObject* obj) {}
1567
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001568 // Report code object related statistics
1569 void CollectCodeStatistics();
1570 static void ReportCodeStatistics();
1571 static void ResetCodeStatistics();
1572#endif
1573
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001574 bool was_swept_conservatively() { return was_swept_conservatively_; }
1575 void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001576
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001577 // Evacuation candidates are swept by the evacuator. This predicate must
 1578 // return a valid result both before and after evacuation has finished.
1579 static bool ShouldBeSweptLazily(Page* p) {
1580 return !p->IsEvacuationCandidate() &&
1581 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
1582 !p->WasSweptPrecisely();
1583 }
1584
danno@chromium.org2c456792011-11-11 12:00:53 +00001585 void SetPagesToSweep(Page* first) {
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00001586 ASSERT(unswept_free_bytes_ == 0);
mstarzinger@chromium.org1b3afd12011-11-29 14:28:56 +00001587 if (first == &anchor_) first = NULL;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001588 first_unswept_page_ = first;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001589 }
1590
ulan@chromium.org2efb9002012-01-19 15:36:35 +00001591 void IncrementUnsweptFreeBytes(int by) {
1592 unswept_free_bytes_ += by;
1593 }
1594
1595 void IncreaseUnsweptFreeBytes(Page* p) {
1596 ASSERT(ShouldBeSweptLazily(p));
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001597 unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00001598 }
1599
ulan@chromium.org2efb9002012-01-19 15:36:35 +00001600 void DecreaseUnsweptFreeBytes(Page* p) {
1601 ASSERT(ShouldBeSweptLazily(p));
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001602 unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
ulan@chromium.org2efb9002012-01-19 15:36:35 +00001603 }
1604
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001605 bool AdvanceSweeper(intptr_t bytes_to_sweep);
1606
1607 bool IsSweepingComplete() {
1608 return !first_unswept_page_->is_valid();
1609 }
1610
1611 Page* FirstPage() { return anchor_.next_page(); }
1612 Page* LastPage() { return anchor_.prev_page(); }
1613
rossberg@chromium.org994edf62012-02-06 10:12:55 +00001614 void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
1615 free_list_.CountFreeListItems(p, sizes);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001616 }
1617
1618 void EvictEvacuationCandidatesFromFreeLists();
1619
1620 bool CanExpand();
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00001621
mstarzinger@chromium.org1b3afd12011-11-29 14:28:56 +00001622 // Returns the total number of pages in this space.
1623 int CountTotalPages();
1624
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001625 // Return size of allocatable area on a page in this space.
1626 inline int AreaSize() {
1627 return area_size_;
1628 }
1629
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001630 protected:
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001631 int area_size_;
1632
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001633 // Maximum capacity of this space.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001634 intptr_t max_capacity_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001635
fschneider@chromium.org7d10be52012-04-10 12:30:14 +00001636 intptr_t SizeOfFirstPage();
1637
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001638 // Accounting information for this space.
1639 AllocationStats accounting_stats_;
1640
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001641 // The dummy page that anchors the double linked list of pages.
1642 Page anchor_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001643
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001644 // The space's free list.
1645 FreeList free_list_;
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00001646
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001647 // Normal allocation information.
1648 AllocationInfo allocation_info_;
1649
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001650 // Bytes of each page that cannot be allocated. Possibly non-zero
1651 // for pages in spaces with only fixed-size objects. Always zero
1652 // for pages in spaces with variable sized objects (those pages are
1653 // padded with free-list nodes).
1654 int page_extra_;
1655
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001656 bool was_swept_conservatively_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001657
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00001658 // The first page to be swept when the lazy sweeper advances. Is set
1659 // to NULL when all pages have been swept.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001660 Page* first_unswept_page_;
kasperl@chromium.orgeac059f2010-01-25 11:02:06 +00001661
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00001662 // The number of free bytes which could be reclaimed by advancing the
1663 // lazy sweeper. This is only an estimation because lazy sweeping is
1664 // done conservatively.
1665 intptr_t unswept_free_bytes_;
1666
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001667 // Expands the space by allocating a fixed number of pages. Returns false if
svenpanne@chromium.orgecb9dd62011-12-01 08:22:35 +00001668 // it cannot allocate the requested number of pages from the OS, or if the hard heap
1669 // size limit has been hit.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001670 bool Expand();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001671
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001672 // Generic fast case allocation function that tries linear allocation at the
1673 // address denoted by top in allocation_info_.
1674 inline HeapObject* AllocateLinearly(int size_in_bytes);
kasper.lund7276f142008-07-30 08:49:36 +00001675
1676 // Slow path of AllocateRaw. This function is space-dependent.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001677 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
kasper.lund7276f142008-07-30 08:49:36 +00001678
kasperl@chromium.orgf5aa8372009-03-24 14:47:14 +00001679 friend class PageIterator;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001680};
1681
1682
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +00001683class NumberAndSizeInfo BASE_EMBEDDED {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001684 public:
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +00001685 NumberAndSizeInfo() : number_(0), bytes_(0) {}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001686
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +00001687 int number() const { return number_; }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001688 void increment_number(int num) { number_ += num; }
1689
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +00001690 int bytes() const { return bytes_; }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001691 void increment_bytes(int size) { bytes_ += size; }
1692
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001693 void clear() {
1694 number_ = 0;
1695 bytes_ = 0;
1696 }
1697
1698 private:
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001699 int number_;
1700 int bytes_;
1701};
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +00001702
1703
1704// HistogramInfo class for recording a single "bar" of a histogram. This
whesse@chromium.org030d38e2011-07-13 13:23:34 +00001705// class is used for collecting statistics to print to the log file.
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +00001706class HistogramInfo: public NumberAndSizeInfo {
1707 public:
1708 HistogramInfo() : NumberAndSizeInfo() {}
1709
1710 const char* name() { return name_; }
1711 void set_name(const char* name) { name_ = name; }
1712
1713 private:
1714 const char* name_;
1715};
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001716
1717
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001718enum SemiSpaceId {
1719 kFromSpace = 0,
1720 kToSpace = 1
1721};
1722
1723
1724class SemiSpace;
1725
1726
1727class NewSpacePage : public MemoryChunk {
1728 public:
1729 // GC related flags copied from from-space to to-space when
1730 // flipping semispaces.
1731 static const intptr_t kCopyOnFlipFlagsMask =
1732 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
1733 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
1734 (1 << MemoryChunk::SCAN_ON_SCAVENGE);
1735
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001736 static const int kAreaSize = Page::kNonCodeObjectAreaSize;
1737
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001738 inline NewSpacePage* next_page() const {
1739 return static_cast<NewSpacePage*>(next_chunk());
1740 }
1741
1742 inline void set_next_page(NewSpacePage* page) {
1743 set_next_chunk(page);
1744 }
1745
1746 inline NewSpacePage* prev_page() const {
1747 return static_cast<NewSpacePage*>(prev_chunk());
1748 }
1749
1750 inline void set_prev_page(NewSpacePage* page) {
1751 set_prev_chunk(page);
1752 }
1753
1754 SemiSpace* semi_space() {
1755 return reinterpret_cast<SemiSpace*>(owner());
1756 }
1757
1758 bool is_anchor() { return !this->InNewSpace(); }
1759
1760 static bool IsAtStart(Address addr) {
1761 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
1762 == kObjectStartOffset;
1763 }
1764
1765 static bool IsAtEnd(Address addr) {
1766 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
1767 }
1768
1769 Address address() {
1770 return reinterpret_cast<Address>(this);
1771 }
1772
 1773 // Finds the NewSpacePage containing the given address.
1774 static inline NewSpacePage* FromAddress(Address address_in_page) {
1775 Address page_start =
1776 reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
1777 ~Page::kPageAlignmentMask);
1778 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001779 return page;
1780 }
1781
1782 // Find the page for a limit address. A limit address is either an address
1783 // inside a page, or the address right after the last byte of a page.
1784 static inline NewSpacePage* FromLimit(Address address_limit) {
1785 return NewSpacePage::FromAddress(address_limit - 1);
1786 }
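
// Illustrative example (assuming 1 MB pages, kPageAlignmentMask == 0xfffff):
// for an address 0x2345678 on a page starting at 0x2300000, FromAddress()
// masks off the low 20 bits and yields the page at 0x2300000. FromLimit()
// subtracts one first, so the limit 0x2400000 (one past the page's last
// byte) maps to the same page.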
1787
1788 private:
1789 // Create a NewSpacePage object that is only used as anchor
1790 // for the doubly-linked list of real pages.
1791 explicit NewSpacePage(SemiSpace* owner) {
1792 InitializeAsAnchor(owner);
1793 }
1794
1795 static NewSpacePage* Initialize(Heap* heap,
1796 Address start,
1797 SemiSpace* semi_space);
1798
 1799 // Initialize a fake NewSpacePage used as sentinel at the ends
1800 // of a doubly-linked list of real NewSpacePages.
1801 // Only uses the prev/next links, and sets flags to not be in new-space.
1802 void InitializeAsAnchor(SemiSpace* owner);
1803
1804 friend class SemiSpace;
1805 friend class SemiSpaceIterator;
1806};
1807
1808
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001809// -----------------------------------------------------------------------------
1810// SemiSpace in young generation
1811//
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001812// A semispace is a contiguous chunk of memory holding page-like memory
1813// chunks. The mark-compact collector uses the memory of the first page in
1814// the from space as a marking stack when tracing live objects.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001815
kasper.lund7276f142008-07-30 08:49:36 +00001816class SemiSpace : public Space {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001817 public:
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001818 // Constructor.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001819 SemiSpace(Heap* heap, SemiSpaceId semispace)
1820 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
1821 start_(NULL),
1822 age_mark_(NULL),
1823 id_(semispace),
1824 anchor_(this),
1825 current_page_(NULL) { }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001826
1827 // Sets up the semispace using the given chunk.
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00001828 void SetUp(Address start, int initial_capacity, int maximum_capacity);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001829
1830 // Tear down the space. Heap memory was not allocated by the space, so it
1831 // is not deallocated here.
1832 void TearDown();
1833
1834 // True if the space has been set up but not torn down.
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00001835 bool HasBeenSetUp() { return start_ != NULL; }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001836
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001837 // Grow the semispace to the new capacity. The new capacity
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001838 // requested must be larger than the current capacity and less than
1839 // the maximum capacity.
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001840 bool GrowTo(int new_capacity);
1841
1842 // Shrinks the semispace to the new capacity. The new capacity
1843 // requested must be more than the amount of used memory in the
1844 // semispace and less than the current capacity.
1845 bool ShrinkTo(int new_capacity);
1846
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001847 // Returns the start address of the first page of the space.
1848 Address space_start() {
1849 ASSERT(anchor_.next_page() != &anchor_);
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001850 return anchor_.next_page()->area_start();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001851 }
1852
1853 // Returns the start address of the current page of the space.
1854 Address page_low() {
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001855 return current_page_->area_start();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001856 }
1857
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001858 // Returns one past the end address of the space.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001859 Address space_end() {
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001860 return anchor_.prev_page()->area_end();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001861 }
1862
1863 // Returns one past the end address of the current page of the space.
1864 Address page_high() {
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00001865 return current_page_->area_end();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001866 }
1867
1868 bool AdvancePage() {
1869 NewSpacePage* next_page = current_page_->next_page();
1870 if (next_page == anchor()) return false;
1871 current_page_ = next_page;
1872 return true;
1873 }
1874
1875 // Resets the space to using the first page.
1876 void Reset();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001877
1878 // Age mark accessors.
1879 Address age_mark() { return age_mark_; }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001880 void set_age_mark(Address mark);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001881
1882 // True if the address is in the address range of this semispace (not
1883 // necessarily below the allocation pointer).
1884 bool Contains(Address a) {
ager@chromium.org5ec48922009-05-05 07:25:34 +00001885 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1886 == reinterpret_cast<uintptr_t>(start_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001887 }
1888
1889 // True if the object is a heap object in the address range of this
1890 // semispace (not necessarily below the allocation pointer).
1891 bool Contains(Object* o) {
ager@chromium.org5ec48922009-05-05 07:25:34 +00001892 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001893 }
1894
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001895 // If we don't have these here then SemiSpace will be abstract. However
1896 // they should never be called.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00001897 virtual intptr_t Size() {
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001898 UNREACHABLE();
1899 return 0;
1900 }
1901
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00001902 virtual bool ReserveSpace(int bytes) {
1903 UNREACHABLE();
1904 return false;
1905 }
1906
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001907 bool is_committed() { return committed_; }
1908 bool Commit();
1909 bool Uncommit();
1910
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001911 NewSpacePage* first_page() { return anchor_.next_page(); }
1912 NewSpacePage* current_page() { return current_page_; }
1913
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001914#ifdef DEBUG
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001915 virtual void Print();
1916 virtual void Verify();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001917 // Validate a range of addresses in a SemiSpace.
1918 // The "from" address must be on a page prior to the "to" address,
1919 // in the linked page order, or it must be earlier on the same page.
1920 static void AssertValidRange(Address from, Address to);
1921#else
1922 // Do nothing.
1923 inline static void AssertValidRange(Address from, Address to) {}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001924#endif
1925
christian.plesner.hansen@gmail.com5a6af922009-08-12 14:20:51 +00001926 // Returns the current capacity of the semi space.
1927 int Capacity() { return capacity_; }
1928
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001929 // Returns the maximum capacity of the semi space.
1930 int MaximumCapacity() { return maximum_capacity_; }
1931
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001932 // Returns the initial capacity of the semi space.
1933 int InitialCapacity() { return initial_capacity_; }
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00001934
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001935 SemiSpaceId id() { return id_; }
1936
1937 static void Swap(SemiSpace* from, SemiSpace* to);
1938
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001939 private:
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001940 // Flips the semispace between being from-space and to-space.
1941 // Copies the flags into the masked positions on all pages in the space.
1942 void FlipPages(intptr_t flags, intptr_t flag_mask);
1943
1944 NewSpacePage* anchor() { return &anchor_; }
1945
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001946 // The current and maximum capacity of the space.
1947 int capacity_;
1948 int maximum_capacity_;
ager@chromium.orgab99eea2009-08-25 07:05:41 +00001949 int initial_capacity_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001950
1951 // The start address of the space.
1952 Address start_;
1953 // Used to govern object promotion during mark-compact collection.
1954 Address age_mark_;
1955
1956 // Masks and comparison values to test for containment in this semispace.
ager@chromium.org5ec48922009-05-05 07:25:34 +00001957 uintptr_t address_mask_;
1958 uintptr_t object_mask_;
1959 uintptr_t object_expected_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001960
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001961 bool committed_;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001962 SemiSpaceId id_;
ager@chromium.orgadd848f2009-08-13 12:44:13 +00001963
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001964 NewSpacePage anchor_;
1965 NewSpacePage* current_page_;
1966
1967 friend class SemiSpaceIterator;
1968 friend class NewSpacePageIterator;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001969 public:
1970 TRACK_MEMORY("SemiSpace")
1971};
1972
1973
1974// A SemiSpaceIterator is an ObjectIterator that iterates over the active
1975// semispace of the heap's new space. It iterates over the objects in the
1976// semispace from a given start address (defaulting to the bottom of the
1977// semispace) to the top of the semispace. New objects allocated after the
1978// iterator is created are not iterated.
1979class SemiSpaceIterator : public ObjectIterator {
1980 public:
1981 // Create an iterator over the objects in the given space. If no start
1982 // address is given, the iterator starts from the bottom of the space. If
1983 // no size function is given, the iterator calls Object::Size().
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001984
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001985 // Iterate over all of allocated to-space.
1986 explicit SemiSpaceIterator(NewSpace* space);
 1987 // Iterate over all of allocated to-space, with a custom size function.
1988 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1989 // Iterate over part of allocated to-space, from start to the end
1990 // of allocation.
1991 SemiSpaceIterator(NewSpace* space, Address start);
1992 // Iterate from one address to another in the same semi-space.
1993 SemiSpaceIterator(Address from, Address to);
1994
1995 HeapObject* Next() {
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00001996 if (current_ == limit_) return NULL;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001997 if (NewSpacePage::IsAtEnd(current_)) {
1998 NewSpacePage* page = NewSpacePage::FromLimit(current_);
1999 page = page->next_page();
2000 ASSERT(!page->is_anchor());
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00002001 current_ = page->area_start();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002002 if (current_ == limit_) return NULL;
2003 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002004
2005 HeapObject* object = HeapObject::FromAddress(current_);
2006 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002007
2008 current_ += size;
2009 return object;
2010 }
2011
2012 // Implementation of the ObjectIterator functions.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002013 virtual HeapObject* next_object() { return Next(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002014
2015 private:
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002016 void Initialize(Address start,
2017 Address end,
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002018 HeapObjectCallback size_func);
2019
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002020 // The current iteration point.
2021 Address current_;
2022 // The end of iteration.
2023 Address limit_;
2024 // The callback function.
2025 HeapObjectCallback size_func_;
2026};
2027
2028
2029// -----------------------------------------------------------------------------
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002030// A PageIterator iterates the pages in a semi-space.
2031class NewSpacePageIterator BASE_EMBEDDED {
2032 public:
2033 // Make an iterator that runs over all pages in to-space.
2034 explicit inline NewSpacePageIterator(NewSpace* space);
2035
2036 // Make an iterator that runs over all pages in the given semispace,
2037 // even those not used in allocation.
2038 explicit inline NewSpacePageIterator(SemiSpace* space);
2039
2040 // Make iterator that iterates from the page containing start
2041 // to the page that contains limit in the same semispace.
2042 inline NewSpacePageIterator(Address start, Address limit);
2043
2044 inline bool has_next();
2045 inline NewSpacePage* next();
2046
2047 private:
2048 NewSpacePage* prev_page_; // Previous page returned.
2049 // Next page that will be returned. Cached here so that we can use this
2050 // iterator for operations that deallocate pages.
2051 NewSpacePage* next_page_;
2052 // Last page returned.
2053 NewSpacePage* last_page_;
2054};
2055
2056
2057// -----------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002058// The young generation space.
2059//
2060// The new space consists of a contiguous pair of semispaces. It simply
2061// forwards most functions to the appropriate semispace.
2062
kasper.lund7276f142008-07-30 08:49:36 +00002063class NewSpace : public Space {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002064 public:
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002065 // Constructor.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002066 explicit NewSpace(Heap* heap)
2067 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002068 to_space_(heap, kToSpace),
2069 from_space_(heap, kFromSpace),
2070 reservation_(),
2071 inline_allocation_limit_step_(0) {}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002072
2073 // Sets up the new space using the given chunk.
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00002074 bool SetUp(int reserved_semispace_size_, int max_semispace_size);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002075
2076 // Tears down the space. Heap memory was not allocated by the space, so it
2077 // is not deallocated here.
2078 void TearDown();
2079
2080 // True if the space has been set up but not torn down.
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00002081 bool HasBeenSetUp() {
2082 return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002083 }
2084
2085 // Flip the pair of spaces.
2086 void Flip();
2087
christian.plesner.hansen@gmail.com5a6af922009-08-12 14:20:51 +00002088 // Grow the capacity of the semispaces. Assumes that they are not at
ager@chromium.orgab99eea2009-08-25 07:05:41 +00002089 // their maximum capacity.
2090 void Grow();
2091
2092 // Shrink the capacity of the semispaces.
2093 void Shrink();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002094
2095 // True if the address or object lies in the address range of either
2096 // semispace (not necessarily below the allocation pointer).
2097 bool Contains(Address a) {
ager@chromium.org5ec48922009-05-05 07:25:34 +00002098 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
2099 == reinterpret_cast<uintptr_t>(start_);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002100 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002101
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002102 bool Contains(Object* o) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002103 Address a = reinterpret_cast<Address>(o);
2104 return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002105 }
2106
2107 // Return the allocated bytes in the active semispace.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002108 virtual intptr_t Size() {
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00002109 return pages_used_ * NewSpacePage::kAreaSize +
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002110 static_cast<int>(top() - to_space_.page_low());
2111 }
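
// Illustrative example (hypothetical numbers): with three completely used
// pages and 4 KB already bumped on the current page, Size() returns
// 3 * NewSpacePage::kAreaSize + 4 KB.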
2112
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00002113 // The same, but returning an int. We have to have the one that returns
2114 // intptr_t because it is inherited, but if we know we are dealing with the
 2115 // new space, which can't get as big as the other spaces, then this is useful:
2116 int SizeAsInt() { return static_cast<int>(Size()); }
ager@chromium.org3811b432009-10-28 14:53:37 +00002117
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002118 // Return the current object-area capacity of a semispace, i.e. the bytes
 // that can actually hold objects (per-page overhead excluded).
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002119 intptr_t EffectiveCapacity() {
erik.corry@gmail.com394dbcf2011-10-27 07:38:48 +00002120 SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00002121 return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002122 }
2123
2124 // Return the current capacity of a semispace.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00002125 intptr_t Capacity() {
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00002126 ASSERT(to_space_.Capacity() == from_space_.Capacity());
2127 return to_space_.Capacity();
2128 }
ager@chromium.org3811b432009-10-28 14:53:37 +00002129
2130 // Return the total amount of memory committed for new space.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00002131 intptr_t CommittedMemory() {
ager@chromium.org3811b432009-10-28 14:53:37 +00002132 if (from_space_.is_committed()) return 2 * Capacity();
2133 return Capacity();
2134 }
2135
erik.corry@gmail.com394dbcf2011-10-27 07:38:48 +00002136 // Return the available bytes without growing.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002137 intptr_t Available() {
erik.corry@gmail.com394dbcf2011-10-27 07:38:48 +00002138 return Capacity() - Size();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002139 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002140
2141 // Return the maximum capacity of a semispace.
sgjesse@chromium.org911335c2009-08-19 12:59:44 +00002142 int MaximumCapacity() {
2143 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
2144 return to_space_.MaximumCapacity();
2145 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002146
ager@chromium.orgab99eea2009-08-25 07:05:41 +00002147 // Returns the initial capacity of a semispace.
2148 int InitialCapacity() {
2149 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
2150 return to_space_.InitialCapacity();
2151 }
2152
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002153 // Return the address of the allocation pointer in the active semispace.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002154 Address top() {
2155 ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
2156 return allocation_info_.top;
2157 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002158 // Return the address of the first object in the active semispace.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002159 Address bottom() { return to_space_.space_start(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002160
2161 // Get the age mark of the inactive semispace.
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002162 Address age_mark() { return from_space_.age_mark(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002163 // Set the age mark in the active semispace.
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002164 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002165
2166 // The start address of the space and a bit mask. Anding an address in the
2167 // new space with the mask will result in the start address.
2168 Address start() { return start_; }
sgjesse@chromium.orgb9d7da12009-08-05 08:38:10 +00002169 uintptr_t mask() { return address_mask_; }
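
// Illustrative example (hypothetical numbers): if the new space were a
// 16 MB region starting at the 16 MB-aligned address 0x20000000,
// address_mask_ would be ~0xffffff, and Contains(a) would hold exactly for
// addresses 0x20000000..0x20ffffff.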
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002170
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002171 INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
2172 ASSERT(Contains(addr));
2173 ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
2174 IsAligned(OffsetFrom(addr) - 1, kPointerSize));
2175 return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
2176 }
2177
2178 INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
2179 return reinterpret_cast<Address>(index << kPointerSizeLog2);
2180 }
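
// Illustrative example (assuming 32-bit pointers, kPointerSizeLog2 == 2):
// with start_ == 0x40000000, the address 0x40000100 maps to mark-bit index
// (0x40000100 - 0x40000000) >> 2 == 64.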
2181
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002182 // The allocation top and limit addresses.
2183 Address* allocation_top_address() { return &allocation_info_.top; }
2184 Address* allocation_limit_address() { return &allocation_info_.limit; }
2185
danno@chromium.orgc612e022011-11-10 11:38:15 +00002186 MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002187
2188 // Reset the allocation pointer to the beginning of the active semispace.
2189 void ResetAllocationInfo();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002190
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002191 void LowerInlineAllocationLimit(intptr_t step) {
2192 inline_allocation_limit_step_ = step;
2193 if (step == 0) {
2194 allocation_info_.limit = to_space_.page_high();
2195 } else {
2196 allocation_info_.limit = Min(
2197 allocation_info_.top + inline_allocation_limit_step_,
2198 allocation_info_.limit);
2199 }
2200 top_on_previous_step_ = allocation_info_.top;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002201 }
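
// Illustrative example (hypothetical numbers): after calling
// LowerInlineAllocationLimit(step) with step == 64 KB, the limit trails top
// by at most 64 KB, so inline allocation in generated code falls back to the
// runtime roughly every 64 KB of new-space allocation, giving incremental
// marking a chance to do a step before the limit is raised again.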
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002202
2203 // Get the extent of the inactive semispace (for use as a marking stack,
2204 // or to zap it). Notice: space-addresses are not necessarily on the
2205 // same page, so FromSpaceStart() might be above FromSpaceEnd().
2206 Address FromSpacePageLow() { return from_space_.page_low(); }
2207 Address FromSpacePageHigh() { return from_space_.page_high(); }
2208 Address FromSpaceStart() { return from_space_.space_start(); }
2209 Address FromSpaceEnd() { return from_space_.space_end(); }
2210
2211 // Get the extent of the active semispace's pages' memory.
2212 Address ToSpaceStart() { return to_space_.space_start(); }
2213 Address ToSpaceEnd() { return to_space_.space_end(); }
2214
2215 inline bool ToSpaceContains(Address address) {
2216 return to_space_.Contains(address);
2217 }
2218 inline bool FromSpaceContains(Address address) {
2219 return from_space_.Contains(address);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002220 }
2221
2222 // True if the object is a heap object in the address range of the
2223 // respective semispace (not necessarily below the allocation pointer of the
2224 // semispace).
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002225 inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
2226 inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002227
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002228 // Try to switch the active semispace to a new, empty page.
2229 // Returns false if this isn't possible or reasonable (i.e., there
2230 // are no pages, or the current page is already empty), or true
2231 // if successful.
2232 bool AddFreshPage();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002233
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002234 virtual bool ReserveSpace(int bytes);
2235
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00002236 // Resizes a sequential string which must be the most recent thing that was
2237 // allocated in new space.
2238 template <typename StringType>
2239 inline void ShrinkStringAtAllocationBoundary(String* string, int len);
2240
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002241#ifdef DEBUG
2242 // Verify the active semispace.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002243 virtual void Verify();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002244 // Print the active semispace.
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002245 virtual void Print() { to_space_.Print(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002246#endif
2247
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002248 // Iterates the active semispace to collect statistics.
2249 void CollectStatistics();
2250 // Reports previously collected statistics of the active semispace.
2251 void ReportStatistics();
2252 // Clears previously collected statistics.
2253 void ClearHistograms();
2254
2255 // Record the allocation or promotion of a heap object. Note that we don't
2256 // record every single allocation, but only those that happen in the
2257 // to space during a scavenge GC.
2258 void RecordAllocation(HeapObject* obj);
2259 void RecordPromotion(HeapObject* obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002260
ager@chromium.orgadd848f2009-08-13 12:44:13 +00002261 // Return whether the operation succeeded.
2262 bool CommitFromSpaceIfNeeded() {
2263 if (from_space_.is_committed()) return true;
2264 return from_space_.Commit();
2265 }
2266
2267 bool UncommitFromSpace() {
2268 if (!from_space_.is_committed()) return true;
2269 return from_space_.Uncommit();
2270 }
2271
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002272 inline intptr_t inline_allocation_limit_step() {
2273 return inline_allocation_limit_step_;
2274 }
2275
2276 SemiSpace* active_space() { return &to_space_; }
2277
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002278 private:
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002279 // Update allocation info to match the current to-space page.
2280 void UpdateAllocationInfo();
2281
2282 Address chunk_base_;
2283 uintptr_t chunk_size_;
2284
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002285 // The semispaces.
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00002286 SemiSpace to_space_;
2287 SemiSpace from_space_;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002288 VirtualMemory reservation_;
2289 int pages_used_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002290
2291 // Start address and bit mask for containment testing.
2292 Address start_;
ager@chromium.org9085a012009-05-11 19:22:57 +00002293 uintptr_t address_mask_;
2294 uintptr_t object_mask_;
2295 uintptr_t object_expected_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002296
2297 // Allocation pointer and limit for normal allocation and allocation during
2298 // mark-compact collection.
2299 AllocationInfo allocation_info_;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002300
2301 // When incremental marking is active we will set allocation_info_.limit
2302 // to be lower than actual limit and then will gradually increase it
2303 // in steps to guarantee that we do incremental marking steps even
2304 // when all allocation is performed from inlined generated code.
2305 intptr_t inline_allocation_limit_step_;
2306
2307 Address top_on_previous_step_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002308
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002309 HistogramInfo* allocated_histogram_;
2310 HistogramInfo* promoted_histogram_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002311
danno@chromium.orgc612e022011-11-10 11:38:15 +00002312 MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002313
2314 friend class SemiSpaceIterator;
2315
2316 public:
2317 TRACK_MEMORY("NewSpace")
2318};
2319
2320
2321// -----------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002322// Old object space (excluding map objects)
2323
2324class OldSpace : public PagedSpace {
2325 public:
2326 // Creates an old space object with a given maximum capacity.
2327 // The constructor does not allocate pages from OS.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002328 OldSpace(Heap* heap,
2329 intptr_t max_capacity,
2330 AllocationSpace id,
2331 Executability executable)
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002332 : PagedSpace(heap, max_capacity, id, executable) {
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002333 page_extra_ = 0;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002334 }
2335
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002336 // The limit of allocation for a page in this space.
2337 virtual Address PageAllocationLimit(Page* page) {
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00002338 return page->area_end();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002339 }
2340
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002341 public:
2342 TRACK_MEMORY("OldSpace")
2343};
2344
2345
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002346// For contiguous spaces, top should be in the space (or at the end) and limit
2347// should be the end of the space.
2348#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
erik.corry@gmail.com394dbcf2011-10-27 07:38:48 +00002349 SLOW_ASSERT((space).page_low() <= (info).top \
2350 && (info).top <= (space).page_high() \
2351 && (info).limit <= (space).page_high())
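// A typical use of the macro above is to check a NewSpace allocation pointer
// against its to-space, e.g. (sketch, assuming the allocation_info_ and
// to_space_ members declared above):
//
//   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);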
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002352
2353
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002354// -----------------------------------------------------------------------------
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002355// Old space for objects of a fixed size
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002356
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002357class FixedSpace : public PagedSpace {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002358 public:
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002359 FixedSpace(Heap* heap,
2360 intptr_t max_capacity,
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002361 AllocationSpace id,
2362 int object_size_in_bytes,
2363 const char* name)
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002364 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002365 object_size_in_bytes_(object_size_in_bytes),
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002366 name_(name) {
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00002367 page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002368 }
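  // Worked sketch of the page_extra_ computation above: page_extra_ is the
  // tail of the object area that cannot hold a whole fixed-size object, so the
  // usable area is always a multiple of object_size_in_bytes (the helper name
  // is hypothetical):
  //
  //   static int UsableAreaFor(int object_size_in_bytes) {
  //     int page_extra = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
  //     return Page::kNonCodeObjectAreaSize - page_extra;
  //   }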
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002369
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002370 // The limit of allocation for a page in this space.
2371 virtual Address PageAllocationLimit(Page* page) {
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00002372 return page->area_end() - page_extra_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002373 }
2374
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002375 int object_size_in_bytes() { return object_size_in_bytes_; }
2376
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002377 // Prepares for a mark-compact GC.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002378 virtual void PrepareForMarkCompact();
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00002379
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002380 private:
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002381 // The size of objects in this space.
2382 int object_size_in_bytes_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002383
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002384 // The name of this space.
2385 const char* name_;
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002386};
2387
2388
2389// -----------------------------------------------------------------------------
2390// Old space for all map objects
2391
2392class MapSpace : public FixedSpace {
2393 public:
2394 // Creates a map space object with a maximum capacity.
yangguo@chromium.org78d1ad42012-02-09 13:53:47 +00002395 MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002396 : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
yangguo@chromium.org78d1ad42012-02-09 13:53:47 +00002397 max_map_space_pages_(kMaxMapPageIndex - 1) {
kasperl@chromium.orgeac059f2010-01-25 11:02:06 +00002398 }
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002399
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002400 // The maximum number of pages in the map space.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002401 // TODO(1600): this limit is artificial just to keep code compilable.
2402 static const int kMaxMapPageIndex = 1 << 16;
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002403
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002404 virtual int RoundSizeDownToObjectAlignment(int size) {
2405 if (IsPowerOf2(Map::kSize)) {
2406 return RoundDown(size, Map::kSize);
2407 } else {
2408 return (size / Map::kSize) * Map::kSize;
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002409 }
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002410 }
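  // Example (sketch; the concrete value of Map::kSize depends on the build
  // configuration): with a hypothetical Map::kSize of 88 bytes, a request of
  // 1000 bytes is rounded down to the largest multiple of 88 that fits:
  //
  //   int rounded = map_space->RoundSizeDownToObjectAlignment(1000);
  //   // rounded == (1000 / 88) * 88 == 968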
2411
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002412 protected:
2413#ifdef DEBUG
2414 virtual void VerifyObject(HeapObject* obj);
2415#endif
2416
2417 private:
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00002418 static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002419
2420 // Do map space compaction if there is a page gap.
kasperl@chromium.orgeac059f2010-01-25 11:02:06 +00002421 int CompactionThreshold() {
2422 return kMapsPerPage * (max_map_space_pages_ - 1);
2423 }
2424
2425 const int max_map_space_pages_;
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002426
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002427 public:
2428 TRACK_MEMORY("MapSpace")
2429};
2430
2431
2432// -----------------------------------------------------------------------------
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002433// Old space for all global object property cell objects
2434
2435class CellSpace : public FixedSpace {
2436 public:
2437 // Creates a property cell space object with a maximum capacity.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002438 CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2439 : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
2440 {}
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002441
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002442 virtual int RoundSizeDownToObjectAlignment(int size) {
2443 if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
2444 return RoundDown(size, JSGlobalPropertyCell::kSize);
2445 } else {
2446 return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
2447 }
2448 }
2449
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002450 protected:
2451#ifdef DEBUG
2452 virtual void VerifyObject(HeapObject* obj);
2453#endif
2454
2455 public:
ager@chromium.org4af710e2009-09-15 12:20:11 +00002456 TRACK_MEMORY("CellSpace")
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002457};
2458
2459
2460// -----------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002461// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2462// the large object space. A large object is allocated from OS heap with
2463// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2464 // A large object always starts at offset Page::kObjectStartOffset in a page.
2465// Large objects do not move during garbage collections.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002466
kasper.lund7276f142008-07-30 08:49:36 +00002467class LargeObjectSpace : public Space {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002468 public:
erik.corry@gmail.com394dbcf2011-10-27 07:38:48 +00002469 LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002470 virtual ~LargeObjectSpace() {}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002471
2472 // Initializes internal data structures.
erik.corry@gmail.comf2038fb2012-01-16 11:42:08 +00002473 bool SetUp();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002474
2475 // Releases internal resources, frees objects in this space.
2476 void TearDown();
2477
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002478 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2479 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2480 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2481 }
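  // Example (sketch): for a one-megabyte reservation, the largest object that
  // fits is the chunk size minus one page plus the object start offset of
  // overhead, per the computation above:
  //
  //   intptr_t usable = LargeObjectSpace::ObjectSizeFor(1 * MB);
  //   // usable == 1 * MB - Page::kPageSize - Page::kObjectStartOffset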
2482
2483 // Shared implementation of AllocateRaw, AllocateRawCode and
2484 // AllocateRawFixedArray.
2485 MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
2486 Executability executable);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002487
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002488 // Available bytes for objects in this space.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002489 inline intptr_t Available();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002490
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00002491 virtual intptr_t Size() {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002492 return size_;
2493 }
2494
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00002495 virtual intptr_t SizeOfObjects() {
2496 return objects_size_;
2497 }
2498
jkummerow@chromium.org7a6fc812012-06-27 11:12:38 +00002499 intptr_t CommittedMemory() {
2500 return Size();
2501 }
2502
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002503 int PageCount() {
2504 return page_count_;
2505 }
2506
2507 // Finds an object for a given address, returns Failure::Exception()
2508 // if it is not found. The function iterates through all objects in this
2509 // space, so it may be slow.
lrn@chromium.org303ada72010-10-27 09:33:13 +00002510 MaybeObject* FindObject(Address a);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002511
jkummerow@chromium.org531dfe82012-03-20 13:01:16 +00002512 // Finds a large object page containing the given address, returns NULL
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00002513 // if such a page doesn't exist.
jkummerow@chromium.org531dfe82012-03-20 13:01:16 +00002514 LargePage* FindPage(Address a);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002515
2516 // Frees unmarked objects.
2517 void FreeUnmarkedObjects();
2518
2519 // Checks whether a heap object is in this space; O(1).
2520 bool Contains(HeapObject* obj);
2521
2522 // Checks whether the space is empty.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002523 bool IsEmpty() { return first_page_ == NULL; }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002524
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002525 // See the comments for ReserveSpace in the Space class. This has to be
2526 // called after ReserveSpace has been called on the paged spaces, since they
2527 // may use some memory, leaving less for large objects.
2528 virtual bool ReserveSpace(int bytes);
2529
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002530 LargePage* first_page() { return first_page_; }
2531
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002532#ifdef DEBUG
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002533 virtual void Verify();
2534 virtual void Print();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002535 void ReportStatistics();
2536 void CollectCodeStatistics();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002537#endif
2538 // Checks whether an address is in the object area in this space. It
2539 // iterates over all objects in the space and may be slow.
2540 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2541
2542 private:
erik.corry@gmail.com394dbcf2011-10-27 07:38:48 +00002543 intptr_t max_capacity_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002544 // The head of the linked list of large object chunks.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002545 LargePage* first_page_;
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +00002546 intptr_t size_; // allocated bytes
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002547 int page_count_; // number of chunks
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00002548 intptr_t objects_size_; // size of objects
jkummerow@chromium.org531dfe82012-03-20 13:01:16 +00002549 // Maps MemoryChunk::kAlignment-aligned chunks to the large pages covering them.
2550 HashMap chunk_map_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002551
kasperl@chromium.orgf5aa8372009-03-24 14:47:14 +00002552 friend class LargeObjectIterator;
2553
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002554 public:
2555 TRACK_MEMORY("LargeObjectSpace")
2556};
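// Illustrative sketch of how FindPage() can consult chunk_map_ (see the member
// comment above): each MemoryChunk::kAlignment-aligned slice covered by a large
// page is keyed by its alignment index. The hashing details below are an
// assumption, not a contract of this header:
//
//   uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
//   HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
//                                         static_cast<uint32_t>(key), false);
//   LargePage* page =
//       (e != NULL) ? reinterpret_cast<LargePage*>(e->value) : NULL;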
2557
2558
2559class LargeObjectIterator: public ObjectIterator {
2560 public:
2561 explicit LargeObjectIterator(LargeObjectSpace* space);
2562 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2563
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002564 HeapObject* Next();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002565
2566 // implementation of ObjectIterator.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002567 virtual HeapObject* next_object() { return Next(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002568
2569 private:
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002570 LargePage* current_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002571 HeapObjectCallback size_func_;
2572};
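// Typical iteration over the large object space (sketch; the heap->lo_space()
// accessor is assumed to return the heap's LargeObjectSpace):
//
//   LargeObjectIterator it(heap->lo_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit obj ...
//   }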
2573
2574
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002575// Iterates over the chunks (pages and large object pages) that can contain
2576// pointers to new space.
2577class PointerChunkIterator BASE_EMBEDDED {
2578 public:
2579 inline explicit PointerChunkIterator(Heap* heap);
2580
2581 // Return NULL when the iterator is done.
2582 MemoryChunk* next() {
2583 switch (state_) {
2584 case kOldPointerState: {
2585 if (old_pointer_iterator_.has_next()) {
2586 return old_pointer_iterator_.next();
2587 }
2588 state_ = kMapState;
2589 // Fall through.
2590 }
2591 case kMapState: {
2592 if (map_iterator_.has_next()) {
2593 return map_iterator_.next();
2594 }
2595 state_ = kLargeObjectState;
2596 // Fall through.
2597 }
2598 case kLargeObjectState: {
2599 HeapObject* heap_object;
2600 do {
2601 heap_object = lo_iterator_.Next();
2602 if (heap_object == NULL) {
2603 state_ = kFinishedState;
2604 return NULL;
2605 }
2606 // Fixed arrays are the only pointer-containing objects in large
2607 // object space.
2608 } while (!heap_object->IsFixedArray());
2609 MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
2610 return answer;
2611 }
2612 case kFinishedState:
2613 return NULL;
2614 default:
2615 break;
2616 }
2617 UNREACHABLE();
2618 return NULL;
2619 }
2620
2621
2622 private:
2623 enum State {
2624 kOldPointerState,
2625 kMapState,
2626 kLargeObjectState,
2627 kFinishedState
2628 };
2629 State state_;
2630 PageIterator old_pointer_iterator_;
2631 PageIterator map_iterator_;
2632 LargeObjectIterator lo_iterator_;
2633};
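// Typical use of PointerChunkIterator (sketch): walk every chunk that may hold
// pointers into new space, e.g. when scanning remembered sets.
//
//   PointerChunkIterator it(heap);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != NULL) {
//     // ... process the chunk's slots that may point to new space ...
//   }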
2634
2635
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002636#ifdef DEBUG
2637struct CommentStatistic {
2638 const char* comment;
2639 int size;
2640 int count;
2641 void Clear() {
2642 comment = NULL;
2643 size = 0;
2644 count = 0;
2645 }
2646 // Must be small, since an iteration is used for lookup.
2647 static const int kMaxComments = 64;
2648};
2649#endif
2650
2651
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002652} } // namespace v8::internal
2653
2654#endif // V8_SPACES_H_