// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "allocation.h"
#include "list.h"
#include "log.h"

namespace v8 {
namespace internal {

class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of
// intergenerational references. See store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if a page belongs to old pointer space or large object
// space it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old pointer and large object spaces should have
// a special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However, we
// still require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one
// page has a special garbage section, and if the top and limit are equal then
// there is no special garbage section.

// Some assertion macros used in the debugging mode.

#define ASSERT_PAGE_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_MAP_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size) \
  ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset) \
  ASSERT((Page::kObjectStartOffset <= offset) \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index) \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;
class Space;
class FreeList;
class MemoryChunk;

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask, bool data_only)
      : cell_(cell), mask_(mask), data_only_(data_only) { }

  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  inline bool data_only() { return data_only_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1, data_only_);
    } else {
      return MarkBit(cell_, new_mask, data_only_);
    }
  }

 private:
  CellType* cell_;
  CellType mask_;
  // This boolean indicates that the object is in a data-only space with no
  // pointers. This enables some optimizations when marking.
  // It is expected that this field is inlined and turned into control flow
  // at the place where the MarkBit object is created.
  bool data_only_;
};

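// A usage sketch (illustrative only, not part of this header): walking two
// consecutive mark bits. Starting from the bit for index 31 in a cell,
// Next() wraps the mask from 1 << 31 back to 1 and advances to the next cell:
//
//   MarkBit::CellType cells[2] = { 0, 0 };
//   MarkBit bit(&cells[0], 1u << 31, false);
//   bit.Set();                  // cells[0] == 0x80000000
//   MarkBit next = bit.Next();  // refers to cells[1] with mask == 1
//   next.Set();                 // cells[1] == 0x00000001
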
// Bitmap is a sequence of cells each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength =
    (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
    (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() {
    return CellsForLength(kLength);
  }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) {
    return reinterpret_cast<Address>(this);
  }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask, data_only);
  }

  static inline void Clear(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n",
               seq_start,
               seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) return false;
    }
    return true;
  }
};

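// Illustrative arithmetic for the index helpers above (assuming the 32-bit
// cells declared in this class): MarkBitFromIndex(75) picks cell
// 75 >> 5 == 2 and mask 1 << (75 & 31) == 1 << 11, while CellAlignIndex(75)
// rounds the index up to 96, the next multiple of kBitsPerCell.
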
293class SkipList;
294class SlotsBuffer;
295
296// MemoryChunk represents a memory region owned by a specific space.
297// It is divided into the header and the body. Chunk start is always
Ben Murdochc7cc0282012-03-05 14:35:55 +0000298// 1MB aligned. Start of the body is aligned so it can accommodate
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000299// any heap object.
300class MemoryChunk {
301 public:
302 // Only works if the pointer is in the first kPageSize of the MemoryChunk.
303 static MemoryChunk* FromAddress(Address a) {
304 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
305 }
306
307 // Only works for addresses in pointer spaces, not data or code spaces.
308 static inline MemoryChunk* FromAnyPointerAddress(Address addr);
309
310 Address address() { return reinterpret_cast<Address>(this); }
311
312 bool is_valid() { return address() != NULL; }
313
314 MemoryChunk* next_chunk() const { return next_chunk_; }
315 MemoryChunk* prev_chunk() const { return prev_chunk_; }
316
317 void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
318 void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
319
320 Space* owner() const {
321 if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
322 kFailureTag) {
323 return reinterpret_cast<Space*>(owner_ - kFailureTag);
324 } else {
325 return NULL;
326 }
327 }
328
329 void set_owner(Space* space) {
330 ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
331 owner_ = reinterpret_cast<Address>(space) + kFailureTag;
332 ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
333 kFailureTag);
334 }
335
336 VirtualMemory* reserved_memory() {
337 return &reservation_;
338 }
339
340 void InitializeReservedMemory() {
341 reservation_.Reset();
342 }
343
344 void set_reserved_memory(VirtualMemory* reservation) {
345 ASSERT_NOT_NULL(reservation);
346 reservation_.TakeControl(reservation);
347 }
348
349 bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
350 void initialize_scan_on_scavenge(bool scan) {
351 if (scan) {
352 SetFlag(SCAN_ON_SCAVENGE);
353 } else {
354 ClearFlag(SCAN_ON_SCAVENGE);
355 }
356 }
357 inline void set_scan_on_scavenge(bool scan);
358
359 int store_buffer_counter() { return store_buffer_counter_; }
360 void set_store_buffer_counter(int counter) {
361 store_buffer_counter_ = counter;
362 }
363
364 bool Contains(Address addr) {
365 return addr >= area_start() && addr < area_end();
366 }
367
368 // Checks whether addr can be a limit of addresses in this page.
369 // It's a limit if it's in the page, or if it's just after the
370 // last byte of the page.
371 bool ContainsLimit(Address addr) {
372 return addr >= area_start() && addr <= area_end();
373 }
374
  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    ABOUT_TO_BE_FREED,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    SCAN_ON_SCAVENGE,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    CONTAINS_ONLY_DATA,
    EVACUATION_CANDIDATE,
    RESCAN_ON_EVACUATION,

    // Pages swept precisely can be iterated, hitting only the live objects,
    // whereas those swept conservatively cannot be iterated over. Both flags
    // indicate that marking bits have been cleared by the sweeper; otherwise
    // marking bits are still intact.
    WAS_SWEPT_PRECISELY,
    WAS_SWEPT_CONSERVATIVELY,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };

  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask =
      1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) |
      (1 << RESCAN_ON_EVACUATION) |
      (1 << IN_FROM_SPACE) |
      (1 << IN_TO_SPACE);


  void SetFlag(int flag) {
    flags_ |= static_cast<uintptr_t>(1) << flag;
  }

  void ClearFlag(int flag) {
    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
  }

  void SetFlagTo(int flag, bool value) {
    if (value) {
      SetFlag(flag);
    } else {
      ClearFlag(flag);
    }
  }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask
  // are set to the value in "flags", the rest retain the current value
  // in flags_.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }

  // Manage live byte count (count of bytes known to be live,
  // because they are marked black).
  void ResetLiveBytes() {
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n",
             static_cast<void*>(this), live_byte_count_);
    }
    live_byte_count_ = 0;
  }
  void IncrementLiveBytes(int by) {
    if (FLAG_gc_verbose) {
      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
             static_cast<void*>(this), live_byte_count_,
             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
             live_byte_count_ + by);
    }
    live_byte_count_ += by;
    ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
  }
  int LiveBytes() {
    ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
    return live_byte_count_;
  }

  static void IncrementLiveBytesFromGC(Address address, int by) {
    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
  }

  static void IncrementLiveBytesFromMutator(Address address, int by);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = kPointerSize + kPointerSize;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
      kPointerSize + kPointerSize +
      kPointerSize + kPointerSize + kPointerSize + kIntSize;

  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;

  static const size_t kHeaderSize =
      kSlotsBufferOffset + kPointerSize + kPointerSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset = kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
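  // Worked example (illustrative values only; the real ones are platform
  // dependent): if kBodyOffset were 0x244 and kObjectStartAlignment 0x80,
  // the expression above gives 0x243 + (0x80 - 0x43) == 0x280, the next
  // multiple of 0x80; an already aligned kBodyOffset maps to itself.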

  size_t size() const { return size_; }

  void set_size(size_t size) {
    size_ = size;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool ContainsOnlyData() {
    return IsFlagSet(CONTAINS_ONLY_DATA);
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() {
    return IsFlagSet(IN_TO_SPACE);
  }

  bool InFromSpace() {
    return IsFlagSet(IN_FROM_SPACE);
  }

  // ---------------------------------------------------------------------
  // Markbits support

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  void PrintMarkbits() { markbits()->Print(); }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
    const intptr_t offset =
        reinterpret_cast<intptr_t>(addr) & kAlignmentMask;

    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }

  void InsertAfter(MemoryChunk* other);
  void Unlink();

  inline Heap* heap() { return heap_; }

  static const int kFlagsOffset = kPointerSize * 3;

  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  inline SkipList* skip_list() {
    return skip_list_;
  }

  inline void set_skip_list(SkipList* skip_list) {
    skip_list_ = skip_list;
  }

  inline SlotsBuffer* slots_buffer() {
    return slots_buffer_;
  }

  inline SlotsBuffer** slots_buffer_address() {
    return &slots_buffer_;
  }

  void MarkEvacuationCandidate() {
    ASSERT(slots_buffer_ == NULL);
    SetFlag(EVACUATION_CANDIDATE);
  }

  void ClearEvacuationCandidate() {
    ASSERT(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() {
    return static_cast<int>(area_end() - area_start());
  }

 protected:
  MemoryChunk* next_chunk_;
  MemoryChunk* prev_chunk_;
  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  VirtualMemory reservation_;
  // The identity of the owning space. This is tagged as a failure pointer,
  // but no failure can be in an object, so this can be distinguished from
  // any entry in a fixed array.
  Address owner_;
  Heap* heap_;
  // Used by the store buffer to keep track of which pages to mark scan-on-
  // scavenge.
  int store_buffer_counter_;
  // Count of bytes marked black on page.
  int live_byte_count_;
  SlotsBuffer* slots_buffer_;
  SkipList* skip_list_;

  static MemoryChunk* Initialize(Heap* heap,
                                 Address base,
                                 size_t size,
                                 Address area_start,
                                 Address area_end,
                                 Executability executable,
                                 Space* owner);

  friend class MemoryAllocator;
};

STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);

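// A minimal usage sketch (illustrative, not part of this header): because
// chunks are kAlignment (1 MB) aligned, any interior address can be mapped
// back to its chunk and to its mark bit with plain masking and shifting:
//
//   Address addr = ...;  // some address inside a chunk's body
//   MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
//   MarkBit mark_bit = chunk->markbits()->MarkBitFromIndex(
//       chunk->AddressToMarkbitIndex(addr));
//   mark_bit.Set();  // mark the word at addr as live
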
636// -----------------------------------------------------------------------------
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000637// A page is a memory chunk of a size 1MB. Large object pages may be larger.
Steve Blocka7e24c12009-10-30 11:49:00 +0000638//
639// The only way to get a page pointer is by calling factory methods:
640// Page* p = Page::FromAddress(addr); or
641// Page* p = Page::FromAllocationTop(top);
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000642class Page : public MemoryChunk {
Steve Blocka7e24c12009-10-30 11:49:00 +0000643 public:
644 // Returns the page containing a given address. The address ranges
645 // from [page_addr .. page_addr + kPageSize[
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000646 // This only works if the object is in fact in a page. See also MemoryChunk::
647 // FromAddress() and FromAnyAddress().
Steve Blocka7e24c12009-10-30 11:49:00 +0000648 INLINE(static Page* FromAddress(Address a)) {
649 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
650 }
651
652 // Returns the page containing an allocation top. Because an allocation
653 // top address can be the upper bound of the page, we need to subtract
654 // it with kPointerSize first. The address ranges from
655 // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
656 INLINE(static Page* FromAllocationTop(Address top)) {
657 Page* p = FromAddress(top - kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +0000658 return p;
659 }
660
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000661 // Returns the next page in the chain of pages owned by a space.
Steve Blocka7e24c12009-10-30 11:49:00 +0000662 inline Page* next_page();
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000663 inline Page* prev_page();
664 inline void set_next_page(Page* page);
665 inline void set_prev_page(Page* page);
Steve Blocka7e24c12009-10-30 11:49:00 +0000666
Steve Blocka7e24c12009-10-30 11:49:00 +0000667 // Checks whether an address is page aligned.
668 static bool IsAlignedToPageSize(Address a) {
669 return 0 == (OffsetFrom(a) & kPageAlignmentMask);
670 }
671
Steve Blocka7e24c12009-10-30 11:49:00 +0000672 // Returns the offset of a given address to this page.
673 INLINE(int Offset(Address a)) {
Steve Blockd0582a62009-12-15 09:54:21 +0000674 int offset = static_cast<int>(a - address());
Steve Blocka7e24c12009-10-30 11:49:00 +0000675 return offset;
676 }
677
678 // Returns the address for a given offset to the this page.
679 Address OffsetToAddress(int offset) {
680 ASSERT_PAGE_OFFSET(offset);
681 return address() + offset;
682 }
683
684 // ---------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +0000685
Steve Blocka7e24c12009-10-30 11:49:00 +0000686 // Page size in bytes. This must be a multiple of the OS page size.
687 static const int kPageSize = 1 << kPageSizeBits;
688
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000689 // Object area size in bytes.
690 static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
691
692 // Maximum object size that fits in a page.
693 static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
694
Steve Blocka7e24c12009-10-30 11:49:00 +0000695 // Page size mask.
696 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
697
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100698 inline void ClearGCFields();
699
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000700 static inline Page* Initialize(Heap* heap,
701 MemoryChunk* chunk,
702 Executability executable,
703 PagedSpace* owner);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100704
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000705 void InitializeAsAnchor(PagedSpace* owner);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100706
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000707 bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
708 bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
709 bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100710
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000711 void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
712 void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
Steve Blocka7e24c12009-10-30 11:49:00 +0000713
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000714 void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
715 void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
Steve Blocka7e24c12009-10-30 11:49:00 +0000716
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000717#ifdef DEBUG
718 void Print();
719#endif // DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +0000720
Ben Murdoch592a9fc2012-03-05 11:04:45 +0000721 friend class MemoryAllocator;
Steve Blocka7e24c12009-10-30 11:49:00 +0000722};
723
STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);


class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() {
    return HeapObject::FromAddress(area_start());
  }

  inline LargePage* next_page() const {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) {
    set_next_chunk(page);
  }

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};

STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);

// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap), id_(id), executable_(executable) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

  // After calling this we can allocate a certain number of bytes using only
  // linear allocation (with a LinearAllocationScope and an
  // AlwaysAllocateScope) without using freelists or causing a GC. This is
  // used by partial snapshots. It returns true if space was reserved or
  // false if a GC is needed. For paged spaces the space requested must
  // include the space wasted at the end of each page when allocating
  // linearly.
  virtual bool ReserveSpace(int bytes) = 0;

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;
};

// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() { TearDown(); }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(const size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  bool exists() { return this != NULL && code_range_ != NULL; }
  bool contains(Address address) {
    if (this == NULL || code_range_ == NULL) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
                                            size_t* allocated);
  void FreeRawMemory(Address buf, size_t length);

 private:
  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
  void GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};

class SkipList {
 public:
  SkipList() {
    Clear();
  }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) {
    return starts_[RegionNumber(addr)];
  }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == NULL) {
      list = new SkipList();
      page->set_skip_list(list);
    }

    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};

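// A sketch of the intended use (assumed from the interface above): Update()
// records, for each 8KB region of a page (kRegionSizeLog2 == 13), the lowest
// start address of an object that extends into that region, and StartFor()
// later returns a safe address at which to start scanning for the object
// covering an arbitrary address on a page that already has a skip list:
//
//   SkipList::Update(object_address, object_size);  // after placing an object
//   Address scan_start = Page::FromAddress(addr)->skip_list()->StartFor(addr);
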

// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and
// large pages for large object space.
//
// Each space has to manage its own pages.
//
class MemoryAllocator {
 public:
  explicit MemoryAllocator(Isolate* isolate);

  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

  void TearDown();

  Page* AllocatePage(PagedSpace* owner, Executability executable);

  LargePage* AllocateLargePage(intptr_t object_size,
                               Executability executable,
                               Space* owner);

  void Free(MemoryChunk* chunk);

  // Returns the maximum available bytes of heaps.
  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated spaces in bytes.
  intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns allocated executable spaces in bytes.
  intptr_t SizeExecutable() { return size_executable_; }

  // Returns maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
  }

#ifdef DEBUG
  // Reports statistic info of the space.
  void ReportStatistics();
#endif

  MemoryChunk* AllocateChunk(intptr_t body_size,
                             Executability executable,
                             Space* space);

  Address ReserveAlignedMemory(size_t requested,
                               size_t alignment,
                               VirtualMemory* controller);
  Address AllocateAlignedMemory(size_t requested,
                                size_t alignment,
                                Executability executable,
                                VirtualMemory* controller);

  void FreeMemory(VirtualMemory* reservation, Executability executable);
  void FreeMemory(Address addr, size_t size, Executability executable);

  // Commit a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  void ZapBlock(Address start, size_t size);

  void PerformAllocationCallback(ObjectSpace space,
                                 AllocationAction action,
                                 size_t size);

  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                   ObjectSpace space,
                                   AllocationAction action);

  void RemoveMemoryAllocationCallback(
      MemoryAllocationCallback callback);

  bool MemoryAllocationCallbackRegistered(
      MemoryAllocationCallback callback);

  static int CodePageGuardStartOffset();

  static int CodePageGuardSize();

  static int CodePageAreaStartOffset();

  static int CodePageAreaEndOffset();

  static int CodePageAreaSize() {
    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
  }

  static bool CommitCodePage(VirtualMemory* vm, Address start, size_t size);

 private:
  Isolate* isolate_;

  // Maximum space size in bytes.
  size_t capacity_;
  // Maximum subset of capacity_ that can be executable.
  size_t capacity_executable_;

  // Allocated space size in bytes.
  size_t size_;
  // Allocated executable space size in bytes.
  size_t size_executable_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {
    }
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };

  // A list of callbacks that are triggered when memory is allocated or freed.
  List<MemoryAllocationCallbackRegistration>
      memory_allocation_callbacks_;

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own next()
// method, which is used to avoid virtual function calls when iterating a
// specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from the bottom of the given space
// to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns NULL when the iteration has ended.
  inline HeapObject* Next() {
    do {
      HeapObject* next_obj = FromCurrentPage();
      if (next_obj != NULL) return next_obj;
    } while (AdvanceToNextPage());
    return NULL;
  }

  virtual HeapObject* next_object() {
    return Next();
  }

 private:
  enum PageMode { kOnePageOnly, kAllPagesInSpace };

  Address cur_addr_;              // Current iteration point.
  Address cur_end_;               // End iteration point.
  HeapObjectCallback size_func_;  // Size function or NULL.
  PagedSpace* space_;
  PageMode page_mode_;

  // Fast (inlined) path of next().
  inline HeapObject* FromCurrentPage();

  // Slow path of next(), goes into the next page. Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  // Initializes fields.
  inline void Initialize(PagedSpace* owner,
                         Address start,
                         Address end,
                         PageMode mode,
                         HeapObjectCallback size_func);
};

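// A typical use (a sketch only; 'space' stands for any PagedSpace): walk
// every object currently in the space, relying on Next() returning NULL at
// the end of the iteration.
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit obj ...
//   }
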

// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.

class PageIterator BASE_EMBEDDED {
 public:
  explicit inline PageIterator(PagedSpace* space);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  // Next page that will be returned. Cached here so that we can use this
  // iterator for operations that deallocate pages.
  Page* next_page_;
};

// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  AllocationInfo() : top(NULL), limit(NULL) {
  }

  Address top;    // Current allocation top.
  Address limit;  // Current allocation limit.

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
        && (top <= limit);
  }
#endif
};

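// A minimal sketch of the bump-pointer allocation that AllocationInfo
// supports (illustrative only; the real fast path lives in the spaces and in
// generated code):
//
//   Address TryAllocate(AllocationInfo* info, int size_in_bytes) {
//     if (info->top + size_in_bytes > info->limit) return NULL;  // need free list or GC
//     Address result = info->top;
//     info->top += size_in_bytes;
//     return result;
//   }
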
// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g. small blocks due
// to internal fragmentation, top of page areas in map space), and the bytes
// 'available' is the number of unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  void ClearSizeWaste() {
    size_ = capacity_;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  intptr_t Capacity() { return capacity_; }
  intptr_t Size() { return size_; }
  intptr_t Waste() { return waste_; }

  // Grow the space by adding available bytes. They are initially marked as
  // being in use (part of the size), but will normally be immediately freed,
  // putting them on the free list and removing them from size_.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    size_ += size_in_bytes;
    ASSERT(size_ >= 0);
  }

  // Shrink the space by removing available bytes. Since shrinking is done
  // during sweeping, bytes have been marked as being in use (part of the
  // size) and are hereby freed.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    size_ -= size_in_bytes;
    ASSERT(size_ >= 0);
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(intptr_t size_in_bytes) {
    size_ += size_in_bytes;
    ASSERT(size_ >= 0);
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(intptr_t size_in_bytes) {
    size_ -= size_in_bytes;
    ASSERT(size_ >= 0);
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    size_ -= size_in_bytes;
    waste_ += size_in_bytes;
    ASSERT(size_ >= 0);
  }

 private:
  intptr_t capacity_;
  intptr_t size_;
  intptr_t waste_;
};

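// Illustrative sequence showing how the counters above stay balanced (the
// numbers are made up; they only track the arithmetic of the methods):
//
//   AllocationStats stats;        // capacity_ == size_ == waste_ == 0
//   stats.ExpandSpace(4096);      // capacity_ == 4096, size_ == 4096
//   stats.DeallocateBytes(4096);  // size_ == 0: the new bytes are available
//   stats.AllocateBytes(40);      // size_ == 40
//   stats.WasteBytes(8);          // size_ == 32, waste_ == 8
//
// so capacity (4096) == size (32) + waste (8) + available bytes (4056).
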
// -----------------------------------------------------------------------------
// Free lists for old object spaces
//
// Free-list nodes are free blocks in the heap. They look like heap objects
// (free-list node pointers have the heap object tag, and they have a map like
// a heap object). They have a size and a next pointer. The next pointer is
// the raw address of the next free list node (or NULL).
class FreeListNode: public HeapObject {
 public:
  // Obtain a free-list node from a raw address. This is not a cast because
  // it does not check nor require that the first word at the address is a map
  // pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  static inline bool IsFreeListNode(HeapObject* object);

  // Set the size in bytes, which can be read with HeapObject::Size(). This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // functions.
  void set_size(Heap* heap, int size_in_bytes);

  // Accessors for the next field.
  inline FreeListNode* next();
  inline FreeListNode** next_address();
  inline void set_next(FreeListNode* next);

  inline void Zap();

 private:
  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};

1317
1318// The free list for the old space. The free list is organized in such a way
1319// as to encourage objects allocated around the same time to be near each
1320// other. The normal way to allocate is intended to be by bumping a 'top'
1321// pointer until it hits a 'limit' pointer. When the limit is hit we need to
1322// find a new space to allocate from. This is done with the free list, which
1323// is divided up into rough categories to cut down on waste. Having finer
1324// categories would scatter allocation more.
1325
1326// The old space free list is organized in categories.
1327// 1-31 words: Such small free areas are discarded for efficiency reasons.
1328// They can be reclaimed by the compactor. However the distance between top
1329// and limit may be this small.
1330// 32-255 words: There is a list of spaces this large. It is used for top and
1331// limit when the object we need to allocate is 1-31 words in size. These
1332// spaces are called small.
1333// 256-2047 words: There is a list of spaces this large. It is used for top and
1334// limit when the object we need to allocate is 32-255 words in size. These
1335// spaces are called medium.
1336// 1048-16383 words: There is a list of spaces this large. It is used for top
1337// and limit when the object we need to allocate is 256-2047 words in size.
1338// These spaces are call large.
1339// At least 16384 words. This list is for objects of 2048 words or larger.
1340// Empty pages are added to this list. These spaces are called huge.
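// For example (an illustrative reading of the categories above, not a quote
// from the implementation): a request for a 100-word object is served from
// the medium list, i.e. from a free space of 256-2047 words, and the unused
// remainder of that space becomes the new top/limit area for subsequent
// bump-pointer allocation.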
1341class FreeList BASE_EMBEDDED {
1342 public:
1343 explicit FreeList(PagedSpace* owner);
1344
1345 // Clear the free list.
1346 void Reset();
1347
1348 // Return the number of bytes available on the free list.
1349 intptr_t available() { return available_; }
1350
1351 // Place a node on the free list. The block of size 'size_in_bytes'
1352 // starting at 'start' is placed on the free list. The return value is the
1353 // number of bytes that have been lost due to internal fragmentation by
1354 // freeing the block. Bookkeeping information will be written to the block,
Ben Murdochc7cc0282012-03-05 14:35:55 +00001355 // i.e., its contents will be destroyed. The start address should be word
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001356 // aligned, and the size should be a non-zero multiple of the word size.
1357 int Free(Address start, int size_in_bytes);
1358
1359 // Allocate a block of size 'size_in_bytes' from the free list. The block
              1360 // is uninitialized. NULL is returned if no block large enough is
              1361 // available. The size should be a non-zero multiple of the word size.
1363 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1364
1365#ifdef DEBUG
1366 void Zap();
1367 static intptr_t SumFreeList(FreeListNode* node);
1368 static int FreeListLength(FreeListNode* cur);
1369 intptr_t SumFreeLists();
1370 bool IsVeryLong();
1371#endif
1372
1373 struct SizeStats {
1374 intptr_t Total() {
1375 return small_size_ + medium_size_ + large_size_ + huge_size_;
1376 }
1377
1378 intptr_t small_size_;
1379 intptr_t medium_size_;
1380 intptr_t large_size_;
1381 intptr_t huge_size_;
1382 };
1383
1384 void CountFreeListItems(Page* p, SizeStats* sizes);
1385
1386 intptr_t EvictFreeListItems(Page* p);
1387
1388 private:
1389 // The size range of blocks, in bytes.
1390 static const int kMinBlockSize = 3 * kPointerSize;
1391 static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
1392
1393 FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
1394
1395 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
1396
1397 PagedSpace* owner_;
1398 Heap* heap_;
1399
1400 // Total available bytes in all blocks on this free list.
1401 int available_;
1402
1403 static const int kSmallListMin = 0x20 * kPointerSize;
1404 static const int kSmallListMax = 0xff * kPointerSize;
1405 static const int kMediumListMax = 0x7ff * kPointerSize;
1406 static const int kLargeListMax = 0x3fff * kPointerSize;
1407 static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
1408 static const int kMediumAllocationMax = kSmallListMax;
1409 static const int kLargeAllocationMax = kMediumListMax;
1410 FreeListNode* small_list_;
1411 FreeListNode* medium_list_;
1412 FreeListNode* large_list_;
1413 FreeListNode* huge_list_;
1414
1415 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1416};
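// Illustrative usage sketch (hypothetical snippet, not part of the V8
// sources): how an owning space hands a dead region to its free list and
// later reuses it once the bump-pointer area is exhausted.
//
//   int wasted = free_list->Free(area_start, area_size_in_bytes);
//   // 'wasted' bytes were too small to be linked into any category.
//   HeapObject* object = free_list->Allocate(object_size_in_bytes);
//   if (object == NULL) {
//     // No node was large enough; the caller must grow the space instead.
//   }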
1417
1418
Steve Blocka7e24c12009-10-30 11:49:00 +00001419class PagedSpace : public Space {
1420 public:
1421 // Creates a space with a maximum capacity, and an id.
Steve Block44f0eee2011-05-26 01:26:41 +01001422 PagedSpace(Heap* heap,
1423 intptr_t max_capacity,
Ben Murdochf87a2032010-10-22 12:50:53 +01001424 AllocationSpace id,
1425 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00001426
1427 virtual ~PagedSpace() {}
1428
              1429 // Set up the space. Returns true on success.
Ben Murdochc7cc0282012-03-05 14:35:55 +00001433 bool SetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00001434
1435 // Returns true if the space has been successfully set up and not
1436 // subsequently torn down.
Ben Murdochc7cc0282012-03-05 14:35:55 +00001437 bool HasBeenSetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00001438
1439 // Cleans up the space, frees all pages in this space except those belonging
1440 // to the initial chunk, uncommits addresses in the initial chunk.
1441 void TearDown();
1442
1443 // Checks whether an object/address is in this space.
1444 inline bool Contains(Address a);
1445 bool Contains(HeapObject* o) { return Contains(o->address()); }
1446
1447 // Given an address occupied by a live object, return that object if it is
1448 // in this space, or Failure::Exception() if it is not. The implementation
1449 // iterates over objects in the page containing the address, the cost is
1450 // linear in the number of objects in the page. It may be slow.
John Reck59135872010-11-02 12:39:01 -07001451 MUST_USE_RESULT MaybeObject* FindObject(Address addr);
Steve Blocka7e24c12009-10-30 11:49:00 +00001452
Steve Blocka7e24c12009-10-30 11:49:00 +00001453 // Prepares for a mark-compact GC.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001454 virtual void PrepareForMarkCompact();
Steve Blocka7e24c12009-10-30 11:49:00 +00001455
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001456 // Current capacity without growing (Size() + Available()).
Ben Murdochf87a2032010-10-22 12:50:53 +01001457 intptr_t Capacity() { return accounting_stats_.Capacity(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001458
Steve Block3ce2e202009-11-05 08:53:23 +00001459 // Total amount of memory committed for this space. For paged
1460 // spaces this equals the capacity.
Ben Murdochf87a2032010-10-22 12:50:53 +01001461 intptr_t CommittedMemory() { return Capacity(); }
Steve Block3ce2e202009-11-05 08:53:23 +00001462
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001463 // Sets the capacity, the available space and the wasted space to zero.
1464 // The stats are rebuilt during sweeping by adding each page to the
1465 // capacity and the size when it is encountered. As free spaces are
1466 // discovered during the sweeping they are subtracted from the size and added
1467 // to the available and wasted totals.
1468 void ClearStats() {
1469 accounting_stats_.ClearSizeWaste();
1470 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001471
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001472 // Available bytes without growing. These are the bytes on the free list.
1473 // The bytes in the linear allocation area are not included in this total
1474 // because updating the stats would slow down allocation. New pages are
1475 // immediately added to the free list so they show up here.
1476 intptr_t Available() { return free_list_.available(); }
1477
1478 // Allocated bytes in this space. Garbage bytes that were not found due to
1479 // lazy sweeping are counted as being allocated! The bytes in the current
1480 // linear allocation area (between top and limit) are also counted here.
Ben Murdochf87a2032010-10-22 12:50:53 +01001481 virtual intptr_t Size() { return accounting_stats_.Size(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001482
Ben Murdochc7cc0282012-03-05 14:35:55 +00001483 // As Size(), but the bytes in lazily swept pages are estimated and the
              1484 // unused bytes in the current linear allocation area are not included.
1485 virtual intptr_t SizeOfObjects() {
1486 ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0));
1487 return Size() - unswept_free_bytes_ - (limit() - top());
1488 }
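  // For instance (illustrative numbers): if the accounted Size() is 4 MB,
  // lazily swept pages are estimated to hold 1 MB of reclaimable garbage and
  // 64 KB of the current linear area is still unused, then SizeOfObjects()
  // reports 4 MB - 1 MB - 64 KB (about 2.94 MB) of live object bytes.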
Steve Blocka7e24c12009-10-30 11:49:00 +00001489
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001490 // Wasted bytes in this space. These are just the bytes that were thrown away
1491 // due to being too small to use for allocation. They do not include the
1492 // free bytes that were not found at all due to lazy sweeping.
1493 virtual intptr_t Waste() { return accounting_stats_.Waste(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001494
1495 // Returns the allocation pointer in this space.
Ben Murdochc7cc0282012-03-05 14:35:55 +00001496 Address top() { return allocation_info_.top; }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001497 Address limit() { return allocation_info_.limit; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001498
1499 // Allocate the requested number of bytes in the space if possible, return a
1500 // failure object if not.
John Reck59135872010-11-02 12:39:01 -07001501 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001502
Leon Clarkee46be812010-01-19 14:06:41 +00001503 virtual bool ReserveSpace(int bytes);
1504
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001505 // Give a block of memory to the space's free list. It might be added to
1506 // the free list or accounted as waste.
              1507 // Blocks that are too small for any free-list category are dropped and
              1508 // counted as waste; the return value is the number of bytes actually freed.
1509 int Free(Address start, int size_in_bytes) {
1510 int wasted = free_list_.Free(start, size_in_bytes);
1511 accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
1512 return size_in_bytes - wasted;
1513 }
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001514
Steve Block6ded16b2010-05-10 14:33:55 +01001515 // Set space allocation info.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001516 void SetTop(Address top, Address limit) {
1517 ASSERT(top == limit ||
1518 Page::FromAddress(top) == Page::FromAddress(limit - 1));
Steve Block6ded16b2010-05-10 14:33:55 +01001519 allocation_info_.top = top;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001520 allocation_info_.limit = limit;
Steve Block6ded16b2010-05-10 14:33:55 +01001521 }
1522
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001523 void Allocate(int bytes) {
1524 accounting_stats_.AllocateBytes(bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001525 }
1526
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001527 void IncreaseCapacity(int size) {
1528 accounting_stats_.ExpandSpace(size);
1529 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001530
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001531 // Releases an unused page and shrinks the space.
1532 void ReleasePage(Page* page);
Steve Blocka7e24c12009-10-30 11:49:00 +00001533
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001534 // Releases all of the unused pages.
1535 void ReleaseAllUnusedPages();
Steve Blocka7e24c12009-10-30 11:49:00 +00001536
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001537 // The dummy page that anchors the linked list of pages.
1538 Page* anchor() { return &anchor_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001539
Steve Blocka7e24c12009-10-30 11:49:00 +00001540#ifdef DEBUG
1541 // Print meta info and objects in this space.
1542 virtual void Print();
1543
1544 // Verify integrity of this space.
1545 virtual void Verify(ObjectVisitor* visitor);
1546
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001547 // Reports statistics for the space
1548 void ReportStatistics();
1549
Steve Blocka7e24c12009-10-30 11:49:00 +00001550 // Overridden by subclasses to verify space-specific object
1551 // properties (e.g., only maps or free-list nodes are in map space).
1552 virtual void VerifyObject(HeapObject* obj) {}
1553
1554 // Report code object related statistics
1555 void CollectCodeStatistics();
1556 static void ReportCodeStatistics();
1557 static void ResetCodeStatistics();
1558#endif
1559
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001560 bool was_swept_conservatively() { return was_swept_conservatively_; }
1561 void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
Steve Block6ded16b2010-05-10 14:33:55 +01001562
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001563 // Evacuation candidates are swept by the evacuator. Needs to return a valid
1564 // result before _and_ after evacuation has finished.
1565 static bool ShouldBeSweptLazily(Page* p) {
1566 return !p->IsEvacuationCandidate() &&
1567 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
1568 !p->WasSweptPrecisely();
1569 }
1570
1571 void SetPagesToSweep(Page* first) {
Ben Murdochc7cc0282012-03-05 14:35:55 +00001572 ASSERT(unswept_free_bytes_ == 0);
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001573 if (first == &anchor_) first = NULL;
1574 first_unswept_page_ = first;
1575 }
1576
Ben Murdochc7cc0282012-03-05 14:35:55 +00001577 void IncrementUnsweptFreeBytes(int by) {
1578 unswept_free_bytes_ += by;
1579 }
1580
1581 void IncreaseUnsweptFreeBytes(Page* p) {
1582 ASSERT(ShouldBeSweptLazily(p));
1583 unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
1584 }
1585
1586 void DecreaseUnsweptFreeBytes(Page* p) {
1587 ASSERT(ShouldBeSweptLazily(p));
1588 unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
1589 }
1590
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001591 bool AdvanceSweeper(intptr_t bytes_to_sweep);
1592
1593 bool IsSweepingComplete() {
1594 return !first_unswept_page_->is_valid();
1595 }
1596
1597 Page* FirstPage() { return anchor_.next_page(); }
1598 Page* LastPage() { return anchor_.prev_page(); }
1599
1600 // Returns zero for pages that have so little fragmentation that it is not
1601 // worth defragmenting them. Otherwise a positive integer that gives an
1602 // estimate of fragmentation on an arbitrary scale.
1603 int Fragmentation(Page* p) {
1604 FreeList::SizeStats sizes;
1605 free_list_.CountFreeListItems(p, &sizes);
1606
1607 intptr_t ratio;
1608 intptr_t ratio_threshold;
1609 if (identity() == CODE_SPACE) {
1610 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
1611 AreaSize();
1612 ratio_threshold = 10;
1613 } else {
1614 ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
1615 AreaSize();
1616 ratio_threshold = 15;
1617 }
1618
1619 if (FLAG_trace_fragmentation) {
1620 PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
1621 reinterpret_cast<void*>(p),
1622 identity(),
1623 static_cast<int>(sizes.small_size_),
1624 static_cast<double>(sizes.small_size_ * 100) /
1625 AreaSize(),
1626 static_cast<int>(sizes.medium_size_),
1627 static_cast<double>(sizes.medium_size_ * 100) /
1628 AreaSize(),
1629 static_cast<int>(sizes.large_size_),
1630 static_cast<double>(sizes.large_size_ * 100) /
1631 AreaSize(),
1632 static_cast<int>(sizes.huge_size_),
1633 static_cast<double>(sizes.huge_size_ * 100) /
1634 AreaSize(),
1635 (ratio > ratio_threshold) ? "[fragmented]" : "");
1636 }
1637
1638 if (FLAG_always_compact && sizes.Total() != AreaSize()) {
1639 return 1;
1640 }
1641 if (ratio <= ratio_threshold) return 0; // Not fragmented.
1642
1643 return static_cast<int>(ratio - ratio_threshold);
1644 }
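  // Worked example (illustrative numbers, non-code space): with 2 KB on the
  // small lists, 10 KB on the medium lists and AreaSize() == 100 KB, the
  // ratio is (2 KB * 5 + 10 KB) * 100 / 100 KB == 20, which exceeds the
  // threshold of 15, so Fragmentation() returns 20 - 15 == 5.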
1645
1646 void EvictEvacuationCandidatesFromFreeLists();
1647
1648 bool CanExpand();
1649
1650 // Returns the number of total pages in this space.
1651 int CountTotalPages();
1652
1653 // Return size of allocatable area on a page in this space.
1654 inline int AreaSize() {
1655 return area_size_;
1656 }
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001657
Steve Blocka7e24c12009-10-30 11:49:00 +00001658 protected:
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001659 int area_size_;
1660
Steve Blocka7e24c12009-10-30 11:49:00 +00001661 // Maximum capacity of this space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001662 intptr_t max_capacity_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001663
1664 // Accounting information for this space.
1665 AllocationStats accounting_stats_;
1666
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001667 // The dummy page that anchors the double linked list of pages.
1668 Page anchor_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001669
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001670 // The space's free list.
1671 FreeList free_list_;
Steve Block6ded16b2010-05-10 14:33:55 +01001672
Steve Blocka7e24c12009-10-30 11:49:00 +00001673 // Normal allocation information.
1674 AllocationInfo allocation_info_;
1675
Steve Blocka7e24c12009-10-30 11:49:00 +00001676 // Bytes of each page that cannot be allocated. Possibly non-zero
1677 // for pages in spaces with only fixed-size objects. Always zero
1678 // for pages in spaces with variable sized objects (those pages are
1679 // padded with free-list nodes).
1680 int page_extra_;
1681
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001682 bool was_swept_conservatively_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001683
Ben Murdochc7cc0282012-03-05 14:35:55 +00001684 // The first page to be swept when the lazy sweeper advances. Is set
1685 // to NULL when all pages have been swept.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001686 Page* first_unswept_page_;
Leon Clarked91b9f72010-01-27 17:25:45 +00001687
Ben Murdochc7cc0282012-03-05 14:35:55 +00001688 // The number of free bytes which could be reclaimed by advancing the
1689 // lazy sweeper. This is only an estimation because lazy sweeping is
1690 // done conservatively.
1691 intptr_t unswept_free_bytes_;
1692
Steve Blocka7e24c12009-10-30 11:49:00 +00001693 // Expands the space by allocating a fixed number of pages. Returns false if
Ben Murdochc7cc0282012-03-05 14:35:55 +00001694 // it cannot allocate the requested number of pages from the OS, or if the
              1695 // hard heap size limit has been hit.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001696 bool Expand();
Steve Blocka7e24c12009-10-30 11:49:00 +00001697
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001698 // Generic fast case allocation function that tries linear allocation at the
1699 // address denoted by top in allocation_info_.
1700 inline HeapObject* AllocateLinearly(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001701
1702 // Slow path of AllocateRaw. This function is space-dependent.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001703 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001704
Steve Blocka7e24c12009-10-30 11:49:00 +00001705 friend class PageIterator;
1706};
1707
1708
Steve Blocka7e24c12009-10-30 11:49:00 +00001709class NumberAndSizeInfo BASE_EMBEDDED {
1710 public:
1711 NumberAndSizeInfo() : number_(0), bytes_(0) {}
1712
1713 int number() const { return number_; }
1714 void increment_number(int num) { number_ += num; }
1715
1716 int bytes() const { return bytes_; }
1717 void increment_bytes(int size) { bytes_ += size; }
1718
1719 void clear() {
1720 number_ = 0;
1721 bytes_ = 0;
1722 }
1723
1724 private:
1725 int number_;
1726 int bytes_;
1727};
1728
1729
1730// HistogramInfo class for recording a single "bar" of a histogram. This
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001731// class is used for collecting statistics to print to the log file.
Steve Blocka7e24c12009-10-30 11:49:00 +00001732class HistogramInfo: public NumberAndSizeInfo {
1733 public:
1734 HistogramInfo() : NumberAndSizeInfo() {}
1735
1736 const char* name() { return name_; }
1737 void set_name(const char* name) { name_ = name; }
1738
1739 private:
1740 const char* name_;
1741};
Steve Blocka7e24c12009-10-30 11:49:00 +00001742
1743
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001744enum SemiSpaceId {
1745 kFromSpace = 0,
1746 kToSpace = 1
1747};
1748
1749
1750class SemiSpace;
1751
1752
1753class NewSpacePage : public MemoryChunk {
1754 public:
1755 // GC related flags copied from from-space to to-space when
1756 // flipping semispaces.
1757 static const intptr_t kCopyOnFlipFlagsMask =
1758 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
1759 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
1760 (1 << MemoryChunk::SCAN_ON_SCAVENGE);
1761
1762 static const int kAreaSize = Page::kNonCodeObjectAreaSize;
1763
1764 inline NewSpacePage* next_page() const {
1765 return static_cast<NewSpacePage*>(next_chunk());
1766 }
1767
1768 inline void set_next_page(NewSpacePage* page) {
1769 set_next_chunk(page);
1770 }
1771
1772 inline NewSpacePage* prev_page() const {
1773 return static_cast<NewSpacePage*>(prev_chunk());
1774 }
1775
1776 inline void set_prev_page(NewSpacePage* page) {
1777 set_prev_chunk(page);
1778 }
1779
1780 SemiSpace* semi_space() {
1781 return reinterpret_cast<SemiSpace*>(owner());
1782 }
1783
1784 bool is_anchor() { return !this->InNewSpace(); }
1785
1786 static bool IsAtStart(Address addr) {
1787 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
1788 == kObjectStartOffset;
1789 }
1790
1791 static bool IsAtEnd(Address addr) {
1792 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
1793 }
1794
1795 Address address() {
1796 return reinterpret_cast<Address>(this);
1797 }
1798
              1799 // Finds the NewSpacePage containing the given address.
1800 static inline NewSpacePage* FromAddress(Address address_in_page) {
1801 Address page_start =
1802 reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
1803 ~Page::kPageAlignmentMask);
1804 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
1805 return page;
1806 }
1807
1808 // Find the page for a limit address. A limit address is either an address
1809 // inside a page, or the address right after the last byte of a page.
1810 static inline NewSpacePage* FromLimit(Address address_limit) {
1811 return NewSpacePage::FromAddress(address_limit - 1);
1812 }
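  // Example (illustrative, assuming 1 MB page alignment so that
  // kPageAlignmentMask == 0xFFFFF): FromAddress(0x2345678) clears the low
  // bits and yields the page at 0x2300000, while FromLimit(0x2400000) first
  // steps back one byte so that a limit sitting exactly on the next page
  // boundary still maps to the page it terminates.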
1813
1814 private:
1815 // Create a NewSpacePage object that is only used as anchor
1816 // for the doubly-linked list of real pages.
1817 explicit NewSpacePage(SemiSpace* owner) {
1818 InitializeAsAnchor(owner);
1819 }
1820
1821 static NewSpacePage* Initialize(Heap* heap,
1822 Address start,
1823 SemiSpace* semi_space);
1824
              1825 // Initialize a fake NewSpacePage used as a sentinel at the ends
1826 // of a doubly-linked list of real NewSpacePages.
1827 // Only uses the prev/next links, and sets flags to not be in new-space.
1828 void InitializeAsAnchor(SemiSpace* owner);
1829
1830 friend class SemiSpace;
1831 friend class SemiSpaceIterator;
1832};
1833
1834
Steve Blocka7e24c12009-10-30 11:49:00 +00001835// -----------------------------------------------------------------------------
1836// SemiSpace in young generation
1837//
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001838// A semispace is a contiguous chunk of memory holding page-like memory
1839// chunks. The mark-compact collector uses the memory of the first page in
1840// the from space as a marking stack when tracing live objects.
Steve Blocka7e24c12009-10-30 11:49:00 +00001841
1842class SemiSpace : public Space {
1843 public:
1844 // Constructor.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001845 SemiSpace(Heap* heap, SemiSpaceId semispace)
1846 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
1847 start_(NULL),
1848 age_mark_(NULL),
1849 id_(semispace),
1850 anchor_(this),
1851 current_page_(NULL) { }
Steve Blocka7e24c12009-10-30 11:49:00 +00001852
1853 // Sets up the semispace using the given chunk.
Ben Murdochc7cc0282012-03-05 14:35:55 +00001854 void SetUp(Address start, int initial_capacity, int maximum_capacity);
Steve Blocka7e24c12009-10-30 11:49:00 +00001855
1856 // Tear down the space. Heap memory was not allocated by the space, so it
1857 // is not deallocated here.
1858 void TearDown();
1859
1860 // True if the space has been set up but not torn down.
Ben Murdochc7cc0282012-03-05 14:35:55 +00001861 bool HasBeenSetUp() { return start_ != NULL; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001862
Steve Blocka7e24c12009-10-30 11:49:00 +00001863 // Grow the semispace to the new capacity. The new capacity
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001864 // requested must be larger than the current capacity and less than
1865 // the maximum capacity.
Steve Blocka7e24c12009-10-30 11:49:00 +00001866 bool GrowTo(int new_capacity);
1867
1868 // Shrinks the semispace to the new capacity. The new capacity
1869 // requested must be more than the amount of used memory in the
1870 // semispace and less than the current capacity.
1871 bool ShrinkTo(int new_capacity);
1872
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001873 // Returns the start address of the first page of the space.
1874 Address space_start() {
1875 ASSERT(anchor_.next_page() != &anchor_);
1876 return anchor_.next_page()->area_start();
1877 }
1878
1879 // Returns the start address of the current page of the space.
1880 Address page_low() {
1881 return current_page_->area_start();
1882 }
1883
Steve Blocka7e24c12009-10-30 11:49:00 +00001884 // Returns one past the end address of the space.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001885 Address space_end() {
1886 return anchor_.prev_page()->area_end();
1887 }
1888
1889 // Returns one past the end address of the current page of the space.
1890 Address page_high() {
1891 return current_page_->area_end();
1892 }
1893
1894 bool AdvancePage() {
1895 NewSpacePage* next_page = current_page_->next_page();
1896 if (next_page == anchor()) return false;
1897 current_page_ = next_page;
1898 return true;
1899 }
1900
1901 // Resets the space to using the first page.
1902 void Reset();
Steve Blocka7e24c12009-10-30 11:49:00 +00001903
1904 // Age mark accessors.
1905 Address age_mark() { return age_mark_; }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001906 void set_age_mark(Address mark);
Steve Blocka7e24c12009-10-30 11:49:00 +00001907
1908 // True if the address is in the address range of this semispace (not
1909 // necessarily below the allocation pointer).
1910 bool Contains(Address a) {
1911 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1912 == reinterpret_cast<uintptr_t>(start_);
1913 }
1914
1915 // True if the object is a heap object in the address range of this
1916 // semispace (not necessarily below the allocation pointer).
1917 bool Contains(Object* o) {
1918 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1919 }
1920
Leon Clarkee46be812010-01-19 14:06:41 +00001921 // If we don't have these here then SemiSpace will be abstract. However
1922 // they should never be called.
Ben Murdochf87a2032010-10-22 12:50:53 +01001923 virtual intptr_t Size() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001924 UNREACHABLE();
1925 return 0;
1926 }
1927
Leon Clarkee46be812010-01-19 14:06:41 +00001928 virtual bool ReserveSpace(int bytes) {
1929 UNREACHABLE();
1930 return false;
1931 }
1932
Steve Blocka7e24c12009-10-30 11:49:00 +00001933 bool is_committed() { return committed_; }
1934 bool Commit();
1935 bool Uncommit();
1936
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001937 NewSpacePage* first_page() { return anchor_.next_page(); }
1938 NewSpacePage* current_page() { return current_page_; }
1939
Steve Blocka7e24c12009-10-30 11:49:00 +00001940#ifdef DEBUG
1941 virtual void Print();
1942 virtual void Verify();
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001943 // Validate a range of addresses in a SemiSpace.
1944 // The "from" address must be on a page prior to the "to" address,
1945 // in the linked page order, or it must be earlier on the same page.
1946 static void AssertValidRange(Address from, Address to);
1947#else
1948 // Do nothing.
1949 inline static void AssertValidRange(Address from, Address to) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00001950#endif
1951
1952 // Returns the current capacity of the semi space.
1953 int Capacity() { return capacity_; }
1954
1955 // Returns the maximum capacity of the semi space.
1956 int MaximumCapacity() { return maximum_capacity_; }
1957
1958 // Returns the initial capacity of the semi space.
1959 int InitialCapacity() { return initial_capacity_; }
1960
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001961 SemiSpaceId id() { return id_; }
1962
1963 static void Swap(SemiSpace* from, SemiSpace* to);
1964
Steve Blocka7e24c12009-10-30 11:49:00 +00001965 private:
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001966 // Flips the semispace between being from-space and to-space.
1967 // Copies the flags into the masked positions on all pages in the space.
1968 void FlipPages(intptr_t flags, intptr_t flag_mask);
1969
1970 NewSpacePage* anchor() { return &anchor_; }
1971
Steve Blocka7e24c12009-10-30 11:49:00 +00001972 // The current and maximum capacity of the space.
1973 int capacity_;
1974 int maximum_capacity_;
1975 int initial_capacity_;
1976
1977 // The start address of the space.
1978 Address start_;
1979 // Used to govern object promotion during mark-compact collection.
1980 Address age_mark_;
1981
1982 // Masks and comparison values to test for containment in this semispace.
1983 uintptr_t address_mask_;
1984 uintptr_t object_mask_;
1985 uintptr_t object_expected_;
1986
1987 bool committed_;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001988 SemiSpaceId id_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001989
Ben Murdoch592a9fc2012-03-05 11:04:45 +00001990 NewSpacePage anchor_;
1991 NewSpacePage* current_page_;
1992
1993 friend class SemiSpaceIterator;
1994 friend class NewSpacePageIterator;
Steve Blocka7e24c12009-10-30 11:49:00 +00001995 public:
1996 TRACK_MEMORY("SemiSpace")
1997};
1998
1999
2000// A SemiSpaceIterator is an ObjectIterator that iterates over the active
2001// semispace of the heap's new space. It iterates over the objects in the
2002// semispace from a given start address (defaulting to the bottom of the
2003// semispace) to the top of the semispace. New objects allocated after the
2004// iterator is created are not iterated.
2005class SemiSpaceIterator : public ObjectIterator {
2006 public:
2007 // Create an iterator over the objects in the given space. If no start
2008 // address is given, the iterator starts from the bottom of the space. If
2009 // no size function is given, the iterator calls Object::Size().
Steve Blocka7e24c12009-10-30 11:49:00 +00002010
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002011 // Iterate over all of allocated to-space.
2012 explicit SemiSpaceIterator(NewSpace* space);
              2013 // Iterate over all of allocated to-space, with a custom size function.
2014 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
2015 // Iterate over part of allocated to-space, from start to the end
2016 // of allocation.
2017 SemiSpaceIterator(NewSpace* space, Address start);
2018 // Iterate from one address to another in the same semi-space.
2019 SemiSpaceIterator(Address from, Address to);
2020
2021 HeapObject* Next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00002022 if (current_ == limit_) return NULL;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002023 if (NewSpacePage::IsAtEnd(current_)) {
2024 NewSpacePage* page = NewSpacePage::FromLimit(current_);
2025 page = page->next_page();
2026 ASSERT(!page->is_anchor());
2027 current_ = page->area_start();
2028 if (current_ == limit_) return NULL;
2029 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002030
2031 HeapObject* object = HeapObject::FromAddress(current_);
2032 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
2033
2034 current_ += size;
2035 return object;
2036 }
2037
2038 // Implementation of the ObjectIterator functions.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002039 virtual HeapObject* next_object() { return Next(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002040
2041 private:
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002042 void Initialize(Address start,
2043 Address end,
Steve Blocka7e24c12009-10-30 11:49:00 +00002044 HeapObjectCallback size_func);
2045
Steve Blocka7e24c12009-10-30 11:49:00 +00002046 // The current iteration point.
2047 Address current_;
2048 // The end of iteration.
2049 Address limit_;
2050 // The callback function.
2051 HeapObjectCallback size_func_;
2052};
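// Illustrative usage sketch (hypothetical snippet, not part of the V8
// sources): walking every object currently allocated in to-space.
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
//     // ... examine 'object' ...
//   }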
2053
2054
2055// -----------------------------------------------------------------------------
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002056// A NewSpacePageIterator iterates over the pages in a semispace.
2057class NewSpacePageIterator BASE_EMBEDDED {
2058 public:
2059 // Make an iterator that runs over all pages in to-space.
2060 explicit inline NewSpacePageIterator(NewSpace* space);
2061
2062 // Make an iterator that runs over all pages in the given semispace,
2063 // even those not used in allocation.
2064 explicit inline NewSpacePageIterator(SemiSpace* space);
2065
2066 // Make iterator that iterates from the page containing start
2067 // to the page that contains limit in the same semispace.
2068 inline NewSpacePageIterator(Address start, Address limit);
2069
2070 inline bool has_next();
2071 inline NewSpacePage* next();
2072
2073 private:
2074 NewSpacePage* prev_page_; // Previous page returned.
2075 // Next page that will be returned. Cached here so that we can use this
2076 // iterator for operations that deallocate pages.
2077 NewSpacePage* next_page_;
2078 // Last page returned.
2079 NewSpacePage* last_page_;
2080};
2081
2082
2083// -----------------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +00002084// The young generation space.
2085//
2086// The new space consists of a contiguous pair of semispaces. It simply
2087// forwards most functions to the appropriate semispace.
2088
2089class NewSpace : public Space {
2090 public:
2091 // Constructor.
Steve Block44f0eee2011-05-26 01:26:41 +01002092 explicit NewSpace(Heap* heap)
2093 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002094 to_space_(heap, kToSpace),
2095 from_space_(heap, kFromSpace),
2096 reservation_(),
2097 inline_allocation_limit_step_(0) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002098
2099 // Sets up the new space using the given chunk.
Ben Murdochc7cc0282012-03-05 14:35:55 +00002100 bool SetUp(int reserved_semispace_size_, int max_semispace_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002101
2102 // Tears down the space. Heap memory was not allocated by the space, so it
2103 // is not deallocated here.
2104 void TearDown();
2105
2106 // True if the space has been set up but not torn down.
Ben Murdochc7cc0282012-03-05 14:35:55 +00002107 bool HasBeenSetUp() {
2108 return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00002109 }
2110
2111 // Flip the pair of spaces.
2112 void Flip();
2113
2114 // Grow the capacity of the semispaces. Assumes that they are not at
2115 // their maximum capacity.
2116 void Grow();
2117
2118 // Shrink the capacity of the semispaces.
2119 void Shrink();
2120
2121 // True if the address or object lies in the address range of either
2122 // semispace (not necessarily below the allocation pointer).
2123 bool Contains(Address a) {
2124 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
2125 == reinterpret_cast<uintptr_t>(start_);
2126 }
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002127
Steve Blocka7e24c12009-10-30 11:49:00 +00002128 bool Contains(Object* o) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002129 Address a = reinterpret_cast<Address>(o);
2130 return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002131 }
2132
2133 // Return the allocated bytes in the active semispace.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002134 virtual intptr_t Size() {
2135 return pages_used_ * NewSpacePage::kAreaSize +
2136 static_cast<int>(top() - to_space_.page_low());
2137 }
2138
Ben Murdochf87a2032010-10-22 12:50:53 +01002139 // The same, but returning an int. We have to have the one that returns
2140 // intptr_t because it is inherited, but if we know we are dealing with the
              2141 // new space, which can't get as big as the other spaces, then this is useful:
2142 int SizeAsInt() { return static_cast<int>(Size()); }
Steve Block3ce2e202009-11-05 08:53:23 +00002143
Steve Blocka7e24c12009-10-30 11:49:00 +00002144 // Return the current usable capacity of a semispace (allocatable area only).
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002145 intptr_t EffectiveCapacity() {
2146 SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
2147 return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
2148 }
2149
2150 // Return the current capacity of a semispace.
Ben Murdochf87a2032010-10-22 12:50:53 +01002151 intptr_t Capacity() {
Steve Blocka7e24c12009-10-30 11:49:00 +00002152 ASSERT(to_space_.Capacity() == from_space_.Capacity());
2153 return to_space_.Capacity();
2154 }
Steve Block3ce2e202009-11-05 08:53:23 +00002155
2156 // Return the total amount of memory committed for new space.
Ben Murdochf87a2032010-10-22 12:50:53 +01002157 intptr_t CommittedMemory() {
Steve Block3ce2e202009-11-05 08:53:23 +00002158 if (from_space_.is_committed()) return 2 * Capacity();
2159 return Capacity();
2160 }
2161
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002162 // Return the available bytes without growing.
2163 intptr_t Available() {
2164 return Capacity() - Size();
2165 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002166
2167 // Return the maximum capacity of a semispace.
2168 int MaximumCapacity() {
2169 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
2170 return to_space_.MaximumCapacity();
2171 }
2172
2173 // Returns the initial capacity of a semispace.
2174 int InitialCapacity() {
2175 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
2176 return to_space_.InitialCapacity();
2177 }
2178
2179 // Return the address of the allocation pointer in the active semispace.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002180 Address top() {
2181 ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
2182 return allocation_info_.top;
2183 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002184 // Return the address of the first object in the active semispace.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002185 Address bottom() { return to_space_.space_start(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002186
2187 // Get the age mark of the inactive semispace.
2188 Address age_mark() { return from_space_.age_mark(); }
2189 // Set the age mark in the active semispace.
2190 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2191
2192 // The start address of the space and a bit mask. Anding an address in the
2193 // new space with the mask will result in the start address.
2194 Address start() { return start_; }
2195 uintptr_t mask() { return address_mask_; }
2196
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002197 INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
2198 ASSERT(Contains(addr));
2199 ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
2200 IsAligned(OffsetFrom(addr) - 1, kPointerSize));
2201 return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
2202 }
2203
2204 INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
2205 return reinterpret_cast<Address>(index << kPointerSizeLog2);
2206 }
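  // Example (illustrative, assuming a 64-bit build where kPointerSize == 8):
  // an address 0x40 bytes past start_ maps to mark-bit index 0x40 >> 3 == 8,
  // and MarkbitIndexToAddress(8) maps back to the byte offset 0x40.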
2207
Steve Blocka7e24c12009-10-30 11:49:00 +00002208 // The allocation top and limit addresses.
2209 Address* allocation_top_address() { return &allocation_info_.top; }
2210 Address* allocation_limit_address() { return &allocation_info_.limit; }
2211
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002212 MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
Steve Blocka7e24c12009-10-30 11:49:00 +00002213
2214 // Reset the allocation pointer to the beginning of the active semispace.
2215 void ResetAllocationInfo();
Steve Blocka7e24c12009-10-30 11:49:00 +00002216
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002217 void LowerInlineAllocationLimit(intptr_t step) {
2218 inline_allocation_limit_step_ = step;
2219 if (step == 0) {
2220 allocation_info_.limit = to_space_.page_high();
2221 } else {
2222 allocation_info_.limit = Min(
2223 allocation_info_.top + inline_allocation_limit_step_,
2224 allocation_info_.limit);
2225 }
2226 top_on_previous_step_ = allocation_info_.top;
Steve Blocka7e24c12009-10-30 11:49:00 +00002227 }
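  // For example (illustrative): with a step of 1 KB the limit is pulled down
  // to top + 1 KB, so at most 1 KB of inlined bump-pointer allocation can
  // happen before the slow path is taken and an incremental marking step can
  // run; a step of 0 restores the full page end as the limit.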
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002228
2229 // Get the extent of the inactive semispace (for use as a marking stack,
2230 // or to zap it). Notice: space-addresses are not necessarily on the
2231 // same page, so FromSpaceStart() might be above FromSpaceEnd().
2232 Address FromSpacePageLow() { return from_space_.page_low(); }
2233 Address FromSpacePageHigh() { return from_space_.page_high(); }
2234 Address FromSpaceStart() { return from_space_.space_start(); }
2235 Address FromSpaceEnd() { return from_space_.space_end(); }
2236
2237 // Get the extent of the active semispace's pages' memory.
2238 Address ToSpaceStart() { return to_space_.space_start(); }
2239 Address ToSpaceEnd() { return to_space_.space_end(); }
2240
2241 inline bool ToSpaceContains(Address address) {
2242 return to_space_.Contains(address);
2243 }
2244 inline bool FromSpaceContains(Address address) {
2245 return from_space_.Contains(address);
Steve Blocka7e24c12009-10-30 11:49:00 +00002246 }
2247
2248 // True if the object is a heap object in the address range of the
2249 // respective semispace (not necessarily below the allocation pointer of the
2250 // semispace).
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002251 inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
2252 inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002253
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002254 // Try to switch the active semispace to a new, empty, page.
2255 // Returns false if this isn't possible or reasonable (i.e., there
2256 // are no pages, or the current page is already empty), or true
2257 // if successful.
2258 bool AddFreshPage();
Steve Blocka7e24c12009-10-30 11:49:00 +00002259
Leon Clarkee46be812010-01-19 14:06:41 +00002260 virtual bool ReserveSpace(int bytes);
2261
Ben Murdochb0fe1622011-05-05 13:52:32 +01002262 // Resizes a sequential string which must be the most recent thing that was
2263 // allocated in new space.
2264 template <typename StringType>
2265 inline void ShrinkStringAtAllocationBoundary(String* string, int len);
2266
Steve Blocka7e24c12009-10-30 11:49:00 +00002267#ifdef DEBUG
2268 // Verify the active semispace.
2269 virtual void Verify();
2270 // Print the active semispace.
2271 virtual void Print() { to_space_.Print(); }
2272#endif
2273
Steve Blocka7e24c12009-10-30 11:49:00 +00002274 // Iterates the active semispace to collect statistics.
2275 void CollectStatistics();
2276 // Reports previously collected statistics of the active semispace.
2277 void ReportStatistics();
2278 // Clears previously collected statistics.
2279 void ClearHistograms();
2280
2281 // Record the allocation or promotion of a heap object. Note that we don't
2282 // record every single allocation, but only those that happen in the
2283 // to space during a scavenge GC.
2284 void RecordAllocation(HeapObject* obj);
2285 void RecordPromotion(HeapObject* obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002286
              2287 // Return whether the operation succeeded.
2288 bool CommitFromSpaceIfNeeded() {
2289 if (from_space_.is_committed()) return true;
2290 return from_space_.Commit();
2291 }
2292
2293 bool UncommitFromSpace() {
2294 if (!from_space_.is_committed()) return true;
2295 return from_space_.Uncommit();
2296 }
2297
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002298 inline intptr_t inline_allocation_limit_step() {
2299 return inline_allocation_limit_step_;
2300 }
2301
2302 SemiSpace* active_space() { return &to_space_; }
2303
Steve Blocka7e24c12009-10-30 11:49:00 +00002304 private:
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002305 // Update allocation info to match the current to-space page.
2306 void UpdateAllocationInfo();
2307
2308 Address chunk_base_;
2309 uintptr_t chunk_size_;
2310
Steve Blocka7e24c12009-10-30 11:49:00 +00002311 // The semispaces.
2312 SemiSpace to_space_;
2313 SemiSpace from_space_;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002314 VirtualMemory reservation_;
2315 int pages_used_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002316
2317 // Start address and bit mask for containment testing.
2318 Address start_;
2319 uintptr_t address_mask_;
2320 uintptr_t object_mask_;
2321 uintptr_t object_expected_;
2322
2323 // Allocation pointer and limit for normal allocation and allocation during
2324 // mark-compact collection.
2325 AllocationInfo allocation_info_;
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002326
2327 // When incremental marking is active we will set allocation_info_.limit
2328 // to be lower than actual limit and then will gradually increase it
2329 // in steps to guarantee that we do incremental marking steps even
2330 // when all allocation is performed from inlined generated code.
2331 intptr_t inline_allocation_limit_step_;
2332
2333 Address top_on_previous_step_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002334
Steve Blocka7e24c12009-10-30 11:49:00 +00002335 HistogramInfo* allocated_histogram_;
2336 HistogramInfo* promoted_histogram_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002337
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002338 MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002339
2340 friend class SemiSpaceIterator;
2341
2342 public:
2343 TRACK_MEMORY("NewSpace")
2344};
2345
2346
2347// -----------------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +00002348// Old object space (excluding map objects)
2349
2350class OldSpace : public PagedSpace {
2351 public:
2352 // Creates an old space object with a given maximum capacity.
2353 // The constructor does not allocate pages from OS.
Steve Block44f0eee2011-05-26 01:26:41 +01002354 OldSpace(Heap* heap,
2355 intptr_t max_capacity,
2356 AllocationSpace id,
2357 Executability executable)
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002358 : PagedSpace(heap, max_capacity, id, executable) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002359 page_extra_ = 0;
2360 }
2361
Steve Block6ded16b2010-05-10 14:33:55 +01002362 // The limit of allocation for a page in this space.
2363 virtual Address PageAllocationLimit(Page* page) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002364 return page->area_end();
Steve Blocka7e24c12009-10-30 11:49:00 +00002365 }
2366
Steve Blocka7e24c12009-10-30 11:49:00 +00002367 public:
2368 TRACK_MEMORY("OldSpace")
2369};
2370
2371
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002372// For contiguous spaces, top should be in the space (or at the end) and limit
2373// should be the end of the space.
2374#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
2375 SLOW_ASSERT((space).page_low() <= (info).top \
2376 && (info).top <= (space).page_high() \
2377 && (info).limit <= (space).page_high())
2378
2379
Steve Blocka7e24c12009-10-30 11:49:00 +00002380// -----------------------------------------------------------------------------
2381// Old space for objects of a fixed size
2382
2383class FixedSpace : public PagedSpace {
2384 public:
Steve Block44f0eee2011-05-26 01:26:41 +01002385 FixedSpace(Heap* heap,
2386 intptr_t max_capacity,
Steve Blocka7e24c12009-10-30 11:49:00 +00002387 AllocationSpace id,
2388 int object_size_in_bytes,
2389 const char* name)
Steve Block44f0eee2011-05-26 01:26:41 +01002390 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
Steve Blocka7e24c12009-10-30 11:49:00 +00002391 object_size_in_bytes_(object_size_in_bytes),
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002392 name_(name) {
2393 page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
Steve Blocka7e24c12009-10-30 11:49:00 +00002394 }
2395
Steve Block6ded16b2010-05-10 14:33:55 +01002396 // The limit of allocation for a page in this space.
2397 virtual Address PageAllocationLimit(Page* page) {
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002398 return page->area_end() - page_extra_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002399 }
2400
2401 int object_size_in_bytes() { return object_size_in_bytes_; }
2402
Steve Blocka7e24c12009-10-30 11:49:00 +00002403 // Prepares for a mark-compact GC.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002404 virtual void PrepareForMarkCompact();
Steve Blocka7e24c12009-10-30 11:49:00 +00002405
2406 protected:
Leon Clarkee46be812010-01-19 14:06:41 +00002407 void ResetFreeList() {
2408 free_list_.Reset();
2409 }
2410
Steve Blocka7e24c12009-10-30 11:49:00 +00002411 private:
2412 // The size of objects in this space.
2413 int object_size_in_bytes_;
2414
2415 // The name of this space.
2416 const char* name_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002417};
2418
2419
2420// -----------------------------------------------------------------------------
2421// Old space for all map objects
2422
2423class MapSpace : public FixedSpace {
2424 public:
2425 // Creates a map space object with a maximum capacity.
Steve Block44f0eee2011-05-26 01:26:41 +01002426 MapSpace(Heap* heap,
2427 intptr_t max_capacity,
2428 int max_map_space_pages,
2429 AllocationSpace id)
2430 : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
Leon Clarked91b9f72010-01-27 17:25:45 +00002431 max_map_space_pages_(max_map_space_pages) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002432 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002433
Steve Blocka7e24c12009-10-30 11:49:00 +00002434 // Given an index, returns the page address.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002435 // TODO(1600): this limit is artificial, just to keep the code compilable
2436 static const int kMaxMapPageIndex = 1 << 16;
Steve Blocka7e24c12009-10-30 11:49:00 +00002437
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002438 virtual int RoundSizeDownToObjectAlignment(int size) {
2439 if (IsPowerOf2(Map::kSize)) {
2440 return RoundDown(size, Map::kSize);
2441 } else {
2442 return (size / Map::kSize) * Map::kSize;
Leon Clarkee46be812010-01-19 14:06:41 +00002443 }
Leon Clarkee46be812010-01-19 14:06:41 +00002444 }
2445
Steve Blocka7e24c12009-10-30 11:49:00 +00002446 protected:
2447#ifdef DEBUG
2448 virtual void VerifyObject(HeapObject* obj);
2449#endif
2450
2451 private:
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002452 static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
Leon Clarkee46be812010-01-19 14:06:41 +00002453
2454 // Do map space compaction if there is a page gap.
Leon Clarked91b9f72010-01-27 17:25:45 +00002455 int CompactionThreshold() {
2456 return kMapsPerPage * (max_map_space_pages_ - 1);
2457 }
2458
2459 const int max_map_space_pages_;
Leon Clarkee46be812010-01-19 14:06:41 +00002460
Steve Blocka7e24c12009-10-30 11:49:00 +00002461 public:
2462 TRACK_MEMORY("MapSpace")
2463};
2464
2465
2466// -----------------------------------------------------------------------------
2467// Old space for all global object property cell objects
2468
2469class CellSpace : public FixedSpace {
2470 public:
2471 // Creates a property cell space object with a maximum capacity.
Steve Block44f0eee2011-05-26 01:26:41 +01002472 CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2473 : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
2474 {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002475
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002476 virtual int RoundSizeDownToObjectAlignment(int size) {
2477 if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
2478 return RoundDown(size, JSGlobalPropertyCell::kSize);
2479 } else {
2480 return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
2481 }
2482 }
2483
Steve Blocka7e24c12009-10-30 11:49:00 +00002484 protected:
2485#ifdef DEBUG
2486 virtual void VerifyObject(HeapObject* obj);
2487#endif
2488
2489 public:
2490 TRACK_MEMORY("CellSpace")
2491};
2492
2493
2494// -----------------------------------------------------------------------------
2495// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
              2496 // the large object space. A large object is allocated from the OS heap
              2497 // with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
              2498 // A large object always starts at Page::kObjectStartOffset into a page.
2499// Large objects do not move during garbage collections.
2500
Steve Blocka7e24c12009-10-30 11:49:00 +00002501class LargeObjectSpace : public Space {
2502 public:
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002503 LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
Steve Blocka7e24c12009-10-30 11:49:00 +00002504 virtual ~LargeObjectSpace() {}
2505
2506 // Initializes internal data structures.
Ben Murdochc7cc0282012-03-05 14:35:55 +00002507 bool SetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00002508
2509 // Releases internal resources, frees objects in this space.
2510 void TearDown();
2511
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002512 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2513 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2514 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2515 }
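  // For example (illustrative): a reservation of exactly
  // Page::kPageSize + Page::kObjectStartOffset bytes leaves no room for an
  // object, while every byte reserved beyond that is usable object space.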
2516
2517 // Shared implementation of AllocateRaw, AllocateRawCode and
2518 // AllocateRawFixedArray.
2519 MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
2520 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002521
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002522 // Available bytes for objects in this space.
Steve Block44f0eee2011-05-26 01:26:41 +01002523 inline intptr_t Available();
Steve Blocka7e24c12009-10-30 11:49:00 +00002524
Ben Murdochf87a2032010-10-22 12:50:53 +01002525 virtual intptr_t Size() {
Steve Blocka7e24c12009-10-30 11:49:00 +00002526 return size_;
2527 }
2528
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002529 virtual intptr_t SizeOfObjects() {
2530 return objects_size_;
2531 }
2532
Steve Blocka7e24c12009-10-30 11:49:00 +00002533 int PageCount() {
2534 return page_count_;
2535 }
2536
2537 // Finds an object for a given address, returns Failure::Exception()
2538 // if it is not found. The function iterates through all objects in this
2539 // space, may be slow.
John Reck59135872010-11-02 12:39:01 -07002540 MaybeObject* FindObject(Address a);
Steve Blocka7e24c12009-10-30 11:49:00 +00002541
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002542 // Finds a large object page containing the given pc, returns NULL
2543 // if such a page doesn't exist.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002544 LargePage* FindPageContainingPc(Address pc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002545
2546 // Frees unmarked objects.
2547 void FreeUnmarkedObjects();
2548
2549 // Checks whether a heap object is in this space; O(1).
2550 bool Contains(HeapObject* obj);
2551
2552 // Checks whether the space is empty.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002553 bool IsEmpty() { return first_page_ == NULL; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002554
Leon Clarkee46be812010-01-19 14:06:41 +00002555 // See the comments for ReserveSpace in the Space class. This has to be
2556 // called after ReserveSpace has been called on the paged spaces, since they
2557 // may use some memory, leaving less for large objects.
2558 virtual bool ReserveSpace(int bytes);
2559
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002560 LargePage* first_page() { return first_page_; }
2561
Steve Blocka7e24c12009-10-30 11:49:00 +00002562#ifdef DEBUG
2563 virtual void Verify();
2564 virtual void Print();
2565 void ReportStatistics();
2566 void CollectCodeStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002567#endif
2568 // Checks whether an address is in the object area in this space. It
2569 // iterates all objects in the space. May be slow.
2570 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2571
2572 private:
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002573 intptr_t max_capacity_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002574 // The head of the linked list of large object chunks.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002575 LargePage* first_page_;
Ben Murdochf87a2032010-10-22 12:50:53 +01002576 intptr_t size_; // allocated bytes
Steve Blocka7e24c12009-10-30 11:49:00 +00002577 int page_count_; // number of chunks
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002578 intptr_t objects_size_; // size of objects
Steve Blocka7e24c12009-10-30 11:49:00 +00002579
Steve Blocka7e24c12009-10-30 11:49:00 +00002580 friend class LargeObjectIterator;
2581
2582 public:
2583 TRACK_MEMORY("LargeObjectSpace")
2584};
2585
2586
2587class LargeObjectIterator: public ObjectIterator {
2588 public:
2589 explicit LargeObjectIterator(LargeObjectSpace* space);
2590 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2591
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002592 HeapObject* Next();
Steve Blocka7e24c12009-10-30 11:49:00 +00002593
2594 // implementation of ObjectIterator.
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002595 virtual HeapObject* next_object() { return Next(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002596
2597 private:
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002598 LargePage* current_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002599 HeapObjectCallback size_func_;
2600};
2601
2602
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002603// Iterates over the chunks (pages and large object pages) that can contain
2604// pointers to new space.
2605class PointerChunkIterator BASE_EMBEDDED {
2606 public:
2607 inline explicit PointerChunkIterator(Heap* heap);
2608
2609 // Return NULL when the iterator is done.
2610 MemoryChunk* next() {
2611 switch (state_) {
2612 case kOldPointerState: {
2613 if (old_pointer_iterator_.has_next()) {
2614 return old_pointer_iterator_.next();
2615 }
2616 state_ = kMapState;
2617 // Fall through.
2618 }
2619 case kMapState: {
2620 if (map_iterator_.has_next()) {
2621 return map_iterator_.next();
2622 }
2623 state_ = kLargeObjectState;
2624 // Fall through.
2625 }
2626 case kLargeObjectState: {
2627 HeapObject* heap_object;
2628 do {
2629 heap_object = lo_iterator_.Next();
2630 if (heap_object == NULL) {
2631 state_ = kFinishedState;
2632 return NULL;
2633 }
2634 // Fixed arrays are the only pointer-containing objects in large
2635 // object space.
2636 } while (!heap_object->IsFixedArray());
2637 MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
2638 return answer;
2639 }
2640 case kFinishedState:
2641 return NULL;
2642 default:
2643 break;
2644 }
2645 UNREACHABLE();
2646 return NULL;
2647 }
2648
2649
2650 private:
2651 enum State {
2652 kOldPointerState,
2653 kMapState,
2654 kLargeObjectState,
2655 kFinishedState
2656 };
2657 State state_;
2658 PageIterator old_pointer_iterator_;
2659 PageIterator map_iterator_;
2660 LargeObjectIterator lo_iterator_;
2661};
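// Illustrative usage sketch (hypothetical snippet, not part of the V8
// sources): visiting every chunk that may hold pointers into new space.
//
//   PointerChunkIterator it(heap);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != NULL) {
//     // ... scan 'chunk' ...
//   }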
2662
2663
Steve Block44f0eee2011-05-26 01:26:41 +01002664#ifdef DEBUG
2665struct CommentStatistic {
2666 const char* comment;
2667 int size;
2668 int count;
2669 void Clear() {
2670 comment = NULL;
2671 size = 0;
2672 count = 0;
2673 }
2674 // Must be small, since an iteration is used for lookup.
2675 static const int kMaxComments = 64;
2676};
2677#endif
2678
2679
Steve Blocka7e24c12009-10-30 11:49:00 +00002680} } // namespace v8::internal
2681
2682#endif // V8_SPACES_H_