// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
#include "src/hashmap.h"
#include "src/list.h"
#include "src/log.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of
// intergenerational references. See heap/store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old pointer space or large object
// space it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old pointer and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However, we
// still require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.
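//
// As an illustrative sketch (not code from this file), bump-pointer allocation
// into the special garbage section advances the space's top toward its limit;
// the variable names below are made up, but the accessors match the
// AllocationInfo class declared later in this header:
//
//   Address top = allocation_info.top();
//   if (top + size_in_bytes <= allocation_info.limit()) {
//     allocation_info.set_top(top + size_in_bytes);  // object occupies [top, top + size)
//   } else {
//     // Slow path: refill the linear allocation area.
//   }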

// Some assertion macros used in the debugging mode.

#define DCHECK_PAGE_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))

#define DCHECK_PAGE_OFFSET(offset) \
  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))

#define DCHECK_MAP_PAGE_INDEX(index) \
  DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;
class Space;
class FreeList;
class MemoryChunk;

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask, bool data_only)
      : cell_(cell), mask_(mask), data_only_(data_only) {}

  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  inline bool data_only() { return data_only_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1, data_only_);
    } else {
      return MarkBit(cell_, new_mask, data_only_);
    }
  }

 private:
  CellType* cell_;
  CellType mask_;
  // This boolean indicates that the object is in a data-only space with no
  // pointers. This enables some optimizations when marking.
  // It is expected that this field is inlined and turned into control flow
  // at the place where the MarkBit object is created.
  bool data_only_;
};
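
// Illustrative sketch (not part of the class interface) of how a MarkBit's
// accessors compose; `bit` stands for any MarkBit handed out by a Bitmap:
//
//   if (!bit.Get()) bit.Set();    // mark the word this bit covers
//   MarkBit next = bit.Next();    // bit for the following markbit index,
//   next.Clear();                 // possibly in the next 32-bit cell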


// Bitmap is a sequence of cells, each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() { return CellsForLength(kLength); }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) { return reinterpret_cast<Address>(this); }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask, data_only);
  }

  static inline void Clear(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) {
        return false;
      }
    }
    return true;
  }
};


class SkipList;
class SlotsBuffer;

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
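//
// For example, mirroring the Page factory methods further down in this file,
// the chunk containing an arbitrary interior address can be recovered by
// masking off the alignment bits:
//   MemoryChunk* chunk = MemoryChunk::FromAddress(addr);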
class MemoryChunk {
 public:
  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }
  static const MemoryChunk* FromAddress(const byte* a) {
    return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
                                                ~kAlignmentMask);
  }

  // Only works for addresses in pointer spaces, not data or code spaces.
  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  Address address() { return reinterpret_cast<Address>(this); }

  bool is_valid() { return address() != NULL; }

  MemoryChunk* next_chunk() const {
    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
  }

  MemoryChunk* prev_chunk() const {
    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
  }

  void set_next_chunk(MemoryChunk* next) {
    base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
  }

  void set_prev_chunk(MemoryChunk* prev) {
    base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
  }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
        kPageHeaderTag) {
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                      kPageHeaderTag);
    } else {
      return NULL;
    }
  }

  void set_owner(Space* space) {
    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
           kPageHeaderTag);
  }

  base::VirtualMemory* reserved_memory() { return &reservation_; }

  void InitializeReservedMemory() { reservation_.Reset(); }

  void set_reserved_memory(base::VirtualMemory* reservation) {
    DCHECK_NOT_NULL(reservation);
    reservation_.TakeControl(reservation);
  }

  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
  void initialize_scan_on_scavenge(bool scan) {
    if (scan) {
      SetFlag(SCAN_ON_SCAVENGE);
    } else {
      ClearFlag(SCAN_ON_SCAVENGE);
    }
  }
  inline void set_scan_on_scavenge(bool scan);

  int store_buffer_counter() { return store_buffer_counter_; }
  void set_store_buffer_counter(int counter) {
    store_buffer_counter_ = counter;
  }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether addr can be a limit of addresses in this page. It's a
  // limit if it's in the page, or if it's just after the last byte of the
  // page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  // Every n write barrier invocations we go to runtime even though
  // we could have handled it in generated code. This lets us check
  // whether we have hit the limit and should do some more marking.
  static const int kWriteBarrierCounterGranularity = 500;

  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    ABOUT_TO_BE_FREED,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    SCAN_ON_SCAVENGE,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    CONTAINS_ONLY_DATA,
    EVACUATION_CANDIDATE,
    RESCAN_ON_EVACUATION,

    // WAS_SWEPT indicates that marking bits have been cleared by the sweeper;
    // otherwise marking bits are still intact.
    WAS_SWEPT,

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black,
    // and a white-to-grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };


  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
      (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);


  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }

  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }

  void SetFlagTo(int flag, bool value) {
    if (value) {
      SetFlag(flag);
    } else {
      ClearFlag(flag);
    }
  }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask are set to
  // the value in "flags"; the rest retain the current value in flags_.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }
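
  // An illustrative (non-normative) use of the mask form above: moving a
  // new-space page from from-space to to-space in a single call touches only
  // the two semispace flags and leaves every other flag bit unchanged:
  //   chunk->SetFlags(1 << IN_TO_SPACE,
  //                   (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE));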

  // SWEEPING_DONE - The page state when sweeping is complete or sweeping must
  //   not be performed on that page.
  // SWEEPING_FINALIZE - A sweeper thread is done sweeping this page and will
  //   not touch the page memory anymore.
  // SWEEPING_IN_PROGRESS - This page is currently swept by a sweeper thread.
  // SWEEPING_PENDING - This page is ready for parallel sweeping.
  enum ParallelSweepingState {
    SWEEPING_DONE,
    SWEEPING_FINALIZE,
    SWEEPING_IN_PROGRESS,
    SWEEPING_PENDING
  };

  ParallelSweepingState parallel_sweeping() {
    return static_cast<ParallelSweepingState>(
        base::Acquire_Load(&parallel_sweeping_));
  }

  void set_parallel_sweeping(ParallelSweepingState state) {
    base::Release_Store(&parallel_sweeping_, state);
  }

  bool TryParallelSweeping() {
    return base::Acquire_CompareAndSwap(&parallel_sweeping_, SWEEPING_PENDING,
                                        SWEEPING_IN_PROGRESS) ==
           SWEEPING_PENDING;
  }

  bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }

  // Manage live byte count (count of bytes known to be live,
  // because they are marked black).
  void ResetLiveBytes() {
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
             live_byte_count_);
    }
    live_byte_count_ = 0;
  }
  void IncrementLiveBytes(int by) {
    if (FLAG_gc_verbose) {
      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
             live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
             live_byte_count_ + by);
    }
    live_byte_count_ += by;
    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
  }
  int LiveBytes() {
    DCHECK(static_cast<unsigned>(live_byte_count_) <= size_);
    return live_byte_count_;
  }

  int write_barrier_counter() {
    return static_cast<int>(write_barrier_counter_);
  }

  void set_write_barrier_counter(int counter) {
    write_barrier_counter_ = counter;
  }

  int progress_bar() {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    return progress_bar_;
  }

  void set_progress_bar(int progress_bar) {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
  }

  bool IsLeftOfProgressBar(Object** slot) {
    Address slot_address = reinterpret_cast<Address>(slot);
    DCHECK(slot_address > this->address());
    return (slot_address - (this->address() + kObjectStartOffset)) <
           progress_bar();
  }

  static void IncrementLiveBytesFromGC(Address address, int by) {
    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
  }

  static void IncrementLiveBytesFromMutator(Address address, int by);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize +
      kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize;

  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;

  static const size_t kWriteBarrierCounterOffset =
      kSlotsBufferOffset + kPointerSize + kPointerSize;

  static const size_t kHeaderSize =
      kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize +
      kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset =
      kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);

  size_t size() const { return size_; }

  void set_size(size_t size) { size_ = size; }

  void SetArea(Address area_start, Address area_end) {
    area_start_ = area_start;
    area_end_ = area_end;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool ContainsOnlyData() { return IsFlagSet(CONTAINS_ONLY_DATA); }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }

  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

  // ---------------------------------------------------------------------
  // Markbits support

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  void PrintMarkbits() { markbits()->Print(); }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
    const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;

    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }
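
  // Illustrative sketch (not a declared helper): combining the accessors
  // above, the mark bit for an object living on this chunk can be reached as
  //   MarkBit mark_bit = markbits()->MarkBitFromIndex(
  //       AddressToMarkbitIndex(object_address));
  // where object_address is any pointer-aligned address inside the chunk body.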

  void InsertAfter(MemoryChunk* other);
  void Unlink();

  inline Heap* heap() const { return heap_; }

  static const int kFlagsOffset = kPointerSize;

  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  inline SkipList* skip_list() { return skip_list_; }

  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

  inline SlotsBuffer* slots_buffer() { return slots_buffer_; }

  inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }

  void MarkEvacuationCandidate() {
    DCHECK(slots_buffer_ == NULL);
    SetFlag(EVACUATION_CANDIDATE);
  }

  void ClearEvacuationCandidate() {
    DCHECK(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() { return static_cast<int>(area_end() - area_start()); }
  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory() { return high_water_mark_; }

  static inline void UpdateHighWaterMark(Address mark);

 protected:
  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  base::VirtualMemory reservation_;
  // The identity of the owning space. This is tagged as a failure pointer,
  // but no failure can be in an object, so this can be distinguished from any
  // entry in a fixed array.
  Address owner_;
  Heap* heap_;
  // Used by the store buffer to keep track of which pages to mark scan-on-
  // scavenge.
  int store_buffer_counter_;
  // Count of bytes marked black on page.
  int live_byte_count_;
  SlotsBuffer* slots_buffer_;
  SkipList* skip_list_;
  intptr_t write_barrier_counter_;
  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;
  // Assuming the initial allocation on a page is sequential, counts the
  // highest number of bytes ever allocated on the page.
  int high_water_mark_;

  base::AtomicWord parallel_sweeping_;

  // PagedSpace free-list statistics.
  intptr_t available_in_small_free_list_;
  intptr_t available_in_medium_free_list_;
  intptr_t available_in_large_free_list_;
  intptr_t available_in_huge_free_list_;
  intptr_t non_available_small_blocks_;

  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner);

 private:
  // next_chunk_ holds a pointer of type MemoryChunk.
  base::AtomicWord next_chunk_;
  // prev_chunk_ holds a pointer of type MemoryChunk.
  base::AtomicWord prev_chunk_;

  friend class MemoryAllocator;
};


STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);


// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[.
  // This only works if the object is in fact in a page. See also
  // MemoryChunk::FromAddress() and FromAnyPointerAddress().
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    return p;
  }

  // Returns the next page in the chain of pages owned by a space.
  inline Page* next_page();
  inline Page* prev_page();
  inline void set_next_page(Page* page);
  inline void set_prev_page(Page* page);

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset into this page.
  Address OffsetToAddress(int offset) {
    DCHECK_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Maximum object size that fits in a page. Objects larger than that size
  // are allocated in large object space and are never moved in memory. This
  // also applies to new space allocation, since objects are never migrated
  // from new space to large object space. Takes double alignment into account.
  static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  inline void ClearGCFields();

  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, PagedSpace* owner);

  void InitializeAsAnchor(PagedSpace* owner);

  bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
  void SetWasSwept() { SetFlag(WAS_SWEPT); }
  void ClearWasSwept() { ClearFlag(WAS_SWEPT); }

  void ResetFreeListStatistics();

#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
  type name() { return name##_; }                 \
  void set_##name(type name) { name##_ = name; }  \
  void add_##name(type name) { name##_ += name; }

  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)

#undef FRAGMENTATION_STATS_ACCESSORS

#ifdef DEBUG
  void Print();
#endif  // DEBUG

  friend class MemoryAllocator;
};


STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);


class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

  inline LargePage* next_page() const {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) { set_next_chunk(page); }

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};

STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);

// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap), id_(id), executable_(executable) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4 GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
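//
// An illustrative sketch of the intended call sequence (local variable names
// are made up; the methods are the ones declared below):
//   CodeRange code_range(isolate);
//   if (code_range.SetUp(requested_size)) {
//     size_t allocated = 0;
//     Address base = code_range.AllocateRawMemory(reserve_size, commit_size,
//                                                 &allocated);
//     // ... commit, use, and eventually release the memory ...
//     code_range.FreeRawMemory(base, allocated);
//   }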
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() { TearDown(); }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  bool valid() { return code_range_ != NULL; }
  Address start() {
    DCHECK(valid());
    return static_cast<Address>(code_range_->address());
  }
  bool contains(Address address) {
    if (!valid()) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
                                            const size_t commit_size,
                                            size_t* allocated);
  bool CommitRawMemory(Address start, size_t length);
  bool UncommitRawMemory(Address start, size_t length);
  void FreeRawMemory(Address buf, size_t length);

 private:
  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  base::VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, returns false.
  bool GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};


class SkipList {
 public:
  SkipList() { Clear(); }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == NULL) {
      list = new SkipList();
      page->set_skip_list(list);
    }

    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};
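
// Illustrative usage sketch (names made up, not a declared helper): callers
// record objects via Update() as they are placed on a page, and a scanner can
// later ask for a starting point at or before an arbitrary address:
//   SkipList::Update(object_address, object_size);        // on allocation
//   Address start = page->skip_list()->StartFor(addr);    // when scanning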


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and
// large pages for large object space.
//
// Each space has to manage its own pages.
//
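// An illustrative sketch of how a paged space obtains a page (the
// isolate->memory_allocator() accessor and the owner arguments are
// assumptions made for this example; the real call sites live in the space
// implementations):
//   Page* p = isolate->memory_allocator()->AllocatePage(
//       size_in_bytes, owner_space, owner_space->executable());
//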
Steve Block44f0eee2011-05-26 01:26:41 +0100997class MemoryAllocator {
Steve Blocka7e24c12009-10-30 11:49:00 +0000998 public:
Ben Murdoch69a99ed2011-11-30 16:03:39 +0000999 explicit MemoryAllocator(Isolate* isolate);
1000
Steve Blocka7e24c12009-10-30 11:49:00 +00001001 // Initializes its internal bookkeeping structures.
Russell Brenner90bac252010-11-18 13:33:46 -08001002 // Max capacity of the total space and executable memory limit.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001003 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00001004
Steve Block44f0eee2011-05-26 01:26:41 +01001005 void TearDown();
Steve Blocka7e24c12009-10-30 11:49:00 +00001006
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001007 Page* AllocatePage(intptr_t size, PagedSpace* owner,
1008 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00001009
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001010 LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
1011 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00001012
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001013 void Free(MemoryChunk* chunk);
Steve Blocka7e24c12009-10-30 11:49:00 +00001014
1015 // Returns the maximum available bytes of heaps.
Steve Block44f0eee2011-05-26 01:26:41 +01001016 intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001017
1018 // Returns allocated spaces in bytes.
Steve Block44f0eee2011-05-26 01:26:41 +01001019 intptr_t Size() { return size_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001020
Russell Brenner90bac252010-11-18 13:33:46 -08001021 // Returns the maximum available executable bytes of heaps.
Steve Block44f0eee2011-05-26 01:26:41 +01001022 intptr_t AvailableExecutable() {
Russell Brenner90bac252010-11-18 13:33:46 -08001023 if (capacity_executable_ < size_executable_) return 0;
1024 return capacity_executable_ - size_executable_;
1025 }
1026
Steve Block791712a2010-08-27 10:21:07 +01001027 // Returns allocated executable spaces in bytes.
Steve Block44f0eee2011-05-26 01:26:41 +01001028 intptr_t SizeExecutable() { return size_executable_; }
Steve Block791712a2010-08-27 10:21:07 +01001029
Steve Blocka7e24c12009-10-30 11:49:00 +00001030 // Returns maximum available bytes that the old space can have.
Steve Block44f0eee2011-05-26 01:26:41 +01001031 intptr_t MaxAvailable() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001032 return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
1033 }
1034
1035 // Returns an indication of whether a pointer is in a space that has
1036 // been allocated by this MemoryAllocator.
1037 V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
1038 return address < lowest_ever_allocated_ ||
1039 address >= highest_ever_allocated_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001040 }
1041
Steve Blocka7e24c12009-10-30 11:49:00 +00001042#ifdef DEBUG
1043 // Reports statistic info of the space.
Steve Block44f0eee2011-05-26 01:26:41 +01001044 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00001045#endif
1046
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001047 // Returns a MemoryChunk in which the memory region from commit_area_size to
1048 // reserve_area_size of the chunk area is reserved but not committed, it
1049 // could be committed later by calling MemoryChunk::CommitArea.
1050 MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
1051 intptr_t commit_area_size,
1052 Executability executable, Space* space);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001053
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001054 Address ReserveAlignedMemory(size_t requested, size_t alignment,
1055 base::VirtualMemory* controller);
1056 Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1057 size_t alignment, Executability executable,
1058 base::VirtualMemory* controller);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001059
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001060 bool CommitMemory(Address addr, size_t size, Executability executable);
1061
1062 void FreeMemory(base::VirtualMemory* reservation, Executability executable);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001063 void FreeMemory(Address addr, size_t size, Executability executable);
1064
1065 // Commit a contiguous block of memory from the initial chunk. Assumes that
1066 // the address is not NULL, the size is greater than zero, and that the
1067 // block is contained in the initial chunk. Returns true if it succeeded
1068 // and false otherwise.
1069 bool CommitBlock(Address start, size_t size, Executability executable);
1070
1071 // Uncommit a contiguous block of memory [start..(start+size)[.
1072 // start is not NULL, the size is greater than zero, and the
1073 // block is contained in the initial chunk. Returns true if it succeeded
1074 // and false otherwise.
1075 bool UncommitBlock(Address start, size_t size);
1076
1077 // Zaps a contiguous block of memory [start..(start+size)[ thus
1078 // filling it up with a recognizable non-NULL bit pattern.
1079 void ZapBlock(Address start, size_t size);
1080
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001081 void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001082 size_t size);
1083
1084 void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001085 ObjectSpace space, AllocationAction action);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001086
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001087 void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001088
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001089 bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001090
1091 static int CodePageGuardStartOffset();
1092
1093 static int CodePageGuardSize();
1094
1095 static int CodePageAreaStartOffset();
1096
1097 static int CodePageAreaEndOffset();
1098
1099 static int CodePageAreaSize() {
1100 return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1101 }
1102
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001103 MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
1104 Address start, size_t commit_size,
1105 size_t reserved_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001106
1107 private:
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001108 Isolate* isolate_;
1109
Steve Blocka7e24c12009-10-30 11:49:00 +00001110 // Maximum space size in bytes.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001111 size_t capacity_;
Russell Brenner90bac252010-11-18 13:33:46 -08001112 // Maximum subset of capacity_ that can be executable
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001113 size_t capacity_executable_;
Ben Murdochb0fe1622011-05-05 13:52:32 +01001114
Steve Blocka7e24c12009-10-30 11:49:00 +00001115 // Allocated space size in bytes.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001116 size_t size_;
Steve Block791712a2010-08-27 10:21:07 +01001117 // Allocated executable space size in bytes.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001118 size_t size_executable_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001119
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001120 // We keep the lowest and highest addresses allocated as a quick way
1121 // of determining that pointers are outside the heap. The estimate is
1122 // conservative, i.e. not all addrsses in 'allocated' space are allocated
1123 // to our heap. The range is [lowest, highest[, inclusive on the low end
1124 // and exclusive on the high end.
1125 void* lowest_ever_allocated_;
1126 void* highest_ever_allocated_;
1127
Iain Merrick9ac36c92010-09-13 15:29:50 +01001128 struct MemoryAllocationCallbackRegistration {
1129 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
1130 ObjectSpace space,
1131 AllocationAction action)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001132 : callback(callback), space(space), action(action) {}
Iain Merrick9ac36c92010-09-13 15:29:50 +01001133 MemoryAllocationCallback callback;
1134 ObjectSpace space;
1135 AllocationAction action;
1136 };
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001137
Iain Merrick9ac36c92010-09-13 15:29:50 +01001138 // A List of callback that are triggered when memory is allocated or free'd
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001139 List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;
Iain Merrick9ac36c92010-09-13 15:29:50 +01001140
Steve Blocka7e24c12009-10-30 11:49:00 +00001141 // Initializes pages in a chunk. Returns the first page address.
1142 // This function and GetChunkId() are provided for the mark-compact
1143 // collector to rebuild page headers in the from space, which is
1144 // used as a marking stack and its page headers are destroyed.
Steve Block44f0eee2011-05-26 01:26:41 +01001145 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1146 PagedSpace* owner);
Steve Block6ded16b2010-05-10 14:33:55 +01001147
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001148 void UpdateAllocatedSpaceLimits(void* low, void* high) {
1149 lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
1150 highest_ever_allocated_ = Max(highest_ever_allocated_, high);
1151 }
1152
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001153 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
Steve Blocka7e24c12009-10-30 11:49:00 +00001154};
1155
1156
1157// -----------------------------------------------------------------------------
1158// Interface for heap object iterator to be implemented by all object space
1159// object iterators.
1160//
Leon Clarked91b9f72010-01-27 17:25:45 +00001161// NOTE: The space specific object iterators also implements the own next()
1162// method which is used to avoid using virtual functions
Steve Blocka7e24c12009-10-30 11:49:00 +00001163// iterating a specific space.
1164
1165class ObjectIterator : public Malloced {
1166 public:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001167 virtual ~ObjectIterator() {}
Steve Blocka7e24c12009-10-30 11:49:00 +00001168
Steve Blocka7e24c12009-10-30 11:49:00 +00001169 virtual HeapObject* next_object() = 0;
1170};
1171
1172
1173// -----------------------------------------------------------------------------
1174// Heap object iterator in new/old/map spaces.
1175//
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001176// A HeapObjectIterator iterates objects from the bottom of the given space
1177// to its top or from the bottom of the given page to its top.
Steve Blocka7e24c12009-10-30 11:49:00 +00001178//
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001179// If objects are allocated in the page during iteration the iterator may
1180// or may not iterate over those objects. The caller must create a new
1181// iterator in order to be sure to visit these new objects.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001182class HeapObjectIterator : public ObjectIterator {
Steve Blocka7e24c12009-10-30 11:49:00 +00001183 public:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001184 // Creates a new object iterator in a given space.
Steve Blocka7e24c12009-10-30 11:49:00 +00001185 // If the size function is not given, the iterator calls the default
1186 // Object::Size().
1187 explicit HeapObjectIterator(PagedSpace* space);
1188 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001189 HeapObjectIterator(Page* page, HeapObjectCallback size_func);
Steve Blocka7e24c12009-10-30 11:49:00 +00001190
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001191 // Advance to the next object, skipping free spaces and other fillers and
1192 // skipping the special garbage section of which there is one per space.
1193 // Returns NULL when the iteration has ended.
1194 inline HeapObject* Next() {
1195 do {
1196 HeapObject* next_obj = FromCurrentPage();
1197 if (next_obj != NULL) return next_obj;
1198 } while (AdvanceToNextPage());
1199 return NULL;
Leon Clarked91b9f72010-01-27 17:25:45 +00001200 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001201
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001202 virtual HeapObject* next_object() { return Next(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001203
1204 private:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001205 enum PageMode { kOnePageOnly, kAllPagesInSpace };
Steve Blocka7e24c12009-10-30 11:49:00 +00001206
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001207 Address cur_addr_; // Current iteration point.
1208 Address cur_end_; // End iteration point.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001209 HeapObjectCallback size_func_; // Size function or NULL.
1210 PagedSpace* space_;
1211 PageMode page_mode_;
Leon Clarked91b9f72010-01-27 17:25:45 +00001212
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001213 // Fast (inlined) path of Next().
1214 inline HeapObject* FromCurrentPage();
Leon Clarked91b9f72010-01-27 17:25:45 +00001215
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001216 // Slow path of Next(); advances to the next page. Returns false if the
1217 // iteration has ended.
1218 bool AdvanceToNextPage();
Steve Blocka7e24c12009-10-30 11:49:00 +00001219
1220 // Initializes fields.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001221 inline void Initialize(PagedSpace* owner, Address start, Address end,
1222 PageMode mode, HeapObjectCallback size_func);
Steve Blocka7e24c12009-10-30 11:49:00 +00001223};
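
// A minimal usage sketch (illustrative only, not part of this header's
// interface): walk every live object in a paged space, assuming a valid
// PagedSpace* named 'space' and the default Object::Size() size function.
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... inspect obj; objects allocated during the walk may be missed.
//   }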
1224
1225
1226// -----------------------------------------------------------------------------
1227// A PageIterator iterates the pages in a paged space.
Steve Blocka7e24c12009-10-30 11:49:00 +00001228
1229class PageIterator BASE_EMBEDDED {
1230 public:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001231 explicit inline PageIterator(PagedSpace* space);
Steve Blocka7e24c12009-10-30 11:49:00 +00001232
1233 inline bool has_next();
1234 inline Page* next();
1235
1236 private:
1237 PagedSpace* space_;
1238 Page* prev_page_; // Previous page returned.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001239 // Next page that will be returned. Cached here so that we can use this
1240 // iterator for operations that deallocate pages.
1241 Page* next_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001242};
1243
1244
1245// -----------------------------------------------------------------------------
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001246// A space has a circular list of pages. The next page can be accessed via
1247// Page::next_page() call.
Steve Blocka7e24c12009-10-30 11:49:00 +00001248
1249// An abstraction of allocation and relocation pointers in a page-structured
1250// space.
1251class AllocationInfo {
1252 public:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001253 AllocationInfo() : top_(NULL), limit_(NULL) {}
1254
1255 INLINE(void set_top(Address top)) {
1256 SLOW_DCHECK(top == NULL ||
1257 (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
1258 top_ = top;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001259 }
1260
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001261 INLINE(Address top()) const {
1262 SLOW_DCHECK(top_ == NULL ||
1263 (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
1264 return top_;
1265 }
1266
1267 Address* top_address() { return &top_; }
1268
1269 INLINE(void set_limit(Address limit)) {
1270 SLOW_DCHECK(limit == NULL ||
1271 (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
1272 limit_ = limit;
1273 }
1274
1275 INLINE(Address limit()) const {
1276 SLOW_DCHECK(limit_ == NULL ||
1277 (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) ==
1278 0);
1279 return limit_;
1280 }
1281
1282 Address* limit_address() { return &limit_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001283
1284#ifdef DEBUG
1285 bool VerifyPagedAllocation() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001286 return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
1287 (top_ <= limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001288 }
1289#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001290
1291 private:
1292 // Current allocation top.
1293 Address top_;
1294 // Current allocation limit.
1295 Address limit_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001296};
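
// Illustrative sketch (hypothetical names, not the actual fast path) of how
// a bump-pointer allocation step uses the top and limit fields above:
//
//   Address old_top = info.top();
//   if (info.limit() - old_top >= size_in_bytes) {
//     info.set_top(old_top + size_in_bytes);
//     // The new object occupies [old_top, old_top + size_in_bytes).
//   } else {
//     // Fall back to the owning space's slow allocation path.
//   }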
1297
1298
1299// An abstraction of the accounting statistics of a page-structured space.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001300// The 'capacity' of a space is the number of object-area bytes (i.e., not
Steve Blocka7e24c12009-10-30 11:49:00 +00001301// including page bookkeeping structures) currently in the space. The 'size'
1302// of a space is the number of allocated bytes, the 'waste' in the space is
1303// the number of bytes that are not allocated and not available to
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001304// allocation without reorganizing the space via a GC (e.g. small blocks due
Steve Blocka7e24c12009-10-30 11:49:00 +00001305// to internal fragmentation, top of page areas in map space), and the bytes
1306// 'available' is the number of unallocated bytes that are not waste. The
1307// capacity is the sum of size, waste, and available.
1308//
1309// The stats are only set by functions that ensure they stay balanced. These
1310// functions increase or decrease one of the non-capacity stats in
1311// conjunction with capacity, or else they always balance increases and
1312// decreases to the non-capacity stats.
1313class AllocationStats BASE_EMBEDDED {
1314 public:
1315 AllocationStats() { Clear(); }
1316
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001317 // Zero out all the allocation statistics (i.e., no capacity).
Steve Blocka7e24c12009-10-30 11:49:00 +00001318 void Clear() {
1319 capacity_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001320 max_capacity_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001321 size_ = 0;
1322 waste_ = 0;
1323 }
1324
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001325 void ClearSizeWaste() {
1326 size_ = capacity_;
1327 waste_ = 0;
1328 }
1329
1330 // Reset the allocation statistics (i.e., available = capacity with no
Steve Blocka7e24c12009-10-30 11:49:00 +00001331 // wasted or allocated bytes).
1332 void Reset() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001333 size_ = 0;
1334 waste_ = 0;
1335 }
1336
1337 // Accessors for the allocation statistics.
Ben Murdochf87a2032010-10-22 12:50:53 +01001338 intptr_t Capacity() { return capacity_; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001339 intptr_t MaxCapacity() { return max_capacity_; }
Ben Murdochf87a2032010-10-22 12:50:53 +01001340 intptr_t Size() { return size_; }
1341 intptr_t Waste() { return waste_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001342
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001343 // Grow the space by adding available bytes. They are initially marked as
1344 // being in use (part of the size), but will normally be immediately freed,
1345 // putting them on the free list and removing them from size_.
Steve Blocka7e24c12009-10-30 11:49:00 +00001346 void ExpandSpace(int size_in_bytes) {
1347 capacity_ += size_in_bytes;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001348 size_ += size_in_bytes;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001349 if (capacity_ > max_capacity_) {
1350 max_capacity_ = capacity_;
1351 }
1352 DCHECK(size_ >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001353 }
1354
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001355 // Shrink the space by removing available bytes. Since shrinking is done
1356 // during sweeping, bytes have been marked as being in use (part of the size)
1357 // and are hereby freed.
Steve Blocka7e24c12009-10-30 11:49:00 +00001358 void ShrinkSpace(int size_in_bytes) {
1359 capacity_ -= size_in_bytes;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001360 size_ -= size_in_bytes;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001361 DCHECK(size_ >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001362 }
1363
1364 // Allocate from available bytes (available -> size).
Ben Murdochf87a2032010-10-22 12:50:53 +01001365 void AllocateBytes(intptr_t size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001366 size_ += size_in_bytes;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001367 DCHECK(size_ >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001368 }
1369
1370 // Free allocated bytes, making them available (size -> available).
Ben Murdochf87a2032010-10-22 12:50:53 +01001371 void DeallocateBytes(intptr_t size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001372 size_ -= size_in_bytes;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001373 DCHECK(size_ >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001374 }
1375
1376 // Waste free bytes (available -> waste).
1377 void WasteBytes(int size_in_bytes) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001378 DCHECK(size_in_bytes >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001379 waste_ += size_in_bytes;
Steve Blocka7e24c12009-10-30 11:49:00 +00001380 }
1381
1382 private:
Ben Murdochf87a2032010-10-22 12:50:53 +01001383 intptr_t capacity_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001384 intptr_t max_capacity_;
Ben Murdochf87a2032010-10-22 12:50:53 +01001385 intptr_t size_;
1386 intptr_t waste_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001387};
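
// Worked example of the invariant capacity == size + waste + available
// (numbers are illustrative). Starting from capacity 1000, size 1000 and
// waste 0 (a freshly expanded space), freeing a 100-byte block of which
// 20 bytes are too small to reuse gives:
//
//   stats.DeallocateBytes(100);  // size 900, available 100
//   stats.WasteBytes(20);        // waste 20, available 80
//
// Capacity stays at 1000 throughout; only the split between size, waste
// and available changes.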
1388
1389
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001390// -----------------------------------------------------------------------------
1391// Free lists for old object spaces
1392//
1393// Free-list nodes are free blocks in the heap. They look like heap objects
1394// (free-list node pointers have the heap object tag, and they have a map like
1395// a heap object). They have a size and a next pointer. The next pointer is
1396// the raw address of the next free list node (or NULL).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001397class FreeListNode : public HeapObject {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001398 public:
1399 // Obtain a free-list node from a raw address. This is not a cast because
1400 // it neither checks nor requires that the first word at the address is a map
1401 // pointer.
1402 static FreeListNode* FromAddress(Address address) {
1403 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1404 }
1405
1406 static inline bool IsFreeListNode(HeapObject* object);
1407
1408 // Set the size in bytes, which can be read with HeapObject::Size(). This
1409 // function also writes a map to the first word of the block so that it
1410 // looks like a heap object to the garbage collector and heap iteration
1411 // functions.
1412 void set_size(Heap* heap, int size_in_bytes);
1413
1414 // Accessors for the next field.
1415 inline FreeListNode* next();
1416 inline FreeListNode** next_address();
1417 inline void set_next(FreeListNode* next);
1418
1419 inline void Zap();
1420
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001421 static inline FreeListNode* cast(Object* object) {
1422 return reinterpret_cast<FreeListNode*>(object);
1423 }
1424
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001425 private:
1426 static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
1427
1428 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1429};
1430
1431
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001432// The free list category holds a pointer to the top element and a pointer to
1433// the end element of the linked list of free memory blocks.
1434class FreeListCategory {
1435 public:
1436 FreeListCategory() : top_(0), end_(NULL), available_(0) {}
1437
1438 intptr_t Concatenate(FreeListCategory* category);
1439
1440 void Reset();
1441
1442 void Free(FreeListNode* node, int size_in_bytes);
1443
1444 FreeListNode* PickNodeFromList(int* node_size);
1445 FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);
1446
1447 intptr_t EvictFreeListItemsInList(Page* p);
1448 bool ContainsPageFreeListItemsInList(Page* p);
1449
1450 void RepairFreeList(Heap* heap);
1451
1452 FreeListNode* top() const {
1453 return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
1454 }
1455
1456 void set_top(FreeListNode* top) {
1457 base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
1458 }
1459
1460 FreeListNode** GetEndAddress() { return &end_; }
1461 FreeListNode* end() const { return end_; }
1462 void set_end(FreeListNode* end) { end_ = end; }
1463
1464 int* GetAvailableAddress() { return &available_; }
1465 int available() const { return available_; }
1466 void set_available(int available) { available_ = available; }
1467
1468 base::Mutex* mutex() { return &mutex_; }
1469
1470 bool IsEmpty() { return top() == 0; }
1471
1472#ifdef DEBUG
1473 intptr_t SumFreeList();
1474 int FreeListLength();
1475#endif
1476
1477 private:
1478 // top_ points to the top FreeListNode* in the free list category.
1479 base::AtomicWord top_;
1480 FreeListNode* end_;
1481 base::Mutex mutex_;
1482
1483 // Total available bytes in all blocks of this free list category.
1484 int available_;
1485};
1486
1487
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001488// The free list for the old space. The free list is organized in such a way
1489// as to encourage objects allocated around the same time to be near each
1490// other. The normal way to allocate is intended to be by bumping a 'top'
1491// pointer until it hits a 'limit' pointer. When the limit is hit we need to
1492// find a new space to allocate from. This is done with the free list, which
1493// is divided up into rough categories to cut down on waste. Having finer
1494// categories would scatter allocation more.
1495
1496// The old space free list is organized in categories.
1497// 1-31 words: Such small free areas are discarded for efficiency reasons.
1498// They can be reclaimed by the compactor. However, the distance between top
1499// and limit may be this small.
1500// 32-255 words: There is a list of spaces this large. It is used for top and
1501// limit when the object we need to allocate is 1-31 words in size. These
1502// spaces are called small.
1503// 256-2047 words: There is a list of spaces this large. It is used for top and
1504// limit when the object we need to allocate is 32-255 words in size. These
1505// spaces are called medium.
1506// 2048-16383 words: There is a list of spaces this large. It is used for top
1507// and limit when the object we need to allocate is 256-2047 words in size.
1508// These spaces are called large.
1509// At least 16384 words. This list is for objects of 2048 words or larger.
1510// Empty pages are added to this list. These spaces are called huge.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001511class FreeList {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001512 public:
1513 explicit FreeList(PagedSpace* owner);
1514
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001515 intptr_t Concatenate(FreeList* free_list);
1516
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001517 // Clear the free list.
1518 void Reset();
1519
1520 // Return the number of bytes available on the free list.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001521 intptr_t available() {
1522 return small_list_.available() + medium_list_.available() +
1523 large_list_.available() + huge_list_.available();
1524 }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001525
1526 // Place a node on the free list. The block of size 'size_in_bytes'
1527 // starting at 'start' is placed on the free list. The return value is the
1528 // number of bytes that have been lost due to internal fragmentation by
1529 // freeing the block. Bookkeeping information will be written to the block,
1530 // i.e., its contents will be destroyed. The start address should be word
1531 // aligned, and the size should be a non-zero multiple of the word size.
1532 int Free(Address start, int size_in_bytes);
1533
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001534 // This method returns how much memory can be allocated after freeing
1535 // maximum_freed memory.
1536 static inline int GuaranteedAllocatable(int maximum_freed) {
1537 if (maximum_freed < kSmallListMin) {
1538 return 0;
1539 } else if (maximum_freed <= kSmallListMax) {
1540 return kSmallAllocationMax;
1541 } else if (maximum_freed <= kMediumListMax) {
1542 return kMediumAllocationMax;
1543 } else if (maximum_freed <= kLargeListMax) {
1544 return kLargeAllocationMax;
1545 }
1546 return maximum_freed;
1547 }
1548
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001549 // Allocate a block of size 'size_in_bytes' from the free list. The block
1550 // is uninitialized. A failure is returned if no block is available. The
1551 // number of bytes lost to fragmentation is returned in the output parameter
1552 // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
1553 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1554
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001555 bool IsEmpty() {
1556 return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
1557 large_list_.IsEmpty() && huge_list_.IsEmpty();
1558 }
1559
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001560#ifdef DEBUG
1561 void Zap();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001562 intptr_t SumFreeLists();
1563 bool IsVeryLong();
1564#endif
1565
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001566 // Used after booting the VM.
1567 void RepairLists(Heap* heap);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001568
1569 intptr_t EvictFreeListItems(Page* p);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001570 bool ContainsPageFreeListItems(Page* p);
1571
1572 FreeListCategory* small_list() { return &small_list_; }
1573 FreeListCategory* medium_list() { return &medium_list_; }
1574 FreeListCategory* large_list() { return &large_list_; }
1575 FreeListCategory* huge_list() { return &huge_list_; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001576
1577 private:
1578 // The size range of blocks, in bytes.
1579 static const int kMinBlockSize = 3 * kPointerSize;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001580 static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001581
1582 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
1583
1584 PagedSpace* owner_;
1585 Heap* heap_;
1586
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001587 static const int kSmallListMin = 0x20 * kPointerSize;
1588 static const int kSmallListMax = 0xff * kPointerSize;
1589 static const int kMediumListMax = 0x7ff * kPointerSize;
1590 static const int kLargeListMax = 0x3fff * kPointerSize;
1591 static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
1592 static const int kMediumAllocationMax = kSmallListMax;
1593 static const int kLargeAllocationMax = kMediumListMax;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001594 FreeListCategory small_list_;
1595 FreeListCategory medium_list_;
1596 FreeListCategory large_list_;
1597 FreeListCategory huge_list_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001598
1599 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1600};
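
// Illustrative example of the thresholds above (sizes in words): a freed
// block of 100 words falls in the small list (32-255 words), so
//
//   FreeList::GuaranteedAllocatable(100 * kPointerSize)
//
// returns kSmallAllocationMax (31 words), the largest request the small
// list is used to satisfy. A block below kSmallListMin (32 words) yields 0,
// matching the note above that such small areas are discarded.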
1601
1602
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001603class AllocationResult {
1604 public:
1605 // Implicit constructor from Object*.
1606 AllocationResult(Object* object) // NOLINT
1607 : object_(object),
1608 retry_space_(INVALID_SPACE) {}
1609
1610 AllocationResult() : object_(NULL), retry_space_(INVALID_SPACE) {}
1611
1612 static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
1613 return AllocationResult(space);
1614 }
1615
1616 inline bool IsRetry() { return retry_space_ != INVALID_SPACE; }
1617
1618 template <typename T>
1619 bool To(T** obj) {
1620 if (IsRetry()) return false;
1621 *obj = T::cast(object_);
1622 return true;
1623 }
1624
1625 Object* ToObjectChecked() {
1626 CHECK(!IsRetry());
1627 return object_;
1628 }
1629
1630 AllocationSpace RetrySpace() {
1631 DCHECK(IsRetry());
1632 return retry_space_;
1633 }
1634
1635 private:
1636 explicit AllocationResult(AllocationSpace space)
1637 : object_(NULL), retry_space_(space) {}
1638
1639 Object* object_;
1640 AllocationSpace retry_space_;
1641};
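
// Typical usage sketch (illustrative): the spaces return an AllocationResult
// from their AllocateRaw() methods; callers unpack it with To() and consult
// RetrySpace() when the allocation failed.
//
//   AllocationResult allocation = space->AllocateRaw(size_in_bytes);
//   HeapObject* object = NULL;
//   if (!allocation.To(&object)) {
//     // Failed: allocation.RetrySpace() names the space to collect first.
//   }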
1642
1643
Steve Blocka7e24c12009-10-30 11:49:00 +00001644class PagedSpace : public Space {
1645 public:
1646 // Creates a space with a maximum capacity, and an id.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001647 PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
Ben Murdochf87a2032010-10-22 12:50:53 +01001648 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00001649
1650 virtual ~PagedSpace() {}
1651
1652 // Set up the space using the given address range of virtual memory (from
1653 // the memory allocator's initial chunk) if possible. If the block of
1654 // addresses is not big enough to contain a single page-aligned page, a
1655 // fresh chunk will be allocated.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001656 bool SetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00001657
1658 // Returns true if the space has been successfully set up and not
1659 // subsequently torn down.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001660 bool HasBeenSetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00001661
1662 // Cleans up the space, frees all pages in this space except those belonging
1663 // to the initial chunk, uncommits addresses in the initial chunk.
1664 void TearDown();
1665
1666 // Checks whether an object/address is in this space.
1667 inline bool Contains(Address a);
1668 bool Contains(HeapObject* o) { return Contains(o->address()); }
1669
1670 // Given an address occupied by a live object, return that object if it is
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001671 // in this space, or a Smi if it is not. The implementation iterates over
1672 // objects in the page containing the address, the cost is linear in the
1673 // number of objects in the page. It may be slow.
1674 Object* FindObject(Address addr);
1675
1676 // During boot the free_space_map is created, and afterwards we may need
1677 // to write it into the free list nodes that were already created.
1678 void RepairFreeListsAfterBoot();
Steve Blocka7e24c12009-10-30 11:49:00 +00001679
Ben Murdoch85b71792012-04-11 18:30:58 +01001680 // Prepares for a mark-compact GC.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001681 void PrepareForMarkCompact();
Ben Murdoch85b71792012-04-11 18:30:58 +01001682
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001683 // Current capacity without growing (Size() + Available()).
Ben Murdochf87a2032010-10-22 12:50:53 +01001684 intptr_t Capacity() { return accounting_stats_.Capacity(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001685
Steve Block3ce2e202009-11-05 08:53:23 +00001686 // Total amount of memory committed for this space. For paged
1687 // spaces this equals the capacity.
Ben Murdochf87a2032010-10-22 12:50:53 +01001688 intptr_t CommittedMemory() { return Capacity(); }
Steve Block3ce2e202009-11-05 08:53:23 +00001689
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001690 // The maximum amount of memory ever committed for this space.
1691 intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
1692
1693 // Approximate amount of physical memory committed for this space.
1694 size_t CommittedPhysicalMemory();
1695
1696 struct SizeStats {
1697 intptr_t Total() {
1698 return small_size_ + medium_size_ + large_size_ + huge_size_;
1699 }
1700
1701 intptr_t small_size_;
1702 intptr_t medium_size_;
1703 intptr_t large_size_;
1704 intptr_t huge_size_;
1705 };
1706
1707 void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
1708 void ResetFreeListStatistics();
1709
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001710 // Sets the capacity, the available space and the wasted space to zero.
1711 // The stats are rebuilt during sweeping by adding each page to the
1712 // capacity and the size when it is encountered. As free spaces are
1713 // discovered during the sweeping they are subtracted from the size and added
1714 // to the available and wasted totals.
1715 void ClearStats() {
1716 accounting_stats_.ClearSizeWaste();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001717 ResetFreeListStatistics();
1718 }
1719
1720 // Increases the number of available bytes of that space.
1721 void AddToAccountingStats(intptr_t bytes) {
1722 accounting_stats_.DeallocateBytes(bytes);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001723 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001724
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001725 // Available bytes without growing. These are the bytes on the free list.
1726 // The bytes in the linear allocation area are not included in this total
1727 // because updating the stats would slow down allocation. New pages are
1728 // immediately added to the free list so they show up here.
1729 intptr_t Available() { return free_list_.available(); }
1730
1731 // Allocated bytes in this space. Garbage bytes that were not found due to
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001732 // concurrent sweeping are counted as being allocated! The bytes in the
1733 // current linear allocation area (between top and limit) are also counted
1734 // here.
Ben Murdochf87a2032010-10-22 12:50:53 +01001735 virtual intptr_t Size() { return accounting_stats_.Size(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001736
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001737 // Like Size(), but the bytes in lazily swept pages are estimated and the bytes
1738 // in the current linear allocation area are not included.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001739 virtual intptr_t SizeOfObjects();
Steve Blocka7e24c12009-10-30 11:49:00 +00001740
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001741 // Wasted bytes in this space. These are just the bytes that were thrown away
1742 // due to being too small to use for allocation. They do not include the
1743 // free bytes that were not found at all due to lazy sweeping.
1744 virtual intptr_t Waste() { return accounting_stats_.Waste(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001745
1746 // Returns the allocation pointer in this space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001747 Address top() { return allocation_info_.top(); }
1748 Address limit() { return allocation_info_.limit(); }
1749
1750 // The allocation top address.
1751 Address* allocation_top_address() { return allocation_info_.top_address(); }
1752
1753 // The allocation limit address.
1754 Address* allocation_limit_address() {
1755 return allocation_info_.limit_address();
1756 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001757
1758 // Allocate the requested number of bytes in the space if possible, return a
1759 // failure object if not.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001760 MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);
Leon Clarkee46be812010-01-19 14:06:41 +00001761
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001762 // Give a block of memory to the space's free list. It might be added to
1763 // the free list or accounted as waste.
1764 // Returns the number of bytes from the block that remain usable, i.e.
1765 // size_in_bytes minus the bytes accounted as waste.
1766 int Free(Address start, int size_in_bytes) {
1767 int wasted = free_list_.Free(start, size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001768 accounting_stats_.DeallocateBytes(size_in_bytes);
1769 accounting_stats_.WasteBytes(wasted);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001770 return size_in_bytes - wasted;
1771 }
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001772
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001773 void ResetFreeList() { free_list_.Reset(); }
1774
Steve Block6ded16b2010-05-10 14:33:55 +01001775 // Set space allocation info.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001776 void SetTopAndLimit(Address top, Address limit) {
1777 DCHECK(top == limit ||
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001778 Page::FromAddress(top) == Page::FromAddress(limit - 1));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001779 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1780 allocation_info_.set_top(top);
1781 allocation_info_.set_limit(limit);
Steve Block6ded16b2010-05-10 14:33:55 +01001782 }
1783
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001784 // Empty space allocation info, returning unused area to free list.
1785 void EmptyAllocationInfo() {
1786 // Mark the old linear allocation area with a free space map so it can be
1787 // skipped when scanning the heap.
1788 int old_linear_size = static_cast<int>(limit() - top());
1789 Free(top(), old_linear_size);
1790 SetTopAndLimit(NULL, NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00001791 }
1792
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001793 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
1794
1795 void IncreaseCapacity(int size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001796
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001797 // Releases an unused page and shrinks the space.
1798 void ReleasePage(Page* page);
Steve Blocka7e24c12009-10-30 11:49:00 +00001799
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001800 // The dummy page that anchors the linked list of pages.
1801 Page* anchor() { return &anchor_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001802
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001803#ifdef VERIFY_HEAP
1804 // Verify integrity of this space.
1805 virtual void Verify(ObjectVisitor* visitor);
1806
1807 // Overridden by subclasses to verify space-specific object
1808 // properties (e.g., only maps or free-list nodes are in map space).
1809 virtual void VerifyObject(HeapObject* obj) {}
1810#endif
1811
Steve Blocka7e24c12009-10-30 11:49:00 +00001812#ifdef DEBUG
1813 // Print meta info and objects in this space.
1814 virtual void Print();
1815
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001816 // Reports statistics for the space
1817 void ReportStatistics();
1818
Steve Blocka7e24c12009-10-30 11:49:00 +00001819 // Report code object related statistics
1820 void CollectCodeStatistics();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001821 static void ReportCodeStatistics(Isolate* isolate);
1822 static void ResetCodeStatistics(Isolate* isolate);
Steve Blocka7e24c12009-10-30 11:49:00 +00001823#endif
1824
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001825 // Evacuation candidates are swept by the evacuator. Needs to return a valid
1826 // result before _and_ after evacuation has finished.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001827 static bool ShouldBeSweptBySweeperThreads(Page* p) {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001828 return !p->IsEvacuationCandidate() &&
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001829 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001830 }
1831
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001832 void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001833
1834 void IncreaseUnsweptFreeBytes(Page* p) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001835 DCHECK(ShouldBeSweptBySweeperThreads(p));
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001836 unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
1837 }
1838
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001839 void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; }
1840
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001841 void DecreaseUnsweptFreeBytes(Page* p) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001842 DCHECK(ShouldBeSweptBySweeperThreads(p));
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001843 unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
1844 }
1845
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001846 void ResetUnsweptFreeBytes() { unswept_free_bytes_ = 0; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001847
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001848 // This function tries to steal size_in_bytes of memory from the sweeper
1849 // threads' free-lists. If it does not succeed in stealing enough memory, it
1850 // will wait for the sweeper threads to finish sweeping.
1851 // It returns true when sweeping is completed and false otherwise.
1852 bool EnsureSweeperProgress(intptr_t size_in_bytes);
1853
1854 void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; }
1855
1856 Page* end_of_unswept_pages() { return end_of_unswept_pages_; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001857
1858 Page* FirstPage() { return anchor_.next_page(); }
1859 Page* LastPage() { return anchor_.prev_page(); }
1860
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001861 void EvictEvacuationCandidatesFromFreeLists();
1862
1863 bool CanExpand();
1864
1865 // Returns the number of total pages in this space.
1866 int CountTotalPages();
1867
1868 // Return size of allocatable area on a page in this space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001869 inline int AreaSize() { return area_size_; }
1870
1871 void CreateEmergencyMemory();
1872 void FreeEmergencyMemory();
1873 void UseEmergencyMemory();
1874
1875 bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001876
Steve Blocka7e24c12009-10-30 11:49:00 +00001877 protected:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001878 FreeList* free_list() { return &free_list_; }
1879
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001880 int area_size_;
1881
Steve Blocka7e24c12009-10-30 11:49:00 +00001882 // Maximum capacity of this space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001883 intptr_t max_capacity_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001884
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001885 intptr_t SizeOfFirstPage();
1886
Steve Blocka7e24c12009-10-30 11:49:00 +00001887 // Accounting information for this space.
1888 AllocationStats accounting_stats_;
1889
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001890 // The dummy page that anchors the double linked list of pages.
1891 Page anchor_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001892
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001893 // The space's free list.
1894 FreeList free_list_;
Steve Block6ded16b2010-05-10 14:33:55 +01001895
Steve Blocka7e24c12009-10-30 11:49:00 +00001896 // Normal allocation information.
1897 AllocationInfo allocation_info_;
1898
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001899 // The number of free bytes which could be reclaimed by advancing the
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001900 // concurrent sweeper threads.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001901 intptr_t unswept_free_bytes_;
Ben Murdochc7cc0282012-03-05 14:35:55 +00001902
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001903 // The sweeper threads iterate over the list of pointer and data space pages
1904 // and sweep these pages concurrently. They will stop sweeping after the
1905 // end_of_unswept_pages_ page.
1906 Page* end_of_unswept_pages_;
1907
1908 // Emergency memory is the memory of a full page for a given space, allocated
1909 // conservatively before evacuating a page. If compaction fails due to an
1910 // out-of-memory error, the emergency memory can be used to complete compaction.
1911 // If not used, the emergency memory is released after compaction.
1912 MemoryChunk* emergency_memory_;
1913
Steve Blocka7e24c12009-10-30 11:49:00 +00001914 // Expands the space by allocating a fixed number of pages. Returns false if
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001915 // it cannot allocate requested number of pages from OS, or if the hard heap
1916 // size limit has been hit.
1917 bool Expand();
Steve Blocka7e24c12009-10-30 11:49:00 +00001918
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001919 // Generic fast case allocation function that tries linear allocation at the
1920 // address denoted by top in allocation_info_.
1921 inline HeapObject* AllocateLinearly(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001922
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001923 // If sweeping is still in progress try to sweep unswept pages. If that is
1924 // not successful, wait for the sweeper threads and re-try free-list
1925 // allocation.
1926 MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
1927 int size_in_bytes);
1928
Steve Blocka7e24c12009-10-30 11:49:00 +00001929 // Slow path of AllocateRaw. This function is space-dependent.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001930 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001931
Steve Blocka7e24c12009-10-30 11:49:00 +00001932 friend class PageIterator;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001933 friend class MarkCompactCollector;
Steve Blocka7e24c12009-10-30 11:49:00 +00001934};
1935
1936
Steve Blocka7e24c12009-10-30 11:49:00 +00001937class NumberAndSizeInfo BASE_EMBEDDED {
1938 public:
1939 NumberAndSizeInfo() : number_(0), bytes_(0) {}
1940
1941 int number() const { return number_; }
1942 void increment_number(int num) { number_ += num; }
1943
1944 int bytes() const { return bytes_; }
1945 void increment_bytes(int size) { bytes_ += size; }
1946
1947 void clear() {
1948 number_ = 0;
1949 bytes_ = 0;
1950 }
1951
1952 private:
1953 int number_;
1954 int bytes_;
1955};
1956
1957
1958// HistogramInfo class for recording a single "bar" of a histogram. This
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001959// class is used for collecting statistics to print to the log file.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001960class HistogramInfo : public NumberAndSizeInfo {
Steve Blocka7e24c12009-10-30 11:49:00 +00001961 public:
1962 HistogramInfo() : NumberAndSizeInfo() {}
1963
1964 const char* name() { return name_; }
1965 void set_name(const char* name) { name_ = name; }
1966
1967 private:
1968 const char* name_;
1969};
Steve Blocka7e24c12009-10-30 11:49:00 +00001970
1971
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001972enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001973
1974
1975class SemiSpace;
1976
1977
1978class NewSpacePage : public MemoryChunk {
1979 public:
1980 // GC related flags copied from from-space to to-space when
1981 // flipping semispaces.
1982 static const intptr_t kCopyOnFlipFlagsMask =
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001983 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
1984 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
1985 (1 << MemoryChunk::SCAN_ON_SCAVENGE);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001986
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001987 static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001988
1989 inline NewSpacePage* next_page() const {
1990 return static_cast<NewSpacePage*>(next_chunk());
1991 }
1992
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001993 inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001994
1995 inline NewSpacePage* prev_page() const {
1996 return static_cast<NewSpacePage*>(prev_chunk());
1997 }
1998
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001999 inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002000
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002001 SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002002
2003 bool is_anchor() { return !this->InNewSpace(); }
2004
2005 static bool IsAtStart(Address addr) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002006 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
2007 kObjectStartOffset;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002008 }
2009
2010 static bool IsAtEnd(Address addr) {
2011 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
2012 }
2013
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002014 Address address() { return reinterpret_cast<Address>(this); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002015
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002016 // Finds the NewSpacePage containing the given address.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002017 static inline NewSpacePage* FromAddress(Address address_in_page) {
2018 Address page_start =
2019 reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
2020 ~Page::kPageAlignmentMask);
2021 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
2022 return page;
2023 }
2024
2025 // Find the page for a limit address. A limit address is either an address
2026 // inside a page, or the address right after the last byte of a page.
2027 static inline NewSpacePage* FromLimit(Address address_limit) {
2028 return NewSpacePage::FromAddress(address_limit - 1);
2029 }
2030
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002031 // Checks if address1 and address2 are on the same new space page.
2032 static inline bool OnSamePage(Address address1, Address address2) {
2033 return NewSpacePage::FromAddress(address1) ==
2034 NewSpacePage::FromAddress(address2);
2035 }
2036
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002037 private:
2038 // Create a NewSpacePage object that is only used as anchor
2039 // for the doubly-linked list of real pages.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002040 explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002041
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002042 static NewSpacePage* Initialize(Heap* heap, Address start,
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002043 SemiSpace* semi_space);
2044
2045 // Initialize a fake NewSpacePage used as a sentinel at the ends
2046 // of a doubly-linked list of real NewSpacePages.
2047 // Only uses the prev/next links, and sets flags to not be in new-space.
2048 void InitializeAsAnchor(SemiSpace* owner);
2049
2050 friend class SemiSpace;
2051 friend class SemiSpaceIterator;
2052};
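
// Illustrative note on the address arithmetic above: masking an address
// with ~Page::kPageAlignmentMask yields the start of its page, so
// NewSpacePage::FromAddress(addr) returns the page containing addr. For a
// limit that sits exactly on a page boundary, FromLimit(limit) backs up one
// byte first, so the page *ending* at the limit is returned rather than the
// following page.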
2053
2054
Steve Blocka7e24c12009-10-30 11:49:00 +00002055// -----------------------------------------------------------------------------
2056// SemiSpace in young generation
2057//
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002058// A semispace is a contiguous chunk of memory holding page-like memory
2059// chunks. The mark-compact collector uses the memory of the first page in
2060// the from space as a marking stack when tracing live objects.
Steve Blocka7e24c12009-10-30 11:49:00 +00002061
2062class SemiSpace : public Space {
2063 public:
2064 // Constructor.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002065 SemiSpace(Heap* heap, SemiSpaceId semispace)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002066 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2067 start_(NULL),
2068 age_mark_(NULL),
2069 id_(semispace),
2070 anchor_(this),
2071 current_page_(NULL) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002072
2073 // Sets up the semispace using the given chunk.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002074 void SetUp(Address start, int initial_capacity, int maximum_capacity);
Steve Blocka7e24c12009-10-30 11:49:00 +00002075
2076 // Tear down the space. Heap memory was not allocated by the space, so it
2077 // is not deallocated here.
2078 void TearDown();
2079
2080 // True if the space has been set up but not torn down.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002081 bool HasBeenSetUp() { return start_ != NULL; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002082
Steve Blocka7e24c12009-10-30 11:49:00 +00002083 // Grow the semispace to the new capacity. The new capacity
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002084 // requested must be larger than the current capacity and less than
2085 // the maximum capacity.
Steve Blocka7e24c12009-10-30 11:49:00 +00002086 bool GrowTo(int new_capacity);
2087
2088 // Shrinks the semispace to the new capacity. The new capacity
2089 // requested must be more than the amount of used memory in the
2090 // semispace and less than the current capacity.
2091 bool ShrinkTo(int new_capacity);
2092
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002093 // Returns the start address of the first page of the space.
2094 Address space_start() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002095 DCHECK(anchor_.next_page() != &anchor_);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002096 return anchor_.next_page()->area_start();
2097 }
2098
2099 // Returns the start address of the current page of the space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002100 Address page_low() { return current_page_->area_start(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002101
Steve Blocka7e24c12009-10-30 11:49:00 +00002102 // Returns one past the end address of the space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002103 Address space_end() { return anchor_.prev_page()->area_end(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002104
2105 // Returns one past the end address of the current page of the space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002106 Address page_high() { return current_page_->area_end(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002107
2108 bool AdvancePage() {
2109 NewSpacePage* next_page = current_page_->next_page();
2110 if (next_page == anchor()) return false;
2111 current_page_ = next_page;
2112 return true;
2113 }
2114
2115 // Resets the space to using the first page.
2116 void Reset();
Steve Blocka7e24c12009-10-30 11:49:00 +00002117
2118 // Age mark accessors.
2119 Address age_mark() { return age_mark_; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002120 void set_age_mark(Address mark);
Steve Blocka7e24c12009-10-30 11:49:00 +00002121
2122 // True if the address is in the address range of this semispace (not
2123 // necessarily below the allocation pointer).
2124 bool Contains(Address a) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002125 return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
2126 reinterpret_cast<uintptr_t>(start_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002127 }
2128
2129 // True if the object is a heap object in the address range of this
2130 // semispace (not necessarily below the allocation pointer).
2131 bool Contains(Object* o) {
2132 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
2133 }
2134
Leon Clarkee46be812010-01-19 14:06:41 +00002135 // If we don't have these here then SemiSpace will be abstract. However
2136 // they should never be called.
Ben Murdochf87a2032010-10-22 12:50:53 +01002137 virtual intptr_t Size() {
Steve Blocka7e24c12009-10-30 11:49:00 +00002138 UNREACHABLE();
2139 return 0;
2140 }
2141
2142 bool is_committed() { return committed_; }
2143 bool Commit();
2144 bool Uncommit();
2145
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002146 NewSpacePage* first_page() { return anchor_.next_page(); }
2147 NewSpacePage* current_page() { return current_page_; }
2148
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002149#ifdef VERIFY_HEAP
2150 virtual void Verify();
2151#endif
2152
Steve Blocka7e24c12009-10-30 11:49:00 +00002153#ifdef DEBUG
2154 virtual void Print();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002155 // Validate a range of addresses in a SemiSpace.
2156 // The "from" address must be on a page prior to the "to" address,
2157 // in the linked page order, or it must be earlier on the same page.
2158 static void AssertValidRange(Address from, Address to);
2159#else
2160 // Do nothing.
2161 inline static void AssertValidRange(Address from, Address to) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002162#endif
2163
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002164 // Returns the current total capacity of the semispace.
2165 int TotalCapacity() { return total_capacity_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002166
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002167 // Returns the maximum total capacity of the semispace.
2168 int MaximumTotalCapacity() { return maximum_total_capacity_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002169
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002170 // Returns the initial capacity of the semispace.
2171 int InitialTotalCapacity() { return initial_total_capacity_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002172
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002173 SemiSpaceId id() { return id_; }
2174
2175 static void Swap(SemiSpace* from, SemiSpace* to);
2176
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002177 // Returns the maximum amount of memory ever committed by the semi space.
2178 size_t MaximumCommittedMemory() { return maximum_committed_; }
2179
2180 // Approximate amount of physical memory committed for this space.
2181 size_t CommittedPhysicalMemory();
2182
Steve Blocka7e24c12009-10-30 11:49:00 +00002183 private:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002184 // Flips the semispace between being from-space and to-space.
2185 // Copies the flags into the masked positions on all pages in the space.
2186 void FlipPages(intptr_t flags, intptr_t flag_mask);
2187
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002188 // Updates Capacity and MaximumCommitted based on new capacity.
2189 void SetCapacity(int new_capacity);
2190
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002191 NewSpacePage* anchor() { return &anchor_; }
2192
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002193 // The current and maximum total capacity of the space.
2194 int total_capacity_;
2195 int maximum_total_capacity_;
2196 int initial_total_capacity_;
2197
2198 intptr_t maximum_committed_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002199
2200 // The start address of the space.
2201 Address start_;
2202 // Used to govern object promotion during mark-compact collection.
2203 Address age_mark_;
2204
2205 // Masks and comparison values to test for containment in this semispace.
2206 uintptr_t address_mask_;
2207 uintptr_t object_mask_;
2208 uintptr_t object_expected_;
2209
2210 bool committed_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002211 SemiSpaceId id_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002212
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002213 NewSpacePage anchor_;
2214 NewSpacePage* current_page_;
2215
2216 friend class SemiSpaceIterator;
2217 friend class NewSpacePageIterator;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002218
Steve Blocka7e24c12009-10-30 11:49:00 +00002219 public:
2220 TRACK_MEMORY("SemiSpace")
2221};
2222
2223
2224// A SemiSpaceIterator is an ObjectIterator that iterates over the active
2225// semispace of the heap's new space. It iterates over the objects in the
2226// semispace from a given start address (defaulting to the bottom of the
2227// semispace) to the top of the semispace. New objects allocated after the
2228// iterator is created are not iterated.
2229class SemiSpaceIterator : public ObjectIterator {
2230 public:
2231 // Create an iterator over the objects in the given space. If no start
2232 // address is given, the iterator starts from the bottom of the space. If
2233 // no size function is given, the iterator calls Object::Size().
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002234
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002235 // Iterate over all of allocated to-space.
2236 explicit SemiSpaceIterator(NewSpace* space);
2237 // Iterate over all of allocated to-space, with a custom size function.
2238 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
2239 // Iterate over part of allocated to-space, from start to the end
2240 // of allocation.
2241 SemiSpaceIterator(NewSpace* space, Address start);
2242 // Iterate from one address to another in the same semi-space.
2243 SemiSpaceIterator(Address from, Address to);
2244
2245 HeapObject* Next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00002246 if (current_ == limit_) return NULL;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002247 if (NewSpacePage::IsAtEnd(current_)) {
2248 NewSpacePage* page = NewSpacePage::FromLimit(current_);
2249 page = page->next_page();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002250 DCHECK(!page->is_anchor());
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002251 current_ = page->area_start();
2252 if (current_ == limit_) return NULL;
2253 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002254
2255 HeapObject* object = HeapObject::FromAddress(current_);
2256 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
2257
2258 current_ += size;
2259 return object;
2260 }
2261
2262 // Implementation of the ObjectIterator functions.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002263 virtual HeapObject* next_object() { return Next(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002264
2265 private:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002266 void Initialize(Address start, Address end, HeapObjectCallback size_func);
Steve Blocka7e24c12009-10-30 11:49:00 +00002267
Steve Blocka7e24c12009-10-30 11:49:00 +00002268 // The current iteration point.
2269 Address current_;
2270 // The end of iteration.
2271 Address limit_;
2272 // The callback function.
2273 HeapObjectCallback size_func_;
2274};
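
// Minimal usage sketch (illustrative), assuming a NewSpace* 'new_space'
// obtained from the heap: iterate the objects currently allocated in
// to-space.
//
//   SemiSpaceIterator it(new_space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit obj ...
//   }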
2275
2276
2277// -----------------------------------------------------------------------------
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002278// A PageIterator iterates the pages in a semi-space.
2279class NewSpacePageIterator BASE_EMBEDDED {
2280 public:
2281 // Make an iterator that runs over all pages in to-space.
2282 explicit inline NewSpacePageIterator(NewSpace* space);
2283
2284 // Make an iterator that runs over all pages in the given semispace,
2285 // even those not used in allocation.
2286 explicit inline NewSpacePageIterator(SemiSpace* space);
2287
2288 // Make iterator that iterates from the page containing start
2289 // to the page that contains limit in the same semispace.
2290 inline NewSpacePageIterator(Address start, Address limit);
2291
2292 inline bool has_next();
2293 inline NewSpacePage* next();
2294
2295 private:
2296 NewSpacePage* prev_page_; // Previous page returned.
2297 // Next page that will be returned. Cached here so that we can use this
2298 // iterator for operations that deallocate pages.
2299 NewSpacePage* next_page_;
2300 // Last page returned.
2301 NewSpacePage* last_page_;
2302};
2303
2304
2305// -----------------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +00002306// The young generation space.
2307//
2308// The new space consists of a contiguous pair of semispaces. It simply
2309// forwards most functions to the appropriate semispace.
2310
2311class NewSpace : public Space {
2312 public:
2313 // Constructor.
Steve Block44f0eee2011-05-26 01:26:41 +01002314 explicit NewSpace(Heap* heap)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002315 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2316 to_space_(heap, kToSpace),
2317 from_space_(heap, kFromSpace),
2318 reservation_(),
2319 inline_allocation_limit_step_(0) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002320
2321 // Sets up the new space using the given chunk.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002322 bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002323
2324 // Tears down the space. Heap memory was not allocated by the space, so it
2325 // is not deallocated here.
2326 void TearDown();
2327
2328 // True if the space has been set up but not torn down.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002329 bool HasBeenSetUp() {
2330 return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00002331 }
2332
2333 // Flip the pair of spaces.
2334 void Flip();
2335
2336 // Grow the capacity of the semispaces. Assumes that they are not at
2337 // their maximum capacity.
2338 void Grow();
2339
2340 // Shrink the capacity of the semispaces.
2341 void Shrink();
2342
2343 // True if the address or object lies in the address range of either
2344 // semispace (not necessarily below the allocation pointer).
2345 bool Contains(Address a) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002346 return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
2347 reinterpret_cast<uintptr_t>(start_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002348 }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002349
Steve Blocka7e24c12009-10-30 11:49:00 +00002350 bool Contains(Object* o) {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002351 Address a = reinterpret_cast<Address>(o);
2352 return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002353 }
2354
2355 // Return the allocated bytes in the active semispace.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002356 virtual intptr_t Size() {
2357 return pages_used_ * NewSpacePage::kAreaSize +
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002358 static_cast<int>(top() - to_space_.page_low());
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002359 }
2360
Ben Murdochf87a2032010-10-22 12:50:53 +01002361 // The same, but returning an int. We have to keep the version that returns
2362 // intptr_t because it is inherited, but when we know we are dealing with the
2363 // new space, which cannot get as big as the other spaces, this is useful:
2364 int SizeAsInt() { return static_cast<int>(Size()); }
Steve Block3ce2e202009-11-05 08:53:23 +00002365
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002366 // Return the allocatable capacity of a semispace.
2367 intptr_t Capacity() {
2368 SLOW_DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
2369 return (to_space_.TotalCapacity() / Page::kPageSize) *
2370 NewSpacePage::kAreaSize;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002371 }
2372
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002373 // Return the current size of a semispace, allocatable and non-allocatable
2374 // memory.
2375 intptr_t TotalCapacity() {
2376 DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
2377 return to_space_.TotalCapacity();
Steve Blocka7e24c12009-10-30 11:49:00 +00002378 }
Steve Block3ce2e202009-11-05 08:53:23 +00002379
2380 // Return the total amount of memory committed for new space.
Ben Murdochf87a2032010-10-22 12:50:53 +01002381 intptr_t CommittedMemory() {
Steve Block3ce2e202009-11-05 08:53:23 +00002382 if (from_space_.is_committed()) return 2 * Capacity();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002383 return TotalCapacity();
Steve Block3ce2e202009-11-05 08:53:23 +00002384 }
2385
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002386 // Return the maximum amount of memory ever committed for new space.
2387 intptr_t MaximumCommittedMemory() {
2388 return to_space_.MaximumCommittedMemory() +
2389 from_space_.MaximumCommittedMemory();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002390 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002391
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002392 // Approximate amount of physical memory committed for this space.
2393 size_t CommittedPhysicalMemory();
2394
2395 // Return the available bytes without growing.
2396 intptr_t Available() { return Capacity() - Size(); }
2397
Steve Blocka7e24c12009-10-30 11:49:00 +00002398 // Return the maximum capacity of a semispace.
2399 int MaximumCapacity() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002400 DCHECK(to_space_.MaximumTotalCapacity() ==
2401 from_space_.MaximumTotalCapacity());
2402 return to_space_.MaximumTotalCapacity();
Steve Blocka7e24c12009-10-30 11:49:00 +00002403 }
2404
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002405 bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2406
Steve Blocka7e24c12009-10-30 11:49:00 +00002407 // Returns the initial capacity of a semispace.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002408 int InitialTotalCapacity() {
2409 DCHECK(to_space_.InitialTotalCapacity() ==
2410 from_space_.InitialTotalCapacity());
2411 return to_space_.InitialTotalCapacity();
Steve Blocka7e24c12009-10-30 11:49:00 +00002412 }
2413
2414 // Return the address of the allocation pointer in the active semispace.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002415 Address top() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002416 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2417 return allocation_info_.top();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002418 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002419
2420 void set_top(Address top) {
2421 DCHECK(to_space_.current_page()->ContainsLimit(top));
2422 allocation_info_.set_top(top);
2423 }
2424
2425 // Return the address of the allocation pointer limit in the active semispace.
2426 Address limit() {
2427 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2428 return allocation_info_.limit();
2429 }
2430
Steve Blocka7e24c12009-10-30 11:49:00 +00002431 // Return the address of the first object in the active semispace.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002432 Address bottom() { return to_space_.space_start(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002433
2434 // Get the age mark of the inactive semispace.
2435 Address age_mark() { return from_space_.age_mark(); }
2436 // Set the age mark in the active semispace.
2437 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2438
 2439 // The start address of the space and a bit mask. ANDing an address in the
 2440 // new space with the mask yields the start address.
2441 Address start() { return start_; }
2442 uintptr_t mask() { return address_mask_; }
2443
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002444 INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002445 DCHECK(Contains(addr));
2446 DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) ||
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002447 IsAligned(OffsetFrom(addr) - 1, kPointerSize));
2448 return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
2449 }
2450
2451 INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
2452 return reinterpret_cast<Address>(index << kPointerSizeLog2);
2453 }
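  // Worked example for the two mappings above (assuming a 64-bit build where
  // kPointerSize == 8 and therefore kPointerSizeLog2 == 3): an address that
  // is 64 bytes past start_ yields AddressToMarkbitIndex == 64 >> 3 == 8,
  // and MarkbitIndexToAddress(8) returns 8 << 3 == 64 (note that the reverse
  // mapping produces the offset from start_, not start_ + offset).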
2454
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002455 // The allocation top address.
2456 Address* allocation_top_address() { return allocation_info_.top_address(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002457
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002458 // The allocation limit address.
2459 Address* allocation_limit_address() {
2460 return allocation_info_.limit_address();
2461 }
2462
2463 MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
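  // A minimal allocation sketch (illustrative; it assumes the usual
  // AllocationResult helpers To() and IsRetry(), which are declared
  // elsewhere): callers must be prepared for a retry result, since
  // AllocateRaw can fail when the semispace is full and no fresh page can
  // be added.
  //
  //   AllocationResult r = new_space->AllocateRaw(size_in_bytes);
  //   HeapObject* obj = NULL;
  //   if (!r.To(&obj)) {
  //     // r.IsRetry() is true here; the caller should collect garbage
  //     // and try again.
  //   }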
Steve Blocka7e24c12009-10-30 11:49:00 +00002464
2465 // Reset the allocation pointer to the beginning of the active semispace.
2466 void ResetAllocationInfo();
Steve Blocka7e24c12009-10-30 11:49:00 +00002467
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002468 void UpdateInlineAllocationLimit(int size_in_bytes);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002469 void LowerInlineAllocationLimit(intptr_t step) {
2470 inline_allocation_limit_step_ = step;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002471 UpdateInlineAllocationLimit(0);
2472 top_on_previous_step_ = allocation_info_.top();
Steve Blocka7e24c12009-10-30 11:49:00 +00002473 }
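  // Sketch of how the limit step is used (illustrative numbers; the actual
  // step size is chosen by the incremental marker): with a step of 1 KB the
  // limit is first placed 1 KB above the top rather than at the true page
  // end. Inlined allocation runs until it hits that artificial limit and
  // falls back to the slow path, which gives incremental marking a chance to
  // do a step and then advances the limit by another 1 KB, and so on.
  //
  //   new_space->LowerInlineAllocationLimit(1 * KB);  // enable marking steps
  //   new_space->LowerInlineAllocationLimit(0);       // restore the real limit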
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002474
2475 // Get the extent of the inactive semispace (for use as a marking stack,
 2476 // or to zap it). Note that the start and end addresses are not necessarily
 2477 // on the same page, so FromSpaceStart() might be above FromSpaceEnd().
2478 Address FromSpacePageLow() { return from_space_.page_low(); }
2479 Address FromSpacePageHigh() { return from_space_.page_high(); }
2480 Address FromSpaceStart() { return from_space_.space_start(); }
2481 Address FromSpaceEnd() { return from_space_.space_end(); }
2482
2483 // Get the extent of the active semispace's pages' memory.
2484 Address ToSpaceStart() { return to_space_.space_start(); }
2485 Address ToSpaceEnd() { return to_space_.space_end(); }
2486
2487 inline bool ToSpaceContains(Address address) {
2488 return to_space_.Contains(address);
2489 }
2490 inline bool FromSpaceContains(Address address) {
2491 return from_space_.Contains(address);
Steve Blocka7e24c12009-10-30 11:49:00 +00002492 }
2493
2494 // True if the object is a heap object in the address range of the
2495 // respective semispace (not necessarily below the allocation pointer of the
2496 // semispace).
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002497 inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
2498 inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002499
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002500 // Try to switch the active semispace to a new, empty page.
2501 // Returns false if this isn't possible or reasonable (i.e., there
2502 // are no pages, or the current page is already empty), or true
2503 // if successful.
2504 bool AddFreshPage();
Steve Blocka7e24c12009-10-30 11:49:00 +00002505
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002506#ifdef VERIFY_HEAP
Steve Blocka7e24c12009-10-30 11:49:00 +00002507 // Verify the active semispace.
2508 virtual void Verify();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002509#endif
2510
2511#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00002512 // Print the active semispace.
2513 virtual void Print() { to_space_.Print(); }
2514#endif
2515
Steve Blocka7e24c12009-10-30 11:49:00 +00002516 // Iterates the active semispace to collect statistics.
2517 void CollectStatistics();
2518 // Reports previously collected statistics of the active semispace.
2519 void ReportStatistics();
2520 // Clears previously collected statistics.
2521 void ClearHistograms();
2522
2523 // Record the allocation or promotion of a heap object. Note that we don't
2524 // record every single allocation, but only those that happen in the
2525 // to space during a scavenge GC.
2526 void RecordAllocation(HeapObject* obj);
2527 void RecordPromotion(HeapObject* obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002528
 2529 // Return whether the operation succeeded.
2530 bool CommitFromSpaceIfNeeded() {
2531 if (from_space_.is_committed()) return true;
2532 return from_space_.Commit();
2533 }
2534
2535 bool UncommitFromSpace() {
2536 if (!from_space_.is_committed()) return true;
2537 return from_space_.Uncommit();
2538 }
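  // A minimal usage sketch (illustrative; whether the heap really uncommits
  // from-space depends on configuration outside this header): from-space is
  // only needed while a scavenge is in progress, so its memory can be
  // released between collections and recommitted lazily.
  //
  //   new_space->UncommitFromSpace();        // after a scavenge completes
  //   new_space->CommitFromSpaceIfNeeded();  // before the next scavenge flips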
2539
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002540 inline intptr_t inline_allocation_limit_step() {
2541 return inline_allocation_limit_step_;
2542 }
2543
2544 SemiSpace* active_space() { return &to_space_; }
2545
Steve Blocka7e24c12009-10-30 11:49:00 +00002546 private:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002547 // Update allocation info to match the current to-space page.
2548 void UpdateAllocationInfo();
2549
2550 Address chunk_base_;
2551 uintptr_t chunk_size_;
2552
Steve Blocka7e24c12009-10-30 11:49:00 +00002553 // The semispaces.
2554 SemiSpace to_space_;
2555 SemiSpace from_space_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002556 base::VirtualMemory reservation_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002557 int pages_used_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002558
2559 // Start address and bit mask for containment testing.
2560 Address start_;
2561 uintptr_t address_mask_;
2562 uintptr_t object_mask_;
2563 uintptr_t object_expected_;
2564
2565 // Allocation pointer and limit for normal allocation and allocation during
2566 // mark-compact collection.
2567 AllocationInfo allocation_info_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002568
2569 // When incremental marking is active we will set allocation_info_.limit
2570 // to be lower than actual limit and then will gradually increase it
2571 // in steps to guarantee that we do incremental marking steps even
2572 // when all allocation is performed from inlined generated code.
2573 intptr_t inline_allocation_limit_step_;
2574
2575 Address top_on_previous_step_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002576
Steve Blocka7e24c12009-10-30 11:49:00 +00002577 HistogramInfo* allocated_histogram_;
2578 HistogramInfo* promoted_histogram_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002579
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002580 MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002581
2582 friend class SemiSpaceIterator;
2583
2584 public:
2585 TRACK_MEMORY("NewSpace")
2586};
2587
2588
2589// -----------------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +00002590// Old object space (excluding map objects)
2591
2592class OldSpace : public PagedSpace {
2593 public:
2594 // Creates an old space object with a given maximum capacity.
2595 // The constructor does not allocate pages from OS.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002596 OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
Steve Block44f0eee2011-05-26 01:26:41 +01002597 Executability executable)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002598 : PagedSpace(heap, max_capacity, id, executable) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002599
Steve Blocka7e24c12009-10-30 11:49:00 +00002600 public:
2601 TRACK_MEMORY("OldSpace")
2602};
2603
2604
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002605// For contiguous spaces, top should be in the space (or at the end) and limit
2606// should be the end of the space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002607#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
2608 SLOW_DCHECK((space).page_low() <= (info).top() && \
2609 (info).top() <= (space).page_high() && \
2610 (info).limit() <= (space).page_high())
Steve Blocka7e24c12009-10-30 11:49:00 +00002611
2612
2613// -----------------------------------------------------------------------------
2614// Old space for all map objects
2615
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002616class MapSpace : public PagedSpace {
Steve Blocka7e24c12009-10-30 11:49:00 +00002617 public:
2618 // Creates a map space object with a maximum capacity.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002619 MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002620 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
2621 max_map_space_pages_(kMaxMapPageIndex - 1) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002622
Ben Murdoch85b71792012-04-11 18:30:58 +01002623 // The maximum index of a map space page.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002624 // TODO(1600): this limit is artificial just to keep code compilable.
2625 static const int kMaxMapPageIndex = 1 << 16;
Ben Murdoch85b71792012-04-11 18:30:58 +01002626
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002627 virtual int RoundSizeDownToObjectAlignment(int size) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002628 if (base::bits::IsPowerOfTwo32(Map::kSize)) {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002629 return RoundDown(size, Map::kSize);
2630 } else {
2631 return (size / Map::kSize) * Map::kSize;
Leon Clarkee46be812010-01-19 14:06:41 +00002632 }
Leon Clarkee46be812010-01-19 14:06:41 +00002633 }
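  // Worked example (using an assumed, purely illustrative Map::kSize of 88
  // bytes, which is not a power of two): RoundSizeDownToObjectAlignment(1000)
  // takes the else branch and returns (1000 / 88) * 88 == 968, i.e. the
  // largest multiple of the map size that fits into the requested size.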
2634
Steve Blocka7e24c12009-10-30 11:49:00 +00002635 protected:
Steve Blocka7e24c12009-10-30 11:49:00 +00002636 virtual void VerifyObject(HeapObject* obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002637
2638 private:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002639 static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
Leon Clarkee46be812010-01-19 14:06:41 +00002640
2641 // Do map space compaction if there is a page gap.
Leon Clarked91b9f72010-01-27 17:25:45 +00002642 int CompactionThreshold() {
2643 return kMapsPerPage * (max_map_space_pages_ - 1);
2644 }
2645
2646 const int max_map_space_pages_;
Leon Clarkee46be812010-01-19 14:06:41 +00002647
Steve Blocka7e24c12009-10-30 11:49:00 +00002648 public:
2649 TRACK_MEMORY("MapSpace")
2650};
2651
2652
2653// -----------------------------------------------------------------------------
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002654// Old space for simple property cell objects
Steve Blocka7e24c12009-10-30 11:49:00 +00002655
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002656class CellSpace : public PagedSpace {
Steve Blocka7e24c12009-10-30 11:49:00 +00002657 public:
 2658 // Creates a cell space object with a maximum capacity.
Steve Block44f0eee2011-05-26 01:26:41 +01002659 CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002660 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002661
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002662 virtual int RoundSizeDownToObjectAlignment(int size) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002663 if (base::bits::IsPowerOfTwo32(Cell::kSize)) {
2664 return RoundDown(size, Cell::kSize);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002665 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002666 return (size / Cell::kSize) * Cell::kSize;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002667 }
2668 }
2669
Steve Blocka7e24c12009-10-30 11:49:00 +00002670 protected:
Steve Blocka7e24c12009-10-30 11:49:00 +00002671 virtual void VerifyObject(HeapObject* obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002672
2673 public:
2674 TRACK_MEMORY("CellSpace")
2675};
2676
2677
2678// -----------------------------------------------------------------------------
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002679// Old space for all global object property cell objects
2680
2681class PropertyCellSpace : public PagedSpace {
2682 public:
2683 // Creates a property cell space object with a maximum capacity.
2684 PropertyCellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2685 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
2686
2687 virtual int RoundSizeDownToObjectAlignment(int size) {
2688 if (base::bits::IsPowerOfTwo32(PropertyCell::kSize)) {
2689 return RoundDown(size, PropertyCell::kSize);
2690 } else {
2691 return (size / PropertyCell::kSize) * PropertyCell::kSize;
2692 }
2693 }
2694
2695 protected:
2696 virtual void VerifyObject(HeapObject* obj);
2697
2698 public:
2699 TRACK_MEMORY("PropertyCellSpace")
2700};
2701
2702
2703// -----------------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +00002704// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
 2705// the large object space. A large object is allocated from the OS heap with
 2706// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
 2707// A large object always starts at offset Page::kObjectStartOffset in its page.
2708// Large objects do not move during garbage collections.
2709
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002710class LargeObjectSpace : public Space {
Steve Blocka7e24c12009-10-30 11:49:00 +00002711 public:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002712 LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
2713 virtual ~LargeObjectSpace() {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002714
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002715 // Initializes internal data structures.
2716 bool SetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00002717
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002718 // Releases internal resources, frees objects in this space.
2719 void TearDown();
Steve Blocka7e24c12009-10-30 11:49:00 +00002720
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002721 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2722 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2723 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2724 }
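  // Worked example (with illustrative values rather than the real
  // constants): if Page::kPageSize were 1 MB and Page::kObjectStartOffset a
  // few hundred bytes, a 2 MB chunk could hold an object of about
  // 2 MB - 1 MB - Page::kObjectStartOffset bytes, while any chunk no larger
  // than kPageSize + kObjectStartOffset holds no large object at all (0).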
2725
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002726 // Allocates a large object of the given size; this is the single
 2727 // allocation path shared by all large-object allocations.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002728 MUST_USE_RESULT AllocationResult
2729 AllocateRaw(int object_size, Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002730
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002731 // Available bytes for objects in this space.
Steve Block44f0eee2011-05-26 01:26:41 +01002732 inline intptr_t Available();
Steve Blocka7e24c12009-10-30 11:49:00 +00002733
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002734 virtual intptr_t Size() { return size_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002735
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002736 virtual intptr_t SizeOfObjects() { return objects_size_; }
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002737
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002738 intptr_t MaximumCommittedMemory() { return maximum_committed_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002739
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002740 intptr_t CommittedMemory() { return Size(); }
2741
2742 // Approximate amount of physical memory committed for this space.
2743 size_t CommittedPhysicalMemory();
2744
2745 int PageCount() { return page_count_; }
2746
 2747 // Finds an object for a given address; returns a Smi if it is not found.
 2748 // The function iterates through all objects in this space and may be slow.
2749 Object* FindObject(Address a);
Steve Blocka7e24c12009-10-30 11:49:00 +00002750
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002751 // Finds a large object page containing the given address, returns NULL
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002752 // if such a page doesn't exist.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002753 LargePage* FindPage(Address a);
Steve Blocka7e24c12009-10-30 11:49:00 +00002754
2755 // Frees unmarked objects.
2756 void FreeUnmarkedObjects();
2757
2758 // Checks whether a heap object is in this space; O(1).
2759 bool Contains(HeapObject* obj);
2760
2761 // Checks whether the space is empty.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002762 bool IsEmpty() { return first_page_ == NULL; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002763
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002764 LargePage* first_page() { return first_page_; }
2765
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002766#ifdef VERIFY_HEAP
Steve Blocka7e24c12009-10-30 11:49:00 +00002767 virtual void Verify();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002768#endif
2769
2770#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00002771 virtual void Print();
2772 void ReportStatistics();
2773 void CollectCodeStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002774#endif
2775 // Checks whether an address is in the object area in this space. It
2776 // iterates all objects in the space. May be slow.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002777 bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002778
2779 private:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002780 intptr_t max_capacity_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002781 intptr_t maximum_committed_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002782 // The head of the linked list of large object chunks.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002783 LargePage* first_page_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002784 intptr_t size_; // allocated bytes
2785 int page_count_; // number of chunks
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002786 intptr_t objects_size_; // size of objects
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002787 // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
2788 HashMap chunk_map_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002789
Steve Blocka7e24c12009-10-30 11:49:00 +00002790 friend class LargeObjectIterator;
2791
2792 public:
2793 TRACK_MEMORY("LargeObjectSpace")
2794};
2795
2796
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002797class LargeObjectIterator : public ObjectIterator {
Steve Blocka7e24c12009-10-30 11:49:00 +00002798 public:
2799 explicit LargeObjectIterator(LargeObjectSpace* space);
2800 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2801
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002802 HeapObject* Next();
Steve Blocka7e24c12009-10-30 11:49:00 +00002803
2804 // implementation of ObjectIterator.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002805 virtual HeapObject* next_object() { return Next(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002806
2807 private:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002808 LargePage* current_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002809 HeapObjectCallback size_func_;
2810};
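// A minimal iteration sketch (illustrative; `lo_space` stands for a
// hypothetical LargeObjectSpace*): Next() returns NULL once every large
// object has been visited.
//
//   LargeObjectIterator it(lo_space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // inspect obj
//   }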
2811
2812
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002813// Iterates over the chunks (pages and large object pages) that can contain
2814// pointers to new space.
2815class PointerChunkIterator BASE_EMBEDDED {
2816 public:
2817 inline explicit PointerChunkIterator(Heap* heap);
2818
2819 // Return NULL when the iterator is done.
2820 MemoryChunk* next() {
2821 switch (state_) {
2822 case kOldPointerState: {
2823 if (old_pointer_iterator_.has_next()) {
2824 return old_pointer_iterator_.next();
2825 }
2826 state_ = kMapState;
2827 // Fall through.
2828 }
2829 case kMapState: {
2830 if (map_iterator_.has_next()) {
2831 return map_iterator_.next();
2832 }
2833 state_ = kLargeObjectState;
2834 // Fall through.
2835 }
2836 case kLargeObjectState: {
2837 HeapObject* heap_object;
2838 do {
2839 heap_object = lo_iterator_.Next();
2840 if (heap_object == NULL) {
2841 state_ = kFinishedState;
2842 return NULL;
2843 }
2844 // Fixed arrays are the only pointer-containing objects in large
2845 // object space.
2846 } while (!heap_object->IsFixedArray());
2847 MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
2848 return answer;
2849 }
2850 case kFinishedState:
2851 return NULL;
2852 default:
2853 break;
2854 }
2855 UNREACHABLE();
2856 return NULL;
2857 }
2858
2859
2860 private:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002861 enum State { kOldPointerState, kMapState, kLargeObjectState, kFinishedState };
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002862 State state_;
2863 PageIterator old_pointer_iterator_;
2864 PageIterator map_iterator_;
2865 LargeObjectIterator lo_iterator_;
2866};
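// A minimal usage sketch (illustrative; `heap` stands for a hypothetical
// Heap*): next() walks old-pointer-space pages, then map-space pages, then
// pointer-bearing large-object chunks, and finally returns NULL.
//
//   PointerChunkIterator it(heap);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != NULL) {
//     // visit the new-space pointers recorded on this chunk
//   }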
2867
2868
Steve Block44f0eee2011-05-26 01:26:41 +01002869#ifdef DEBUG
2870struct CommentStatistic {
2871 const char* comment;
2872 int size;
2873 int count;
2874 void Clear() {
2875 comment = NULL;
2876 size = 0;
2877 count = 0;
2878 }
2879 // Must be small, since an iteration is used for lookup.
2880 static const int kMaxComments = 64;
2881};
2882#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002883}
2884} // namespace v8::internal
Steve Block44f0eee2011-05-26 01:26:41 +01002885
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002886#endif // V8_HEAP_SPACES_H_