// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include "src/allocation.h"
#include "src/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
#include "src/flags.h"
#include "src/hashmap.h"
#include "src/list.h"
#include "src/objects.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

class CompactionSpaceCollection;
class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of
// intergenerational references. See heap/store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old space or large object space
// it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old space and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However, we
// still require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.
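//
// Illustrative sketch (not part of this header): allocating from the special
// garbage section is a bump-pointer scheme over the space's top and limit
// fields. The function name and signature below are hypothetical stand-ins
// for the real AllocationInfo machinery declared later in this file.
//
//   Address AllocateRawSketch(Address* top, Address limit,
//                             int size_in_bytes) {
//     Address result = *top;
//     if (result + size_in_bytes > limit) return nullptr;  // need GC/new page
//     *top = result + size_in_bytes;  // bump the top pointer
//     return result;
//   }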

// Some assertion macros used in debug mode.

#define DCHECK_PAGE_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))

#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
  DCHECK((0 < size) && (size <= code_space->AreaSize()))

#define DCHECK_PAGE_OFFSET(offset) \
  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))

#define DCHECK_MAP_PAGE_INDEX(index) \
  DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))

class AllocationInfo;
class CompactionSpace;
class FreeList;
class MemoryAllocator;
class MemoryChunk;
class PagedSpace;
class Space;

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

 private:
  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1);
    } else {
      return MarkBit(cell_, new_mask);
    }
  }

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  CellType* cell_;
  CellType mask_;

  friend class Marking;
};
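
// Illustrative sketch (assumed usage, not part of this header): a MarkBit
// addresses one bit within a 32-bit bitmap cell. Set/Get/Clear are private
// and reachable only through the Marking friend class; the snippet below
// spells out the equivalent bit operations.
//
//   MarkBit::CellType cell = 0;
//   MarkBit::CellType mask = 1u << 3;  // fourth bit of the cell
//   cell |= mask;                      // MarkBit::Set()
//   bool marked = (cell & mask) != 0;  // MarkBit::Get() -> true
//   cell &= ~mask;                     // MarkBit::Clear()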


// Bitmap is a sequence of cells, each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() { return CellsForLength(kLength); }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  V8_INLINE static uint32_t IndexInCell(uint32_t index) {
    return index & kBitIndexMask;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) { return reinterpret_cast<Address>(this); }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index) {
    MarkBit::CellType mask = 1u << IndexInCell(index);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask);
  }

  static inline void Clear(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) {
        return false;
      }
    }
    return true;
  }

  // Clears all bits starting from {cell_base_index} up to and excluding
  // {index}. Note that {cell_base_index} is required to be cell aligned.
  void ClearRange(uint32_t cell_base_index, uint32_t index) {
    DCHECK_EQ(IndexInCell(cell_base_index), 0u);
    DCHECK_GE(index, cell_base_index);
    uint32_t start_cell_index = IndexToCell(cell_base_index);
    uint32_t end_cell_index = IndexToCell(index);
    DCHECK_GE(end_cell_index, start_cell_index);
    // Clear all cells till the cell containing the last index.
    for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
      cells()[i] = 0;
    }
    // Clear all bits in the last cell till the last bit before index.
    uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
    cells()[end_cell_index] &= clear_mask;
  }
};
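
// Illustrative sketch (assumed usage, hypothetical values): mapping a word
// index to its mark bit and clearing a range of bits, given some
// Bitmap* bitmap.
//
//   MarkBit bit = bitmap->MarkBitFromIndex(100);  // bit 4 of cell 3, since
//                                                 // 100 == 3 * 32 + 4
//   bitmap->ClearRange(64, 100);  // zeroes cell 2 (bits 64..95), then
//                                 // bits 96..99 of cell 3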


class SkipList;
class SlotsBuffer;

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    ABOUT_TO_BE_FREED,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    SCAN_ON_SCAVENGE,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    EVACUATION_CANDIDATE,
    RESCAN_ON_EVACUATION,
    NEVER_EVACUATE,  // May contain immortal immutables.
    POPULAR_PAGE,    // Slots buffer of this page overflowed on the previous GC.

    // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
    // otherwise marking bits are still intact.
    WAS_SWEPT,

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black and
    // a white to grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // This flag is intended to be used for testing. Works only when both
    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
    // are set. It forces the page to become an evacuation candidate at the
    // next candidates selection cycle.
    FORCE_EVACUATION_CANDIDATE_FOR_TESTING,

    // This flag is intended to be used for testing.
    NEVER_ALLOCATE_ON_PAGE,

    // The memory chunk is already logically freed; however, the actual
    // freeing still has to be performed.
    PRE_FREED,

    // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
    // has been aborted and needs special handling by the sweeper.
    COMPACTION_WAS_ABORTED,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };

  // |kCompactingDone|: Initial compaction state of a |MemoryChunk|.
  // |kCompactingInProgress|: Parallel compaction is currently in progress.
  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
  //   be finalized.
  // |kCompactingAborted|: Parallel compaction has been aborted, which should
  //   for now only happen in OOM scenarios.
  enum ParallelCompactingState {
    kCompactingDone,
    kCompactingInProgress,
    kCompactingFinalize,
    kCompactingAborted,
  };

  // |kSweepingDone|: The page state when sweeping is complete or sweeping must
  //   not be performed on that page.
  // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will
  //   not touch the page memory anymore.
  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
  // |kSweepingPending|: This page is ready for parallel sweeping.
  enum ParallelSweepingState {
    kSweepingDone,
    kSweepingFinalize,
    kSweepingInProgress,
    kSweepingPending
  };

  // Every n write barrier invocations we go to runtime even though
  // we could have handled it in generated code. This lets us check
  // whether we have hit the limit and should do some more marking.
  static const int kWriteBarrierCounterGranularity = 500;

  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
      (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize  // size_t size
      + kIntptrSize               // intptr_t flags_
      + kPointerSize              // Address area_start_
      + kPointerSize              // Address area_end_
      + 2 * kPointerSize          // base::VirtualMemory reservation_
      + kPointerSize              // Address owner_
      + kPointerSize              // Heap* heap_
      + kIntSize;                 // int store_buffer_counter_

  static const size_t kSlotsBufferOffset =
      kLiveBytesOffset + kIntSize;  // int live_byte_count_

  static const size_t kWriteBarrierCounterOffset =
      kSlotsBufferOffset + kPointerSize  // SlotsBuffer* slots_buffer_;
      + kPointerSize;                    // SkipList* skip_list_;

  static const size_t kMinHeaderSize =
      kWriteBarrierCounterOffset +
      kIntptrSize         // intptr_t write_barrier_counter_
      + kIntSize          // int progress_bar_
      + kPointerSize      // AtomicValue high_water_mark_
      + kPointerSize      // base::Mutex* mutex_
      + kPointerSize      // base::AtomicWord parallel_sweeping_
      + kPointerSize      // AtomicValue parallel_compaction_
      + 5 * kPointerSize  // AtomicNumber free-list statistics
      + kPointerSize      // AtomicValue next_chunk_
      + kPointerSize;     // AtomicValue prev_chunk_

  // We add some more space to the computed header size to account for missing
  // alignment requirements in our computation.
  // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
  static const size_t kHeaderSize = kMinHeaderSize + kIntSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset =
      kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
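
  // Worked example (illustrative, hypothetical values): the expression above
  // rounds kBodyOffset up to the next multiple of kObjectStartAlignment.
  // With kBodyOffset = 700 and kObjectStartAlignment = 256:
  //   699 + (256 - 699 % 256) = 699 + 69 = 768 = 3 * 256
  // and a kBodyOffset already on a boundary maps to itself:
  //   767 + (256 - 767 % 256) = 767 + 1 = 768.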
435
436 static const int kFlagsOffset = kPointerSize;
437
438 static void IncrementLiveBytesFromMutator(HeapObject* object, int by);
439
Ben Murdoch3ef787d2012-04-12 10:51:47 +0100440 // Only works if the pointer is in the first kPageSize of the MemoryChunk.
441 static MemoryChunk* FromAddress(Address a) {
442 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
443 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000444
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000445 static const MemoryChunk* FromAddress(const byte* a) {
446 return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
447 ~kAlignmentMask);
448 }
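
  // Illustrative sketch (hypothetical 32-bit values, assuming 1 MB chunks,
  // i.e. kPageSizeBits == 20): FromAddress() recovers the chunk header by
  // masking off the low alignment bits of any interior address.
  //   address        = 0x12345678
  //   kAlignmentMask = 0x000FFFFF
  //   chunk start    = 0x12345678 & ~0x000FFFFF = 0x12300000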

  static void IncrementLiveBytesFromGC(HeapObject* object, int by) {
    MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
  }

  // Only works for addresses in pointer spaces, not data or code spaces.
  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  static inline uint32_t FastAddressToMarkbitIndex(Address addr) {
    const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
  }

  static inline void UpdateHighWaterMark(Address mark) {
    if (mark == nullptr) return;
    // Need to subtract one from the mark because when a chunk is full the
    // top points to the next address after the chunk, which effectively
    // belongs to another chunk. See the comment to Page::FromAllocationTop.
    MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
    intptr_t old_mark = 0;
    do {
      old_mark = chunk->high_water_mark_.Value();
    } while ((new_mark > old_mark) &&
             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
  }
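
  // Illustrative sketch (assumed semantics of AtomicValue): the loop above is
  // the standard lock-free "atomic max" pattern. In portable C++11 terms:
  //
  //   std::atomic<intptr_t> mark;
  //   intptr_t old_mark = mark.load();
  //   while (new_mark > old_mark &&
  //          !mark.compare_exchange_weak(old_mark, new_mark)) {
  //     // compare_exchange_weak refreshed old_mark; retry.
  //   }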

  Address address() { return reinterpret_cast<Address>(this); }

  bool is_valid() { return address() != NULL; }

  MemoryChunk* next_chunk() { return next_chunk_.Value(); }

  MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }

  void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }

  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
        kPageHeaderTag) {
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                      kPageHeaderTag);
    } else {
      return NULL;
    }
  }

  void set_owner(Space* space) {
    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
           kPageHeaderTag);
  }

  base::VirtualMemory* reserved_memory() { return &reservation_; }

  void set_reserved_memory(base::VirtualMemory* reservation) {
    DCHECK_NOT_NULL(reservation);
    reservation_.TakeControl(reservation);
  }

  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
  void initialize_scan_on_scavenge(bool scan) {
    if (scan) {
      SetFlag(SCAN_ON_SCAVENGE);
    } else {
      ClearFlag(SCAN_ON_SCAVENGE);
    }
  }
  inline void set_scan_on_scavenge(bool scan);

  int store_buffer_counter() { return store_buffer_counter_; }
  void set_store_buffer_counter(int counter) {
    store_buffer_counter_ = counter;
  }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether addr can be a limit of addresses in this page.
  // It's a limit if it's in the page, or if it's just after the
  // last byte of the page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }

  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }

  void SetFlagTo(int flag, bool value) {
    if (value) {
      SetFlag(flag);
    } else {
      ClearFlag(flag);
    }
  }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask
  // are set to the value in "flags", the rest retain the current value
  // in flags_.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }
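
  // Illustrative sketch (hypothetical values): SetFlags() overwrites exactly
  // the bits selected by |mask| and leaves the rest of flags_ untouched:
  //   flags_ = 0b1010, flags = 0b0101, mask = 0b0011
  //   (0b1010 & ~0b0011) | (0b0101 & 0b0011) == 0b1000 | 0b0001 == 0b1001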

  AtomicValue<ParallelSweepingState>& parallel_sweeping_state() {
    return parallel_sweeping_;
  }

  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
    return parallel_compaction_;
  }

  bool TryLock() { return mutex_->TryLock(); }

  base::Mutex* mutex() { return mutex_; }

  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
  // progress. In particular, when we know that right before this call a
  // sweeper thread was sweeping this page.
  void WaitUntilSweepingCompleted() {
    mutex_->Lock();
    mutex_->Unlock();
    DCHECK(SweepingCompleted());
  }

  bool SweepingCompleted() {
    return parallel_sweeping_state().Value() <= kSweepingFinalize;
  }

  // Manage live byte count (count of bytes known to be live,
  // because they are marked black).
  void ResetLiveBytes() {
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
             live_byte_count_);
    }
    live_byte_count_ = 0;
  }

  void IncrementLiveBytes(int by) {
    if (FLAG_gc_verbose) {
      PrintF("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
             live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
             live_byte_count_ + by);
    }
    live_byte_count_ += by;
    DCHECK_GE(live_byte_count_, 0);
    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
  }

  int LiveBytes() {
    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
    return live_byte_count_;
  }

  void SetLiveBytes(int live_bytes) {
    DCHECK_GE(live_bytes, 0);
    DCHECK_LE(static_cast<unsigned>(live_bytes), size_);
    live_byte_count_ = live_bytes;
  }

  int write_barrier_counter() {
    return static_cast<int>(write_barrier_counter_);
  }

  void set_write_barrier_counter(int counter) {
    write_barrier_counter_ = counter;
  }

  int progress_bar() {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    return progress_bar_;
  }

  void set_progress_bar(int progress_bar) {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
  }

  size_t size() const { return size_; }

  void set_size(size_t size) { size_ = size; }

  void SetArea(Address area_start, Address area_end) {
    area_start_ = area_start;
    area_end_ = area_end;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }

  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

  // Markbits support

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  void PrintMarkbits() { markbits()->Print(); }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }

  void InsertAfter(MemoryChunk* other);
  void Unlink();

  inline Heap* heap() const { return heap_; }

  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }

  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }

  bool IsEvacuationCandidate() {
    DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
    return IsFlagSet(EVACUATION_CANDIDATE);
  }

  bool CanAllocate() {
    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
  }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  inline SkipList* skip_list() { return skip_list_; }

  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

  inline SlotsBuffer* slots_buffer() { return slots_buffer_; }

  inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }

  void MarkEvacuationCandidate() {
    DCHECK(!IsFlagSet(NEVER_EVACUATE));
    DCHECK(slots_buffer_ == NULL);
    SetFlag(EVACUATION_CANDIDATE);
  }

  void ClearEvacuationCandidate() {
    DCHECK(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() { return static_cast<int>(area_end() - area_start()); }
  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }

  // Should be called when memory chunk is about to be freed.
  void ReleaseAllocatedMemory();

 protected:
  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner);

  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  base::VirtualMemory reservation_;
  // The identity of the owning space. This is tagged as a failure pointer, but
  // no failure can be in an object, so this can be distinguished from any entry
  // in a fixed array.
  Address owner_;
  Heap* heap_;
  // Used by the store buffer to keep track of which pages to mark scan-on-
  // scavenge.
  int store_buffer_counter_;
  // Count of bytes marked black on page.
  int live_byte_count_;
  SlotsBuffer* slots_buffer_;
  SkipList* skip_list_;
  intptr_t write_barrier_counter_;
  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;
  // Assuming the initial allocation on a page is sequential,
  // count highest number of bytes ever allocated on the page.
  AtomicValue<intptr_t> high_water_mark_;

  base::Mutex* mutex_;
  AtomicValue<ParallelSweepingState> parallel_sweeping_;
  AtomicValue<ParallelCompactingState> parallel_compaction_;

  // PagedSpace free-list statistics.
  AtomicNumber<intptr_t> available_in_small_free_list_;
  AtomicNumber<intptr_t> available_in_medium_free_list_;
  AtomicNumber<intptr_t> available_in_large_free_list_;
  AtomicNumber<intptr_t> available_in_huge_free_list_;
  AtomicNumber<intptr_t> non_available_small_blocks_;

  // next_chunk_ holds a pointer of type MemoryChunk.
  AtomicValue<MemoryChunk*> next_chunk_;
  // prev_chunk_ holds a pointer of type MemoryChunk.
  AtomicValue<MemoryChunk*> prev_chunk_;

 private:
  void InitializeReservedMemory() { reservation_.Reset(); }

  friend class MemoryAllocator;
  friend class MemoryChunkValidator;
};


enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };

// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[.
  // This only works if the object is in fact in a page. See also MemoryChunk::
  // FromAddress() and FromAnyAddress().
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    return p;
  }
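
  // Worked example (illustrative, hypothetical addresses, 1 MB pages): for a
  // completely full page starting at 0x12300000, the allocation top is
  // 0x12400000, which masks to the *next* page. Subtracting kPointerSize
  // first keeps the lookup on the page the top actually belongs to:
  //   FromAddress(0x12400000 - kPointerSize) -> page at 0x12300000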

  // Returns the next page in the chain of pages owned by a space.
  inline Page* next_page() {
    DCHECK(next_chunk()->owner() == owner());
    return static_cast<Page*>(next_chunk());
  }
  inline Page* prev_page() {
    DCHECK(prev_chunk()->owner() == owner());
    return static_cast<Page*>(prev_chunk());
  }
  inline void set_next_page(Page* page);
  inline void set_prev_page(Page* page);

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset to this page.
  Address OffsetToAddress(int offset) {
    DCHECK_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Maximum object size that gets allocated into regular pages. Objects larger
  // than that size are allocated in large object space and are never moved in
  // memory. This also applies to new space allocation, since objects are never
  // migrated from new space to large object space. Takes double alignment into
  // account.
  // TODO(hpayer): This limit should be way smaller but we currently have
  // short living objects >256K.
  static const int kMaxRegularHeapObjectSize = 600 * KB;

  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  inline void ClearGCFields();

  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, PagedSpace* owner);

  void InitializeAsAnchor(PagedSpace* owner);

  bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
  void SetWasSwept() { SetFlag(WAS_SWEPT); }
  void ClearWasSwept() { ClearFlag(WAS_SWEPT); }

  void ResetFreeListStatistics();

  int LiveBytesFromFreeList() {
    return static_cast<int>(
        area_size() - non_available_small_blocks() -
        available_in_small_free_list() - available_in_medium_free_list() -
        available_in_large_free_list() - available_in_huge_free_list());
  }

#define FRAGMENTATION_STATS_ACCESSORS(type, name)        \
  type name() { return name##_.Value(); }                \
  void set_##name(type name) { name##_.SetValue(name); } \
  void add_##name(type name) { name##_.Increment(name); }

  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)

#undef FRAGMENTATION_STATS_ACCESSORS

  void add_available_in_free_list(FreeListCategoryType type, intptr_t bytes) {
    switch (type) {
      case kSmall:
        add_available_in_small_free_list(bytes);
        break;
      case kMedium:
        add_available_in_medium_free_list(bytes);
        break;
      case kLarge:
        add_available_in_large_free_list(bytes);
        break;
      case kHuge:
        add_available_in_huge_free_list(bytes);
        break;
      default:
        UNREACHABLE();
    }
  }

  intptr_t available_in_free_list(FreeListCategoryType type) {
    switch (type) {
      case kSmall:
        return available_in_small_free_list();
      case kMedium:
        return available_in_medium_free_list();
      case kLarge:
        return available_in_large_free_list();
      case kHuge:
        return available_in_huge_free_list();
      default:
        UNREACHABLE();
    }
    UNREACHABLE();
    return 0;
  }

#ifdef DEBUG
  void Print();
#endif  // DEBUG

  friend class MemoryAllocator;
};


class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

  inline LargePage* next_page() {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) { set_next_chunk(page); }

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};

// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap),
        id_(id),
        executable_(executable),
        committed_(0),
        max_committed_(0) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns the total amount of committed memory for this space, i.e.,
  // allocatable memory and page headers.
  virtual intptr_t CommittedMemory() { return committed_; }

  virtual intptr_t MaximumCommittedMemory() { return max_committed_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  // Approximate amount of physical memory committed for this space.
  virtual size_t CommittedPhysicalMemory() = 0;

  // Returns the available bytes without growing.
  virtual intptr_t Available() = 0;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 protected:
  void AccountCommitted(intptr_t bytes) {
    DCHECK_GE(bytes, 0);
    committed_ += bytes;
    if (committed_ > max_committed_) {
      max_committed_ = committed_;
    }
  }

  void AccountUncommitted(intptr_t bytes) {
    DCHECK_GE(bytes, 0);
    committed_ -= bytes;
    DCHECK_GE(committed_, 0);
  }

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;

  // Keeps track of committed memory in a space.
  intptr_t committed_;
  intptr_t max_committed_;
};


class MemoryChunkValidator {
  // Computed offsets should match the compiler generated ones.
  STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
  STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
                offsetof(MemoryChunk, live_byte_count_));
  STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset ==
                offsetof(MemoryChunk, slots_buffer_));
  STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
                offsetof(MemoryChunk, write_barrier_counter_));

  // Validate our estimates on the header size.
  STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4 GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() { TearDown(); }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(size_t requested_size);

  bool valid() { return code_range_ != NULL; }
  Address start() {
    DCHECK(valid());
    return static_cast<Address>(code_range_->address());
  }
  size_t size() {
    DCHECK(valid());
    return code_range_->size();
  }
  bool contains(Address address) {
    if (!valid()) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
                                            const size_t commit_size,
                                            size_t* allocated);
  bool CommitRawMemory(Address start, size_t length);
  bool UncommitRawMemory(Address start, size_t length);
  void FreeRawMemory(Address buf, size_t length);

 private:
  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  base::VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock() : start(0), size(0) {}
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // The global mutex guards free_list_ and allocation_list_ as GC threads may
  // access both lists concurrently to the main thread.
  base::Mutex code_range_mutex_;

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;

  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, returns false.
  bool GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);
  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
  void ReleaseBlock(const FreeBlock* block);

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};
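
// Illustrative note: a signed 32-bit pc-relative displacement reaches about
// +/-2 GB from the call site, so any two code addresses inside one 2 GB
// reservation are mutually reachable: |callee - caller| < 2^31 whenever both
// lie in the same 2 GB range.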


class SkipList {
 public:
  SkipList() { Clear(); }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == NULL) {
      list = new SkipList();
      page->set_skip_list(list);
    }

    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};
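
// Illustrative sketch (assumed usage, hypothetical offsets; kRegionSize is
// 8 KB): a SkipList maps each 8 KB region of a page to the start of the first
// object overlapping it, letting a scan jump straight to an object boundary.
// Recording a 12 KB object at page offset 0x5F00 covers regions 2 through 4:
//
//   list->AddObject(page_start + 0x5F00, 12 * KB);
//   // Now any address in regions 2..4 resolves to the object start:
//   //   list->StartFor(page_start + 0x6800) == page_start + 0x5F00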


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces, and
// large pages for large object space.
//
// Each space has to manage its own pages.
//
class MemoryAllocator {
 public:
  explicit MemoryAllocator(Isolate* isolate);

  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

  void TearDown();

  Page* AllocatePage(intptr_t size, PagedSpace* owner,
                     Executability executable);

  LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
                               Executability executable);

  // PreFree logically frees the object, i.e., it takes care of the size
  // bookkeeping and calls the allocation callback.
  void PreFreeMemory(MemoryChunk* chunk);

  // FreeMemory can be called concurrently when PreFree was executed before.
  void PerformFreeMemory(MemoryChunk* chunk);

  // Free is a wrapper method, which calls PreFree and PerformFreeMemory
  // together.
  void Free(MemoryChunk* chunk);

  // Returns allocated spaces in bytes.
  intptr_t Size() { return size_.Value(); }

  // Returns allocated executable spaces in bytes.
  intptr_t SizeExecutable() { return size_executable_.Value(); }

  // Returns the maximum available bytes of heaps.
  intptr_t Available() {
    intptr_t size = Size();
    return capacity_ < size ? 0 : capacity_ - size;
  }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    intptr_t executable_size = SizeExecutable();
    if (capacity_executable_ < executable_size) return 0;
    return capacity_executable_ - executable_size;
  }

  // Returns maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
  }

  // Returns an indication of whether a pointer is in a space that has
  // been allocated by this MemoryAllocator.
  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
    return address < lowest_ever_allocated_.Value() ||
           address >= highest_ever_allocated_.Value();
  }
1267
Steve Blocka7e24c12009-10-30 11:49:00 +00001268#ifdef DEBUG
1269 // Reports statistic info of the space.
Steve Block44f0eee2011-05-26 01:26:41 +01001270 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00001271#endif
1272
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001273 // Returns a MemoryChunk in which the memory region from commit_area_size to
1274 // reserve_area_size of the chunk area is reserved but not committed, it
1275 // could be committed later by calling MemoryChunk::CommitArea.
1276 MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
1277 intptr_t commit_area_size,
1278 Executability executable, Space* space);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001279
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001280 Address ReserveAlignedMemory(size_t requested, size_t alignment,
1281 base::VirtualMemory* controller);
1282 Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1283 size_t alignment, Executability executable,
1284 base::VirtualMemory* controller);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001285
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001286 bool CommitMemory(Address addr, size_t size, Executability executable);
1287
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001288 void FreeNewSpaceMemory(Address addr, base::VirtualMemory* reservation,
1289 Executability executable);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001290 void FreeMemory(base::VirtualMemory* reservation, Executability executable);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001291 void FreeMemory(Address addr, size_t size, Executability executable);
1292
1293 // Commit a contiguous block of memory from the initial chunk. Assumes that
1294 // the address is not NULL, the size is greater than zero, and that the
1295 // block is contained in the initial chunk. Returns true if it succeeded
1296 // and false otherwise.
1297 bool CommitBlock(Address start, size_t size, Executability executable);
1298
1299 // Uncommit a contiguous block of memory [start..(start+size)[.
1300 // start is not NULL, the size is greater than zero, and the
1301 // block is contained in the initial chunk. Returns true if it succeeded
1302 // and false otherwise.
1303 bool UncommitBlock(Address start, size_t size);
1304
1305 // Zaps a contiguous block of memory [start..(start+size)[ thus
1306 // filling it up with a recognizable non-NULL bit pattern.
1307 void ZapBlock(Address start, size_t size);
1308
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001309 void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001310 size_t size);
1311
1312 void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001313 ObjectSpace space, AllocationAction action);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001314
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001315 void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001316
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001317 bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001318
1319 static int CodePageGuardStartOffset();
1320
1321 static int CodePageGuardSize();
1322
1323 static int CodePageAreaStartOffset();
1324
1325 static int CodePageAreaEndOffset();
1326
1327 static int CodePageAreaSize() {
1328 return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1329 }
1330
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001331 static int PageAreaSize(AllocationSpace space) {
1332 DCHECK_NE(LO_SPACE, space);
1333 return (space == CODE_SPACE) ? CodePageAreaSize()
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001334 : Page::kAllocatableMemory;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001335 }
1336
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001337 MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
1338 Address start, size_t commit_size,
1339 size_t reserved_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001340
1341 private:
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001342 Isolate* isolate_;
1343
Steve Blocka7e24c12009-10-30 11:49:00 +00001344 // Maximum space size in bytes.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001345 intptr_t capacity_;
Russell Brenner90bac252010-11-18 13:33:46 -08001346 // Maximum subset of capacity_ that can be executable
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001347 intptr_t capacity_executable_;
Ben Murdochb0fe1622011-05-05 13:52:32 +01001348
Steve Blocka7e24c12009-10-30 11:49:00 +00001349 // Allocated space size in bytes.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001350 AtomicNumber<intptr_t> size_;
Steve Block791712a2010-08-27 10:21:07 +01001351 // Allocated executable space size in bytes.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001352 AtomicNumber<intptr_t> size_executable_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001353
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001354 // We keep the lowest and highest addresses allocated as a quick way
1355 // of determining that pointers are outside the heap. The estimate is
1356 // conservative, i.e. not all addrsses in 'allocated' space are allocated
1357 // to our heap. The range is [lowest, highest[, inclusive on the low end
1358 // and exclusive on the high end.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001359 AtomicValue<void*> lowest_ever_allocated_;
1360 AtomicValue<void*> highest_ever_allocated_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001361
Iain Merrick9ac36c92010-09-13 15:29:50 +01001362 struct MemoryAllocationCallbackRegistration {
1363 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
1364 ObjectSpace space,
1365 AllocationAction action)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001366 : callback(callback), space(space), action(action) {}
Iain Merrick9ac36c92010-09-13 15:29:50 +01001367 MemoryAllocationCallback callback;
1368 ObjectSpace space;
1369 AllocationAction action;
1370 };
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001371
Iain Merrick9ac36c92010-09-13 15:29:50 +01001372 // A List of callback that are triggered when memory is allocated or free'd
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001373 List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;
Iain Merrick9ac36c92010-09-13 15:29:50 +01001374
Steve Blocka7e24c12009-10-30 11:49:00 +00001375 // Initializes pages in a chunk. Returns the first page address.
1376 // This function and GetChunkId() are provided for the mark-compact
1377 // collector to rebuild page headers in the from space, which is
1378 // used as a marking stack and its page headers are destroyed.
Steve Block44f0eee2011-05-26 01:26:41 +01001379 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1380 PagedSpace* owner);
Steve Block6ded16b2010-05-10 14:33:55 +01001381
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001382 void UpdateAllocatedSpaceLimits(void* low, void* high) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001383 // The use of atomic primitives does not guarantee correctness (wrt.
1384 // desired semantics) by default. The loop here ensures that we update the
1385 // values only if they did not change in between.
1386 void* ptr = nullptr;
1387 do {
1388 ptr = lowest_ever_allocated_.Value();
1389 } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
1390 do {
1391 ptr = highest_ever_allocated_.Value();
1392 } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001393 }
1394
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001395 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
Steve Blocka7e24c12009-10-30 11:49:00 +00001396};
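
// A minimal sketch of the two-phase free protocol above (illustrative;
// {allocator} and {chunk} are assumed to be in scope):
//
//   allocator->PreFreeMemory(chunk);      // size bookkeeping and callbacks
//   allocator->PerformFreeMemory(chunk);  // may run on a concurrent thread
//
// Free(chunk) is the convenience wrapper that performs both steps
// back-to-back on the calling thread.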


// -----------------------------------------------------------------------------
// Interface for heap object iterators, to be implemented by all object space
// iterators.
//
// NOTE: The space-specific object iterators also implement their own next()
// method, which is used to avoid virtual function calls when iterating a
// specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() {}

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from the bottom of the given space
// to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration, the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
class HeapObjectIterator : public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  explicit HeapObjectIterator(PagedSpace* space);
  explicit HeapObjectIterator(Page* page);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns NULL when the iteration has ended.
  inline HeapObject* Next();
  inline HeapObject* next_object() override;

 private:
  enum PageMode { kOnePageOnly, kAllPagesInSpace };

  Address cur_addr_;  // Current iteration point.
  Address cur_end_;   // End iteration point.
  PagedSpace* space_;
  PageMode page_mode_;

  // Fast (inlined) path of Next().
  inline HeapObject* FromCurrentPage();

  // Slow path of Next(), goes into the next page. Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  // Initializes fields.
  inline void Initialize(PagedSpace* owner, Address start, Address end,
                         PageMode mode);
};
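
// Typical traversal (illustrative; {space} is an initialized PagedSpace*):
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... process obj; objects allocated after the iterator was created
//     // may or may not be visited ...
//   }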


// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.

class PageIterator BASE_EMBEDDED {
 public:
  explicit inline PageIterator(PagedSpace* space);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  // Next page that will be returned. Cached here so that we can use this
  // iterator for operations that deallocate pages.
  Page* next_page_;
};
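
// Usage sketch (illustrative): because next_page_ is cached, the page that
// was just returned may be released while the iteration is in progress.
//
//   PageIterator it(space);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... p can be processed, or even deallocated, here ...
//   }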


// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page().

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  AllocationInfo() : top_(nullptr), limit_(nullptr) {}
  AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}

  void Reset(Address top, Address limit) {
    set_top(top);
    set_limit(limit);
  }

  INLINE(void set_top(Address top)) {
    SLOW_DCHECK(top == NULL ||
                (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
    top_ = top;
  }

  INLINE(Address top()) const {
    SLOW_DCHECK(top_ == NULL ||
                (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
    return top_;
  }

  Address* top_address() { return &top_; }

  INLINE(void set_limit(Address limit)) {
    limit_ = limit;
  }

  INLINE(Address limit()) const {
    return limit_;
  }

  Address* limit_address() { return &limit_; }

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
           (top_ <= limit_);
  }
#endif

 private:
  // Current allocation top.
  Address top_;
  // Current allocation limit.
  Address limit_;
};
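
// Bump-pointer allocation against an AllocationInfo, roughly as the spaces
// below do it (simplified sketch; real callers also handle alignment and
// the slow path):
//
//   AllocationInfo info(area_start, area_end);
//   Address result = info.top();
//   if (result + size_in_bytes <= info.limit()) {
//     info.set_top(result + size_in_bytes);  // fast path: bump the top
//   } else {
//     // slow path: refill from the free list or expand the space
//   }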


// An abstraction of the accounting statistics of a page-structured space.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in conjunction
// with capacity, or else they always balance increases and decreases to the
// non-capacity stats.
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    max_capacity_ = 0;
    size_ = 0;
  }

  void ClearSize() { size_ = capacity_; }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    size_ = 0;
  }

  // Accessors for the allocation statistics.
  intptr_t Capacity() { return capacity_; }
  intptr_t MaxCapacity() { return max_capacity_; }
  intptr_t Size() {
    CHECK_GE(size_, 0);
    return size_;
  }

  // Grow the space by adding available bytes. They are initially marked as
  // being in use (part of the size), but will normally be immediately freed,
  // putting them on the free list and removing them from size_.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    size_ += size_in_bytes;
    if (capacity_ > max_capacity_) {
      max_capacity_ = capacity_;
    }
    CHECK(size_ >= 0);
  }

  // Shrink the space by removing available bytes. Since shrinking is done
  // during sweeping, bytes have been marked as being in use (part of the size)
  // and are hereby freed.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    size_ -= size_in_bytes;
    CHECK(size_ >= 0);
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(intptr_t size_in_bytes) {
    size_ += size_in_bytes;
    CHECK(size_ >= 0);
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(intptr_t size_in_bytes) {
    size_ -= size_in_bytes;
    CHECK_GE(size_, 0);
  }

  // Merge {other} into {this}.
  void Merge(const AllocationStats& other) {
    capacity_ += other.capacity_;
    size_ += other.size_;
    if (other.max_capacity_ > max_capacity_) {
      max_capacity_ = other.max_capacity_;
    }
    CHECK_GE(size_, 0);
  }

  void DecreaseCapacity(intptr_t size_in_bytes) {
    capacity_ -= size_in_bytes;
    CHECK_GE(capacity_, 0);
    CHECK_GE(capacity_, size_);
  }

  void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }

 private:
  // |capacity_|: The number of object-area bytes (i.e., not including page
  // bookkeeping structures) currently in the space.
  intptr_t capacity_;

  // |max_capacity_|: The maximum capacity ever observed.
  intptr_t max_capacity_;

  // |size_|: The number of allocated bytes.
  intptr_t size_;
};
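
// Worked example of the balancing invariant (numbers are illustrative):
// growing by a chunk first counts the whole area as allocated; sweeping then
// frees it onto the free list, and allocation moves bytes back into size_.
//
//   AllocationStats stats;
//   stats.ExpandSpace(16 * KB);      // capacity_ == 16 KB, size_ == 16 KB
//   stats.DeallocateBytes(16 * KB);  // size_ == 0, all bytes available
//   stats.AllocateBytes(64);         // size_ == 64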


// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
 public:
  explicit FreeListCategory(FreeList* owner, FreeListCategoryType type)
      : type_(type),
        top_(nullptr),
        end_(nullptr),
        available_(0),
        owner_(owner) {}

  // Concatenates {category} into {this}.
  //
  // Note: Thread-safe.
  intptr_t Concatenate(FreeListCategory* category);

  void Reset();

  void Free(FreeSpace* node, int size_in_bytes);

  // Pick a node from the list.
  FreeSpace* PickNodeFromList(int* node_size);

  // Pick a node from the list and compare it against {size_in_bytes}. If the
  // node's size is greater than or equal, return the node; otherwise return
  // null.
  FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);

  // Search for a node of size {size_in_bytes}.
  FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size);

  intptr_t EvictFreeListItemsInList(Page* p);
  bool ContainsPageFreeListItemsInList(Page* p);

  void RepairFreeList(Heap* heap);

  bool IsEmpty() { return top() == nullptr; }

  FreeList* owner() { return owner_; }
  int available() const { return available_; }

#ifdef DEBUG
  intptr_t SumFreeList();
  int FreeListLength();
  bool IsVeryLong();
#endif

 private:
  // For debug builds we accurately compute free list lengths up until
  // {kVeryLongFreeList} by manually walking the list.
  static const int kVeryLongFreeList = 500;

  FreeSpace* top() { return top_.Value(); }
  void set_top(FreeSpace* top) { top_.SetValue(top); }

  FreeSpace* end() const { return end_; }
  void set_end(FreeSpace* end) { end_ = end; }

  // |type_|: The type of this free list category.
  FreeListCategoryType type_;

  // |top_|: Points to the top FreeSpace* in the free list category.
  AtomicValue<FreeSpace*> top_;

  // |end_|: Points to the end FreeSpace* in the free list category.
  FreeSpace* end_;

  // |available_|: Total available bytes in all blocks of this free list
  // category.
  int available_;

  // |owner_|: The owning free list of this category.
  FreeList* owner_;
};

// A free list maintaining free blocks of memory. The free list is organized
// in a way to encourage objects allocated around the same time to be near
// each other. The normal way to allocate is intended to be by bumping a 'top'
// pointer until it hits a 'limit' pointer. When the limit is hit we need to
// find a new space to allocate from. This is done with the free list, which
// is divided up into rough categories to cut down on waste. Having finer
// categories would scatter allocation more.

// The free list is organized in categories as follows:
// 1-31 words (too small): Such small free areas are discarded for efficiency
//   reasons. They can be reclaimed by the compactor. However the distance
//   between top and limit may be this small.
// 32-255 words (small): Used for allocating free space between 1-31 words in
//   size.
// 256-2047 words (medium): Used for allocating free space between 32-255 words
//   in size.
// 2048-16383 words (large): Used for allocating free space between 256-2047
//   words in size.
// At least 16384 words (huge): This list is for objects of 2048 words or
//   larger. Empty pages are also added to this list.
class FreeList {
 public:
  // This method returns how much memory can be allocated after freeing
  // maximum_freed memory.
  static inline int GuaranteedAllocatable(int maximum_freed) {
    if (maximum_freed <= kSmallListMin) {
      return 0;
    } else if (maximum_freed <= kSmallListMax) {
      return kSmallAllocationMax;
    } else if (maximum_freed <= kMediumListMax) {
      return kMediumAllocationMax;
    } else if (maximum_freed <= kLargeListMax) {
      return kLargeAllocationMax;
    }
    return maximum_freed;
  }
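
  // Worked example (illustrative, for a 64-bit build where kPointerSize is
  // 8): kSmallListMin is 31 words == 248 bytes, so GuaranteedAllocatable(200)
  // returns 0, GuaranteedAllocatable(1000) returns kSmallAllocationMax
  // (248 bytes), and any value above kLargeListMax comes back unchanged.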

  explicit FreeList(PagedSpace* owner);

  // The method concatenates {other} into {this} and returns the added bytes,
  // including waste.
  //
  // Note: Thread-safe.
  intptr_t Concatenate(FreeList* other);

  // Adds a node on the free list. The block of size {size_in_bytes} starting
  // at {start} is placed on the free list. The return value is the number of
  // bytes that were not added to the free list, because the freed memory
  // block was too small. Bookkeeping information will be written to the
  // block, i.e., its contents will be destroyed. The start address should be
  // word aligned, and the size should be a non-zero multiple of the word
  // size.
  int Free(Address start, int size_in_bytes);

  // Allocate a block of size {size_in_bytes} from the free list. The block is
  // uninitialized. A failure is returned if no block is available. The size
  // should be a non-zero multiple of the word size.
  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);

  // Clear the free list.
  void Reset();

  void ResetStats() { wasted_bytes_ = 0; }

  // Return the number of bytes available on the free list.
  intptr_t Available() {
    return small_list_.available() + medium_list_.available() +
           large_list_.available() + huge_list_.available();
  }

  // The method tries to find a {FreeSpace} node of at least {size_in_bytes}
  // size in the free list category exactly matching the size. If no suitable
  // node could be found, the method falls back to retrieving a {FreeSpace}
  // from the large or huge free list category.
  //
  // Can be used concurrently.
  MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);

  bool IsEmpty() {
    return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
           large_list_.IsEmpty() && huge_list_.IsEmpty();
  }

  // Used after booting the VM.
  void RepairLists(Heap* heap);

  intptr_t EvictFreeListItems(Page* p);
  bool ContainsPageFreeListItems(Page* p);

  PagedSpace* owner() { return owner_; }
  intptr_t wasted_bytes() { return wasted_bytes_; }
  base::Mutex* mutex() { return &mutex_; }

#ifdef DEBUG
  void Zap();
  intptr_t SumFreeLists();
  bool IsVeryLong();
#endif

 private:
  // The size range of blocks, in bytes.
  static const int kMinBlockSize = 3 * kPointerSize;
  static const int kMaxBlockSize = Page::kAllocatableMemory;

  static const int kSmallListMin = 0x1f * kPointerSize;
  static const int kSmallListMax = 0xff * kPointerSize;
  static const int kMediumListMax = 0x7ff * kPointerSize;
  static const int kLargeListMax = 0x3fff * kPointerSize;
  static const int kSmallAllocationMax = kSmallListMin;
  static const int kMediumAllocationMax = kSmallListMax;
  static const int kLargeAllocationMax = kMediumListMax;

  FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
  FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);

  FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
    switch (category) {
      case kSmall:
        return &small_list_;
      case kMedium:
        return &medium_list_;
      case kLarge:
        return &large_list_;
      case kHuge:
        return &huge_list_;
      default:
        UNREACHABLE();
    }
    UNREACHABLE();
    return nullptr;
  }

  PagedSpace* owner_;
  base::Mutex mutex_;
  intptr_t wasted_bytes_;
  FreeListCategory small_list_;
  FreeListCategory medium_list_;
  FreeListCategory large_list_;
  FreeListCategory huge_list_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};


class AllocationResult {
 public:
  // Implicit constructor from Object*.
  AllocationResult(Object* object)  // NOLINT
      : object_(object) {
    // AllocationResults can't return Smis, which are used to represent
    // failure and the space to retry in.
    CHECK(!object->IsSmi());
  }

  AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}

  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
    return AllocationResult(space);
  }

  inline bool IsRetry() { return object_->IsSmi(); }

  template <typename T>
  bool To(T** obj) {
    if (IsRetry()) return false;
    *obj = T::cast(object_);
    return true;
  }

  Object* ToObjectChecked() {
    CHECK(!IsRetry());
    return object_;
  }

  inline AllocationSpace RetrySpace();

 private:
  explicit AllocationResult(AllocationSpace space)
      : object_(Smi::FromInt(static_cast<int>(space))) {}

  Object* object_;
};


STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);

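
// Typical consumption of an AllocationResult (illustrative sketch):
//
//   AllocationResult result = ...;  // e.g. from one of the spaces below
//   HeapObject* object = NULL;
//   if (result.To(&object)) {
//     // success: use object
//   } else {
//     // failure: result.RetrySpace() names the space to retry in
//   }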

// LocalAllocationBuffer represents a linear allocation area that is created
// from a given {AllocationResult} and can be used to allocate memory without
// synchronization.
//
// The buffer is properly closed upon destruction and reassignment.
// Example:
// {
//   AllocationResult result = ...;
//   LocalAllocationBuffer a(heap, result, size);
//   LocalAllocationBuffer b = a;
//   CHECK(!a.IsValid());
//   CHECK(b.IsValid());
//   // {a} is invalid now and cannot be used for further allocations.
// }
// // Since {b} went out of scope, the LAB is closed, resulting in creating a
// // filler object for the remaining area.
class LocalAllocationBuffer {
 public:
  // Indicates that a buffer cannot be used for allocations anymore. Can
  // result from either reassigning a buffer, or trying to construct it from
  // an invalid {AllocationResult}.
  static inline LocalAllocationBuffer InvalidBuffer();

  // Creates a new LAB from a given {AllocationResult}. Results in
  // InvalidBuffer if the result indicates a retry.
  static inline LocalAllocationBuffer FromResult(Heap* heap,
                                                 AllocationResult result,
                                                 intptr_t size);

  ~LocalAllocationBuffer() { Close(); }

  // Convert to C++11 move-semantics once allowed by the style guide.
  LocalAllocationBuffer(const LocalAllocationBuffer& other);
  LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);

  MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment);

  inline bool IsValid() { return allocation_info_.top() != nullptr; }

  // Try to merge LABs, which is only possible when they are adjacent in
  // memory. Returns true if the merge was successful, false otherwise.
  inline bool TryMerge(LocalAllocationBuffer* other);

 private:
  LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);

  void Close();

  Heap* heap_;
  AllocationInfo allocation_info_;
};
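
// Allocation through a LAB, following the example above (illustrative;
// {heap}, {result}, and the sizes are assumed to be in scope):
//
//   LocalAllocationBuffer lab =
//       LocalAllocationBuffer::FromResult(heap, result, lab_size);
//   if (lab.IsValid()) {
//     AllocationResult a = lab.AllocateRawAligned(object_size, kWordAligned);
//     // no synchronization needed: the LAB is owned by this thread
//   }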


class PagedSpace : public Space {
 public:
  static const intptr_t kCompactionMemoryWanted = 500 * KB;

  // Creates a space with an id.
  PagedSpace(Heap* heap, AllocationSpace id, Executability executable);

  ~PagedSpace() override { TearDown(); }

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool SetUp();

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetUp();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  inline bool Contains(HeapObject* o);
  // Unlike the Contains() methods, it is safe to call this one even for
  // addresses of unmapped memory.
  bool ContainsSafe(Address addr);

  // Given an address occupied by a live object, return that object if it is
  // in this space, or a Smi if it is not. The implementation iterates over
  // objects in the page containing the address; the cost is linear in the
  // number of objects in the page. It may be slow.
  Object* FindObject(Address addr);

  // During boot the free_space_map is created, and afterwards we may need
  // to write it into the free list nodes that were already created.
  void RepairFreeListsAfterDeserialization();

  // Prepares for a mark-compact GC.
  void PrepareForMarkCompact();

  // Current capacity without growing (Size() + Available()).
  intptr_t Capacity() { return accounting_stats_.Capacity(); }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  void ResetFreeListStatistics();

  // Sets the capacity, the available space and the wasted space to zero.
  // The stats are rebuilt during sweeping by adding each page to the
  // capacity and the size when it is encountered. As free spaces are
  // discovered during the sweeping they are subtracted from the size and
  // added to the available and wasted totals.
  void ClearStats() {
    accounting_stats_.ClearSize();
    free_list_.ResetStats();
    ResetFreeListStatistics();
  }

  // Increases the number of available bytes of that space.
  void AddToAccountingStats(intptr_t bytes) {
    accounting_stats_.DeallocateBytes(bytes);
  }

  // Available bytes without growing. These are the bytes on the free list.
  // The bytes in the linear allocation area are not included in this total
  // because updating the stats would slow down allocation. New pages are
  // immediately added to the free list so they show up here.
  intptr_t Available() override { return free_list_.Available(); }

  // Allocated bytes in this space. Garbage bytes that were not found due to
  // concurrent sweeping are counted as being allocated! The bytes in the
  // current linear allocation area (between top and limit) are also counted
  // here.
  intptr_t Size() override { return accounting_stats_.Size(); }

  // As Size(), but the bytes in lazily swept pages are estimated and the
  // bytes in the current linear allocation area are not included.
  intptr_t SizeOfObjects() override;

  // Wasted bytes in this space. These are just the bytes that were thrown
  // away due to being too small to use for allocation.
  virtual intptr_t Waste() { return free_list_.wasted_bytes(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top(); }
  Address limit() { return allocation_info_.limit(); }

  // The allocation top address.
  Address* allocation_top_address() { return allocation_info_.top_address(); }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
      int size_in_bytes);

  MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
      int size_in_bytes);

  // Allocate the requested number of bytes in the space double aligned if
  // possible, return a failure object if not.
  MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment);

  // Allocate the requested number of bytes in the space and consider
  // allocation alignment if needed.
  MUST_USE_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationAlignment alignment);

  // Give a block of memory to the space's free list. It might be added to
  // the free list or accounted as waste. Returns the number of bytes that
  // were actually added to the free list.
  int Free(Address start, int size_in_bytes) {
    int wasted = free_list_.Free(start, size_in_bytes);
    accounting_stats_.DeallocateBytes(size_in_bytes);
    return size_in_bytes - wasted;
  }

  void ResetFreeList() { free_list_.Reset(); }

  // Set space allocation info.
  void SetTopAndLimit(Address top, Address limit) {
    DCHECK(top == limit ||
           Page::FromAddress(top) == Page::FromAddress(limit - 1));
    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    allocation_info_.Reset(top, limit);
  }

  // Empty space allocation info, returning unused area to free list.
  void EmptyAllocationInfo() {
    // Mark the old linear allocation area with a free space map so it can be
    // skipped when scanning the heap.
    int old_linear_size = static_cast<int>(limit() - top());
    Free(top(), old_linear_size);
    SetTopAndLimit(NULL, NULL);
  }

  void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }

  void IncreaseCapacity(int size);

  // Releases an unused page and shrinks the space.
  void ReleasePage(Page* page);

  // The dummy page that anchors the linked list of pages.
  Page* anchor() { return &anchor_; }

#ifdef VERIFY_HEAP
  // Verify integrity of this space.
  virtual void Verify(ObjectVisitor* visitor);

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject* obj) {}
#endif

#ifdef DEBUG
  // Print meta info and objects in this space.
  void Print() override;

  // Reports statistics for the space.
  void ReportStatistics();

  // Report code object related statistics.
  void CollectCodeStatistics();
  static void ReportCodeStatistics(Isolate* isolate);
  static void ResetCodeStatistics(Isolate* isolate);
#endif

  // Evacuation candidates are swept by the evacuator. Needs to return a
  // valid result before _and_ after evacuation has finished.
  static bool ShouldBeSweptBySweeperThreads(Page* p) {
    return !p->IsEvacuationCandidate() &&
           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
  }

  // This function tries to steal size_in_bytes memory from the sweeper
  // threads' free-lists. If it does not succeed in stealing enough memory,
  // it will wait for the sweeper threads to finish sweeping.
  // It returns true when sweeping is completed and false otherwise.
  bool EnsureSweeperProgress(intptr_t size_in_bytes);

  void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; }

  Page* end_of_unswept_pages() { return end_of_unswept_pages_; }

  Page* FirstPage() { return anchor_.next_page(); }
  Page* LastPage() { return anchor_.prev_page(); }

  void EvictEvacuationCandidatesFromLinearAllocationArea();

  bool CanExpand(size_t size);

  // Returns the number of total pages in this space.
  int CountTotalPages();

  // Return size of allocatable area on a page in this space.
  inline int AreaSize() { return area_size_; }

  virtual bool is_local() { return false; }

  // Merges {other} into the current space. Note that this modifies {other},
  // e.g., removes its bump pointer area and resets statistics.
  void MergeCompactionSpace(CompactionSpace* other);

  void DivideUponCompactionSpaces(CompactionSpaceCollection** other, int num,
                                  intptr_t limit = kCompactionMemoryWanted);

  // Refills the free list from the corresponding free list filled by the
  // sweeper.
  virtual void RefillFreeList();

 protected:
  void AddMemory(Address start, intptr_t size);

  FreeSpace* TryRemoveMemory(intptr_t size_in_bytes);

  void MoveOverFreeMemory(PagedSpace* other);

  // PagedSpaces that should be included in snapshots have different, i.e.,
  // smaller, initial pages.
  virtual bool snapshotable() { return true; }

  FreeList* free_list() { return &free_list_; }

  bool HasPages() { return anchor_.next_page() != &anchor_; }

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Expands the space by allocating a fixed number of pages. Returns false if
  // it cannot allocate the requested number of pages from the OS, or if the
  // hard heap size limit has been hit.
  bool Expand();

  // Generic fast case allocation function that tries linear allocation at the
  // address denoted by top in allocation_info_.
  inline HeapObject* AllocateLinearly(int size_in_bytes);

  // Generic fast case allocation function that tries aligned linear
  // allocation at the address denoted by top in allocation_info_. Writes the
  // aligned allocation size, which includes the filler size, to
  // size_in_bytes.
  inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
                                             AllocationAlignment alignment);

  // If sweeping is still in progress try to sweep unswept pages. If that is
  // not successful, wait for the sweeper threads and re-try free-list
  // allocation.
  MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
      int size_in_bytes);

  // Slow path of AllocateRaw. This function is space-dependent.
  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);

  int area_size_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // The dummy page that anchors the double linked list of pages.
  Page anchor_;

  // The space's free list.
  FreeList free_list_;

  // Normal allocation information.
  AllocationInfo allocation_info_;

  // The sweeper threads iterate over the list of pointer and data space
  // pages and sweep these pages concurrently. They will stop sweeping after
  // the end_of_unswept_pages_ page.
  Page* end_of_unswept_pages_;

  // Mutex guarding any concurrent access to the space.
  base::Mutex space_mutex_;

  friend class MarkCompactCollector;
  friend class PageIterator;

  // Used in cctest.
  friend class HeapTester;
};
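
// End-to-end allocation against a PagedSpace (illustrative sketch; real
// callers normally go through Heap::AllocateRaw, which also drives GC and
// retries):
//
//   AllocationResult result = space->AllocateRawUnaligned(size_in_bytes);
//   HeapObject* object = NULL;
//   if (!result.To(&object)) {
//     // linear area and free list are exhausted; sweeping progress or
//     // expansion is needed before retrying
//   }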


class NumberAndSizeInfo BASE_EMBEDDED {
 public:
  NumberAndSizeInfo() : number_(0), bytes_(0) {}

  int number() const { return number_; }
  void increment_number(int num) { number_ += num; }

  int bytes() const { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }

  void clear() {
    number_ = 0;
    bytes_ = 0;
  }

 private:
  int number_;
  int bytes_;
};


// HistogramInfo class for recording a single "bar" of a histogram. This
// class is used for collecting statistics to print to the log file.
class HistogramInfo : public NumberAndSizeInfo {
 public:
  HistogramInfo() : NumberAndSizeInfo() {}

  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

 private:
  const char* name_;
};


enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };


class SemiSpace;


class NewSpacePage : public MemoryChunk {
 public:
  // GC related flags copied from from-space to to-space when
  // flipping semispaces.
  static const intptr_t kCopyOnFlipFlagsMask =
      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
      (1 << MemoryChunk::SCAN_ON_SCAVENGE);

  static const int kAreaSize = Page::kAllocatableMemory;

  inline NewSpacePage* next_page() {
    return static_cast<NewSpacePage*>(next_chunk());
  }

  inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }

  inline NewSpacePage* prev_page() {
    return static_cast<NewSpacePage*>(prev_chunk());
  }

  inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }

  SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }

  bool is_anchor() { return !this->InNewSpace(); }

  static bool IsAtStart(Address addr) {
    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
           kObjectStartOffset;
  }

  static bool IsAtEnd(Address addr) {
    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
  }

  Address address() { return reinterpret_cast<Address>(this); }

  // Finds the NewSpacePage containing the given address.
  static inline NewSpacePage* FromAddress(Address address_in_page) {
    Address page_start =
        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
                                  ~Page::kPageAlignmentMask);
    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
    return page;
  }

  // Find the page for a limit address. A limit address is either an address
  // inside a page, or the address right after the last byte of a page.
  static inline NewSpacePage* FromLimit(Address address_limit) {
    return NewSpacePage::FromAddress(address_limit - 1);
  }

  // Checks if address1 and address2 are on the same new space page.
  static inline bool OnSamePage(Address address1, Address address2) {
    return NewSpacePage::FromAddress(address1) ==
           NewSpacePage::FromAddress(address2);
  }

 private:
  // Create a NewSpacePage object that is only used as anchor
  // for the doubly-linked list of real pages.
  explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }

  static NewSpacePage* Initialize(Heap* heap, Address start,
                                  SemiSpace* semi_space);

  // Initialize a fake NewSpacePage used as sentinel at the ends
  // of a doubly-linked list of real NewSpacePages.
  // Only uses the prev/next links, and sets flags to not be in new-space.
  void InitializeAsAnchor(SemiSpace* owner);

  friend class SemiSpace;
  friend class SemiSpaceIterator;
};
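
// Address arithmetic sketch (illustrative; assumes for concreteness a 1 MB
// page, i.e. Page::kPageAlignmentMask == 0xfffff): an object at 0x40321000
// lives on the page starting at 0x40300000, so:
//
//   NewSpacePage* p = NewSpacePage::FromAddress(addr);         // masks low bits
//   NewSpacePage* q = NewSpacePage::FromLimit(p->area_end());  // same page p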


// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
// A semispace is a contiguous chunk of memory holding page-like memory
// chunks. The mark-compact collector uses the memory of the first page in
// the from space as a marking stack when tracing live objects.

class SemiSpace : public Space {
 public:
  // Constructor.
  SemiSpace(Heap* heap, SemiSpaceId semispace)
      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
        start_(NULL),
        age_mark_(NULL),
        id_(semispace),
        anchor_(this),
        current_page_(NULL) {}

  // Sets up the semispace using the given chunk.
  void SetUp(Address start, int initial_capacity, int target_capacity,
             int maximum_capacity);

  // Tear down the space. Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetUp() { return start_ != NULL; }

  // Grow the semispace to the new capacity. The new capacity
  // requested must be larger than the current capacity and less than
  // the maximum capacity.
  bool GrowTo(int new_capacity);

  // Shrinks the semispace to the new capacity. The new capacity
  // requested must be more than the amount of used memory in the
  // semispace and less than the current capacity.
  bool ShrinkTo(int new_capacity);

  // Sets the total capacity. Only possible when the space is not committed.
  bool SetTotalCapacity(int new_capacity);

  // Returns the start address of the first page of the space.
  Address space_start() {
    DCHECK(anchor_.next_page() != &anchor_);
    return anchor_.next_page()->area_start();
  }

  // Returns the start address of the current page of the space.
  Address page_low() { return current_page_->area_start(); }

  // Returns one past the end address of the space.
  Address space_end() { return anchor_.prev_page()->area_end(); }

  // Returns one past the end address of the current page of the space.
  Address page_high() { return current_page_->area_end(); }

  bool AdvancePage() {
    NewSpacePage* next_page = current_page_->next_page();
    if (next_page == anchor()) return false;
    current_page_ = next_page;
    return true;
  }

  // Resets the space to using the first page.
  void Reset();

  // Age mark accessors.
  Address age_mark() { return age_mark_; }
  void set_age_mark(Address mark);

  // True if the address is in the address range of this semispace (not
  // necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
2424 reinterpret_cast<uintptr_t>(start_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002425 }
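
  // A sketch of the mask arithmetic above (illustrative numbers only,
  // assuming the reservation is power-of-two sized and aligned): with an
  // 8 MB semispace, address_mask_ would be ~(8 MB - 1), so the AND clears
  // the offset bits of |a| and a single comparison against start_ decides
  // containment without walking any page list.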

  // True if the object is a heap object in the address range of this
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Object* o) {
    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
  }

  // If we don't have these here then SemiSpace will be abstract. However,
  // they should never be called:

  intptr_t Size() override {
    UNREACHABLE();
    return 0;
  }

  intptr_t SizeOfObjects() override { return Size(); }

  intptr_t Available() override {
    UNREACHABLE();
    return 0;
  }

  bool is_committed() { return committed_; }
  bool Commit();
  bool Uncommit();

  NewSpacePage* first_page() { return anchor_.next_page(); }
  NewSpacePage* current_page() { return current_page_; }

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

#ifdef DEBUG
  void Print() override;
  // Validate a range of addresses in a SemiSpace.
  // The "from" address must be on a page prior to the "to" address,
  // in the linked page order, or it must be earlier on the same page.
  static void AssertValidRange(Address from, Address to);
#else
  // Do nothing.
  inline static void AssertValidRange(Address from, Address to) {}
#endif

  // Returns the current total capacity of the semispace.
  int TotalCapacity() { return total_capacity_; }

  // Returns the target for total capacity of the semispace.
  int TargetCapacity() { return target_capacity_; }

  // Returns the maximum total capacity of the semispace.
  int MaximumTotalCapacity() { return maximum_total_capacity_; }

  // Returns the initial capacity of the semispace.
  int InitialTotalCapacity() { return initial_total_capacity_; }

  SemiSpaceId id() { return id_; }

  static void Swap(SemiSpace* from, SemiSpace* to);

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

 private:
  // Flips the semispace between being from-space and to-space.
  // Copies the flags into the masked positions on all pages in the space.
  void FlipPages(intptr_t flags, intptr_t flag_mask);

  // Updates Capacity and MaximumCommitted based on new capacity.
  void SetCapacity(int new_capacity);

  NewSpacePage* anchor() { return &anchor_; }

  // The current, target, maximum, and initial total capacity of the space.
  int total_capacity_;
  int target_capacity_;
  int maximum_total_capacity_;
  int initial_total_capacity_;

  // The start address of the space.
  Address start_;
  // Used to govern object promotion during mark-compact collection.
  Address age_mark_;

  // Masks and comparison values to test for containment in this semispace.
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  bool committed_;
  SemiSpaceId id_;

  NewSpacePage anchor_;
  NewSpacePage* current_page_;

  friend class SemiSpaceIterator;
  friend class NewSpacePageIterator;
};


// A SemiSpaceIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space. It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace. New objects allocated after the
// iterator is created are not iterated.
class SemiSpaceIterator : public ObjectIterator {
 public:
  // Create an iterator over the allocated objects in the given to-space.
  explicit SemiSpaceIterator(NewSpace* space);

  inline HeapObject* Next();

  // Implementation of the ObjectIterator functions.
  inline HeapObject* next_object() override;

 private:
  void Initialize(Address start, Address end);

  // The current iteration point.
  Address current_;
  // The end of iteration.
  Address limit_;
};
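
// A sketch of the usual iteration pattern (assuming a |heap| pointer is in
// scope; Next() returns NULL once the walk reaches the allocation top):
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... inspect |obj|; avoid allocating while iterating ...
//   }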


// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a semi-space.
class NewSpacePageIterator BASE_EMBEDDED {
 public:
  // Make an iterator that runs over all pages in to-space.
  explicit inline NewSpacePageIterator(NewSpace* space);

  // Make an iterator that runs over all pages in the given semispace,
  // even those not used in allocation.
  explicit inline NewSpacePageIterator(SemiSpace* space);

  // Make an iterator that iterates from the page containing start
  // to the page that contains limit in the same semispace.
  inline NewSpacePageIterator(Address start, Address limit);

  inline bool has_next();
  inline NewSpacePage* next();

 private:
  NewSpacePage* prev_page_;  // Previous page returned.
  // Next page that will be returned. Cached here so that we can use this
  // iterator for operations that deallocate pages.
  NewSpacePage* next_page_;
  // Last page returned.
  NewSpacePage* last_page_;
};
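
// A sketch of driving the page iterator (hedged; |heap| is assumed to be in
// scope, and the has_next()/next() pair mirrors the other page iterators in
// this file):
//
//   NewSpacePageIterator it(heap->new_space());
//   while (it.has_next()) {
//     NewSpacePage* page = it.next();
//     // ... process |page| ...
//   }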

// -----------------------------------------------------------------------------
// Allows observation of inline allocation in the new space.
class InlineAllocationObserver {
 public:
  explicit InlineAllocationObserver(intptr_t step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {
    DCHECK(step_size >= kPointerSize);
  }
  virtual ~InlineAllocationObserver() {}

 private:
  intptr_t step_size() const { return step_size_; }
  intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }

  // Pure virtual method provided by the subclasses that gets called when at
  // least step_size bytes have been allocated. soon_object is the address just
  // allocated (but not yet initialized.) size is the size of the object as
  // requested (i.e. w/o the alignment fillers). Some complexities to be aware
  // of:
  // 1) soon_object will be nullptr in cases where we end up observing an
  //    allocation that happens to be a filler space (e.g. page boundaries.)
  // 2) size is the requested size at the time of allocation. Right-trimming
  //    may change the object size dynamically.
  // 3) soon_object may actually be the first object in an allocation-folding
  //    group. In such a case size is the size of the group rather than the
  //    first object.
  virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;

  // Called each time the new space does an inline allocation step. This may
  // happen more often than once per step_size bytes (e.g. when there are
  // multiple observers, or when a page or space boundary is encountered.)
  void InlineAllocationStep(int bytes_allocated, Address soon_object,
                            size_t size) {
    bytes_to_next_step_ -= bytes_allocated;
    if (bytes_to_next_step_ <= 0) {
      Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
           size);
      bytes_to_next_step_ = step_size_;
    }
  }

  intptr_t step_size_;
  intptr_t bytes_to_next_step_;

  friend class NewSpace;

  DISALLOW_COPY_AND_ASSIGN(InlineAllocationObserver);
};
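
// A minimal sketch of a concrete observer (a hypothetical class, not part of
// this header): it counts how often roughly 64 KB of new space fills up.
// Note that Step() is a private virtual, which derived classes may still
// override.
//
//   class AllocationCounter : public InlineAllocationObserver {
//    public:
//     AllocationCounter() : InlineAllocationObserver(64 * KB), count_(0) {}
//
//    private:
//     void Step(int bytes_allocated, Address soon_object,
//               size_t size) override {
//       count_++;  // fires after roughly every 64 KB of allocation
//     }
//     int count_;
//   };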

// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.

class NewSpace : public Space {
 public:
  // Constructor.
  explicit NewSpace(Heap* heap)
      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
        to_space_(heap, kToSpace),
        from_space_(heap, kFromSpace),
        reservation_(),
        top_on_previous_step_(0),
        inline_allocation_observers_paused_(false) {}

  // Sets up the new space using the given chunk.
  bool SetUp(int reserved_semispace_size, int max_semi_space_size);

  // Tears down the space. Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetUp() {
    return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
  }

  // Flip the pair of spaces.
  void Flip();

  // Grow the capacity of the semispaces. Assumes that they are not at
  // their maximum capacity.
  void Grow();

  // Grow the capacity of the semispaces by one page.
  bool GrowOnePage();

  // Shrink the capacity of the semispaces.
  void Shrink();

  // True if the address or object lies in the address range of either
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
           reinterpret_cast<uintptr_t>(start_);
  }

  bool Contains(Object* o) {
    Address a = reinterpret_cast<Address>(o);
    return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
  }

  // Return the allocated bytes in the active semispace.
  intptr_t Size() override {
    return pages_used_ * NewSpacePage::kAreaSize +
           static_cast<int>(top() - to_space_.page_low());
  }

  // The same, but returning an int. We have to have the version that returns
  // intptr_t because it is inherited, but when we know we are dealing with
  // the new space, which cannot grow as large as the other spaces, this one
  // is useful:
  int SizeAsInt() { return static_cast<int>(Size()); }

  // Return the allocatable capacity of a semispace.
  intptr_t Capacity() {
    SLOW_DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
    return (to_space_.TotalCapacity() / Page::kPageSize) *
           NewSpacePage::kAreaSize;
  }

  // Return the current size of a semispace, allocatable and non-allocatable
  // memory.
  intptr_t TotalCapacity() {
    DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
    return to_space_.TotalCapacity();
  }

  // Committed memory for NewSpace is the committed memory of both semi-spaces
  // combined.
  intptr_t CommittedMemory() override {
    return from_space_.CommittedMemory() + to_space_.CommittedMemory();
  }

  intptr_t MaximumCommittedMemory() override {
    return from_space_.MaximumCommittedMemory() +
           to_space_.MaximumCommittedMemory();
  }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  // Return the available bytes without growing.
  intptr_t Available() override { return Capacity() - Size(); }

  intptr_t PagesFromStart(Address addr) {
    return static_cast<intptr_t>(addr - bottom()) / Page::kPageSize;
  }

  size_t AllocatedSinceLastGC() {
    intptr_t allocated = top() - to_space_.age_mark();
    if (allocated < 0) {
      // Runtime has lowered the top below the age mark.
      return 0;
    }
    // Correctly account for non-allocatable regions at the beginning of
    // each page from the age_mark() to the top().
    intptr_t pages =
        PagesFromStart(top()) - PagesFromStart(to_space_.age_mark());
    allocated -= pages * (NewSpacePage::kObjectStartOffset);
    DCHECK(0 <= allocated && allocated <= Size());
    return static_cast<size_t>(allocated);
  }
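
  // A worked example of the correction above (illustrative only): if the age
  // mark sits on page 0 and top() is on page 2, the raw pointer difference
  // includes two page headers that can never hold objects, so
  // 2 * NewSpacePage::kObjectStartOffset bytes are subtracted.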

  // Return the maximum capacity of a semispace.
  int MaximumCapacity() {
    DCHECK(to_space_.MaximumTotalCapacity() ==
           from_space_.MaximumTotalCapacity());
    return to_space_.MaximumTotalCapacity();
  }

  bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }

  // Returns the initial capacity of a semispace.
  int InitialTotalCapacity() {
    DCHECK(to_space_.InitialTotalCapacity() ==
           from_space_.InitialTotalCapacity());
    return to_space_.InitialTotalCapacity();
  }

  // Return the address of the allocation pointer in the active semispace.
  Address top() {
    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
    return allocation_info_.top();
  }

  // Return the address of the allocation pointer limit in the active
  // semispace.
  Address limit() {
    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
    return allocation_info_.limit();
  }

  // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.space_start(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The start address of the space and a bit mask. ANDing an address in the
  // new space with the mask will result in the start address.
  Address start() { return start_; }
  uintptr_t mask() { return address_mask_; }

  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
    DCHECK(Contains(addr));
    DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) ||
           IsAligned(OffsetFrom(addr) - 1, kPointerSize));
    return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
  }

  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
    return reinterpret_cast<Address>(index << kPointerSizeLog2);
  }

  // The allocation top address.
  Address* allocation_top_address() { return allocation_info_.top_address(); }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment));

  MUST_USE_RESULT INLINE(
      AllocationResult AllocateRawUnaligned(int size_in_bytes));

  MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
      int size_in_bytes, AllocationAlignment alignment));

  MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
      int size_in_bytes, AllocationAlignment alignment);
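
  // A sketch of the calling convention these share (hedged; it follows the
  // AllocationResult pattern used by the other spaces, and |new_space| is
  // assumed to be in scope):
  //
  //   AllocationResult result = new_space->AllocateRaw(size, kWordAligned);
  //   HeapObject* object = NULL;
  //   if (!result.To(&object)) {
  //     // Allocation failed; retry after a garbage collection.
  //   }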

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();

  void UpdateInlineAllocationLimit(int size_in_bytes);

  // Allows observation of inline allocation. The observer->Step() method gets
  // called after every step_size bytes have been allocated (approximately).
  // This works by setting the allocation limit to a lower value than the real
  // one and raising it again after each step.
  void AddInlineAllocationObserver(InlineAllocationObserver* observer);

  // Removes a previously installed observer.
  void RemoveInlineAllocationObserver(InlineAllocationObserver* observer);
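
  // A sketch of the intended pairing (AllocationCounter is the hypothetical
  // observer sketched earlier; its lifetime is managed by the caller):
  //
  //   AllocationCounter counter;
  //   new_space->AddInlineAllocationObserver(&counter);
  //   // ... allocations now trigger counter.Step(...) periodically ...
  //   new_space->RemoveInlineAllocationObserver(&counter);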

  void DisableInlineAllocationSteps() {
    top_on_previous_step_ = 0;
    UpdateInlineAllocationLimit(0);
  }

  // Get the extent of the inactive semispace (for use as a marking stack,
  // or to zap it). Notice: space-addresses are not necessarily on the
  // same page, so FromSpaceStart() might be above FromSpaceEnd().
  Address FromSpacePageLow() { return from_space_.page_low(); }
  Address FromSpacePageHigh() { return from_space_.page_high(); }
  Address FromSpaceStart() { return from_space_.space_start(); }
  Address FromSpaceEnd() { return from_space_.space_end(); }

  // Get the extent of the active semispace's pages' memory.
  Address ToSpaceStart() { return to_space_.space_start(); }
  Address ToSpaceEnd() { return to_space_.space_end(); }

  inline bool ToSpaceContains(Address address) {
    return to_space_.Contains(address);
  }
  inline bool FromSpaceContains(Address address) {
    return from_space_.Contains(address);
  }

  // True if the object is a heap object in the address range of the
  // respective semispace (not necessarily below the allocation pointer of the
  // semispace).
  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }

  // Try to switch the active semispace to a new, empty page.
  // Returns false if this isn't possible or reasonable (i.e., there
  // are no pages, or the current page is already empty), or true
  // if successful.
  bool AddFreshPage();
  bool AddFreshPageSynchronized();

#ifdef VERIFY_HEAP
  // Verify the active semispace.
  virtual void Verify();
#endif

#ifdef DEBUG
  // Print the active semispace.
  void Print() override { to_space_.Print(); }
#endif

  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object. Note that we don't
  // record every single allocation, but only those that happen in the
  // to space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

  bool IsFromSpaceCommitted() { return from_space_.is_committed(); }

  SemiSpace* active_space() { return &to_space_; }

 private:
  // Update allocation info to match the current to-space page.
  void UpdateAllocationInfo();

  base::Mutex mutex_;

  Address chunk_base_;
  uintptr_t chunk_size_;

  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;
  base::VirtualMemory reservation_;
  int pages_used_;

  // Start address and bit mask for containment testing.
  Address start_;
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;

  // When inline allocation stepping is active, either because of incremental
  // marking or because of idle scavenge, we 'interrupt' inline allocation
  // every once in a while. This is done by setting allocation_info_.limit to
  // be lower than the actual limit and increasing it in steps to guarantee
  // that the observers are notified periodically.
  List<InlineAllocationObserver*> inline_allocation_observers_;
  Address top_on_previous_step_;
  bool inline_allocation_observers_paused_;

  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;

  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);

  // If we are doing inline allocation in steps, this method performs the
  // 'step' operation. top is the memory address of the bump pointer at the
  // last inline allocation (i.e. it determines the number of bytes actually
  // allocated since the last step.) new_top is the address of the bump
  // pointer where the next byte is going to be allocated from. top and
  // new_top may be different when we cross a page boundary or reset the
  // space.
  void InlineAllocationStep(Address top, Address new_top, Address soon_object,
                            size_t size);
  intptr_t GetNextInlineAllocationStepSize();
  void StartNextInlineAllocationStep();
  void PauseInlineAllocationObservers();
  void ResumeInlineAllocationObservers();

  friend class PauseInlineAllocationObserversScope;
  friend class SemiSpaceIterator;
};

class PauseInlineAllocationObserversScope {
 public:
  explicit PauseInlineAllocationObserversScope(NewSpace* new_space)
      : new_space_(new_space) {
    new_space_->PauseInlineAllocationObservers();
  }
  ~PauseInlineAllocationObserversScope() {
    new_space_->ResumeInlineAllocationObservers();
  }

 private:
  NewSpace* new_space_;
  DISALLOW_COPY_AND_ASSIGN(PauseInlineAllocationObserversScope);
};
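
// A sketch of the intended RAII use: observers are muted for exactly the
// lexical extent of the scope object.
//
//   {
//     PauseInlineAllocationObserversScope pause(heap->new_space());
//     // ... allocations here do not notify observers ...
//   }  // observers resume automatically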

// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.

class CompactionSpace : public PagedSpace {
 public:
  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
      : PagedSpace(heap, id, executable) {}

  // Adds external memory starting at {start} of {size_in_bytes} to the space.
  void AddExternalMemory(Address start, int size_in_bytes) {
    IncreaseCapacity(size_in_bytes);
    Free(start, size_in_bytes);
  }

  bool is_local() override { return true; }

  void RefillFreeList() override;

 protected:
  // The space is temporary and not included in any snapshots.
  bool snapshotable() override { return false; }

  MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
      int size_in_bytes) override;
};


// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
 public:
  explicit CompactionSpaceCollection(Heap* heap)
      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE),
        duration_(0.0),
        bytes_compacted_(0) {}

  CompactionSpace* Get(AllocationSpace space) {
    switch (space) {
      case OLD_SPACE:
        return &old_space_;
      case CODE_SPACE:
        return &code_space_;
      default:
        UNREACHABLE();
    }
    UNREACHABLE();
    return nullptr;
  }

  void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
    duration_ += duration;
    bytes_compacted_ += bytes_compacted;
  }

  double duration() const { return duration_; }
  intptr_t bytes_compacted() const { return bytes_compacted_; }

 private:
  CompactionSpace old_space_;
  CompactionSpace code_space_;

  // Bookkeeping.
  double duration_;
  intptr_t bytes_compacted_;
};
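
// A sketch of per-task use during parallel compaction (hedged; the real call
// sites live in the mark-compact collector, and |duration_ms| / |live_bytes|
// are hypothetical names):
//
//   CompactionSpaceCollection* spaces = new CompactionSpaceCollection(heap);
//   CompactionSpace* old_space = spaces->Get(OLD_SPACE);
//   // ... evacuate live objects into |old_space|, timing the work ...
//   spaces->ReportCompactionProgress(duration_ms, live_bytes);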


// -----------------------------------------------------------------------------
// Old object space (includes the old space of objects and code space)

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object. The constructor does not allocate pages
  // from the OS.
  OldSpace(Heap* heap, AllocationSpace id, Executability executable)
      : PagedSpace(heap, id, executable) {}
};


// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_DCHECK((space).page_low() <= (info).top() &&   \
              (info).top() <= (space).page_high() &&  \
              (info).limit() <= (space).page_high())
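
// For example (a sketch of how the macro reads at a call site, verifying the
// new-space allocation info against its active semispace):
//
//   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);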


// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public PagedSpace {
 public:
  // Creates a map space object.
  MapSpace(Heap* heap, AllocationSpace id)
      : PagedSpace(heap, id, NOT_EXECUTABLE),
        max_map_space_pages_(kMaxMapPageIndex - 1) {}

  // Given an index, returns the page address.
  // TODO(1600): this limit is artificial just to keep the code compilable.
  static const int kMaxMapPageIndex = 1 << 16;

  int RoundSizeDownToObjectAlignment(int size) override {
    if (base::bits::IsPowerOfTwo32(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }
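
  // A worked example with a hypothetical Map::kSize of 88 (not a power of
  // two): RoundSizeDownToObjectAlignment(200) == (200 / 88) * 88 == 176.
  // When Map::kSize is a power of two, the cheaper RoundDown path applies.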

#ifdef VERIFY_HEAP
  void VerifyObject(HeapObject* obj) override;
#endif

 private:
  static const int kMapsPerPage = Page::kAllocatableMemory / Map::kSize;

  // Do map space compaction if there is a page gap.
  int CompactionThreshold() {
    return kMapsPerPage * (max_map_space_pages_ - 1);
  }

  const int max_map_space_pages_;
};


// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxRegularHeapObjectSize ) are allocated and
// managed by the large object space. A large object is allocated from the
// OS heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at offset Page::kObjectStartOffset within a
// page. Large objects do not move during garbage collections.
3106
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003107class LargeObjectSpace : public Space {
Steve Blocka7e24c12009-10-30 11:49:00 +00003108 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003109 LargeObjectSpace(Heap* heap, AllocationSpace id);
3110 virtual ~LargeObjectSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00003111
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003112 // Initializes internal data structures.
3113 bool SetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00003114
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003115 // Releases internal resources, frees objects in this space.
3116 void TearDown();
Steve Blocka7e24c12009-10-30 11:49:00 +00003117
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003118 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
3119 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
3120 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
3121 }
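
  // For example: ObjectSizeFor(3 * Page::kPageSize) yields
  // 2 * Page::kPageSize - Page::kObjectStartOffset, i.e. one full page of
  // the chunk plus the object start offset is given up as padding.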

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  MUST_USE_RESULT AllocationResult
      AllocateRaw(int object_size, Executability executable);

  // Available bytes for objects in this space.
  inline intptr_t Available() override;

  intptr_t Size() override { return size_; }

  intptr_t SizeOfObjects() override { return objects_size_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  int PageCount() { return page_count_; }

  // Finds an object for a given address; returns a Smi if it is not found.
  // The function iterates through all objects in this space and may be slow.
  Object* FindObject(Address a);

  // Finds a large object page containing the given address, returns NULL
  // if such a page doesn't exist.
  LargePage* FindPage(Address a);

  // Clears the marking state of live objects.
  void ClearMarkingStateOfLiveObjects();

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);
  bool Contains(Address address);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page_ == NULL; }

  LargePage* first_page() { return first_page_; }

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

#ifdef DEBUG
  void Print() override;
  void ReportStatistics();
  void CollectCodeStatistics();
#endif
  // Checks whether an address is in the object area in this space. It
  // iterates all objects in the space. May be slow.
  bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }

 private:
  // The head of the linked list of large object chunks.
  LargePage* first_page_;
  intptr_t size_;          // allocated bytes
  int page_count_;         // number of chunks
  intptr_t objects_size_;  // size of objects
  // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
  HashMap chunk_map_;

  friend class LargeObjectIterator;
};


class LargeObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);

  HeapObject* Next();

  // Implementation of ObjectIterator.
  virtual HeapObject* next_object() { return Next(); }

 private:
  LargePage* current_;
};
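
// A sketch of walking the large object space (assuming |heap| is in scope;
// Next() returns NULL once every large page has been visited):
//
//   LargeObjectIterator it(heap->lo_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... inspect |obj| ...
//   }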


// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space.
class PointerChunkIterator BASE_EMBEDDED {
 public:
  inline explicit PointerChunkIterator(Heap* heap);

  // Return NULL when the iterator is done.
  inline MemoryChunk* next();

 private:
  enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
  State state_;
  PageIterator old_iterator_;
  PageIterator map_iterator_;
  LargeObjectIterator lo_iterator_;
};
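
// A sketch of the iteration contract (next() returns NULL once the old-space,
// map-space, and large-object chunks are exhausted):
//
//   PointerChunkIterator it(heap);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != NULL) {
//     // ... scan |chunk| for pointers to new space ...
//   }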


#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_H_