// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include "src/allocation.h"
#include "src/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
#include "src/flags.h"
#include "src/hashmap.h"
#include "src/list.h"
#include "src/objects.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

class AllocationInfo;
class AllocationObserver;
class CompactionSpace;
class CompactionSpaceCollection;
class FreeList;
class Isolate;
class MemoryAllocator;
class MemoryChunk;
class PagedSpace;
class SemiSpace;
class SkipList;
class SlotsBuffer;
class SlotSet;
class Space;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of
// intergenerational references. See heap/store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old space or large object space
// it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old space and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However, we
// still require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.

// Some assertion macros used in the debugging mode.

#define DCHECK_PAGE_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))

#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
  DCHECK((0 < size) && (size <= code_space->AreaSize()))

#define DCHECK_PAGE_OFFSET(offset) \
  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))

#define DCHECK_MAP_PAGE_INDEX(index) \
  DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

 private:
  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1);
    } else {
      return MarkBit(cell_, new_mask);
    }
  }

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  CellType* cell_;
  CellType mask_;

  friend class Marking;
};

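// For illustration: a MarkBit names a single bit as a (cell pointer, one-bit
// mask) pair. Set()/Get()/Clear() (private, reached via the Marking friend
// class) are plain read-modify-write operations on that one bit, and Next()
// advances to the following bit, wrapping into the next cell when the shifted
// mask overflows to zero, e.g.:
//
//   MarkBit bit(&cell, 0x80000000u);  // last bit of |cell|
//   bit.Next();                       // refers to (&cell + 1, 0x1u)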

// Bitmap is a sequence of cells, each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() { return CellsForLength(kLength); }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  V8_INLINE static uint32_t IndexInCell(uint32_t index) {
    return index & kBitIndexMask;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) { return reinterpret_cast<Address>(this); }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index) {
    MarkBit::CellType mask = 1u << IndexInCell(index);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask);
  }

  static inline void Clear(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) {
        return false;
      }
    }
    return true;
  }

  // Clears all bits starting from {cell_base_index} up to and excluding
  // {index}. Note that {cell_base_index} is required to be cell aligned.
  void ClearRange(uint32_t cell_base_index, uint32_t index) {
    DCHECK_EQ(IndexInCell(cell_base_index), 0u);
    DCHECK_GE(index, cell_base_index);
    uint32_t start_cell_index = IndexToCell(cell_base_index);
    uint32_t end_cell_index = IndexToCell(index);
    DCHECK_GE(end_cell_index, start_cell_index);
    // Clear all cells till the cell containing the last index.
    for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
      cells()[i] = 0;
    }
    // Clear all bits in the last cell till the last bit before index.
    uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
    cells()[end_cell_index] &= clear_mask;
  }
};

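// For illustration: a mark bit index splits into a cell number and a bit
// position inside that cell. Index 37, for example, lives in cell
// 37 >> kBitsPerCellLog2 == 1 at bit 37 & kBitIndexMask == 5, so
// MarkBitFromIndex(37) yields the pair (cells() + 1, 1u << 5). Likewise,
// ClearRange(32, 37) zeroes no whole cell (both indices map to cell 1) and
// then clears bits 0..4 of cell 1 with the mask ~((1u << 5) - 1).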

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    EVACUATION_CANDIDATE,
    RESCAN_ON_EVACUATION,
    NEVER_EVACUATE,  // May contain immortal immutables.
    POPULAR_PAGE,    // Slots buffer of this page overflowed on the previous GC.

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black and
    // a white to grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // This flag is intended to be used for testing. Works only when both
    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
    // are set. It forces the page to become an evacuation candidate at the
    // next candidates selection cycle.
    FORCE_EVACUATION_CANDIDATE_FOR_TESTING,

    // This flag is intended to be used for testing.
    NEVER_ALLOCATE_ON_PAGE,

    // The memory chunk is already logically freed, however the actual freeing
    // still has to be performed.
    PRE_FREED,

    // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
    // has been aborted and needs special handling by the sweeper.
    COMPACTION_WAS_ABORTED,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };

  // |kCompactingDone|: Initial compaction state of a |MemoryChunk|.
  // |kCompactingInProgress|: Parallel compaction is currently in progress.
  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
  //   be finalized.
  // |kCompactingAborted|: Parallel compaction has been aborted, which should
  //   for now only happen in OOM scenarios.
  enum ParallelCompactingState {
    kCompactingDone,
    kCompactingInProgress,
    kCompactingFinalize,
    kCompactingAborted,
  };

  // |kSweepingDone|: The page state when sweeping is complete or sweeping must
  //   not be performed on that page. Sweeper threads that are done with their
  //   work will set this value and not touch the page anymore.
  // |kSweepingPending|: This page is ready for parallel sweeping.
  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
  enum ConcurrentSweepingState {
    kSweepingDone,
    kSweepingPending,
    kSweepingInProgress,
  };

  // Every n write barrier invocations we go to runtime even though
  // we could have handled it in generated code. This lets us check
  // whether we have hit the limit and should do some more marking.
  static const int kWriteBarrierCounterGranularity = 500;

  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
      (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize  // size_t size
      + kIntptrSize               // intptr_t flags_
      + kPointerSize              // Address area_start_
      + kPointerSize              // Address area_end_
      + 2 * kPointerSize          // base::VirtualMemory reservation_
      + kPointerSize              // Address owner_
      + kPointerSize              // Heap* heap_
      + kIntSize;                 // int progress_bar_

  static const size_t kSlotsBufferOffset =
      kLiveBytesOffset + kIntSize;  // int live_byte_count_

  static const size_t kWriteBarrierCounterOffset =
      kSlotsBufferOffset + kPointerSize  // SlotsBuffer* slots_buffer_;
      + kPointerSize                     // SlotSet* old_to_new_slots_;
      + kPointerSize                     // SlotSet* old_to_old_slots_;
      + kPointerSize;                    // SkipList* skip_list_;

  static const size_t kMinHeaderSize =
      kWriteBarrierCounterOffset +
      kIntptrSize         // intptr_t write_barrier_counter_
      + kPointerSize      // AtomicValue high_water_mark_
      + kPointerSize      // base::Mutex* mutex_
      + kPointerSize      // base::AtomicWord parallel_sweeping_
      + kPointerSize      // AtomicValue parallel_compaction_
      + 2 * kPointerSize  // AtomicNumber free-list statistics
      + kPointerSize      // AtomicValue next_chunk_
      + kPointerSize;     // AtomicValue prev_chunk_

  // We add some more space to the computed header size to account for missing
  // alignment requirements in our computation.
  // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
  static const size_t kHeaderSize = kMinHeaderSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset =
      kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
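  // For illustration (assuming a 64-bit build, where kObjectStartAlignment ==
  // 32 * 8 == 256): if kBodyOffset were 300, the expression above evaluates
  // to 299 + (256 - 299 % 256) == 512, i.e. the smallest multiple of 256 that
  // is >= kBodyOffset.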

  static const int kFlagsOffset = kPointerSize;

  static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
  static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);

  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }

  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  static inline void UpdateHighWaterMark(Address mark) {
    if (mark == nullptr) return;
    // Need to subtract one from the mark because when a chunk is full the
    // top points to the next address after the chunk, which effectively
    // belongs to another chunk. See the comment to Page::FromAllocationTop.
    MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
    intptr_t old_mark = 0;
    do {
      old_mark = chunk->high_water_mark_.Value();
    } while ((new_mark > old_mark) &&
             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
  }
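
  // A monotonic-maximum update like the loop above can be sketched with
  // std::atomic (illustration only; V8 uses its own AtomicValue here):
  //
  //   std::atomic<intptr_t> mark{0};
  //   void RaiseTo(intptr_t new_mark) {
  //     intptr_t old_mark = mark.load();
  //     while (new_mark > old_mark &&
  //            !mark.compare_exchange_weak(old_mark, new_mark)) {
  //       // compare_exchange_weak refreshed old_mark; retry.
  //     }
  //   }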

  Address address() { return reinterpret_cast<Address>(this); }

  bool is_valid() { return address() != NULL; }

  base::Mutex* mutex() { return mutex_; }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether |addr| can be a limit of addresses in this page. It's a
  // limit if it's in the page, or if it's just after the last byte of the
  // page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
    return concurrent_sweeping_;
  }

  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
    return parallel_compaction_;
  }

  // Manage live byte count, i.e., count of bytes in black objects.
  inline void ResetLiveBytes();
  inline void IncrementLiveBytes(int by);

  int LiveBytes() {
    DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
    return live_byte_count_;
  }

  void SetLiveBytes(int live_bytes) {
    DCHECK_GE(live_bytes, 0);
    DCHECK_LE(static_cast<size_t>(live_bytes), size_);
    live_byte_count_ = live_bytes;
  }

  int write_barrier_counter() {
    return static_cast<int>(write_barrier_counter_);
  }

  void set_write_barrier_counter(int counter) {
    write_barrier_counter_ = counter;
  }

  size_t size() const { return size_; }

  inline Heap* heap() const { return heap_; }

  inline SkipList* skip_list() { return skip_list_; }

  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

  inline SlotsBuffer* slots_buffer() { return slots_buffer_; }

  inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }

  inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
  inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }

  void AllocateOldToNewSlots();
  void ReleaseOldToNewSlots();
  void AllocateOldToOldSlots();
  void ReleaseOldToOldSlots();

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() { return static_cast<int>(area_end() - area_start()); }

  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }

  int progress_bar() {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    return progress_bar_;
  }

  void set_progress_bar(int progress_bar) {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
  }

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }
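
  // For illustration (assuming kPointerSizeLog2 == 3): the object at
  // this->address() + 256 has mark bit index 256 >> 3 == 32, i.e. bit 0 of
  // bitmap cell 1; MarkbitIndexToAddress(32) maps back to + 256.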

  void PrintMarkbits() { markbits()->Print(); }

  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }

  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask are set to
  // the value in "flags", the rest retain the current value in |flags_|.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }
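
  // For illustration: SetFlags() rewrites only the masked bits. A sketch (not
  // an actual V8 call site) that moves a new-space page from to-space to
  // from-space in a single call:
  //
  //   chunk->SetFlags(1 << IN_FROM_SPACE,
  //                   (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE));
  //   // Now InFromSpace() is true and InToSpace() is false.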

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }

  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }

  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }

  bool IsEvacuationCandidate() {
    DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
    return IsFlagSet(EVACUATION_CANDIDATE);
  }

  bool CanAllocate() {
    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
  }

  void MarkEvacuationCandidate() {
    DCHECK(!IsFlagSet(NEVER_EVACUATE));
    DCHECK_NULL(slots_buffer_);
    SetFlag(EVACUATION_CANDIDATE);
  }

  void ClearEvacuationCandidate() {
    DCHECK(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }

  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

  MemoryChunk* next_chunk() { return next_chunk_.Value(); }

  MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }

  void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }

  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
        kPageHeaderTag) {
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                      kPageHeaderTag);
    } else {
      return nullptr;
    }
  }

  void set_owner(Space* space) {
    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
           kPageHeaderTag);
  }

  bool HasPageHeader() { return owner() != nullptr; }

  void InsertAfter(MemoryChunk* other);
  void Unlink();

 protected:
  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner,
                                 base::VirtualMemory* reservation);

  // Should be called when memory chunk is about to be freed.
  void ReleaseAllocatedMemory();

  base::VirtualMemory* reserved_memory() { return &reservation_; }

  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  base::VirtualMemory reservation_;

  // The identity of the owning space. This is tagged as a failure pointer, but
  // no failure can be in an object, so this can be distinguished from any
  // entry in a fixed array.
  Address owner_;

  Heap* heap_;

  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;

  // Count of bytes marked black on page.
  int live_byte_count_;

  SlotsBuffer* slots_buffer_;

  // A single slot set for small pages (of size kPageSize) or an array of slot
  // sets for large pages. In the latter case the number of entries in the
  // array is ceil(size() / kPageSize).
  SlotSet* old_to_new_slots_;
  SlotSet* old_to_old_slots_;

  SkipList* skip_list_;

  intptr_t write_barrier_counter_;

  // Assuming the initial allocation on a page is sequential,
  // count highest number of bytes ever allocated on the page.
  AtomicValue<intptr_t> high_water_mark_;

  base::Mutex* mutex_;

  AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
  AtomicValue<ParallelCompactingState> parallel_compaction_;

  // PagedSpace free-list statistics.
  AtomicNumber<intptr_t> available_in_free_list_;
  AtomicNumber<intptr_t> wasted_memory_;

  // next_chunk_ holds a pointer of type MemoryChunk.
  AtomicValue<MemoryChunk*> next_chunk_;
  // prev_chunk_ holds a pointer of type MemoryChunk.
  AtomicValue<MemoryChunk*> prev_chunk_;

 private:
  void InitializeReservedMemory() { reservation_.Reset(); }

  friend class MemoryAllocator;
  friend class MemoryChunkValidator;
};

enum FreeListCategoryType {
  kSmall,
  kMedium,
  kLarge,
  kHuge,

  kFirstCategory = kSmall,
  kLastCategory = kHuge,
  kNumberOfCategories = kLastCategory + 1
};

// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[.
  // This only works if the object is in fact in a page. See also MemoryChunk::
  // FromAddress() and FromAnyAddress().
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Only works for addresses in pointer spaces, not code space.
  inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    return p;
  }
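
  // For illustration (assuming 1MB pages and kPointerSize == 8): a page at
  // 0x40100000 that has been filled completely has top == 0x40200000, which
  // is just past the page; FromAllocationTop(0x40200000) ==
  // FromAddress(0x401ffff8) still yields the page at 0x40100000.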

  // Returns the next page in the chain of pages owned by a space.
  inline Page* next_page() {
    DCHECK(next_chunk()->owner() == owner());
    return static_cast<Page*>(next_chunk());
  }
  inline Page* prev_page() {
    DCHECK(prev_chunk()->owner() == owner());
    return static_cast<Page*>(prev_chunk());
  }
  inline void set_next_page(Page* page);
  inline void set_prev_page(Page* page);

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset in this page.
  Address OffsetToAddress(int offset) {
    DCHECK_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Maximum object size that gets allocated into regular pages. Objects larger
  // than that size are allocated in large object space and are never moved in
  // memory. This also applies to new space allocation, since objects are never
  // migrated from new space to large object space. Takes double alignment into
  // account.
  // TODO(hpayer): This limit should be way smaller but we currently have
  // short living objects >256K.
  static const int kMaxRegularHeapObjectSize = 600 * KB;

  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  inline void ClearGCFields();

  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, PagedSpace* owner);

  void InitializeAsAnchor(PagedSpace* owner);

  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
  // progress. In particular, when we know that right before this call a
  // sweeper thread was sweeping this page.
  void WaitUntilSweepingCompleted() {
    mutex_->Lock();
    mutex_->Unlock();
    DCHECK(SweepingDone());
  }

  bool SweepingDone() {
    return concurrent_sweeping_state().Value() == kSweepingDone;
  }

  void ResetFreeListStatistics();

  int LiveBytesFromFreeList() {
    return static_cast<int>(area_size() - wasted_memory() -
                            available_in_free_list());
  }

#define FRAGMENTATION_STATS_ACCESSORS(type, name)        \
  type name() { return name##_.Value(); }                \
  void set_##name(type name) { name##_.SetValue(name); } \
  void add_##name(type name) { name##_.Increment(name); }

  FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list)

#undef FRAGMENTATION_STATS_ACCESSORS

#ifdef DEBUG
  void Print();
#endif  // DEBUG

  friend class MemoryAllocator;
};


class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

  inline LargePage* next_page() {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) { set_next_chunk(page); }

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : allocation_observers_(new List<AllocationObserver*>()),
        allocation_observers_paused_(false),
        heap_(heap),
        id_(id),
        executable_(executable),
        committed_(0),
        max_committed_(0) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  virtual void AddAllocationObserver(AllocationObserver* observer) {
    allocation_observers_->Add(observer);
  }

  virtual void RemoveAllocationObserver(AllocationObserver* observer) {
    bool removed = allocation_observers_->RemoveElement(observer);
    USE(removed);
    DCHECK(removed);
  }

  virtual void PauseAllocationObservers() {
    allocation_observers_paused_ = true;
  }

  virtual void ResumeAllocationObservers() {
    allocation_observers_paused_ = false;
  }

  void AllocationStep(Address soon_object, int size);

  // Return the total amount of committed memory for this space, i.e.,
  // allocatable memory and page headers.
  virtual intptr_t CommittedMemory() { return committed_; }

  virtual intptr_t MaximumCommittedMemory() { return max_committed_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  // Approximate amount of physical memory committed for this space.
  virtual size_t CommittedPhysicalMemory() = 0;

  // Return the available bytes without growing.
  virtual intptr_t Available() = 0;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 protected:
  void AccountCommitted(intptr_t bytes) {
    DCHECK_GE(bytes, 0);
    committed_ += bytes;
    if (committed_ > max_committed_) {
      max_committed_ = committed_;
    }
  }

  void AccountUncommitted(intptr_t bytes) {
    DCHECK_GE(bytes, 0);
    committed_ -= bytes;
    DCHECK_GE(committed_, 0);
  }

  v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_;
  bool allocation_observers_paused_;

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;

  // Keeps track of committed memory in a space.
  intptr_t committed_;
  intptr_t max_committed_;
};


class MemoryChunkValidator {
  // Computed offsets should match the compiler generated ones.
  STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
  STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
                offsetof(MemoryChunk, live_byte_count_));
  STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset ==
                offsetof(MemoryChunk, slots_buffer_));
  STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
                offsetof(MemoryChunk, write_barrier_counter_));

  // Validate our estimates on the header size.
  STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() { TearDown(); }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(size_t requested_size);

  bool valid() { return code_range_ != NULL; }
  Address start() {
    DCHECK(valid());
    return static_cast<Address>(code_range_->address());
  }
  size_t size() {
    DCHECK(valid());
    return code_range_->size();
  }
  bool contains(Address address) {
    if (!valid()) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
                                            const size_t commit_size,
                                            size_t* allocated);
  bool CommitRawMemory(Address start, size_t length);
  bool UncommitRawMemory(Address start, size_t length);
  void FreeRawMemory(Address buf, size_t length);

 private:
  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  base::VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock() : start(0), size(0) {}
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // The global mutex guards free_list_ and allocation_list_ as GC threads may
  // access both lists concurrently with the main thread.
  base::Mutex code_range_mutex_;

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;

  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, returns false.
  bool GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);
  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
  void ReleaseBlock(const FreeBlock* block);

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};


class SkipList {
 public:
  SkipList() { Clear(); }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == NULL) {
      list = new SkipList();
      page->set_skip_list(list);
    }

    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};

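// For illustration (assuming kPageSizeBits == 20, i.e. 1MB pages): each skip
// list covers one page with kSize == 1MB / 8KB == 128 regions of
// kRegionSize == 8KB each. An address whose offset within its page is 20480
// (0x5000) falls in region 20480 >> 13 == 2, and StartFor() of such an
// address returns the lowest object start recorded for region 2.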

// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and
// large pages for large object space.
//
// Each space has to manage its own pages.
//
class MemoryAllocator {
 public:
  explicit MemoryAllocator(Isolate* isolate);

  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

  void TearDown();

  Page* AllocatePage(intptr_t size, PagedSpace* owner,
                     Executability executable);

  LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
                               Executability executable);

  // PreFree logically frees the object, i.e., it takes care of the size
  // bookkeeping and calls the allocation callback.
  void PreFreeMemory(MemoryChunk* chunk);

  // FreeMemory can be called concurrently when PreFree was executed before.
  void PerformFreeMemory(MemoryChunk* chunk);

  // Free is a wrapper method, which calls PreFree and PerformFreeMemory
  // together.
  void Free(MemoryChunk* chunk);

  // Returns allocated spaces in bytes.
  intptr_t Size() { return size_.Value(); }

  // Returns allocated executable spaces in bytes.
  intptr_t SizeExecutable() { return size_executable_.Value(); }

  // Returns the maximum available bytes of heaps.
  intptr_t Available() {
    intptr_t size = Size();
    return capacity_ < size ? 0 : capacity_ - size;
  }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    intptr_t executable_size = SizeExecutable();
    if (capacity_executable_ < executable_size) return 0;
    return capacity_executable_ - executable_size;
  }

  // Returns maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
  }
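
  // For illustration: with capacity_ == 64MB and Size() == 60MB, Available()
  // returns 4MB, so MaxAvailable() reports room for 4 more pages, i.e.
  // 4 * Page::kAllocatableMemory bytes (the page payload excluding the page
  // header), assuming 1MB pages.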
1194
1195 // Returns an indication of whether a pointer is in a space that has
1196 // been allocated by this MemoryAllocator.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001197 V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
1198 return address < lowest_ever_allocated_.Value() ||
1199 address >= highest_ever_allocated_.Value();
Steve Blocka7e24c12009-10-30 11:49:00 +00001200 }
1201
Steve Blocka7e24c12009-10-30 11:49:00 +00001202#ifdef DEBUG
1203 // Reports statistic info of the space.
Steve Block44f0eee2011-05-26 01:26:41 +01001204 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00001205#endif
1206
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001207 // Returns a MemoryChunk in which the memory region from commit_area_size to
1208  // reserve_area_size of the chunk area is reserved but not committed; it
1209  // can be committed later by calling MemoryChunk::CommitArea.
1210 MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
1211 intptr_t commit_area_size,
1212 Executability executable, Space* space);
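  // Example: a sketch with illustrative sizes; {allocator} and {space} are
  // placeholders. It reserves 2 MB of chunk area but commits only the first
  // MB; the rest can be committed later through MemoryChunk::CommitArea.
  //   MemoryChunk* chunk =
  //       allocator->AllocateChunk(2 * MB, 1 * MB, NOT_EXECUTABLE, space);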
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001213
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001214 Address ReserveAlignedMemory(size_t requested, size_t alignment,
1215 base::VirtualMemory* controller);
1216 Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1217 size_t alignment, Executability executable,
1218 base::VirtualMemory* controller);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001219
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001220 bool CommitMemory(Address addr, size_t size, Executability executable);
1221
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001222 void FreeNewSpaceMemory(Address addr, base::VirtualMemory* reservation,
1223 Executability executable);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001224 void FreeMemory(base::VirtualMemory* reservation, Executability executable);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001225 void FreeMemory(Address addr, size_t size, Executability executable);
1226
1227 // Commit a contiguous block of memory from the initial chunk. Assumes that
1228 // the address is not NULL, the size is greater than zero, and that the
1229 // block is contained in the initial chunk. Returns true if it succeeded
1230 // and false otherwise.
1231 bool CommitBlock(Address start, size_t size, Executability executable);
1232
1233 // Uncommit a contiguous block of memory [start..(start+size)[.
1234  // Assumes that start is not NULL, the size is greater than zero, and the
1235 // block is contained in the initial chunk. Returns true if it succeeded
1236 // and false otherwise.
1237 bool UncommitBlock(Address start, size_t size);
1238
1239 // Zaps a contiguous block of memory [start..(start+size)[ thus
1240 // filling it up with a recognizable non-NULL bit pattern.
1241 void ZapBlock(Address start, size_t size);
1242
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001243 void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001244 size_t size);
1245
1246 void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001247 ObjectSpace space, AllocationAction action);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001248
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001249 void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001250
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001251 bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001252
1253 static int CodePageGuardStartOffset();
1254
1255 static int CodePageGuardSize();
1256
1257 static int CodePageAreaStartOffset();
1258
1259 static int CodePageAreaEndOffset();
1260
1261 static int CodePageAreaSize() {
1262 return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1263 }
1264
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001265 static int PageAreaSize(AllocationSpace space) {
1266 DCHECK_NE(LO_SPACE, space);
1267 return (space == CODE_SPACE) ? CodePageAreaSize()
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001268 : Page::kAllocatableMemory;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001269 }
1270
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001271 MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
1272 Address start, size_t commit_size,
1273 size_t reserved_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001274
1275 private:
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001276 Isolate* isolate_;
1277
Steve Blocka7e24c12009-10-30 11:49:00 +00001278 // Maximum space size in bytes.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001279 intptr_t capacity_;
Russell Brenner90bac252010-11-18 13:33:46 -08001280 // Maximum subset of capacity_ that can be executable
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001281 intptr_t capacity_executable_;
Ben Murdochb0fe1622011-05-05 13:52:32 +01001282
Steve Blocka7e24c12009-10-30 11:49:00 +00001283 // Allocated space size in bytes.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001284 AtomicNumber<intptr_t> size_;
Steve Block791712a2010-08-27 10:21:07 +01001285 // Allocated executable space size in bytes.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001286 AtomicNumber<intptr_t> size_executable_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001287
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001288 // We keep the lowest and highest addresses allocated as a quick way
1289 // of determining that pointers are outside the heap. The estimate is
1290  // conservative, i.e. not all addresses in 'allocated' space are allocated
1291 // to our heap. The range is [lowest, highest[, inclusive on the low end
1292 // and exclusive on the high end.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001293 AtomicValue<void*> lowest_ever_allocated_;
1294 AtomicValue<void*> highest_ever_allocated_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001295
Iain Merrick9ac36c92010-09-13 15:29:50 +01001296 struct MemoryAllocationCallbackRegistration {
1297 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
1298 ObjectSpace space,
1299 AllocationAction action)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001300 : callback(callback), space(space), action(action) {}
Iain Merrick9ac36c92010-09-13 15:29:50 +01001301 MemoryAllocationCallback callback;
1302 ObjectSpace space;
1303 AllocationAction action;
1304 };
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001305
Iain Merrick9ac36c92010-09-13 15:29:50 +01001306  // A list of callbacks that are triggered when memory is allocated or freed.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001307 List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;
Iain Merrick9ac36c92010-09-13 15:29:50 +01001308
Steve Blocka7e24c12009-10-30 11:49:00 +00001309 // Initializes pages in a chunk. Returns the first page address.
1310 // This function and GetChunkId() are provided for the mark-compact
1311 // collector to rebuild page headers in the from space, which is
1312  // used as a marking stack and whose page headers are destroyed.
Steve Block44f0eee2011-05-26 01:26:41 +01001313 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1314 PagedSpace* owner);
Steve Block6ded16b2010-05-10 14:33:55 +01001315
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001316 void UpdateAllocatedSpaceLimits(void* low, void* high) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001317 // The use of atomic primitives does not guarantee correctness (wrt.
1318 // desired semantics) by default. The loop here ensures that we update the
1319 // values only if they did not change in between.
1320 void* ptr = nullptr;
1321 do {
1322 ptr = lowest_ever_allocated_.Value();
1323 } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
1324 do {
1325 ptr = highest_ever_allocated_.Value();
1326 } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001327 }
1328
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001329 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
Steve Blocka7e24c12009-10-30 11:49:00 +00001330};
1331
1332
1333// -----------------------------------------------------------------------------
1334// Interface for heap object iterator to be implemented by all object space
1335// object iterators.
1336//
Leon Clarked91b9f72010-01-27 17:25:45 +00001337// NOTE: The space-specific object iterators also implement their own next()
1338// method, which is used to avoid virtual function calls when
Steve Blocka7e24c12009-10-30 11:49:00 +00001339// iterating a specific space.
1340
1341class ObjectIterator : public Malloced {
1342 public:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001343 virtual ~ObjectIterator() {}
Steve Blocka7e24c12009-10-30 11:49:00 +00001344
Steve Blocka7e24c12009-10-30 11:49:00 +00001345 virtual HeapObject* next_object() = 0;
1346};
1347
1348
1349// -----------------------------------------------------------------------------
1350// Heap object iterator in new/old/map spaces.
1351//
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001352// A HeapObjectIterator iterates objects from the bottom of the given space
1353// to its top or from the bottom of the given page to its top.
Steve Blocka7e24c12009-10-30 11:49:00 +00001354//
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001355// If objects are allocated in the page during iteration the iterator may
1356// or may not iterate over those objects. The caller must create a new
1357// iterator in order to be sure to visit these new objects.
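// Example: a sketch of a full-space walk; {space} stands for some PagedSpace*.
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit obj ...
//   }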
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001358class HeapObjectIterator : public ObjectIterator {
Steve Blocka7e24c12009-10-30 11:49:00 +00001359 public:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001360 // Creates a new object iterator in a given space.
Steve Blocka7e24c12009-10-30 11:49:00 +00001361 explicit HeapObjectIterator(PagedSpace* space);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001362 explicit HeapObjectIterator(Page* page);
Steve Blocka7e24c12009-10-30 11:49:00 +00001363
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001364 // Advance to the next object, skipping free spaces and other fillers and
1365 // skipping the special garbage section of which there is one per space.
1366 // Returns NULL when the iteration has ended.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001367 inline HeapObject* Next();
1368 inline HeapObject* next_object() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00001369
1370 private:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001371 enum PageMode { kOnePageOnly, kAllPagesInSpace };
Steve Blocka7e24c12009-10-30 11:49:00 +00001372
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001373 Address cur_addr_; // Current iteration point.
1374 Address cur_end_; // End iteration point.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001375 PagedSpace* space_;
1376 PageMode page_mode_;
Leon Clarked91b9f72010-01-27 17:25:45 +00001377
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001378 // Fast (inlined) path of next().
1379 inline HeapObject* FromCurrentPage();
Leon Clarked91b9f72010-01-27 17:25:45 +00001380
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001381 // Slow path of next(), goes into the next page. Returns false if the
1382 // iteration has ended.
1383 bool AdvanceToNextPage();
Steve Blocka7e24c12009-10-30 11:49:00 +00001384
1385 // Initializes fields.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001386 inline void Initialize(PagedSpace* owner, Address start, Address end,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001387 PageMode mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00001388};
1389
1390
1391// -----------------------------------------------------------------------------
1392// A PageIterator iterates the pages in a paged space.
Steve Blocka7e24c12009-10-30 11:49:00 +00001393
1394class PageIterator BASE_EMBEDDED {
1395 public:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001396 explicit inline PageIterator(PagedSpace* space);
Steve Blocka7e24c12009-10-30 11:49:00 +00001397
1398 inline bool has_next();
1399 inline Page* next();
1400
1401 private:
1402 PagedSpace* space_;
1403 Page* prev_page_; // Previous page returned.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001404 // Next page that will be returned. Cached here so that we can use this
1405 // iterator for operations that deallocate pages.
1406 Page* next_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001407};
1408
1409
1410// -----------------------------------------------------------------------------
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001411// A space has a circular list of pages. The next page can be accessed via
1412// Page::next_page() call.
Steve Blocka7e24c12009-10-30 11:49:00 +00001413
1414// An abstraction of allocation and relocation pointers in a page-structured
1415// space.
1416class AllocationInfo {
1417 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001418 AllocationInfo() : top_(nullptr), limit_(nullptr) {}
1419 AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
1420
1421 void Reset(Address top, Address limit) {
1422 set_top(top);
1423 set_limit(limit);
1424 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001425
1426 INLINE(void set_top(Address top)) {
1427 SLOW_DCHECK(top == NULL ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001428 (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001429 top_ = top;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001430 }
1431
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001432 INLINE(Address top()) const {
1433 SLOW_DCHECK(top_ == NULL ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001434 (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001435 return top_;
1436 }
1437
1438 Address* top_address() { return &top_; }
1439
1440 INLINE(void set_limit(Address limit)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001441 limit_ = limit;
1442 }
1443
1444 INLINE(Address limit()) const {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001445 return limit_;
1446 }
1447
1448 Address* limit_address() { return &limit_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001449
1450#ifdef DEBUG
1451 bool VerifyPagedAllocation() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001452 return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
1453 (top_ <= limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001454 }
1455#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001456
1457 private:
1458 // Current allocation top.
1459 Address top_;
1460 // Current allocation limit.
1461 Address limit_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001462};
1463
1464
1465// An abstraction of the accounting statistics of a page-structured space.
Steve Blocka7e24c12009-10-30 11:49:00 +00001466//
1467// The stats are only set by functions that ensure they stay balanced. These
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001468// functions increase or decrease one of the non-capacity stats in conjunction
1469// with capacity, or else they always balance increases and decreases to the
1470// non-capacity stats.
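// Example: a sketch of balanced updates when a page is added and later used.
//   stats.ExpandSpace(Page::kAllocatableMemory);      // capacity and size grow
//   stats.DeallocateBytes(Page::kAllocatableMemory);  // area goes to the free
//                                                     // list (size shrinks)
//   stats.AllocateBytes(64);                          // a later allocation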
Steve Blocka7e24c12009-10-30 11:49:00 +00001471class AllocationStats BASE_EMBEDDED {
1472 public:
1473 AllocationStats() { Clear(); }
1474
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001475 // Zero out all the allocation statistics (i.e., no capacity).
Steve Blocka7e24c12009-10-30 11:49:00 +00001476 void Clear() {
1477 capacity_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001478 max_capacity_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001479 size_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001480 }
1481
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001482 void ClearSize() { size_ = capacity_; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001483
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001484 // Reset the allocation statistics (i.e., available = capacity with no wasted
1485 // or allocated bytes).
Steve Blocka7e24c12009-10-30 11:49:00 +00001486 void Reset() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001487 size_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001488 }
1489
1490 // Accessors for the allocation statistics.
Ben Murdochf87a2032010-10-22 12:50:53 +01001491 intptr_t Capacity() { return capacity_; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001492 intptr_t MaxCapacity() { return max_capacity_; }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001493 intptr_t Size() {
1494 CHECK_GE(size_, 0);
1495 return size_;
1496 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001497
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001498 // Grow the space by adding available bytes. They are initially marked as
1499 // being in use (part of the size), but will normally be immediately freed,
1500 // putting them on the free list and removing them from size_.
Steve Blocka7e24c12009-10-30 11:49:00 +00001501 void ExpandSpace(int size_in_bytes) {
1502 capacity_ += size_in_bytes;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001503 size_ += size_in_bytes;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001504 if (capacity_ > max_capacity_) {
1505 max_capacity_ = capacity_;
1506 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001507 CHECK(size_ >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001508 }
1509
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001510 // Shrink the space by removing available bytes. Since shrinking is done
1511 // during sweeping, bytes have been marked as being in use (part of the size)
1512 // and are hereby freed.
Steve Blocka7e24c12009-10-30 11:49:00 +00001513 void ShrinkSpace(int size_in_bytes) {
1514 capacity_ -= size_in_bytes;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001515 size_ -= size_in_bytes;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001516 CHECK(size_ >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001517 }
1518
1519 // Allocate from available bytes (available -> size).
Ben Murdochf87a2032010-10-22 12:50:53 +01001520 void AllocateBytes(intptr_t size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001521 size_ += size_in_bytes;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001522 CHECK(size_ >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001523 }
1524
1525 // Free allocated bytes, making them available (size -> available).
Ben Murdochf87a2032010-10-22 12:50:53 +01001526 void DeallocateBytes(intptr_t size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001527 size_ -= size_in_bytes;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001528 CHECK_GE(size_, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001529 }
1530
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001531 // Merge {other} into {this}.
1532 void Merge(const AllocationStats& other) {
1533 capacity_ += other.capacity_;
1534 size_ += other.size_;
1535 if (other.max_capacity_ > max_capacity_) {
1536 max_capacity_ = other.max_capacity_;
1537 }
1538 CHECK_GE(size_, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001539 }
1540
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001541 void DecreaseCapacity(intptr_t size_in_bytes) {
1542 capacity_ -= size_in_bytes;
1543 CHECK_GE(capacity_, 0);
1544 CHECK_GE(capacity_, size_);
1545 }
1546
1547 void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
1548
Steve Blocka7e24c12009-10-30 11:49:00 +00001549 private:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001550 // |capacity_|: The number of object-area bytes (i.e., not including page
1551 // bookkeeping structures) currently in the space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001552 intptr_t capacity_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001553
1554 // |max_capacity_|: The maximum capacity ever observed.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001555 intptr_t max_capacity_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001556
1557 // |size_|: The number of allocated bytes.
Ben Murdochf87a2032010-10-22 12:50:53 +01001558 intptr_t size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001559};
1560
1561
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001562// A free list category maintains a linked list of free memory blocks.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001563class FreeListCategory {
1564 public:
Ben Murdoch097c5b22016-05-18 11:27:45 +01001565 FreeListCategory() : top_(nullptr), end_(nullptr), available_(0) {}
1566
1567 void Initialize(FreeList* owner, FreeListCategoryType type) {
1568 owner_ = owner;
1569 type_ = type;
1570 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001571
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001572 // Concatenates {category} into {this}.
1573 //
1574 // Note: Thread-safe.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001575 intptr_t Concatenate(FreeListCategory* category);
1576
1577 void Reset();
1578
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001579 void Free(FreeSpace* node, int size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001580
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001581 // Pick a node from the list.
1582 FreeSpace* PickNodeFromList(int* node_size);
1583
1584 // Pick a node from the list and compare it against {size_in_bytes}. If the
1585  // node's size is greater or equal, return the node; otherwise return null.
1586 FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
1587
1588 // Search for a node of size {size_in_bytes}.
1589 FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001590
1591 intptr_t EvictFreeListItemsInList(Page* p);
1592 bool ContainsPageFreeListItemsInList(Page* p);
1593
1594 void RepairFreeList(Heap* heap);
1595
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001596 bool IsEmpty() { return top() == nullptr; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001597
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001598 FreeList* owner() { return owner_; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001599 int available() const { return available_; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001600
1601#ifdef DEBUG
1602 intptr_t SumFreeList();
1603 int FreeListLength();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001604 bool IsVeryLong();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001605#endif
1606
1607 private:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001608  // For debug builds we accurately compute free list lengths up to
1609 // {kVeryLongFreeList} by manually walking the list.
1610 static const int kVeryLongFreeList = 500;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001611
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001612 FreeSpace* top() { return top_.Value(); }
1613 void set_top(FreeSpace* top) { top_.SetValue(top); }
1614
1615 FreeSpace* end() const { return end_; }
1616 void set_end(FreeSpace* end) { end_ = end; }
1617
1618 // |type_|: The type of this free list category.
1619 FreeListCategoryType type_;
1620
1621 // |top_|: Points to the top FreeSpace* in the free list category.
1622 AtomicValue<FreeSpace*> top_;
1623
1624 // |end_|: Points to the end FreeSpace* in the free list category.
1625 FreeSpace* end_;
1626
1627 // |available_|: Total available bytes in all blocks of this free list
1628 // category.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001629 int available_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001630
1631 // |owner_|: The owning free list of this category.
1632 FreeList* owner_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001633};
1634
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001635// A free list maintaining free blocks of memory. The free list is organized in
1636// a way to encourage objects allocated around the same time to be near each
1637// other. The normal way to allocate is intended to be by bumping a 'top'
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001638// pointer until it hits a 'limit' pointer. When the limit is hit we need to
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001639// find a new space to allocate from. This is done with the free list, which is
1640// divided up into rough categories to cut down on waste. Having finer
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001641// categories would scatter allocation more.
1642
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001643// The free list is organized in categories as follows:
1644// 1-31 words (too small): Such small free areas are discarded for efficiency
1645// reasons. They can be reclaimed by the compactor. However the distance
1646// between top and limit may be this small.
1647// 32-255 words (small): Used for allocating free space between 1-31 words in
1648// size.
1649// 256-2047 words (medium): Used for allocating free space between 32-255 words
1650// in size.
1651// 2048-16383 words (large): Used for allocating free space between 256-2047
1652// words in size.
1653// At least 16384 words (huge): This list is for objects of 2048 words or
1654// larger. Empty pages are also added to this list.
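// Example: illustrative sizes; {free_list} and {start} are placeholders for a
// space's FreeList* and a word-aligned address.
//   free_list->Free(start, 100 * kPointerSize);  // a 100-word block lands on
//                                                // the small list (32-255)
//   HeapObject* object = free_list->Allocate(20 * kPointerSize);
//   // Requests of 1-31 words may be served from that small-list block.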
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001655class FreeList {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001656 public:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001657 // This method returns how much memory can be allocated after freeing
1658 // maximum_freed memory.
1659 static inline int GuaranteedAllocatable(int maximum_freed) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001660 if (maximum_freed <= kSmallListMin) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001661 return 0;
1662 } else if (maximum_freed <= kSmallListMax) {
1663 return kSmallAllocationMax;
1664 } else if (maximum_freed <= kMediumListMax) {
1665 return kMediumAllocationMax;
1666 } else if (maximum_freed <= kLargeListMax) {
1667 return kLargeAllocationMax;
1668 }
1669 return maximum_freed;
1670 }
1671
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001672 explicit FreeList(PagedSpace* owner);
1673
1674 // The method concatenates {other} into {this} and returns the added bytes,
1675 // including waste.
1676 //
1677 // Note: Thread-safe.
1678 intptr_t Concatenate(FreeList* other);
1679
1680 // Adds a node on the free list. The block of size {size_in_bytes} starting
1681 // at {start} is placed on the free list. The return value is the number of
1682  // bytes that were not added to the free list because the freed memory block
1683 // was too small. Bookkeeping information will be written to the block, i.e.,
1684 // its contents will be destroyed. The start address should be word aligned,
1685 // and the size should be a non-zero multiple of the word size.
1686 int Free(Address start, int size_in_bytes);
1687
1688 // Allocate a block of size {size_in_bytes} from the free list. The block is
1689  // uninitialized. A failure is returned if no block is available. The size
1690 // should be a non-zero multiple of the word size.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001691 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1692
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001693 // Clear the free list.
1694 void Reset();
1695
1696 void ResetStats() { wasted_bytes_ = 0; }
1697
1698 // Return the number of bytes available on the free list.
1699 intptr_t Available() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001700 intptr_t available = 0;
1701 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1702 available += category_[i].available();
1703 }
1704 return available;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001705 }
1706
1707 // The method tries to find a {FreeSpace} node of at least {size_in_bytes}
1708 // size in the free list category exactly matching the size. If no suitable
1709 // node could be found, the method falls back to retrieving a {FreeSpace}
1710 // from the large or huge free list category.
1711 //
1712 // Can be used concurrently.
1713 MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
1714
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001715 bool IsEmpty() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001716 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1717 if (!category_[i].IsEmpty()) return false;
1718 }
1719 return true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001720 }
1721
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001722 // Used after booting the VM.
1723 void RepairLists(Heap* heap);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001724
1725 intptr_t EvictFreeListItems(Page* p);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001726 bool ContainsPageFreeListItems(Page* p);
1727
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001728 PagedSpace* owner() { return owner_; }
1729 intptr_t wasted_bytes() { return wasted_bytes_; }
1730 base::Mutex* mutex() { return &mutex_; }
1731
1732#ifdef DEBUG
1733 void Zap();
1734 intptr_t SumFreeLists();
1735 bool IsVeryLong();
1736#endif
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001737
1738 private:
1739 // The size range of blocks, in bytes.
1740 static const int kMinBlockSize = 3 * kPointerSize;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001741 static const int kMaxBlockSize = Page::kAllocatableMemory;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001742
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001743 static const int kSmallListMin = 0x1f * kPointerSize;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001744 static const int kSmallListMax = 0xff * kPointerSize;
1745 static const int kMediumListMax = 0x7ff * kPointerSize;
1746 static const int kLargeListMax = 0x3fff * kPointerSize;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001747 static const int kSmallAllocationMax = kSmallListMin;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001748 static const int kMediumAllocationMax = kSmallListMax;
1749 static const int kLargeAllocationMax = kMediumListMax;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001750
1751 FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
1752 FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
1753
1754 FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001755 return &category_[category];
1756 }
1757
1758 FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
1759 if (size_in_bytes <= kSmallListMax) {
1760 return kSmall;
1761 } else if (size_in_bytes <= kMediumListMax) {
1762 return kMedium;
1763 } else if (size_in_bytes <= kLargeListMax) {
1764 return kLarge;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001765 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001766 return kHuge;
1767 }
1768
1769 FreeListCategoryType SelectFastAllocationFreeListCategoryType(
1770 size_t size_in_bytes) {
1771 if (size_in_bytes <= kSmallAllocationMax) {
1772 return kSmall;
1773 } else if (size_in_bytes <= kMediumAllocationMax) {
1774 return kMedium;
1775 } else if (size_in_bytes <= kLargeAllocationMax) {
1776 return kLarge;
1777 }
1778 return kHuge;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001779 }
1780
1781 PagedSpace* owner_;
1782 base::Mutex mutex_;
1783 intptr_t wasted_bytes_;
Ben Murdoch097c5b22016-05-18 11:27:45 +01001784 FreeListCategory category_[kNumberOfCategories];
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001785
1786 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1787};
1788
1789
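// Example: a sketch of consuming an AllocationResult {result} obtained from
// some allocation routine.
//   HeapObject* obj = NULL;
//   if (result.To(&obj)) {
//     // ... use obj ...
//   } else {
//     AllocationSpace space_to_collect = result.RetrySpace();
//     // ... trigger a GC in that space and retry ...
//   }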
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001790class AllocationResult {
1791 public:
1792 // Implicit constructor from Object*.
1793 AllocationResult(Object* object) // NOLINT
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001794 : object_(object) {
1795 // AllocationResults can't return Smis, which are used to represent
1796 // failure and the space to retry in.
1797 CHECK(!object->IsSmi());
1798 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001799
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001800 AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001801
1802 static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
1803 return AllocationResult(space);
1804 }
1805
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001806 inline bool IsRetry() { return object_->IsSmi(); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001807
1808 template <typename T>
1809 bool To(T** obj) {
1810 if (IsRetry()) return false;
1811 *obj = T::cast(object_);
1812 return true;
1813 }
1814
1815 Object* ToObjectChecked() {
1816 CHECK(!IsRetry());
1817 return object_;
1818 }
1819
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001820 inline AllocationSpace RetrySpace();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001821
1822 private:
1823 explicit AllocationResult(AllocationSpace space)
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001824 : object_(Smi::FromInt(static_cast<int>(space))) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001825
1826 Object* object_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001827};
1828
1829
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001830STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
1831
1832
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001833// LocalAllocationBuffer represents a linear allocation area that is created
1834// from a given {AllocationResult} and can be used to allocate memory without
1835// synchronization.
1836//
1837// The buffer is properly closed upon destruction and reassignment.
1838// Example:
1839// {
1840// AllocationResult result = ...;
1841// LocalAllocationBuffer a(heap, result, size);
1842// LocalAllocationBuffer b = a;
1843// CHECK(!a.IsValid());
1844// CHECK(b.IsValid());
1845// // {a} is invalid now and cannot be used for further allocations.
1846// }
1847// // Since {b} went out of scope, the LAB is closed, resulting in creating a
1848// // filler object for the remaining area.
1849class LocalAllocationBuffer {
1850 public:
1851 // Indicates that a buffer cannot be used for allocations anymore. Can result
1852 // from either reassigning a buffer, or trying to construct it from an
1853 // invalid {AllocationResult}.
1854 static inline LocalAllocationBuffer InvalidBuffer();
1855
1856 // Creates a new LAB from a given {AllocationResult}. Results in
1857 // InvalidBuffer if the result indicates a retry.
1858 static inline LocalAllocationBuffer FromResult(Heap* heap,
1859 AllocationResult result,
1860 intptr_t size);
1861
1862 ~LocalAllocationBuffer() { Close(); }
1863
1864 // Convert to C++11 move-semantics once allowed by the style guide.
1865 LocalAllocationBuffer(const LocalAllocationBuffer& other);
1866 LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
1867
1868 MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
1869 int size_in_bytes, AllocationAlignment alignment);
1870
1871 inline bool IsValid() { return allocation_info_.top() != nullptr; }
1872
1873 // Try to merge LABs, which is only possible when they are adjacent in memory.
1874 // Returns true if the merge was successful, false otherwise.
1875 inline bool TryMerge(LocalAllocationBuffer* other);
1876
1877 private:
1878 LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
1879
1880 void Close();
1881
1882 Heap* heap_;
1883 AllocationInfo allocation_info_;
1884};
1885
1886
Steve Blocka7e24c12009-10-30 11:49:00 +00001887class PagedSpace : public Space {
1888 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001889 static const intptr_t kCompactionMemoryWanted = 500 * KB;
Steve Blocka7e24c12009-10-30 11:49:00 +00001890
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001891 // Creates a space with an id.
1892 PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
1893
1894 ~PagedSpace() override { TearDown(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001895
1896 // Set up the space using the given address range of virtual memory (from
1897 // the memory allocator's initial chunk) if possible. If the block of
1898 // addresses is not big enough to contain a single page-aligned page, a
1899 // fresh chunk will be allocated.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001900 bool SetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00001901
1902 // Returns true if the space has been successfully set up and not
1903 // subsequently torn down.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001904 bool HasBeenSetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00001905
Steve Blocka7e24c12009-10-30 11:49:00 +00001906 // Checks whether an object/address is in this space.
1907 inline bool Contains(Address a);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001908 inline bool Contains(Object* o);
1909 bool ContainsSlow(Address addr);
Steve Blocka7e24c12009-10-30 11:49:00 +00001910
1911 // Given an address occupied by a live object, return that object if it is
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001912 // in this space, or a Smi if it is not. The implementation iterates over
1913  // objects in the page containing the address; the cost is linear in the
1914 // number of objects in the page. It may be slow.
1915 Object* FindObject(Address addr);
1916
1917 // During boot the free_space_map is created, and afterwards we may need
1918 // to write it into the free list nodes that were already created.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001919 void RepairFreeListsAfterDeserialization();
Steve Blocka7e24c12009-10-30 11:49:00 +00001920
Ben Murdoch85b71792012-04-11 18:30:58 +01001921 // Prepares for a mark-compact GC.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001922 void PrepareForMarkCompact();
Ben Murdoch85b71792012-04-11 18:30:58 +01001923
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001924 // Current capacity without growing (Size() + Available()).
Ben Murdochf87a2032010-10-22 12:50:53 +01001925 intptr_t Capacity() { return accounting_stats_.Capacity(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001926
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001927 // Approximate amount of physical memory committed for this space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001928 size_t CommittedPhysicalMemory() override;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001929
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001930 void ResetFreeListStatistics();
1931
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001932 // Sets the capacity, the available space and the wasted space to zero.
1933 // The stats are rebuilt during sweeping by adding each page to the
1934 // capacity and the size when it is encountered. As free spaces are
1935 // discovered during the sweeping they are subtracted from the size and added
1936 // to the available and wasted totals.
1937 void ClearStats() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001938 accounting_stats_.ClearSize();
1939 free_list_.ResetStats();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001940 ResetFreeListStatistics();
1941 }
1942
1943 // Increases the number of available bytes of that space.
1944 void AddToAccountingStats(intptr_t bytes) {
1945 accounting_stats_.DeallocateBytes(bytes);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001946 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001947
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001948 // Available bytes without growing. These are the bytes on the free list.
1949 // The bytes in the linear allocation area are not included in this total
1950 // because updating the stats would slow down allocation. New pages are
1951 // immediately added to the free list so they show up here.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001952 intptr_t Available() override { return free_list_.Available(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001953
1954 // Allocated bytes in this space. Garbage bytes that were not found due to
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001955 // concurrent sweeping are counted as being allocated! The bytes in the
1956 // current linear allocation area (between top and limit) are also counted
1957 // here.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001958 intptr_t Size() override { return accounting_stats_.Size(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001959
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001960 // As size, but the bytes in lazily swept pages are estimated and the bytes
1961 // in the current linear allocation area are not included.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001962 intptr_t SizeOfObjects() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00001963
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001964 // Wasted bytes in this space. These are just the bytes that were thrown away
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001965 // due to being too small to use for allocation.
1966 virtual intptr_t Waste() { return free_list_.wasted_bytes(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001967
1968 // Returns the allocation pointer in this space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001969 Address top() { return allocation_info_.top(); }
1970 Address limit() { return allocation_info_.limit(); }
1971
1972 // The allocation top address.
1973 Address* allocation_top_address() { return allocation_info_.top_address(); }
1974
1975 // The allocation limit address.
1976 Address* allocation_limit_address() {
1977 return allocation_info_.limit_address();
1978 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001979
1980 // Allocate the requested number of bytes in the space if possible, return a
1981 // failure object if not.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001982 MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
1983 int size_in_bytes);
1984
1985 MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
1986 int size_in_bytes);
1987
1988 // Allocate the requested number of bytes in the space double aligned if
1989 // possible, return a failure object if not.
1990 MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
1991 int size_in_bytes, AllocationAlignment alignment);
1992
1993 // Allocate the requested number of bytes in the space and consider allocation
1994 // alignment if needed.
1995 MUST_USE_RESULT inline AllocationResult AllocateRaw(
1996 int size_in_bytes, AllocationAlignment alignment);
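  // Example: a sketch; {space} stands for a PagedSpace*, and kWordAligned is
  // assumed to be the word-aligned AllocationAlignment value.
  //   AllocationResult result =
  //       space->AllocateRaw(size_in_bytes, kWordAligned);
  //   HeapObject* object = NULL;
  //   if (!result.To(&object)) {
  //     // Allocation failed; the caller has to collect garbage and retry.
  //   }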
Leon Clarkee46be812010-01-19 14:06:41 +00001997
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001998 // Give a block of memory to the space's free list. It might be added to
1999 // the free list or accounted as waste.
2000  // The return value is the number of bytes that actually went onto the
2001  // free list (i.e., size_in_bytes minus the wasted bytes).
2002 int Free(Address start, int size_in_bytes) {
2003 int wasted = free_list_.Free(start, size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002004 accounting_stats_.DeallocateBytes(size_in_bytes);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002005 return size_in_bytes - wasted;
2006 }
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002007
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002008 void ResetFreeList() { free_list_.Reset(); }
2009
Steve Block6ded16b2010-05-10 14:33:55 +01002010 // Set space allocation info.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002011 void SetTopAndLimit(Address top, Address limit) {
2012 DCHECK(top == limit ||
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002013 Page::FromAddress(top) == Page::FromAddress(limit - 1));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002014 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002015 allocation_info_.Reset(top, limit);
Steve Block6ded16b2010-05-10 14:33:55 +01002016 }
2017
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002018 // Empty space allocation info, returning unused area to free list.
2019 void EmptyAllocationInfo() {
2020 // Mark the old linear allocation area with a free space map so it can be
2021 // skipped when scanning the heap.
2022 int old_linear_size = static_cast<int>(limit() - top());
2023 Free(top(), old_linear_size);
2024 SetTopAndLimit(NULL, NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00002025 }
2026
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002027 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
2028
2029 void IncreaseCapacity(int size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002030
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002031 // Releases an unused page and shrinks the space.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002032 void ReleasePage(Page* page, bool evict_free_list_items);
Steve Blocka7e24c12009-10-30 11:49:00 +00002033
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002034 // The dummy page that anchors the linked list of pages.
2035 Page* anchor() { return &anchor_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002036
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002037#ifdef VERIFY_HEAP
2038 // Verify integrity of this space.
2039 virtual void Verify(ObjectVisitor* visitor);
2040
2041 // Overridden by subclasses to verify space-specific object
2042 // properties (e.g., only maps or free-list nodes are in map space).
2043 virtual void VerifyObject(HeapObject* obj) {}
2044#endif
2045
Steve Blocka7e24c12009-10-30 11:49:00 +00002046#ifdef DEBUG
2047 // Print meta info and objects in this space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002048 void Print() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00002049
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002050 // Reports statistics for the space
2051 void ReportStatistics();
2052
Steve Blocka7e24c12009-10-30 11:49:00 +00002053 // Report code object related statistics
2054 void CollectCodeStatistics();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002055 static void ReportCodeStatistics(Isolate* isolate);
2056 static void ResetCodeStatistics(Isolate* isolate);
Steve Blocka7e24c12009-10-30 11:49:00 +00002057#endif
2058
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002059  // This function tries to steal size_in_bytes of memory from the sweeper
2060  // threads' free lists. If it cannot steal enough memory, it will wait
2061 // for the sweeper threads to finish sweeping.
2062 // It returns true when sweeping is completed and false otherwise.
2063 bool EnsureSweeperProgress(intptr_t size_in_bytes);
2064
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002065 Page* FirstPage() { return anchor_.next_page(); }
2066 Page* LastPage() { return anchor_.prev_page(); }
2067
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002068 void EvictEvacuationCandidatesFromLinearAllocationArea();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002069
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002070 bool CanExpand(size_t size);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002071
2072 // Returns the number of total pages in this space.
2073 int CountTotalPages();
2074
2075 // Return size of allocatable area on a page in this space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002076 inline int AreaSize() { return area_size_; }
2077
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002078 virtual bool is_local() { return false; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002079
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002080 // Merges {other} into the current space. Note that this modifies {other},
2081 // e.g., removes its bump pointer area and resets statistics.
2082 void MergeCompactionSpace(CompactionSpace* other);
2083
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002084 // Refills the free list from the corresponding free list filled by the
2085 // sweeper.
2086 virtual void RefillFreeList();
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002087
Steve Blocka7e24c12009-10-30 11:49:00 +00002088 protected:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002089 void AddMemory(Address start, intptr_t size);
2090
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002091 void MoveOverFreeMemory(PagedSpace* other);
2092
2093 // PagedSpaces that should be included in snapshots have different, i.e.,
2094 // smaller, initial pages.
2095 virtual bool snapshotable() { return true; }
2096
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002097 FreeList* free_list() { return &free_list_; }
2098
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002099 bool HasPages() { return anchor_.next_page() != &anchor_; }
2100
2101 // Cleans up the space, frees all pages in this space except those belonging
2102 // to the initial chunk, uncommits addresses in the initial chunk.
2103 void TearDown();
2104
2105 // Expands the space by allocating a fixed number of pages. Returns false if
2106 // it cannot allocate requested number of pages from OS, or if the hard heap
2107 // size limit has been hit.
2108 bool Expand();
2109
2110 // Generic fast case allocation function that tries linear allocation at the
2111 // address denoted by top in allocation_info_.
2112 inline HeapObject* AllocateLinearly(int size_in_bytes);
2113
2114 // Generic fast case allocation function that tries aligned linear allocation
2115 // at the address denoted by top in allocation_info_. Writes the aligned
2116 // allocation size, which includes the filler size, to size_in_bytes.
2117 inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
2118 AllocationAlignment alignment);
2119
2120 // If sweeping is still in progress try to sweep unswept pages. If that is
2121 // not successful, wait for the sweeper threads and re-try free-list
2122 // allocation.
2123 MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
2124 int size_in_bytes);
2125
2126 // Slow path of AllocateRaw. This function is space-dependent.
2127 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
2128
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002129 int area_size_;
2130
Steve Blocka7e24c12009-10-30 11:49:00 +00002131 // Accounting information for this space.
2132 AllocationStats accounting_stats_;
2133
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002134 // The dummy page that anchors the double linked list of pages.
2135 Page anchor_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002136
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002137 // The space's free list.
2138 FreeList free_list_;
Steve Block6ded16b2010-05-10 14:33:55 +01002139
Steve Blocka7e24c12009-10-30 11:49:00 +00002140 // Normal allocation information.
2141 AllocationInfo allocation_info_;
2142
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002143 // Mutex guarding any concurrent access to the space.
2144 base::Mutex space_mutex_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002145
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002146 friend class MarkCompactCollector;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002147 friend class PageIterator;
2148
2149 // Used in cctest.
2150 friend class HeapTester;
Steve Blocka7e24c12009-10-30 11:49:00 +00002151};
2152
2153
Steve Blocka7e24c12009-10-30 11:49:00 +00002154class NumberAndSizeInfo BASE_EMBEDDED {
2155 public:
2156 NumberAndSizeInfo() : number_(0), bytes_(0) {}
2157
2158 int number() const { return number_; }
2159 void increment_number(int num) { number_ += num; }
2160
2161 int bytes() const { return bytes_; }
2162 void increment_bytes(int size) { bytes_ += size; }
2163
2164 void clear() {
2165 number_ = 0;
2166 bytes_ = 0;
2167 }
2168
2169 private:
2170 int number_;
2171 int bytes_;
2172};
2173
2174
2175// HistogramInfo class for recording a single "bar" of a histogram. This
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002176// class is used for collecting statistics to print to the log file.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002177class HistogramInfo : public NumberAndSizeInfo {
Steve Blocka7e24c12009-10-30 11:49:00 +00002178 public:
2179 HistogramInfo() : NumberAndSizeInfo() {}
2180
2181 const char* name() { return name_; }
2182 void set_name(const char* name) { name_ = name; }
2183
2184 private:
2185 const char* name_;
2186};
Steve Blocka7e24c12009-10-30 11:49:00 +00002187
2188
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002189enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002190
2191
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002192class NewSpacePage : public MemoryChunk {
2193 public:
2194 // GC related flags copied from from-space to to-space when
2195 // flipping semispaces.
2196 static const intptr_t kCopyOnFlipFlagsMask =
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002197 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
Ben Murdoch097c5b22016-05-18 11:27:45 +01002198 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002199
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002200 static const int kAreaSize = Page::kAllocatableMemory;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002201
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002202 inline NewSpacePage* next_page() {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002203 return static_cast<NewSpacePage*>(next_chunk());
2204 }
2205
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002206 inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002207
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002208 inline NewSpacePage* prev_page() {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002209 return static_cast<NewSpacePage*>(prev_chunk());
2210 }
2211
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002212 inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002213
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002214 SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002215
2216 bool is_anchor() { return !this->InNewSpace(); }
2217
2218 static bool IsAtStart(Address addr) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002219 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
2220 kObjectStartOffset;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002221 }
2222
2223 static bool IsAtEnd(Address addr) {
2224 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
2225 }
2226
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002227 Address address() { return reinterpret_cast<Address>(this); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002228
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002229 // Finds the NewSpacePage containing the given address.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002230 static inline NewSpacePage* FromAddress(Address address_in_page) {
2231 Address page_start =
2232 reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
2233 ~Page::kPageAlignmentMask);
2234 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
2235 return page;
2236 }
2237
2238 // Find the page for a limit address. A limit address is either an address
2239 // inside a page, or the address right after the last byte of a page.
2240 static inline NewSpacePage* FromLimit(Address address_limit) {
2241 return NewSpacePage::FromAddress(address_limit - 1);
2242 }
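  // Example: a sketch; {object_address} and {allocation_top} are placeholders
  // for addresses inside new space.
  //   NewSpacePage* p = NewSpacePage::FromAddress(object_address);
  //   NewSpacePage* q = NewSpacePage::FromLimit(allocation_top);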
2243
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002244 // Checks if address1 and address2 are on the same new space page.
2245 static inline bool OnSamePage(Address address1, Address address2) {
2246 return NewSpacePage::FromAddress(address1) ==
2247 NewSpacePage::FromAddress(address2);
2248 }
2249
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002250 private:
2251 // Create a NewSpacePage object that is only used as anchor
2252 // for the doubly-linked list of real pages.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002253 explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002254
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002255 static NewSpacePage* Initialize(Heap* heap, Address start,
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002256 SemiSpace* semi_space);
2257
2258  // Initialize a fake NewSpacePage used as a sentinel at the ends
2259 // of a doubly-linked list of real NewSpacePages.
2260 // Only uses the prev/next links, and sets flags to not be in new-space.
2261 void InitializeAsAnchor(SemiSpace* owner);
2262
2263 friend class SemiSpace;
2264 friend class SemiSpaceIterator;
2265};
2266
2267
Steve Blocka7e24c12009-10-30 11:49:00 +00002268// -----------------------------------------------------------------------------
2269// SemiSpace in young generation
2270//
Ben Murdoch097c5b22016-05-18 11:27:45 +01002271// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
2272// The mark-compact collector uses the memory of the first page in the from
2273// space as a marking stack when tracing live objects.
Steve Blocka7e24c12009-10-30 11:49:00 +00002274class SemiSpace : public Space {
2275 public:
Ben Murdoch097c5b22016-05-18 11:27:45 +01002276 static void Swap(SemiSpace* from, SemiSpace* to);
2277
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002278 SemiSpace(Heap* heap, SemiSpaceId semispace)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002279 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
Ben Murdoch097c5b22016-05-18 11:27:45 +01002280 current_capacity_(0),
2281 maximum_capacity_(0),
2282 minimum_capacity_(0),
2283 start_(nullptr),
2284 age_mark_(nullptr),
2285 committed_(false),
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002286 id_(semispace),
2287 anchor_(this),
Ben Murdoch097c5b22016-05-18 11:27:45 +01002288 current_page_(nullptr) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002289
Ben Murdoch097c5b22016-05-18 11:27:45 +01002290 inline bool Contains(HeapObject* o);
2291 inline bool Contains(Object* o);
2292 inline bool ContainsSlow(Address a);
2293
2294 // Creates a space in the young generation. The constructor does not
2295 // allocate memory from the OS.
2296 void SetUp(Address start, int initial_capacity, int maximum_capacity);
Steve Blocka7e24c12009-10-30 11:49:00 +00002297
2298 // Tear down the space. Heap memory was not allocated by the space, so it
2299 // is not deallocated here.
2300 void TearDown();
2301
2302 // True if the space has been set up but not torn down.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002303 bool HasBeenSetUp() { return start_ != nullptr; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002304
Steve Blocka7e24c12009-10-30 11:49:00 +00002305 // Grow the semispace to the new capacity. The new capacity
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002306 // requested must be larger than the current capacity and less than
2307 // the maximum capacity.
Steve Blocka7e24c12009-10-30 11:49:00 +00002308 bool GrowTo(int new_capacity);
2309
2310 // Shrinks the semispace to the new capacity. The new capacity
2311 // requested must be more than the amount of used memory in the
2312 // semispace and less than the current capacity.
2313 bool ShrinkTo(int new_capacity);
2314
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002315 // Returns the start address of the first page of the space.
2316 Address space_start() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002317 DCHECK_NE(anchor_.next_page(), &anchor_);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002318 return anchor_.next_page()->area_start();
2319 }
2320
2321 // Returns the start address of the current page of the space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002322 Address page_low() { return current_page_->area_start(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002323
Steve Blocka7e24c12009-10-30 11:49:00 +00002324 // Returns one past the end address of the space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002325 Address space_end() { return anchor_.prev_page()->area_end(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002326
2327 // Returns one past the end address of the current page of the space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002328 Address page_high() { return current_page_->area_end(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002329
2330 bool AdvancePage() {
2331 NewSpacePage* next_page = current_page_->next_page();
2332 if (next_page == anchor()) return false;
2333 current_page_ = next_page;
2334 return true;
2335 }
2336
2337 // Resets the space to using the first page.
2338 void Reset();
Steve Blocka7e24c12009-10-30 11:49:00 +00002339
2340 // Age mark accessors.
2341 Address age_mark() { return age_mark_; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002342 void set_age_mark(Address mark);
Steve Blocka7e24c12009-10-30 11:49:00 +00002343
Ben Murdoch097c5b22016-05-18 11:27:45 +01002344 bool is_committed() { return committed_; }
2345 bool Commit();
2346 bool Uncommit();
Steve Blocka7e24c12009-10-30 11:49:00 +00002347
Ben Murdoch097c5b22016-05-18 11:27:45 +01002348 NewSpacePage* first_page() { return anchor_.next_page(); }
2349 NewSpacePage* current_page() { return current_page_; }
2350
2351 // Returns the current total capacity of the semispace.
2352 int current_capacity() { return current_capacity_; }
2353
2354 // Returns the maximum total capacity of the semispace.
2355 int maximum_capacity() { return maximum_capacity_; }
2356
2357 // Returns the initial capacity of the semispace.
2358 int minimum_capacity() { return minimum_capacity_; }
2359
2360 SemiSpaceId id() { return id_; }
2361
2362 // Approximate amount of physical memory committed for this space.
2363 size_t CommittedPhysicalMemory() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00002364
Leon Clarkee46be812010-01-19 14:06:41 +00002365 // If we don't have these here then SemiSpace will be abstract. However
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002366 // they should never be called:
2367
2368 intptr_t Size() override {
Steve Blocka7e24c12009-10-30 11:49:00 +00002369 UNREACHABLE();
2370 return 0;
2371 }
2372
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002373 intptr_t SizeOfObjects() override { return Size(); }
2374
2375 intptr_t Available() override {
2376 UNREACHABLE();
2377 return 0;
2378 }
2379
Steve Blocka7e24c12009-10-30 11:49:00 +00002380#ifdef DEBUG
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002381 void Print() override;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002382 // Validate a range of addresses in a SemiSpace.
2383 // The "from" address must be on a page prior to the "to" address,
2384 // in the linked page order, or it must be earlier on the same page.
2385 static void AssertValidRange(Address from, Address to);
2386#else
2387 // Do nothing.
2388 inline static void AssertValidRange(Address from, Address to) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002389#endif
2390
Ben Murdoch097c5b22016-05-18 11:27:45 +01002391#ifdef VERIFY_HEAP
2392 virtual void Verify();
2393#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002394
Steve Blocka7e24c12009-10-30 11:49:00 +00002395 private:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002396 NewSpacePage* anchor() { return &anchor_; }
2397
Ben Murdoch097c5b22016-05-18 11:27:45 +01002398 void set_current_capacity(int new_capacity) {
2399 current_capacity_ = new_capacity;
2400 }
2401
2402 // Copies the flags into the masked positions on all pages in the space.
2403 void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
2404
2405 // The currently committed space capacity.
2406 int current_capacity_;
2407
2408 // The maximum capacity that can be used by this space.
2409 int maximum_capacity_;
2410
2411 // The minimum capacity for the space. A space cannot shrink below this size.
2412 int minimum_capacity_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002413
Steve Blocka7e24c12009-10-30 11:49:00 +00002414 // The start address of the space.
2415 Address start_;
2416 // Used to govern object promotion during mark-compact collection.
2417 Address age_mark_;
2418
Steve Blocka7e24c12009-10-30 11:49:00 +00002419 bool committed_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002420 SemiSpaceId id_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002421
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002422 NewSpacePage anchor_;
2423 NewSpacePage* current_page_;
2424
2425 friend class SemiSpaceIterator;
2426 friend class NewSpacePageIterator;
Steve Blocka7e24c12009-10-30 11:49:00 +00002427};
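// A minimal usage sketch, not part of the V8 API (the helper name is
// hypothetical). It mirrors the GrowTo()/ShrinkTo() contract documented in
// the class above: growing is only attempted for a capacity above the
// current capacity and within the maximum capacity.
inline bool TryGrowSemiSpaceForExample(SemiSpace* semi_space,
                                       int new_capacity) {
  // Nothing to grow if the space is already at least this large.
  if (new_capacity <= semi_space->current_capacity()) return true;
  // GrowTo() must not be asked for more than the maximum capacity.
  if (new_capacity > semi_space->maximum_capacity()) return false;
  return semi_space->GrowTo(new_capacity);
}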
2428
2429
2430// A SemiSpaceIterator is an ObjectIterator that iterates over the active
2431// semispace of the heap's new space. It iterates over the objects in the
2432// semispace from a given start address (defaulting to the bottom of the
2433// semispace) to the top of the semispace. New objects allocated after the
2434// iterator is created are not iterated.
2435class SemiSpaceIterator : public ObjectIterator {
2436 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002437 // Create an iterator over the allocated objects in the given to-space.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002438 explicit SemiSpaceIterator(NewSpace* space);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002439
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002440 inline HeapObject* Next();
Steve Blocka7e24c12009-10-30 11:49:00 +00002441
2442 // Implementation of the ObjectIterator functions.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002443 inline HeapObject* next_object() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00002444
2445 private:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002446 void Initialize(Address start, Address end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002447
Steve Blocka7e24c12009-10-30 11:49:00 +00002448 // The current iteration point.
2449 Address current_;
2450 // The end of iteration.
2451 Address limit_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002452};
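// A minimal usage sketch, not part of the V8 API (the helper name is
// hypothetical). It walks the objects currently allocated in the active
// semispace, assuming Next() returns NULL once the iteration is exhausted,
// as the other object iterators in this file do.
inline intptr_t ToSpaceObjectBytesForExample(NewSpace* space) {
  intptr_t total = 0;
  SemiSpaceIterator it(space);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    total += obj->Size();  // Per-object size in bytes.
  }
  return total;
}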
2453
2454
2455// -----------------------------------------------------------------------------
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002456// A PageIterator iterates the pages in a semi-space.
2457class NewSpacePageIterator BASE_EMBEDDED {
2458 public:
2459 // Make an iterator that runs over all pages in to-space.
2460 explicit inline NewSpacePageIterator(NewSpace* space);
2461
2462 // Make an iterator that runs over all pages in the given semispace,
2463 // even those not used in allocation.
2464 explicit inline NewSpacePageIterator(SemiSpace* space);
2465
2466 // Make iterator that iterates from the page containing start
2467 // to the page that contains limit in the same semispace.
2468 inline NewSpacePageIterator(Address start, Address limit);
2469
2470 inline bool has_next();
2471 inline NewSpacePage* next();
2472
2473 private:
2474 NewSpacePage* prev_page_; // Previous page returned.
2475 // Next page that will be returned. Cached here so that we can use this
2476 // iterator for operations that deallocate pages.
2477 NewSpacePage* next_page_;
2478 // Last page returned.
2479 NewSpacePage* last_page_;
2480};
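// A minimal usage sketch, not part of the V8 API (the helper name is
// hypothetical): counts the pages of a single semispace, including pages
// that are not currently used for allocation.
inline int SemiSpacePageCountForExample(SemiSpace* semi_space) {
  int pages = 0;
  NewSpacePageIterator it(semi_space);
  while (it.has_next()) {
    it.next();
    pages++;
  }
  return pages;
}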
2481
2482
2483// -----------------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +00002484// The young generation space.
2485//
2486// The new space consists of a contiguous pair of semispaces. It simply
2487// forwards most functions to the appropriate semispace.
2488
2489class NewSpace : public Space {
2490 public:
2491 // Constructor.
Steve Block44f0eee2011-05-26 01:26:41 +01002492 explicit NewSpace(Heap* heap)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002493 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2494 to_space_(heap, kToSpace),
2495 from_space_(heap, kFromSpace),
2496 reservation_(),
Ben Murdoch097c5b22016-05-18 11:27:45 +01002497 top_on_previous_step_(0) {}
2498
2499 inline bool Contains(HeapObject* o);
2500 inline bool ContainsSlow(Address a);
2501 inline bool Contains(Object* o);
Steve Blocka7e24c12009-10-30 11:49:00 +00002502
2503 // Sets up the new space using the given chunk.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002504 bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002505
2506 // Tears down the space. Heap memory was not allocated by the space, so it
2507 // is not deallocated here.
2508 void TearDown();
2509
2510 // True if the space has been set up but not torn down.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002511 bool HasBeenSetUp() {
2512 return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00002513 }
2514
2515 // Flip the pair of spaces.
2516 void Flip();
2517
2518 // Grow the capacity of the semispaces. Assumes that they are not at
2519 // their maximum capacity.
2520 void Grow();
2521
2522 // Shrink the capacity of the semispaces.
2523 void Shrink();
2524
Steve Blocka7e24c12009-10-30 11:49:00 +00002525 // Return the allocated bytes in the active semispace.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002526 intptr_t Size() override {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002527 return pages_used_ * NewSpacePage::kAreaSize +
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002528 static_cast<int>(top() - to_space_.page_low());
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002529 }
2530
Ben Murdochf87a2032010-10-22 12:50:53 +01002531 // The same as Size(), but returning an int. We have to keep the intptr_t
2532 // version because it is inherited, but when we know we are dealing with the
2533 // new space, which cannot grow as large as the other spaces, this is useful:
2534 int SizeAsInt() { return static_cast<int>(Size()); }
Steve Block3ce2e202009-11-05 08:53:23 +00002535
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002536 // Return the allocatable capacity of a semispace.
2537 intptr_t Capacity() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002538 SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2539 return (to_space_.current_capacity() / Page::kPageSize) *
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002540 NewSpacePage::kAreaSize;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002541 }
2542
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002543 // Return the current size of a semispace, allocatable and non-allocatable
2544 // memory.
2545 intptr_t TotalCapacity() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002546 DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2547 return to_space_.current_capacity();
Steve Blocka7e24c12009-10-30 11:49:00 +00002548 }
Steve Block3ce2e202009-11-05 08:53:23 +00002549
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002550 // Committed memory for NewSpace is the committed memory of both semi-spaces
2551 // combined.
2552 intptr_t CommittedMemory() override {
2553 return from_space_.CommittedMemory() + to_space_.CommittedMemory();
Steve Block3ce2e202009-11-05 08:53:23 +00002554 }
2555
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002556 intptr_t MaximumCommittedMemory() override {
2557 return from_space_.MaximumCommittedMemory() +
2558 to_space_.MaximumCommittedMemory();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002559 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002560
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002561 // Approximate amount of physical memory committed for this space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002562 size_t CommittedPhysicalMemory() override;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002563
2564 // Return the available bytes without growing.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002565 intptr_t Available() override { return Capacity() - Size(); }
2566
2567 intptr_t PagesFromStart(Address addr) {
2568 return static_cast<intptr_t>(addr - bottom()) / Page::kPageSize;
2569 }
2570
2571 size_t AllocatedSinceLastGC() {
2572 intptr_t allocated = top() - to_space_.age_mark();
2573 if (allocated < 0) {
2574 // Runtime has lowered the top below the age mark.
2575 return 0;
2576 }
2577 // Correctly account for non-allocatable regions at the beginning of
2578 // each page from the age_mark() to the top().
2579 intptr_t pages =
2580 PagesFromStart(top()) - PagesFromStart(to_space_.age_mark());
2581 allocated -= pages * (NewSpacePage::kObjectStartOffset);
2582 DCHECK(0 <= allocated && allocated <= Size());
2583 return static_cast<size_t>(allocated);
2584 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002585 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002586 // Return the maximum capacity of a semispace.
2587 int MaximumCapacity() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002588 DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
2589 return to_space_.maximum_capacity();
Steve Blocka7e24c12009-10-30 11:49:00 +00002590 }
2591
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002592 bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2593
Steve Blocka7e24c12009-10-30 11:49:00 +00002594 // Returns the initial capacity of a semispace.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002595 int InitialTotalCapacity() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002596 DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
2597 return to_space_.minimum_capacity();
Steve Blocka7e24c12009-10-30 11:49:00 +00002598 }
2599
2600 // Return the address of the allocation pointer in the active semispace.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002601 Address top() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002602 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2603 return allocation_info_.top();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002604 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002605
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002606 // Return the address of the allocation pointer limit in the active semispace.
2607 Address limit() {
2608 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2609 return allocation_info_.limit();
2610 }
2611
Steve Blocka7e24c12009-10-30 11:49:00 +00002612 // Return the address of the first object in the active semispace.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002613 Address bottom() { return to_space_.space_start(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002614
2615 // Get the age mark of the inactive semispace.
2616 Address age_mark() { return from_space_.age_mark(); }
2617 // Set the age mark in the active semispace.
2618 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2619
2620 // The start address of the space.
2622 Address start() { return start_; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002623
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002624 // The allocation top and limit address.
2625 Address* allocation_top_address() { return allocation_info_.top_address(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002626
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002627 // The allocation limit address.
2628 Address* allocation_limit_address() {
2629 return allocation_info_.limit_address();
2630 }
2631
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002632 MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
2633 int size_in_bytes, AllocationAlignment alignment));
2634
2635 MUST_USE_RESULT INLINE(
2636 AllocationResult AllocateRawUnaligned(int size_in_bytes));
2637
2638 MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
2639 int size_in_bytes, AllocationAlignment alignment));
2640
2641 MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
2642 int size_in_bytes, AllocationAlignment alignment);
Steve Blocka7e24c12009-10-30 11:49:00 +00002643
2644 // Reset the allocation pointer to the beginning of the active semispace.
2645 void ResetAllocationInfo();
Steve Blocka7e24c12009-10-30 11:49:00 +00002646
Ben Murdoch097c5b22016-05-18 11:27:45 +01002647 // When inline allocation stepping is active, either because of incremental
2648 // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
2649 // inline allocation every once in a while. This is done by setting
2650 // allocation_info_.limit to be lower than the actual limit and increasing
2651 // it in steps to guarantee that the observers are notified periodically.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002652 void UpdateInlineAllocationLimit(int size_in_bytes);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002653
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002654 void DisableInlineAllocationSteps() {
2655 top_on_previous_step_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002656 UpdateInlineAllocationLimit(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002657 }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002658
Ben Murdoch097c5b22016-05-18 11:27:45 +01002659 // Allows observation of inline allocation. The observer->Step() method gets
2660 // called after every step_size bytes have been allocated (approximately).
2661 // This works by lowering the allocation limit and raising it again after
2662 // each step.
2663 void AddAllocationObserver(AllocationObserver* observer) override;
2664
2665 void RemoveAllocationObserver(AllocationObserver* observer) override;
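// For example, a profiler that wants to be notified roughly every N bytes of
// new-space allocation implements AllocationObserver::Step(), registers the
// observer with AddAllocationObserver(), and unregisters it with
// RemoveAllocationObserver() when it is done.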
2666
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002667 // Get the extent of the inactive semispace (for use as a marking stack,
2668 // or to zap it). Notice: space-addresses are not necessarily on the
2669 // same page, so FromSpaceStart() might be above FromSpaceEnd().
2670 Address FromSpacePageLow() { return from_space_.page_low(); }
2671 Address FromSpacePageHigh() { return from_space_.page_high(); }
2672 Address FromSpaceStart() { return from_space_.space_start(); }
2673 Address FromSpaceEnd() { return from_space_.space_end(); }
2674
2675 // Get the extent of the active semispace's pages' memory.
2676 Address ToSpaceStart() { return to_space_.space_start(); }
2677 Address ToSpaceEnd() { return to_space_.space_end(); }
2678
Ben Murdoch097c5b22016-05-18 11:27:45 +01002679 inline bool ToSpaceContainsSlow(Address a);
2680 inline bool FromSpaceContainsSlow(Address a);
2681 inline bool ToSpaceContains(Object* o);
2682 inline bool FromSpaceContains(Object* o);
Steve Blocka7e24c12009-10-30 11:49:00 +00002683
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002684 // Try to switch the active semispace to a new, empty page.
2685 // Returns false if this isn't possible or reasonable (i.e., there
2686 // are no pages, or the current page is already empty), or true
2687 // if successful.
2688 bool AddFreshPage();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002689 bool AddFreshPageSynchronized();
Steve Blocka7e24c12009-10-30 11:49:00 +00002690
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002691#ifdef VERIFY_HEAP
Steve Blocka7e24c12009-10-30 11:49:00 +00002692 // Verify the active semispace.
2693 virtual void Verify();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002694#endif
2695
2696#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00002697 // Print the active semispace.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002698 void Print() override { to_space_.Print(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002699#endif
2700
Steve Blocka7e24c12009-10-30 11:49:00 +00002701 // Iterates the active semispace to collect statistics.
2702 void CollectStatistics();
2703 // Reports previously collected statistics of the active semispace.
2704 void ReportStatistics();
2705 // Clears previously collected statistics.
2706 void ClearHistograms();
2707
2708 // Record the allocation or promotion of a heap object. Note that we don't
2709 // record every single allocation, but only those that happen in the
2710 // to space during a scavenge GC.
2711 void RecordAllocation(HeapObject* obj);
2712 void RecordPromotion(HeapObject* obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002713
2714 // Return whether the operation succeeded.
2715 bool CommitFromSpaceIfNeeded() {
2716 if (from_space_.is_committed()) return true;
2717 return from_space_.Commit();
2718 }
2719
2720 bool UncommitFromSpace() {
2721 if (!from_space_.is_committed()) return true;
2722 return from_space_.Uncommit();
2723 }
2724
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002725 bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002726
2727 SemiSpace* active_space() { return &to_space_; }
2728
Ben Murdoch097c5b22016-05-18 11:27:45 +01002729 void PauseAllocationObservers() override;
2730 void ResumeAllocationObservers() override;
2731
Steve Blocka7e24c12009-10-30 11:49:00 +00002732 private:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002733 // Update allocation info to match the current to-space page.
2734 void UpdateAllocationInfo();
2735
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002736 base::Mutex mutex_;
2737
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002738 Address chunk_base_;
2739 uintptr_t chunk_size_;
2740
Steve Blocka7e24c12009-10-30 11:49:00 +00002741 // The semispaces.
2742 SemiSpace to_space_;
2743 SemiSpace from_space_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002744 base::VirtualMemory reservation_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002745 int pages_used_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002746
2747 // Start address of the space.
2748 Address start_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002749
2750 // Allocation pointer and limit for normal allocation and allocation during
2751 // mark-compact collection.
2752 AllocationInfo allocation_info_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002753
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002754 Address top_on_previous_step_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002755
Steve Blocka7e24c12009-10-30 11:49:00 +00002756 HistogramInfo* allocated_histogram_;
2757 HistogramInfo* promoted_histogram_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002758
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002759 bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
Steve Blocka7e24c12009-10-30 11:49:00 +00002760
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002761 // If we are doing inline allocation in steps, this method performs the 'step'
2762 // operation. top is the memory address of the bump pointer at the last
2763 // inline allocation (i.e. it determines the numbers of bytes actually
2764 // allocated since the last step.) new_top is the address of the bump pointer
2765 // where the next byte is going to be allocated from. top and new_top may be
2766 // different when we cross a page boundary or reset the space.
2767 void InlineAllocationStep(Address top, Address new_top, Address soon_object,
2768 size_t size);
2769 intptr_t GetNextInlineAllocationStepSize();
2770 void StartNextInlineAllocationStep();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002771
Steve Blocka7e24c12009-10-30 11:49:00 +00002772 friend class SemiSpaceIterator;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002773};
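// A minimal usage sketch, not part of the V8 API (the helper name is
// hypothetical): the distance between top() and limit() is the room left for
// inline bump-pointer allocation on the current to-space page. Note that
// with allocation observers active, limit() may be lower than the real end
// of the page (see UpdateInlineAllocationLimit() above).
inline intptr_t InlineAllocationRoomForExample(NewSpace* new_space) {
  return static_cast<intptr_t>(new_space->limit() - new_space->top());
}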
Steve Blocka7e24c12009-10-30 11:49:00 +00002774
Ben Murdoch097c5b22016-05-18 11:27:45 +01002775class PauseAllocationObserversScope {
Steve Blocka7e24c12009-10-30 11:49:00 +00002776 public:
Ben Murdoch097c5b22016-05-18 11:27:45 +01002777 explicit PauseAllocationObserversScope(Heap* heap);
2778 ~PauseAllocationObserversScope();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002779
2780 private:
Ben Murdoch097c5b22016-05-18 11:27:45 +01002781 Heap* heap_;
2782 DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002783};
2784
2785// -----------------------------------------------------------------------------
2786// Compaction space that is used temporarily during compaction.
2787
2788class CompactionSpace : public PagedSpace {
2789 public:
2790 CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
2791 : PagedSpace(heap, id, executable) {}
2792
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002793 bool is_local() override { return true; }
2794
2795 void RefillFreeList() override;
2796
2797 protected:
2798 // The space is temporary and not included in any snapshots.
2799 bool snapshotable() override { return false; }
2800
2801 MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
2802 int size_in_bytes) override;
2803};
2804
2805
2806// A collection of |CompactionSpace|s used by a single compaction task.
2807class CompactionSpaceCollection : public Malloced {
2808 public:
2809 explicit CompactionSpaceCollection(Heap* heap)
2810 : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
Ben Murdoch097c5b22016-05-18 11:27:45 +01002811 code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002812
2813 CompactionSpace* Get(AllocationSpace space) {
2814 switch (space) {
2815 case OLD_SPACE:
2816 return &old_space_;
2817 case CODE_SPACE:
2818 return &code_space_;
2819 default:
2820 UNREACHABLE();
2821 }
2822 UNREACHABLE();
2823 return nullptr;
2824 }
2825
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002826 private:
2827 CompactionSpace old_space_;
2828 CompactionSpace code_space_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002829};
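// A minimal usage sketch, not part of the V8 API (the helper name is
// hypothetical): a compaction task picks the per-task space that matches the
// identity of the page it is evacuating; Get() only covers the spaces that
// compaction currently deals with.
inline CompactionSpace* CompactionSpaceForExample(
    CompactionSpaceCollection* spaces, bool evacuating_code) {
  return spaces->Get(evacuating_code ? CODE_SPACE : OLD_SPACE);
}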
2830
2831
2832// -----------------------------------------------------------------------------
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002833// Old object space (includes the old space of objects and code space)
Steve Blocka7e24c12009-10-30 11:49:00 +00002834
2835class OldSpace : public PagedSpace {
2836 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002837 // Creates an old space object. The constructor does not allocate pages
2838 // from OS.
2839 OldSpace(Heap* heap, AllocationSpace id, Executability executable)
2840 : PagedSpace(heap, id, executable) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002841};
2842
2843
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002844// For contiguous spaces, top should be in the space (or at the end) and limit
2845// should be the end of the space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002846#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
2847 SLOW_DCHECK((space).page_low() <= (info).top() && \
2848 (info).top() <= (space).page_high() && \
2849 (info).limit() <= (space).page_high())
Steve Blocka7e24c12009-10-30 11:49:00 +00002850
2851
2852// -----------------------------------------------------------------------------
2853// Old space for all map objects
2854
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002855class MapSpace : public PagedSpace {
Steve Blocka7e24c12009-10-30 11:49:00 +00002856 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002857 // Creates a map space object.
2858 MapSpace(Heap* heap, AllocationSpace id)
2859 : PagedSpace(heap, id, NOT_EXECUTABLE),
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002860 max_map_space_pages_(kMaxMapPageIndex - 1) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002861
Ben Murdoch85b71792012-04-11 18:30:58 +01002862 // Upper bound on the number of pages in the map space.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002863 // TODO(1600): this limit is artificial just to keep code compilable
2864 static const int kMaxMapPageIndex = 1 << 16;
Ben Murdoch85b71792012-04-11 18:30:58 +01002865
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002866 int RoundSizeDownToObjectAlignment(int size) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002867 if (base::bits::IsPowerOfTwo32(Map::kSize)) {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002868 return RoundDown(size, Map::kSize);
2869 } else {
2870 return (size / Map::kSize) * Map::kSize;
Leon Clarkee46be812010-01-19 14:06:41 +00002871 }
Leon Clarkee46be812010-01-19 14:06:41 +00002872 }
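// For example, with a hypothetical Map::kSize of 88 bytes (not a power of
// two), a request of 200 bytes is rounded down to (200 / 88) * 88 = 176
// bytes, i.e. room for exactly two maps.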
2873
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002874#ifdef VERIFY_HEAP
2875 void VerifyObject(HeapObject* obj) override;
2876#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002877
2878 private:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002879 static const int kMapsPerPage = Page::kAllocatableMemory / Map::kSize;
Leon Clarkee46be812010-01-19 14:06:41 +00002880
2881 // Do map space compaction if there is a page gap.
Leon Clarked91b9f72010-01-27 17:25:45 +00002882 int CompactionThreshold() {
2883 return kMapsPerPage * (max_map_space_pages_ - 1);
2884 }
2885
2886 const int max_map_space_pages_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002887};
2888
2889
2890// -----------------------------------------------------------------------------
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002891// Large objects ( > Page::kMaxRegularHeapObjectSize ) are allocated and
2892// managed by the large object space. A large object is allocated from OS
2893// heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
Steve Blocka7e24c12009-10-30 11:49:00 +00002894// A large object always starts at offset Page::kObjectStartOffset within its page.
2895// Large objects do not move during garbage collections.
2896
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002897class LargeObjectSpace : public Space {
Steve Blocka7e24c12009-10-30 11:49:00 +00002898 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002899 LargeObjectSpace(Heap* heap, AllocationSpace id);
2900 virtual ~LargeObjectSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00002901
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002902 // Initializes internal data structures.
2903 bool SetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00002904
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002905 // Releases internal resources, frees objects in this space.
2906 void TearDown();
Steve Blocka7e24c12009-10-30 11:49:00 +00002907
Ben Murdoch592a9fc2012-03-05 11:04:45 +00002908 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2909 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2910 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2911 }
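// For example, a chunk of 3 * Page::kPageSize bytes leaves room for an
// object of 2 * Page::kPageSize - Page::kObjectStartOffset bytes.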
2912
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002913 // Allocates a large object of |object_size| bytes. |executable| determines
2914 // whether the backing memory chunk is executable.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002915 MUST_USE_RESULT AllocationResult
2916 AllocateRaw(int object_size, Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002917
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002918 // Available bytes for objects in this space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002919 inline intptr_t Available() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00002920
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002921 intptr_t Size() override { return size_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002922
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002923 intptr_t SizeOfObjects() override { return objects_size_; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002924
2925 // Approximate amount of physical memory committed for this space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002926 size_t CommittedPhysicalMemory() override;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002927
2928 int PageCount() { return page_count_; }
2929
2930 // Finds an object for a given address, returns a Smi if it is not found.
2931 // The function iterates through all objects in this space, so it may be slow.
2932 Object* FindObject(Address a);
Steve Blocka7e24c12009-10-30 11:49:00 +00002933
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002934 // Finds a large object page containing the given address, returns NULL
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002935 // if such a page doesn't exist.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002936 LargePage* FindPage(Address a);
Steve Blocka7e24c12009-10-30 11:49:00 +00002937
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002938 // Clears the marking state of live objects.
2939 void ClearMarkingStateOfLiveObjects();
2940
Steve Blocka7e24c12009-10-30 11:49:00 +00002941 // Frees unmarked objects.
2942 void FreeUnmarkedObjects();
2943
2944 // Checks whether a heap object is in this space; O(1).
2945 bool Contains(HeapObject* obj);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002946 // Checks whether an address is in the object area in this space. Iterates
2947 // all objects in the space. May be slow.
2948 bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002949
2950 // Checks whether the space is empty.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002951 bool IsEmpty() { return first_page_ == NULL; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002952
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002953 LargePage* first_page() { return first_page_; }
2954
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002955#ifdef VERIFY_HEAP
Steve Blocka7e24c12009-10-30 11:49:00 +00002956 virtual void Verify();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002957#endif
2958
2959#ifdef DEBUG
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002960 void Print() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00002961 void ReportStatistics();
2962 void CollectCodeStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002963#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002964
2965 private:
2966 // The head of the linked list of large object chunks.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002967 LargePage* first_page_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002968 intptr_t size_; // allocated bytes
2969 int page_count_; // number of chunks
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002970 intptr_t objects_size_; // size of objects
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002971 // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
2972 HashMap chunk_map_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002973
Steve Blocka7e24c12009-10-30 11:49:00 +00002974 friend class LargeObjectIterator;
Steve Blocka7e24c12009-10-30 11:49:00 +00002975};
2976
2977
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002978class LargeObjectIterator : public ObjectIterator {
Steve Blocka7e24c12009-10-30 11:49:00 +00002979 public:
2980 explicit LargeObjectIterator(LargeObjectSpace* space);
Steve Blocka7e24c12009-10-30 11:49:00 +00002981
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002982 HeapObject* Next();
Steve Blocka7e24c12009-10-30 11:49:00 +00002983
2984 // implementation of ObjectIterator.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002985 virtual HeapObject* next_object() { return Next(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002986
2987 private:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002988 LargePage* current_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002989};
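// A minimal usage sketch, not part of the V8 API (the helper name is
// hypothetical): walks the large object space, assuming Next() returns NULL
// once the iteration is done, as the other object iterators in this file do.
inline int LargeObjectCountForExample(LargeObjectSpace* space) {
  int count = 0;
  LargeObjectIterator it(space);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    count++;
  }
  return count;
}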
2990
2991
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002992// Iterates over the chunks (pages and large object pages) that can contain
2993// pointers to new space.
2994class PointerChunkIterator BASE_EMBEDDED {
2995 public:
2996 inline explicit PointerChunkIterator(Heap* heap);
2997
2998 // Return NULL when the iterator is done.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002999 inline MemoryChunk* next();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003000
3001 private:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003002 enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003003 State state_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003004 PageIterator old_iterator_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003005 PageIterator map_iterator_;
3006 LargeObjectIterator lo_iterator_;
3007};
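// A minimal usage sketch, not part of the V8 API (the helper name is
// hypothetical): counts the chunks that may contain pointers to new space,
// relying on next() returning NULL when the iterator is done.
inline int PointerChunkCountForExample(Heap* heap) {
  int chunks = 0;
  PointerChunkIterator it(heap);
  while (it.next() != NULL) chunks++;
  return chunks;
}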
3008
3009
Steve Block44f0eee2011-05-26 01:26:41 +01003010#ifdef DEBUG
3011struct CommentStatistic {
3012 const char* comment;
3013 int size;
3014 int count;
3015 void Clear() {
3016 comment = NULL;
3017 size = 0;
3018 count = 0;
3019 }
3020 // Must be small, since an iteration is used for lookup.
3021 static const int kMaxComments = 64;
3022};
3023#endif
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003024} // namespace internal
3025} // namespace v8
Steve Block44f0eee2011-05-26 01:26:41 +01003026
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003027#endif // V8_HEAP_SPACES_H_