// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include <list>

#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/hashmap.h"
#include "src/base/platform/mutex.h"
#include "src/flags.h"
#include "src/list.h"
#include "src/objects.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

class AllocationInfo;
class AllocationObserver;
class CompactionSpace;
class CompactionSpaceCollection;
class FreeList;
class Isolate;
class LocalArrayBufferTracker;
class MemoryAllocator;
class MemoryChunk;
class Page;
class PagedSpace;
class SemiSpace;
class SkipList;
class SlotsBuffer;
class SlotSet;
class TypedSlotSet;
class Space;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects, the rest of old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of intergenerational
// references. See heap/store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old space or large object space
// it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old space and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space which is iterated in a special fashion. However we still
// require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.
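//
// As an illustrative sketch (not actual V8 code), linear allocation over the
// top/limit pair amounts to a bump-pointer fast path:
//
//   if (top + size_in_bytes <= limit) {
//     Address result = top;   // allocate at the current top ...
//     top += size_in_bytes;   // ... and bump it past the new object
//     return result;
//   }
//   // otherwise fall back to the free list or trigger a GC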

// Some assertion macros used in the debugging mode.

#define DCHECK_PAGE_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))

#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
  DCHECK((0 < size) && (size <= code_space->AreaSize()))

#define DCHECK_PAGE_OFFSET(offset) \
  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

 private:
  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1);
    } else {
      return MarkBit(cell_, new_mask);
    }
  }

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  CellType* cell_;
  CellType mask_;

  friend class Marking;
};


// Bitmap is a sequence of cells each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() { return CellsForLength(kLength); }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  V8_INLINE static uint32_t IndexInCell(uint32_t index) {
    return index & kBitIndexMask;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) { return reinterpret_cast<Address>(this); }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index) {
    MarkBit::CellType mask = 1u << IndexInCell(index);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask);
  }
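  // Worked example: for index 37, IndexToCell(37) == 1 and IndexInCell(37) ==
  // 5 (since 37 == 32 + 5), so MarkBitFromIndex(37) refers to bit 5 of
  // cells()[1], i.e. the mark bit with mask 1u << 5.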

  static inline void Clear(MemoryChunk* chunk);

  static inline void SetAllBits(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) {
        return false;
      }
    }
    return true;
  }

  // Clears all bits starting from {cell_base_index} up to and excluding
  // {index}. Note that {cell_base_index} is required to be cell aligned.
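  // Worked example: ClearRange(64, 100) zeroes cells()[2] entirely (bit
  // indices 64..95) and then clears bits 96..99 by masking cells()[3] with
  // ~((1u << 4) - 1).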
  void ClearRange(uint32_t cell_base_index, uint32_t index) {
    DCHECK_EQ(IndexInCell(cell_base_index), 0u);
    DCHECK_GE(index, cell_base_index);
    uint32_t start_cell_index = IndexToCell(cell_base_index);
    uint32_t end_cell_index = IndexToCell(index);
    DCHECK_GE(end_cell_index, start_cell_index);
    // Clear all cells till the cell containing the last index.
    for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
      cells()[i] = 0;
    }
    // Clear all bits in the last cell till the last bit before index.
    uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
    cells()[end_cell_index] &= clear_mask;
  }
};

enum FreeListCategoryType {
  kTiniest,
  kTiny,
  kSmall,
  kMedium,
  kLarge,
  kHuge,

  kFirstCategory = kTiniest,
  kLastCategory = kHuge,
  kNumberOfCategories = kLastCategory + 1,
  kInvalidCategory
};

enum FreeMode { kLinkCategory, kDoNotLinkCategory };

// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
 public:
  static const int kSize = kIntSize +      // FreeListCategoryType type_
                           kIntSize +      // int available_
                           kPointerSize +  // FreeSpace* top_
                           kPointerSize +  // FreeListCategory* prev_
                           kPointerSize;   // FreeListCategory* next_

  FreeListCategory()
      : type_(kInvalidCategory),
        available_(0),
        top_(nullptr),
        prev_(nullptr),
        next_(nullptr) {}

  void Initialize(FreeListCategoryType type) {
    type_ = type;
    available_ = 0;
    top_ = nullptr;
    prev_ = nullptr;
    next_ = nullptr;
  }

  void Invalidate();

  void Reset();

  void ResetStats() { Reset(); }

  void RepairFreeList(Heap* heap);

  // Relinks the category into the currently owning free list. Requires that
  // the category is currently unlinked.
  void Relink();

  bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode);

  // Picks a node from the list and stores its size in |node_size|. Returns
  // nullptr if the category is empty.
  FreeSpace* PickNodeFromList(int* node_size);

  // Performs a single try to pick a node of at least |minimum_size| from the
  // category. Stores the actual size in |node_size|. Returns nullptr if no
  // node is found.
  FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size);

  // Picks a node of at least |minimum_size| from the category. Stores the
  // actual size in |node_size|. Returns nullptr if no node is found.
  FreeSpace* SearchForNodeInList(int minimum_size, int* node_size);

  inline FreeList* owner();
  inline bool is_linked();
  bool is_empty() { return top() == nullptr; }
  int available() const { return available_; }

#ifdef DEBUG
  intptr_t SumFreeList();
  int FreeListLength();
#endif

 private:
  // For debug builds we accurately compute free list lengths up until
  // {kVeryLongFreeList} by manually walking the list.
  static const int kVeryLongFreeList = 500;

  inline Page* page();

  FreeSpace* top() { return top_; }
  void set_top(FreeSpace* top) { top_ = top; }
  FreeListCategory* prev() { return prev_; }
  void set_prev(FreeListCategory* prev) { prev_ = prev; }
  FreeListCategory* next() { return next_; }
  void set_next(FreeListCategory* next) { next_ = next; }

  // |type_|: The type of this free list category.
  FreeListCategoryType type_;

  // |available_|: Total available bytes in all blocks of this free list
  // category.
  int available_;

  // |top_|: Points to the top FreeSpace* in the free list category.
  FreeSpace* top_;

  FreeListCategory* prev_;
  FreeListCategory* next_;

  friend class FreeList;
  friend class PagedSpace;
};

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    EVACUATION_CANDIDATE,
    NEVER_EVACUATE,  // May contain immortal immutables.

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black and
    // a white to grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
    // from new to old space during evacuation.
    PAGE_NEW_OLD_PROMOTION,

    // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
    // within the new space during evacuation.
    PAGE_NEW_NEW_PROMOTION,

    // A black page has all mark bits set to 1 (black). A black page currently
    // cannot be iterated because it is not swept. Moreover, live bytes are
    // also not updated.
    BLACK_PAGE,

    // This flag is intended to be used for testing. Works only when both
    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
    // are set. It forces the page to become an evacuation candidate at next
    // candidates selection cycle.
    FORCE_EVACUATION_CANDIDATE_FOR_TESTING,

    // This flag is intended to be used for testing.
    NEVER_ALLOCATE_ON_PAGE,

    // The memory chunk is already logically freed, however the actual freeing
    // still has to be performed.
    PRE_FREED,

    // |POOLED|: When actually freeing this chunk, only uncommit and do not
    // give up the reservation as we still reuse the chunk at some point.
    POOLED,

    // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
    // has been aborted and needs special handling by the sweeper.
    COMPACTION_WAS_ABORTED,

    // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
    // on pages is sometimes aborted. The flag is used to avoid repeatedly
    // triggering on the same page.
    COMPACTION_WAS_ABORTED_FOR_TESTING,

    // |ANCHOR|: Flag is set if page is an anchor.
    ANCHOR,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };

  // |kSweepingDone|: The page state when sweeping is complete or sweeping must
  // not be performed on that page. Sweeper threads that are done with their
  // work will set this value and not touch the page anymore.
  // |kSweepingPending|: This page is ready for parallel sweeping.
  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
  enum ConcurrentSweepingState {
    kSweepingDone,
    kSweepingPending,
    kSweepingInProgress,
  };

  // Every n write barrier invocations we go to runtime even though
  // we could have handled it in generated code. This lets us check
  // whether we have hit the limit and should do some more marking.
  static const int kWriteBarrierCounterGranularity = 500;

  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) | (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;

  static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize  // size_t size
      + kIntptrSize               // intptr_t flags_
      + kPointerSize              // Address area_start_
      + kPointerSize              // Address area_end_
      + 2 * kPointerSize          // base::VirtualMemory reservation_
      + kPointerSize              // Address owner_
      + kPointerSize              // Heap* heap_
      + kIntSize;                 // int progress_bar_

  static const size_t kOldToNewSlotsOffset =
      kLiveBytesOffset + kIntSize;  // int live_byte_count_

  static const size_t kWriteBarrierCounterOffset =
      kOldToNewSlotsOffset + kPointerSize  // SlotSet* old_to_new_slots_;
      + kPointerSize   // SlotSet* old_to_old_slots_;
      + kPointerSize   // TypedSlotSet* typed_old_to_new_slots_;
      + kPointerSize   // TypedSlotSet* typed_old_to_old_slots_;
      + kPointerSize;  // SkipList* skip_list_;

  static const size_t kMinHeaderSize =
      kWriteBarrierCounterOffset +
      kIntptrSize         // intptr_t write_barrier_counter_
      + kPointerSize      // AtomicValue high_water_mark_
      + kPointerSize      // base::Mutex* mutex_
      + kPointerSize      // base::AtomicWord concurrent_sweeping_
      + 2 * kPointerSize  // AtomicNumber free-list statistics
      + kPointerSize      // AtomicValue next_chunk_
      + kPointerSize      // AtomicValue prev_chunk_
      // FreeListCategory categories_[kNumberOfCategories]
      + FreeListCategory::kSize * kNumberOfCategories +
      kPointerSize;  // LocalArrayBufferTracker* local_tracker_;

  // We add some more space to the computed header size to account for missing
  // alignment requirements in our computation.
  // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
  static const size_t kHeaderSize = kMinHeaderSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset =
      kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
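  // (The expression above rounds kBodyOffset up to the next multiple of
  // kObjectStartAlignment, leaving it unchanged when it is already aligned.
  // Illustrative arithmetic: with 32 * 8 = 256-byte alignment on a 64-bit
  // build, a hypothetical kBodyOffset of 700 would yield an offset of 768.)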

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;

  static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
  static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);

  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }
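  // Illustrative arithmetic: since chunks are kAlignment (1MB, per the class
  // comment above) aligned, masking off the low 20 bits of an interior
  // pointer such as 0x40012345 recovers the chunk header at 0x40000000.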

  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  static inline void UpdateHighWaterMark(Address mark) {
    if (mark == nullptr) return;
    // Need to subtract one from the mark because when a chunk is full the
    // top points to the next address after the chunk, which effectively
    // belongs to another chunk. See the comment to Page::FromTopOrLimit.
    MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
    intptr_t old_mark = 0;
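    // Retry loop: only ever raise the mark. TrySetValue publishes new_mark
    // only if high_water_mark_ still holds old_mark; if another thread raced
    // ahead, re-read the current value and check again.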
    do {
      old_mark = chunk->high_water_mark_.Value();
    } while ((new_mark > old_mark) &&
             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
  }

  static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }

  Address address() { return reinterpret_cast<Address>(this); }

  base::Mutex* mutex() { return mutex_; }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether |addr| can be a limit of addresses in this page. It's a
  // limit if it's in the page, or if it's just after the last byte of the page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
    return concurrent_sweeping_;
  }

  // Manage live byte count, i.e., count of bytes in black objects.
  inline void ResetLiveBytes();
  inline void IncrementLiveBytes(int by);

  int LiveBytes() {
    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
    DCHECK(!IsFlagSet(BLACK_PAGE) || live_byte_count_ == 0);
    return live_byte_count_;
  }

  void SetLiveBytes(int live_bytes) {
    if (IsFlagSet(BLACK_PAGE)) return;
    DCHECK_GE(live_bytes, 0);
    DCHECK_LE(static_cast<size_t>(live_bytes), size_);
    live_byte_count_ = live_bytes;
  }

  int write_barrier_counter() {
    return static_cast<int>(write_barrier_counter_);
  }

  void set_write_barrier_counter(int counter) {
    write_barrier_counter_ = counter;
  }

  size_t size() const { return size_; }

  inline Heap* heap() const { return heap_; }

  inline SkipList* skip_list() { return skip_list_; }

  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

  inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
  inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
  inline TypedSlotSet* typed_old_to_new_slots() {
    return typed_old_to_new_slots_;
  }
  inline TypedSlotSet* typed_old_to_old_slots() {
    return typed_old_to_old_slots_;
  }
  inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }

  void AllocateOldToNewSlots();
  void ReleaseOldToNewSlots();
  void AllocateOldToOldSlots();
  void ReleaseOldToOldSlots();
  void AllocateTypedOldToNewSlots();
  void ReleaseTypedOldToNewSlots();
  void AllocateTypedOldToOldSlots();
  void ReleaseTypedOldToOldSlots();
  void AllocateLocalTracker();
  void ReleaseLocalTracker();

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() { return static_cast<int>(area_end() - area_start()); }

  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }

  Address HighWaterMark() { return address() + high_water_mark_.Value(); }

  int progress_bar() {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    return progress_bar_;
  }

  void set_progress_bar(int progress_bar) {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
  }

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }

  void PrintMarkbits() { markbits()->Print(); }

  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }

  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask are set to
  // the value in "flags", the rest retain the current value in |flags_|.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }
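  // Illustrative use of the flag-mask API (not code from this header): copy
  // only the kCopyOnFlipFlagsMask bits from another chunk while leaving all
  // other flags untouched:
  //   chunk->SetFlags(other->GetFlags(), Page::kCopyOnFlipFlagsMask);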

  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }

  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }

  bool IsEvacuationCandidate() {
    DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
    return IsFlagSet(EVACUATION_CANDIDATE);
  }

  bool CanAllocate() {
    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
  }

  bool ShouldSkipEvacuationSlotRecording() {
    return ((flags_ & kSkipEvacuationSlotsRecordingMask) != 0) &&
           !IsFlagSet(COMPACTION_WAS_ABORTED);
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }

  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

  MemoryChunk* next_chunk() { return next_chunk_.Value(); }

  MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }

  void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }

  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
        kPageHeaderTag) {
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                      kPageHeaderTag);
    } else {
      return nullptr;
    }
  }

  void set_owner(Space* space) {
    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
           kPageHeaderTag);
  }

  bool HasPageHeader() { return owner() != nullptr; }

  void InsertAfter(MemoryChunk* other);
  void Unlink();

 protected:
  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner,
                                 base::VirtualMemory* reservation);

  // Should be called when memory chunk is about to be freed.
  void ReleaseAllocatedMemory();

  base::VirtualMemory* reserved_memory() { return &reservation_; }

  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  base::VirtualMemory reservation_;

  // The identity of the owning space. This is tagged as a failure pointer, but
  // no failure can be in an object, so this can be distinguished from any
  // entry in a fixed array.
  Address owner_;

  Heap* heap_;

  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;

  // Count of bytes marked black on page.
  int live_byte_count_;

  // A single slot set for small pages (of size kPageSize) or an array of slot
  // sets for large pages. In the latter case the number of entries in the
  // array is ceil(size() / kPageSize).
  SlotSet* old_to_new_slots_;
  SlotSet* old_to_old_slots_;
  TypedSlotSet* typed_old_to_new_slots_;
  TypedSlotSet* typed_old_to_old_slots_;

  SkipList* skip_list_;

  intptr_t write_barrier_counter_;

  // Assuming the initial allocation on a page is sequential, count the highest
  // number of bytes ever allocated on the page.
  base::AtomicValue<intptr_t> high_water_mark_;

  base::Mutex* mutex_;

  base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;

  // PagedSpace free-list statistics.
  base::AtomicNumber<intptr_t> available_in_free_list_;
  base::AtomicNumber<intptr_t> wasted_memory_;

  // next_chunk_ holds a pointer of type MemoryChunk
  base::AtomicValue<MemoryChunk*> next_chunk_;
  // prev_chunk_ holds a pointer of type MemoryChunk
  base::AtomicValue<MemoryChunk*> prev_chunk_;

  FreeListCategory categories_[kNumberOfCategories];

  LocalArrayBufferTracker* local_tracker_;

 private:
  void InitializeReservedMemory() { reservation_.Reset(); }

  friend class MemoryAllocator;
  friend class MemoryChunkValidator;
};

// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromTopOrLimit(top);
class Page : public MemoryChunk {
 public:
  static const intptr_t kCopyAllFlags = ~0;

  // Page flags copied from from-space to to-space when flipping semispaces.
  static const intptr_t kCopyOnFlipFlagsMask =
      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

  // Maximum object size that gets allocated into regular pages. Objects larger
  // than that size are allocated in large object space and are never moved in
  // memory. This also applies to new space allocation, since objects are never
  // migrated from new space to large object space. Takes double alignment into
  // account.
  // TODO(hpayer): This limit should be way smaller but we currently have
  // short living objects >256K.
  static const int kMaxRegularHeapObjectSize = 600 * KB;

  static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);

  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[. This only works if the object
  // is in fact in a page.
  static Page* FromAddress(Address addr) {
    return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
  }

  // Returns the page containing the address provided. The address can
  // potentially point right after the page. To also be safe for tagged values
  // we subtract one word. The valid address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
  static Page* FromAllocationAreaAddress(Address address) {
    return Page::FromAddress(address - kPointerSize);
  }

  // Checks if address1 and address2 are on the same new space page.
  static bool OnSamePage(Address address1, Address address2) {
    return Page::FromAddress(address1) == Page::FromAddress(address2);
  }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address addr) {
    return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
  }

  static bool IsAtObjectStart(Address addr) {
    return (reinterpret_cast<intptr_t>(addr) & kPageAlignmentMask) ==
           kObjectStartOffset;
  }

  inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);

  // Create a Page object that is only used as anchor for the doubly-linked
  // list of real pages.
  explicit Page(Space* owner) { InitializeAsAnchor(owner); }

  inline void MarkNeverAllocateForTesting();
  inline void MarkEvacuationCandidate();
  inline void ClearEvacuationCandidate();

  Page* next_page() { return static_cast<Page*>(next_chunk()); }
  Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
  void set_next_page(Page* page) { set_next_chunk(page); }
  void set_prev_page(Page* page) { set_prev_chunk(page); }

  template <typename Callback>
  inline void ForAllFreeListCategories(Callback callback) {
    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
      callback(&categories_[i]);
    }
  }
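  // Illustrative usage sketch (not code from this header): summing the free
  // bytes tracked by the per-page free list categories:
  //   intptr_t free_bytes = 0;
  //   page->ForAllFreeListCategories(
  //       [&free_bytes](FreeListCategory* category) {
  //         free_bytes += category->available();
  //       });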

  // Returns the offset of a given address to this page.
  inline int Offset(Address a) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset in this page.
  Address OffsetToAddress(int offset) {
    DCHECK_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
  // progress. In particular, when we know that right before this call a
  // sweeper thread was sweeping this page.
  void WaitUntilSweepingCompleted() {
    mutex_->Lock();
    mutex_->Unlock();
    DCHECK(SweepingDone());
  }

  bool SweepingDone() {
    return concurrent_sweeping_state().Value() == kSweepingDone;
  }

  void ResetFreeListStatistics();

  int LiveBytesFromFreeList() {
    return static_cast<int>(area_size() - wasted_memory() -
                            available_in_free_list());
  }

  FreeListCategory* free_list_category(FreeListCategoryType type) {
    return &categories_[type];
  }

  bool is_anchor() { return IsFlagSet(Page::ANCHOR); }

  intptr_t wasted_memory() { return wasted_memory_.Value(); }
  void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); }
  intptr_t available_in_free_list() { return available_in_free_list_.Value(); }
  void add_available_in_free_list(intptr_t available) {
    available_in_free_list_.Increment(available);
  }

#ifdef DEBUG
  void Print();
#endif  // DEBUG

 private:
  enum InitializationMode { kFreeMemory, kDoNotFreeMemory };

  template <InitializationMode mode = kFreeMemory>
  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, PagedSpace* owner);
  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, SemiSpace* owner);

  inline void InitializeFreeListCategories();

  void InitializeAsAnchor(Space* owner);

  friend class MemoryAllocator;
};

class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

  inline LargePage* next_page() {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) { set_next_chunk(page); }

  // A limit to guarantee that we do not overflow typed slot offset in
  // the old to old remembered set.
  // Note that this limit is higher than what assembler already imposes on
  // x64 and ia32 architectures.
  static const int kMaxCodePageSize = 512 * MB;

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
                                      Executability executable, Space* owner);

  friend class MemoryAllocator;
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : allocation_observers_(new List<AllocationObserver*>()),
        allocation_observers_paused_(false),
        heap_(heap),
        id_(id),
        executable_(executable),
        committed_(0),
        max_committed_(0) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  virtual void AddAllocationObserver(AllocationObserver* observer) {
    allocation_observers_->Add(observer);
  }

  virtual void RemoveAllocationObserver(AllocationObserver* observer) {
    bool removed = allocation_observers_->RemoveElement(observer);
    USE(removed);
    DCHECK(removed);
  }

  virtual void PauseAllocationObservers() {
    allocation_observers_paused_ = true;
  }

  virtual void ResumeAllocationObservers() {
    allocation_observers_paused_ = false;
  }

  void AllocationStep(Address soon_object, int size);

  // Return the total amount of committed memory for this space, i.e.,
  // allocatable memory and page headers.
  virtual intptr_t CommittedMemory() { return committed_; }

  virtual intptr_t MaximumCommittedMemory() { return max_committed_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  // Approximate amount of physical memory committed for this space.
  virtual size_t CommittedPhysicalMemory() = 0;

  // Return the available bytes without growing.
  virtual intptr_t Available() = 0;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

  void AccountCommitted(intptr_t bytes) {
    DCHECK_GE(bytes, 0);
    committed_ += bytes;
    if (committed_ > max_committed_) {
      max_committed_ = committed_;
    }
  }

  void AccountUncommitted(intptr_t bytes) {
    DCHECK_GE(bytes, 0);
    committed_ -= bytes;
    DCHECK_GE(committed_, 0);
  }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 protected:
  v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_;
  bool allocation_observers_paused_;

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;

  // Keeps track of committed memory in a space.
  intptr_t committed_;
  intptr_t max_committed_;
};


class MemoryChunkValidator {
  // Computed offsets should match the compiler generated ones.
  STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
  STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
                offsetof(MemoryChunk, live_byte_count_));
  STATIC_ASSERT(MemoryChunk::kOldToNewSlotsOffset ==
                offsetof(MemoryChunk, old_to_new_slots_));
  STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
                offsetof(MemoryChunk, write_barrier_counter_));

  // Validate our estimates on the header size.
  STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
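//
// Illustrative usage sketch (not code from this header, error handling
// omitted), assuming a 64-bit build that configures a code range:
//
//   CodeRange code_range(isolate);
//   if (code_range.SetUp(512 * MB)) {
//     size_t allocated = 0;
//     Address base = code_range.AllocateRawMemory(Page::kPageSize,
//                                                 Page::kPageSize, &allocated);
//     // ... place code objects in [base, base + allocated) ...
//     code_range.FreeRawMemory(base, allocated);
//   }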
Steve Block44f0eee2011-05-26 01:26:41 +01001147class CodeRange {
Steve Blocka7e24c12009-10-30 11:49:00 +00001148 public:
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001149 explicit CodeRange(Isolate* isolate);
1150 ~CodeRange() { TearDown(); }
1151
Steve Blocka7e24c12009-10-30 11:49:00 +00001152 // Reserves a range of virtual memory, but does not commit any of it.
1153 // Can only be called once, at heap initialization time.
1154 // Returns false on failure.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001155 bool SetUp(size_t requested_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001156
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001157 bool valid() { return code_range_ != NULL; }
1158 Address start() {
1159 DCHECK(valid());
1160 return static_cast<Address>(code_range_->address());
1161 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001162 size_t size() {
1163 DCHECK(valid());
1164 return code_range_->size();
1165 }
Steve Block44f0eee2011-05-26 01:26:41 +01001166 bool contains(Address address) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001167 if (!valid()) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00001168 Address start = static_cast<Address>(code_range_->address());
1169 return start <= address && address < start + code_range_->size();
1170 }
1171
1172 // Allocates a chunk of memory from the large-object portion of
1173 // the code range. On platforms with no separate code range, should
1174 // not be called.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001175 MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
1176 const size_t commit_size,
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001177 size_t* allocated);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001178 bool CommitRawMemory(Address start, size_t length);
1179 bool UncommitRawMemory(Address start, size_t length);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001180 void FreeRawMemory(Address buf, size_t length);
Steve Blocka7e24c12009-10-30 11:49:00 +00001181
1182 private:
Steve Blocka7e24c12009-10-30 11:49:00 +00001183 class FreeBlock {
1184 public:
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001185 FreeBlock() : start(0), size(0) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00001186 FreeBlock(Address start_arg, size_t size_arg)
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001187 : start(start_arg), size(size_arg) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001188 DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
1189 DCHECK(size >= static_cast<size_t>(Page::kPageSize));
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001190 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001191 FreeBlock(void* start_arg, size_t size_arg)
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001192 : start(static_cast<Address>(start_arg)), size(size_arg) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001193 DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
1194 DCHECK(size >= static_cast<size_t>(Page::kPageSize));
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001195 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001196
1197 Address start;
1198 size_t size;
1199 };
1200
Ben Murdoch61f157c2016-09-16 13:49:30 +01001201 // Frees the range of virtual memory, and frees the data structures used to
1202 // manage it.
1203 void TearDown();
1204
1205 // Finds a block on the allocation list that contains at least the
1206 // requested amount of memory. If none is found, sorts and merges
1207 // the existing free memory blocks, and searches again.
1208 // If none can be found, returns false.
1209 bool GetNextAllocationBlock(size_t requested);
1210 // Compares the start addresses of two free blocks.
1211 static int CompareFreeBlockAddress(const FreeBlock* left,
1212 const FreeBlock* right);
1213 bool ReserveBlock(const size_t requested_size, FreeBlock* block);
1214 void ReleaseBlock(const FreeBlock* block);
1215
1216 Isolate* isolate_;
1217
1218 // The reserved range of virtual memory that all code objects are put in.
1219 base::VirtualMemory* code_range_;
1220
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001221 // The global mutex guards free_list_ and allocation_list_ as GC threads may
  // access both lists concurrently with the main thread.
1223 base::Mutex code_range_mutex_;
1224
Steve Blocka7e24c12009-10-30 11:49:00 +00001225 // Freed blocks of memory are added to the free list. When the allocation
1226 // list is exhausted, the free list is sorted and merged to make the new
1227 // allocation list.
Steve Block44f0eee2011-05-26 01:26:41 +01001228 List<FreeBlock> free_list_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001229
Steve Blocka7e24c12009-10-30 11:49:00 +00001230 // Memory is allocated from the free blocks on the allocation list.
1231 // The block at current_allocation_block_index_ is the current block.
Steve Block44f0eee2011-05-26 01:26:41 +01001232 List<FreeBlock> allocation_list_;
1233 int current_allocation_block_index_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001234
Steve Block44f0eee2011-05-26 01:26:41 +01001235 DISALLOW_COPY_AND_ASSIGN(CodeRange);
Steve Blocka7e24c12009-10-30 11:49:00 +00001236};
1237
1238
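// A SkipList maps each 8 KB region of a page (see kRegionSizeLog2 below) to
// the lowest start address of an object reaching into that region, so that a
// lookup within a page can start close to the target address instead of at
// the page start. A minimal usage sketch (illustrative only):
//
//   SkipList::Update(object->address(), object->Size());
//   SkipList* list = Page::FromAddress(addr)->skip_list();
//   Address search_start = list->StartFor(addr);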
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001239class SkipList {
1240 public:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001241 SkipList() { Clear(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001242
1243 void Clear() {
1244 for (int idx = 0; idx < kSize; idx++) {
1245 starts_[idx] = reinterpret_cast<Address>(-1);
1246 }
1247 }
1248
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001249 Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001250
1251 void AddObject(Address addr, int size) {
1252 int start_region = RegionNumber(addr);
1253 int end_region = RegionNumber(addr + size - kPointerSize);
1254 for (int idx = start_region; idx <= end_region; idx++) {
Ben Murdochda12d292016-06-02 14:46:10 +01001255 if (starts_[idx] > addr) {
1256 starts_[idx] = addr;
1257 } else {
1258 // In the first region, there may already be an object closer to the
1259 // start of the region. Do not change the start in that case. If this
1260 // is not the first region, you probably added overlapping objects.
1261 DCHECK_EQ(start_region, idx);
1262 }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001263 }
1264 }
1265
1266 static inline int RegionNumber(Address addr) {
1267 return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
1268 }
1269
1270 static void Update(Address addr, int size) {
1271 Page* page = Page::FromAddress(addr);
1272 SkipList* list = page->skip_list();
1273 if (list == NULL) {
1274 list = new SkipList();
1275 page->set_skip_list(list);
1276 }
1277
1278 list->AddObject(addr, size);
1279 }
1280
1281 private:
1282 static const int kRegionSizeLog2 = 13;
1283 static const int kRegionSize = 1 << kRegionSizeLog2;
1284 static const int kSize = Page::kPageSize / kRegionSize;
1285
1286 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1287
1288 Address starts_[kSize];
1289};
1290
1291
Steve Blocka7e24c12009-10-30 11:49:00 +00001292// ----------------------------------------------------------------------------
1293// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and
// large pages for the large object space.
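//
// A minimal usage sketch (illustrative only; the capacities and the owning
// space are placeholders):
//
//   MemoryAllocator allocator(isolate);
//   if (allocator.SetUp(max_capacity, max_executable_capacity, 0)) {
//     Page* page = allocator.AllocatePage(Page::kAllocatableMemory,
//                                         paged_space, NOT_EXECUTABLE);
//     // ... use the page ...
//     allocator.Free(page);
//     allocator.TearDown();
//   }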
Steve Block44f0eee2011-05-26 01:26:41 +01001296class MemoryAllocator {
Steve Blocka7e24c12009-10-30 11:49:00 +00001297 public:
Ben Murdochc5610432016-08-08 18:44:38 +01001298 // Unmapper takes care of concurrently unmapping and uncommitting memory
1299 // chunks.
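  //
  // A minimal usage sketch (illustrative only; |allocator| is the heap's
  // MemoryAllocator):
  //
  //   allocator->unmapper()->AddMemoryChunkSafe(chunk);
  //   allocator->unmapper()->FreeQueuedChunks();
  //   // ...
  //   allocator->unmapper()->WaitUntilCompleted();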
1300 class Unmapper {
1301 public:
1302 class UnmapFreeMemoryTask;
1303
1304 explicit Unmapper(MemoryAllocator* allocator)
1305 : allocator_(allocator),
1306 pending_unmapping_tasks_semaphore_(0),
1307 concurrent_unmapping_tasks_active_(0) {}
1308
1309 void AddMemoryChunkSafe(MemoryChunk* chunk) {
1310 if ((chunk->size() == Page::kPageSize) &&
1311 (chunk->executable() != EXECUTABLE)) {
1312 AddMemoryChunkSafe<kRegular>(chunk);
1313 } else {
1314 AddMemoryChunkSafe<kNonRegular>(chunk);
1315 }
1316 }
1317
1318 MemoryChunk* TryGetPooledMemoryChunkSafe() {
1319 // Procedure:
1320 // (1) Try to get a chunk that was declared as pooled and already has
1321 // been uncommitted.
1322 // (2) Try to steal any memory chunk of kPageSize that would've been
1323 // unmapped.
1324 MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
1325 if (chunk == nullptr) {
1326 chunk = GetMemoryChunkSafe<kRegular>();
1327 if (chunk != nullptr) {
1328 // For stolen chunks we need to manually free any allocated memory.
1329 chunk->ReleaseAllocatedMemory();
1330 }
1331 }
1332 return chunk;
1333 }
1334
1335 void FreeQueuedChunks();
1336 bool WaitUntilCompleted();
1337
1338 private:
1339 enum ChunkQueueType {
1340 kRegular, // Pages of kPageSize that do not live in a CodeRange and
1341 // can thus be used for stealing.
1342 kNonRegular, // Large chunks and executable chunks.
      kPooled,      // Pooled chunks, already uncommitted and ready for reuse.
1344 kNumberOfChunkQueues,
1345 };
1346
1347 template <ChunkQueueType type>
1348 void AddMemoryChunkSafe(MemoryChunk* chunk) {
1349 base::LockGuard<base::Mutex> guard(&mutex_);
Ben Murdoch61f157c2016-09-16 13:49:30 +01001350 if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
1351 chunks_[type].push_back(chunk);
1352 } else {
1353 DCHECK_EQ(type, kRegular);
1354 delayed_regular_chunks_.push_back(chunk);
1355 }
Ben Murdochc5610432016-08-08 18:44:38 +01001356 }
1357
1358 template <ChunkQueueType type>
1359 MemoryChunk* GetMemoryChunkSafe() {
1360 base::LockGuard<base::Mutex> guard(&mutex_);
1361 if (chunks_[type].empty()) return nullptr;
1362 MemoryChunk* chunk = chunks_[type].front();
1363 chunks_[type].pop_front();
1364 return chunk;
1365 }
1366
Ben Murdoch61f157c2016-09-16 13:49:30 +01001367 void ReconsiderDelayedChunks();
Ben Murdochc5610432016-08-08 18:44:38 +01001368 void PerformFreeMemoryOnQueuedChunks();
1369
1370 base::Mutex mutex_;
1371 MemoryAllocator* allocator_;
1372 std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
Ben Murdoch61f157c2016-09-16 13:49:30 +01001373 // Delayed chunks cannot be processed in the current unmapping cycle because
1374 // of dependencies such as an active sweeper.
1375 // See MemoryAllocator::CanFreeMemoryChunk.
1376 std::list<MemoryChunk*> delayed_regular_chunks_;
Ben Murdochc5610432016-08-08 18:44:38 +01001377 base::Semaphore pending_unmapping_tasks_semaphore_;
1378 intptr_t concurrent_unmapping_tasks_active_;
1379
1380 friend class MemoryAllocator;
1381 };
1382
Ben Murdochda12d292016-06-02 14:46:10 +01001383 enum AllocationMode {
1384 kRegular,
1385 kPooled,
1386 };
Ben Murdochc5610432016-08-08 18:44:38 +01001387 enum FreeMode {
1388 kFull,
1389 kPreFreeAndQueue,
1390 kPooledAndQueue,
1391 };
Ben Murdochda12d292016-06-02 14:46:10 +01001392
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001393 explicit MemoryAllocator(Isolate* isolate);
1394
Steve Blocka7e24c12009-10-30 11:49:00 +00001395 // Initializes its internal bookkeeping structures.
Russell Brenner90bac252010-11-18 13:33:46 -08001396 // Max capacity of the total space and executable memory limit.
Ben Murdochc5610432016-08-08 18:44:38 +01001397 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable,
1398 intptr_t code_range_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001399
Steve Block44f0eee2011-05-26 01:26:41 +01001400 void TearDown();
Steve Blocka7e24c12009-10-30 11:49:00 +00001401
Ben Murdochc5610432016-08-08 18:44:38 +01001402 // Allocates a Page from the allocator. AllocationMode is used to indicate
1403 // whether pooled allocation, which only works for MemoryChunk::kPageSize,
1404 // should be tried first.
1405 template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
Ben Murdochda12d292016-06-02 14:46:10 +01001406 typename SpaceType>
Ben Murdochc5610432016-08-08 18:44:38 +01001407 Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00001408
Ben Murdochc5610432016-08-08 18:44:38 +01001409 LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001410 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00001411
Ben Murdochc5610432016-08-08 18:44:38 +01001412 template <MemoryAllocator::FreeMode mode = kFull>
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001413 void Free(MemoryChunk* chunk);
Steve Blocka7e24c12009-10-30 11:49:00 +00001414
Ben Murdoch61f157c2016-09-16 13:49:30 +01001415 bool CanFreeMemoryChunk(MemoryChunk* chunk);
1416
Steve Blocka7e24c12009-10-30 11:49:00 +00001417 // Returns allocated spaces in bytes.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001418 intptr_t Size() { return size_.Value(); }
1419
1420 // Returns allocated executable spaces in bytes.
1421 intptr_t SizeExecutable() { return size_executable_.Value(); }
1422
1423 // Returns the maximum available bytes of heaps.
1424 intptr_t Available() {
1425 intptr_t size = Size();
1426 return capacity_ < size ? 0 : capacity_ - size;
1427 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001428
Russell Brenner90bac252010-11-18 13:33:46 -08001429 // Returns the maximum available executable bytes of heaps.
Steve Block44f0eee2011-05-26 01:26:41 +01001430 intptr_t AvailableExecutable() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001431 intptr_t executable_size = SizeExecutable();
1432 if (capacity_executable_ < executable_size) return 0;
1433 return capacity_executable_ - executable_size;
Russell Brenner90bac252010-11-18 13:33:46 -08001434 }
1435
Steve Blocka7e24c12009-10-30 11:49:00 +00001436 // Returns maximum available bytes that the old space can have.
Steve Block44f0eee2011-05-26 01:26:41 +01001437 intptr_t MaxAvailable() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001438 return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001439 }
1440
1441 // Returns an indication of whether a pointer is in a space that has
1442 // been allocated by this MemoryAllocator.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001443 V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
1444 return address < lowest_ever_allocated_.Value() ||
1445 address >= highest_ever_allocated_.Value();
Steve Blocka7e24c12009-10-30 11:49:00 +00001446 }
1447
Steve Blocka7e24c12009-10-30 11:49:00 +00001448#ifdef DEBUG
1449 // Reports statistic info of the space.
Steve Block44f0eee2011-05-26 01:26:41 +01001450 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00001451#endif
1452
  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed; it
  // could be committed later by calling MemoryChunk::CommitArea.
1456 MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
1457 intptr_t commit_area_size,
1458 Executability executable, Space* space);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001459
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001460 Address ReserveAlignedMemory(size_t requested, size_t alignment,
1461 base::VirtualMemory* controller);
1462 Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1463 size_t alignment, Executability executable,
1464 base::VirtualMemory* controller);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001465
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001466 bool CommitMemory(Address addr, size_t size, Executability executable);
1467
1468 void FreeMemory(base::VirtualMemory* reservation, Executability executable);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001469 void FreeMemory(Address addr, size_t size, Executability executable);
1470
1471 // Commit a contiguous block of memory from the initial chunk. Assumes that
1472 // the address is not NULL, the size is greater than zero, and that the
1473 // block is contained in the initial chunk. Returns true if it succeeded
1474 // and false otherwise.
1475 bool CommitBlock(Address start, size_t size, Executability executable);
1476
1477 // Uncommit a contiguous block of memory [start..(start+size)[.
1478 // start is not NULL, the size is greater than zero, and the
1479 // block is contained in the initial chunk. Returns true if it succeeded
1480 // and false otherwise.
1481 bool UncommitBlock(Address start, size_t size);
1482
1483 // Zaps a contiguous block of memory [start..(start+size)[ thus
1484 // filling it up with a recognizable non-NULL bit pattern.
1485 void ZapBlock(Address start, size_t size);
1486
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001487 static int CodePageGuardStartOffset();
1488
1489 static int CodePageGuardSize();
1490
1491 static int CodePageAreaStartOffset();
1492
1493 static int CodePageAreaEndOffset();
1494
1495 static int CodePageAreaSize() {
1496 return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1497 }
1498
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001499 static int PageAreaSize(AllocationSpace space) {
1500 DCHECK_NE(LO_SPACE, space);
1501 return (space == CODE_SPACE) ? CodePageAreaSize()
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001502 : Page::kAllocatableMemory;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001503 }
1504
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001505 MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
1506 Address start, size_t commit_size,
1507 size_t reserved_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001508
Ben Murdochc5610432016-08-08 18:44:38 +01001509 CodeRange* code_range() { return code_range_; }
1510 Unmapper* unmapper() { return &unmapper_; }
1511
Steve Blocka7e24c12009-10-30 11:49:00 +00001512 private:
Ben Murdochc5610432016-08-08 18:44:38 +01001513 // PreFree logically frees the object, i.e., it takes care of the size
1514 // bookkeeping and calls the allocation callback.
1515 void PreFreeMemory(MemoryChunk* chunk);
1516
1517 // FreeMemory can be called concurrently when PreFree was executed before.
1518 void PerformFreeMemory(MemoryChunk* chunk);
1519
Ben Murdochda12d292016-06-02 14:46:10 +01001520 // See AllocatePage for public interface. Note that currently we only support
1521 // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
1522 template <typename SpaceType>
1523 MemoryChunk* AllocatePagePooled(SpaceType* owner);
1524
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001525 Isolate* isolate_;
1526
Ben Murdochc5610432016-08-08 18:44:38 +01001527 CodeRange* code_range_;
1528
Steve Blocka7e24c12009-10-30 11:49:00 +00001529 // Maximum space size in bytes.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001530 intptr_t capacity_;
Russell Brenner90bac252010-11-18 13:33:46 -08001531 // Maximum subset of capacity_ that can be executable
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001532 intptr_t capacity_executable_;
Ben Murdochb0fe1622011-05-05 13:52:32 +01001533
Steve Blocka7e24c12009-10-30 11:49:00 +00001534 // Allocated space size in bytes.
Ben Murdochc5610432016-08-08 18:44:38 +01001535 base::AtomicNumber<intptr_t> size_;
Steve Block791712a2010-08-27 10:21:07 +01001536 // Allocated executable space size in bytes.
Ben Murdochc5610432016-08-08 18:44:38 +01001537 base::AtomicNumber<intptr_t> size_executable_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001538
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001539 // We keep the lowest and highest addresses allocated as a quick way
1540 // of determining that pointers are outside the heap. The estimate is
  // conservative, i.e. not all addresses in 'allocated' space are allocated
1542 // to our heap. The range is [lowest, highest[, inclusive on the low end
1543 // and exclusive on the high end.
Ben Murdochc5610432016-08-08 18:44:38 +01001544 base::AtomicValue<void*> lowest_ever_allocated_;
1545 base::AtomicValue<void*> highest_ever_allocated_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001546
Steve Blocka7e24c12009-10-30 11:49:00 +00001547 // Initializes pages in a chunk. Returns the first page address.
1548 // This function and GetChunkId() are provided for the mark-compact
1549 // collector to rebuild page headers in the from space, which is
1550 // used as a marking stack and its page headers are destroyed.
Steve Block44f0eee2011-05-26 01:26:41 +01001551 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1552 PagedSpace* owner);
Steve Block6ded16b2010-05-10 14:33:55 +01001553
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001554 void UpdateAllocatedSpaceLimits(void* low, void* high) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001555 // The use of atomic primitives does not guarantee correctness (wrt.
1556 // desired semantics) by default. The loop here ensures that we update the
1557 // values only if they did not change in between.
1558 void* ptr = nullptr;
1559 do {
1560 ptr = lowest_ever_allocated_.Value();
1561 } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
1562 do {
1563 ptr = highest_ever_allocated_.Value();
1564 } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001565 }
1566
Ben Murdochc5610432016-08-08 18:44:38 +01001567 base::VirtualMemory last_chunk_;
1568 Unmapper unmapper_;
1569
1570 friend class TestCodeRangeScope;
Ben Murdochda12d292016-06-02 14:46:10 +01001571
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001572 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
Steve Blocka7e24c12009-10-30 11:49:00 +00001573};
1574
1575
1576// -----------------------------------------------------------------------------
1577// Interface for heap object iterator to be implemented by all object space
1578// object iterators.
1579//
// NOTE: The space-specific object iterators also implement their own next()
// method, which is used to avoid using virtual functions when
// iterating a specific space.
1583
1584class ObjectIterator : public Malloced {
1585 public:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001586 virtual ~ObjectIterator() {}
Ben Murdoch61f157c2016-09-16 13:49:30 +01001587 virtual HeapObject* Next() = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001588};
1589
Ben Murdoch61f157c2016-09-16 13:49:30 +01001590template <class PAGE_TYPE>
1591class PageIteratorImpl
1592 : public std::iterator<std::forward_iterator_tag, PAGE_TYPE> {
1593 public:
1594 explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
1595 PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
1596 PAGE_TYPE* operator*() { return p_; }
1597 bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1598 return rhs.p_ == p_;
1599 }
1600 bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1601 return rhs.p_ != p_;
1602 }
1603 inline PageIteratorImpl<PAGE_TYPE>& operator++();
1604 inline PageIteratorImpl<PAGE_TYPE> operator++(int);
1605
1606 private:
1607 PAGE_TYPE* p_;
1608};
1609
1610typedef PageIteratorImpl<Page> PageIterator;
1611typedef PageIteratorImpl<LargePage> LargePageIterator;
1612
1613class PageRange {
1614 public:
1615 typedef PageIterator iterator;
1616 PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
1617 explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
1618 iterator begin() { return iterator(begin_); }
1619 iterator end() { return iterator(end_); }
1620
1621 private:
1622 Page* begin_;
1623 Page* end_;
1624};
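
// A minimal usage sketch (illustrative only): iterating all pages of a paged
// space, whose anchor page terminates the circular page list:
//
//   for (Page* page : PageRange(space->FirstPage(), space->anchor())) {
//     // ... visit |page| ...
//   }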
Steve Blocka7e24c12009-10-30 11:49:00 +00001625
1626// -----------------------------------------------------------------------------
1627// Heap object iterator in new/old/map spaces.
1628//
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001629// A HeapObjectIterator iterates objects from the bottom of the given space
1630// to its top or from the bottom of the given page to its top.
Steve Blocka7e24c12009-10-30 11:49:00 +00001631//
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001632// If objects are allocated in the page during iteration the iterator may
1633// or may not iterate over those objects. The caller must create a new
1634// iterator in order to be sure to visit these new objects.
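//
// A minimal usage sketch (illustrative only; assumes a Heap* heap):
//
//   HeapObjectIterator it(heap->old_space());
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     // ... visit |obj| ...
//   }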
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001635class HeapObjectIterator : public ObjectIterator {
Steve Blocka7e24c12009-10-30 11:49:00 +00001636 public:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001637 // Creates a new object iterator in a given space.
Steve Blocka7e24c12009-10-30 11:49:00 +00001638 explicit HeapObjectIterator(PagedSpace* space);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001639 explicit HeapObjectIterator(Page* page);
Steve Blocka7e24c12009-10-30 11:49:00 +00001640
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001641 // Advance to the next object, skipping free spaces and other fillers and
1642 // skipping the special garbage section of which there is one per space.
Ben Murdoch61f157c2016-09-16 13:49:30 +01001643 // Returns nullptr when the iteration has ended.
1644 inline HeapObject* Next() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00001645
1646 private:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001647 // Fast (inlined) path of next().
1648 inline HeapObject* FromCurrentPage();
Leon Clarked91b9f72010-01-27 17:25:45 +00001649
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001650 // Slow path of next(), goes into the next page. Returns false if the
1651 // iteration has ended.
1652 bool AdvanceToNextPage();
Steve Blocka7e24c12009-10-30 11:49:00 +00001653
Ben Murdoch61f157c2016-09-16 13:49:30 +01001654 Address cur_addr_; // Current iteration point.
1655 Address cur_end_; // End iteration point.
Steve Blocka7e24c12009-10-30 11:49:00 +00001656 PagedSpace* space_;
Ben Murdoch61f157c2016-09-16 13:49:30 +01001657 PageRange page_range_;
1658 PageRange::iterator current_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001659};
1660
1661
1662// -----------------------------------------------------------------------------
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001663// A space has a circular list of pages. The next page can be accessed via
1664// Page::next_page() call.
Steve Blocka7e24c12009-10-30 11:49:00 +00001665
1666// An abstraction of allocation and relocation pointers in a page-structured
1667// space.
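//
// A minimal bump-pointer allocation sketch (illustrative only; the area
// bounds and the requested size are placeholders):
//
//   AllocationInfo info(area_start, area_end);
//   Address new_top = info.top() + size_in_bytes;
//   if (new_top <= info.limit()) {
//     HeapObject* object = HeapObject::FromAddress(info.top());
//     info.set_top(new_top);
//   }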
1668class AllocationInfo {
1669 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001670 AllocationInfo() : top_(nullptr), limit_(nullptr) {}
1671 AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
1672
1673 void Reset(Address top, Address limit) {
1674 set_top(top);
1675 set_limit(limit);
1676 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001677
1678 INLINE(void set_top(Address top)) {
1679 SLOW_DCHECK(top == NULL ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001680 (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001681 top_ = top;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001682 }
1683
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001684 INLINE(Address top()) const {
1685 SLOW_DCHECK(top_ == NULL ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001686 (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001687 return top_;
1688 }
1689
1690 Address* top_address() { return &top_; }
1691
1692 INLINE(void set_limit(Address limit)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001693 limit_ = limit;
1694 }
1695
1696 INLINE(Address limit()) const {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001697 return limit_;
1698 }
1699
1700 Address* limit_address() { return &limit_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001701
1702#ifdef DEBUG
1703 bool VerifyPagedAllocation() {
Ben Murdochc5610432016-08-08 18:44:38 +01001704 return (Page::FromAllocationAreaAddress(top_) ==
1705 Page::FromAllocationAreaAddress(limit_)) &&
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001706 (top_ <= limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001707 }
1708#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001709
1710 private:
1711 // Current allocation top.
1712 Address top_;
1713 // Current allocation limit.
1714 Address limit_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001715};
1716
1717
1718// An abstraction of the accounting statistics of a page-structured space.
Steve Blocka7e24c12009-10-30 11:49:00 +00001719//
1720// The stats are only set by functions that ensure they stay balanced. These
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001721// functions increase or decrease one of the non-capacity stats in conjunction
1722// with capacity, or else they always balance increases and decreases to the
1723// non-capacity stats.
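//
// A minimal balanced-update sketch (illustrative only): a new page of
// Page::kAllocatableMemory bytes is added to a space and its area is
// immediately handed to the free list:
//
//   AllocationStats stats;
//   stats.ExpandSpace(Page::kAllocatableMemory);      // capacity_ and size_ grow
//   stats.DeallocateBytes(Page::kAllocatableMemory);  // size_ shrinks again
//   // Now Capacity() == Page::kAllocatableMemory and Size() == 0.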
Steve Blocka7e24c12009-10-30 11:49:00 +00001724class AllocationStats BASE_EMBEDDED {
1725 public:
1726 AllocationStats() { Clear(); }
1727
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001728 // Zero out all the allocation statistics (i.e., no capacity).
Steve Blocka7e24c12009-10-30 11:49:00 +00001729 void Clear() {
1730 capacity_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001731 max_capacity_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001732 size_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001733 }
1734
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001735 void ClearSize() { size_ = capacity_; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001736
Steve Blocka7e24c12009-10-30 11:49:00 +00001737 // Accessors for the allocation statistics.
Ben Murdochf87a2032010-10-22 12:50:53 +01001738 intptr_t Capacity() { return capacity_; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001739 intptr_t MaxCapacity() { return max_capacity_; }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001740 intptr_t Size() {
1741 CHECK_GE(size_, 0);
1742 return size_;
1743 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001744
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001745 // Grow the space by adding available bytes. They are initially marked as
1746 // being in use (part of the size), but will normally be immediately freed,
1747 // putting them on the free list and removing them from size_.
Steve Blocka7e24c12009-10-30 11:49:00 +00001748 void ExpandSpace(int size_in_bytes) {
1749 capacity_ += size_in_bytes;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001750 size_ += size_in_bytes;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001751 if (capacity_ > max_capacity_) {
1752 max_capacity_ = capacity_;
1753 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001754 CHECK(size_ >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001755 }
1756
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001757 // Shrink the space by removing available bytes. Since shrinking is done
1758 // during sweeping, bytes have been marked as being in use (part of the size)
1759 // and are hereby freed.
Steve Blocka7e24c12009-10-30 11:49:00 +00001760 void ShrinkSpace(int size_in_bytes) {
1761 capacity_ -= size_in_bytes;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001762 size_ -= size_in_bytes;
Ben Murdochda12d292016-06-02 14:46:10 +01001763 CHECK_GE(size_, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001764 }
1765
1766 // Allocate from available bytes (available -> size).
Ben Murdochf87a2032010-10-22 12:50:53 +01001767 void AllocateBytes(intptr_t size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001768 size_ += size_in_bytes;
Ben Murdochda12d292016-06-02 14:46:10 +01001769 CHECK_GE(size_, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001770 }
1771
1772 // Free allocated bytes, making them available (size -> available).
Ben Murdochf87a2032010-10-22 12:50:53 +01001773 void DeallocateBytes(intptr_t size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001774 size_ -= size_in_bytes;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001775 CHECK_GE(size_, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001776 }
1777
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001778 // Merge {other} into {this}.
1779 void Merge(const AllocationStats& other) {
1780 capacity_ += other.capacity_;
1781 size_ += other.size_;
1782 if (other.max_capacity_ > max_capacity_) {
1783 max_capacity_ = other.max_capacity_;
1784 }
1785 CHECK_GE(size_, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001786 }
1787
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001788 void DecreaseCapacity(intptr_t size_in_bytes) {
1789 capacity_ -= size_in_bytes;
1790 CHECK_GE(capacity_, 0);
1791 CHECK_GE(capacity_, size_);
1792 }
1793
1794 void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
1795
Steve Blocka7e24c12009-10-30 11:49:00 +00001796 private:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001797 // |capacity_|: The number of object-area bytes (i.e., not including page
1798 // bookkeeping structures) currently in the space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001799 intptr_t capacity_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001800
1801 // |max_capacity_|: The maximum capacity ever observed.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001802 intptr_t max_capacity_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001803
1804 // |size_|: The number of allocated bytes.
Ben Murdochf87a2032010-10-22 12:50:53 +01001805 intptr_t size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001806};
1807
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001808// A free list maintaining free blocks of memory. The free list is organized in
1809// a way to encourage objects allocated around the same time to be near each
1810// other. The normal way to allocate is intended to be by bumping a 'top'
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001811// pointer until it hits a 'limit' pointer. When the limit is hit we need to
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001812// find a new space to allocate from. This is done with the free list, which is
1813// divided up into rough categories to cut down on waste. Having finer
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001814// categories would scatter allocation more.
1815
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001816// The free list is organized in categories as follows:
// kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
//   allocation when categories >= small do not have entries anymore.
// 11-31 words (tiny): The tiny blocks are only used for allocation when
//   categories >= small do not have entries anymore.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001821// 32-255 words (small): Used for allocating free space between 1-31 words in
1822// size.
1823// 256-2047 words (medium): Used for allocating free space between 32-255 words
1824// in size.
// 2048-16383 words (large): Used for allocating free space between 256-2047
1826// words in size.
1827// At least 16384 words (huge): This list is for objects of 2048 words or
1828// larger. Empty pages are also added to this list.
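//
// A sizing sketch (illustrative only; word counts assume a 64-bit build with
// kPointerSize == 8):
//
//   72 bytes   ->      9 words -> tiniest category
//   2048 bytes ->    256 words -> medium category
//   1 MB       -> 131072 words -> huge category
//
//   free_list->Free(start, size_in_bytes, kLinkCategory);   // add a block
//   HeapObject* node = free_list->Allocate(size_in_bytes);  // take a block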
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001829class FreeList {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001830 public:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001831 // This method returns how much memory can be allocated after freeing
1832 // maximum_freed memory.
1833 static inline int GuaranteedAllocatable(int maximum_freed) {
Ben Murdochda12d292016-06-02 14:46:10 +01001834 if (maximum_freed <= kTiniestListMax) {
1835 // Since we are not iterating over all list entries, we cannot guarantee
1836 // that we can find the maximum freed block in that free list.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001837 return 0;
Ben Murdochda12d292016-06-02 14:46:10 +01001838 } else if (maximum_freed <= kTinyListMax) {
1839 return kTinyAllocationMax;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001840 } else if (maximum_freed <= kSmallListMax) {
1841 return kSmallAllocationMax;
1842 } else if (maximum_freed <= kMediumListMax) {
1843 return kMediumAllocationMax;
1844 } else if (maximum_freed <= kLargeListMax) {
1845 return kLargeAllocationMax;
1846 }
1847 return maximum_freed;
1848 }
1849
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001850 explicit FreeList(PagedSpace* owner);
1851
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001852 // Adds a node on the free list. The block of size {size_in_bytes} starting
1853 // at {start} is placed on the free list. The return value is the number of
  // bytes that were not added to the free list, because the freed memory block
  // was too small. Bookkeeping information will be written to the block, i.e.,
1856 // its contents will be destroyed. The start address should be word aligned,
1857 // and the size should be a non-zero multiple of the word size.
Ben Murdochda12d292016-06-02 14:46:10 +01001858 int Free(Address start, int size_in_bytes, FreeMode mode);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001859
1860 // Allocate a block of size {size_in_bytes} from the free list. The block is
  // uninitialized. A failure is returned if no block is available. The size
1862 // should be a non-zero multiple of the word size.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001863 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1864
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001865 // Clear the free list.
1866 void Reset();
1867
Ben Murdochda12d292016-06-02 14:46:10 +01001868 void ResetStats() {
1869 wasted_bytes_.SetValue(0);
1870 ForAllFreeListCategories(
1871 [](FreeListCategory* category) { category->ResetStats(); });
1872 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001873
1874 // Return the number of bytes available on the free list.
1875 intptr_t Available() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001876 intptr_t available = 0;
Ben Murdochda12d292016-06-02 14:46:10 +01001877 ForAllFreeListCategories([&available](FreeListCategory* category) {
1878 available += category->available();
1879 });
Ben Murdoch097c5b22016-05-18 11:27:45 +01001880 return available;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001881 }
1882
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001883 bool IsEmpty() {
Ben Murdochda12d292016-06-02 14:46:10 +01001884 bool empty = true;
1885 ForAllFreeListCategories([&empty](FreeListCategory* category) {
1886 if (!category->is_empty()) empty = false;
1887 });
1888 return empty;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001889 }
1890
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001891 // Used after booting the VM.
1892 void RepairLists(Heap* heap);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001893
Ben Murdochda12d292016-06-02 14:46:10 +01001894 intptr_t EvictFreeListItems(Page* page);
1895 bool ContainsPageFreeListItems(Page* page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001896
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001897 PagedSpace* owner() { return owner_; }
Ben Murdochda12d292016-06-02 14:46:10 +01001898 intptr_t wasted_bytes() { return wasted_bytes_.Value(); }
1899
1900 template <typename Callback>
1901 void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
1902 FreeListCategory* current = categories_[type];
1903 while (current != nullptr) {
1904 FreeListCategory* next = current->next();
1905 callback(current);
1906 current = next;
1907 }
1908 }
1909
1910 template <typename Callback>
1911 void ForAllFreeListCategories(Callback callback) {
1912 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1913 ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
1914 }
1915 }
1916
1917 bool AddCategory(FreeListCategory* category);
1918 void RemoveCategory(FreeListCategory* category);
1919 void PrintCategories(FreeListCategoryType type);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001920
1921#ifdef DEBUG
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001922 intptr_t SumFreeLists();
1923 bool IsVeryLong();
1924#endif
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001925
1926 private:
Ben Murdochda12d292016-06-02 14:46:10 +01001927 class FreeListCategoryIterator {
1928 public:
1929 FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
1930 : current_(free_list->categories_[type]) {}
1931
1932 bool HasNext() { return current_ != nullptr; }
1933
1934 FreeListCategory* Next() {
1935 DCHECK(HasNext());
1936 FreeListCategory* tmp = current_;
1937 current_ = current_->next();
1938 return tmp;
1939 }
1940
1941 private:
1942 FreeListCategory* current_;
1943 };
1944
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001945 // The size range of blocks, in bytes.
1946 static const int kMinBlockSize = 3 * kPointerSize;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001947 static const int kMaxBlockSize = Page::kAllocatableMemory;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001948
Ben Murdochda12d292016-06-02 14:46:10 +01001949 static const int kTiniestListMax = 0xa * kPointerSize;
1950 static const int kTinyListMax = 0x1f * kPointerSize;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001951 static const int kSmallListMax = 0xff * kPointerSize;
1952 static const int kMediumListMax = 0x7ff * kPointerSize;
1953 static const int kLargeListMax = 0x3fff * kPointerSize;
Ben Murdochda12d292016-06-02 14:46:10 +01001954 static const int kTinyAllocationMax = kTiniestListMax;
1955 static const int kSmallAllocationMax = kTinyListMax;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001956 static const int kMediumAllocationMax = kSmallListMax;
1957 static const int kLargeAllocationMax = kMediumListMax;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001958
1959 FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001960
Ben Murdochda12d292016-06-02 14:46:10 +01001961 // Walks all available categories for a given |type| and tries to retrieve
1962 // a node. Returns nullptr if the category is empty.
1963 FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size);
1964
1965 // Tries to retrieve a node from the first category in a given |type|.
1966 // Returns nullptr if the category is empty.
1967 FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size,
1968 int minimum_size);
1969
1970 // Searches a given |type| for a node of at least |minimum_size|.
1971 FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size,
1972 int minimum_size);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001973
1974 FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
Ben Murdochda12d292016-06-02 14:46:10 +01001975 if (size_in_bytes <= kTiniestListMax) {
1976 return kTiniest;
1977 } else if (size_in_bytes <= kTinyListMax) {
1978 return kTiny;
1979 } else if (size_in_bytes <= kSmallListMax) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001980 return kSmall;
1981 } else if (size_in_bytes <= kMediumListMax) {
1982 return kMedium;
1983 } else if (size_in_bytes <= kLargeListMax) {
1984 return kLarge;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001985 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001986 return kHuge;
1987 }
1988
Ben Murdochda12d292016-06-02 14:46:10 +01001989 // The tiny categories are not used for fast allocation.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001990 FreeListCategoryType SelectFastAllocationFreeListCategoryType(
1991 size_t size_in_bytes) {
1992 if (size_in_bytes <= kSmallAllocationMax) {
1993 return kSmall;
1994 } else if (size_in_bytes <= kMediumAllocationMax) {
1995 return kMedium;
1996 } else if (size_in_bytes <= kLargeAllocationMax) {
1997 return kLarge;
1998 }
1999 return kHuge;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002000 }
2001
Ben Murdochda12d292016-06-02 14:46:10 +01002002 FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
2003
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002004 PagedSpace* owner_;
Ben Murdochc5610432016-08-08 18:44:38 +01002005 base::AtomicNumber<intptr_t> wasted_bytes_;
Ben Murdochda12d292016-06-02 14:46:10 +01002006 FreeListCategory* categories_[kNumberOfCategories];
2007
2008 friend class FreeListCategory;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002009
2010 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
2011};
2012
2013
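// An AllocationResult either holds the allocated object or encodes, as a Smi,
// the space in which the allocation should be retried. A minimal usage sketch
// (illustrative only; the allocating call is a placeholder):
//
//   AllocationResult result = space->AllocateRawUnaligned(size_in_bytes);
//   HeapObject* object = nullptr;
//   if (!result.To(&object)) {
//     // Allocation failed; result.RetrySpace() names the space to retry in.
//   }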
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002014class AllocationResult {
2015 public:
2016 // Implicit constructor from Object*.
2017 AllocationResult(Object* object) // NOLINT
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002018 : object_(object) {
2019 // AllocationResults can't return Smis, which are used to represent
2020 // failure and the space to retry in.
2021 CHECK(!object->IsSmi());
2022 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002023
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002024 AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002025
2026 static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
2027 return AllocationResult(space);
2028 }
2029
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002030 inline bool IsRetry() { return object_->IsSmi(); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002031
2032 template <typename T>
2033 bool To(T** obj) {
2034 if (IsRetry()) return false;
2035 *obj = T::cast(object_);
2036 return true;
2037 }
2038
2039 Object* ToObjectChecked() {
2040 CHECK(!IsRetry());
2041 return object_;
2042 }
2043
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002044 inline AllocationSpace RetrySpace();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002045
2046 private:
2047 explicit AllocationResult(AllocationSpace space)
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002048 : object_(Smi::FromInt(static_cast<int>(space))) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002049
2050 Object* object_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002051};
2052
2053
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002054STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
2055
2056
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002057// LocalAllocationBuffer represents a linear allocation area that is created
2058// from a given {AllocationResult} and can be used to allocate memory without
2059// synchronization.
2060//
2061// The buffer is properly closed upon destruction and reassignment.
2062// Example:
2063// {
2064// AllocationResult result = ...;
2065// LocalAllocationBuffer a(heap, result, size);
2066// LocalAllocationBuffer b = a;
2067// CHECK(!a.IsValid());
2068// CHECK(b.IsValid());
2069// // {a} is invalid now and cannot be used for further allocations.
2070// }
2071// // Since {b} went out of scope, the LAB is closed, resulting in creating a
2072// // filler object for the remaining area.
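//
// A LAB is typically carved out of one larger raw allocation and then handed
// out in smaller pieces (illustrative sketch only; the backing allocation
// call is a placeholder):
//
//   AllocationResult result =
//       space->AllocateRawUnalignedSynchronized(lab_size);
//   LocalAllocationBuffer lab =
//       LocalAllocationBuffer::FromResult(heap, result, lab_size);
//   if (lab.IsValid()) {
//     AllocationResult object = lab.AllocateRawAligned(size, kWordAligned);
//   }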
2073class LocalAllocationBuffer {
2074 public:
2075 // Indicates that a buffer cannot be used for allocations anymore. Can result
2076 // from either reassigning a buffer, or trying to construct it from an
2077 // invalid {AllocationResult}.
2078 static inline LocalAllocationBuffer InvalidBuffer();
2079
2080 // Creates a new LAB from a given {AllocationResult}. Results in
2081 // InvalidBuffer if the result indicates a retry.
2082 static inline LocalAllocationBuffer FromResult(Heap* heap,
2083 AllocationResult result,
2084 intptr_t size);
2085
2086 ~LocalAllocationBuffer() { Close(); }
2087
2088 // Convert to C++11 move-semantics once allowed by the style guide.
2089 LocalAllocationBuffer(const LocalAllocationBuffer& other);
2090 LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
2091
2092 MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
2093 int size_in_bytes, AllocationAlignment alignment);
2094
2095 inline bool IsValid() { return allocation_info_.top() != nullptr; }
2096
2097 // Try to merge LABs, which is only possible when they are adjacent in memory.
2098 // Returns true if the merge was successful, false otherwise.
2099 inline bool TryMerge(LocalAllocationBuffer* other);
2100
2101 private:
2102 LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
2103
2104 void Close();
2105
2106 Heap* heap_;
2107 AllocationInfo allocation_info_;
2108};
2109
Ben Murdoch61f157c2016-09-16 13:49:30 +01002110class NewSpacePageRange {
2111 public:
2112 typedef PageRange::iterator iterator;
2113 inline NewSpacePageRange(Address start, Address limit);
2114 iterator begin() { return range_.begin(); }
2115 iterator end() { return range_.end(); }
2116
2117 private:
2118 PageRange range_;
2119};
2120
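// A minimal usage sketch (illustrative only; |space|, the size, and the
// alignment are placeholders): allocating from a paged space and walking its
// pages:
//
//   AllocationResult result = space->AllocateRaw(size_in_bytes, kWordAligned);
//   HeapObject* object = nullptr;
//   if (result.To(&object)) {
//     // ... initialize |object| ...
//   }
//   for (Page* page : *space) {
//     // ... visit |page| ...
//   }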
Steve Blocka7e24c12009-10-30 11:49:00 +00002121class PagedSpace : public Space {
2122 public:
Ben Murdoch61f157c2016-09-16 13:49:30 +01002123 typedef PageIterator iterator;
2124
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002125 static const intptr_t kCompactionMemoryWanted = 500 * KB;
Steve Blocka7e24c12009-10-30 11:49:00 +00002126
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002127 // Creates a space with an id.
2128 PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
2129
2130 ~PagedSpace() override { TearDown(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002131
2132 // Set up the space using the given address range of virtual memory (from
2133 // the memory allocator's initial chunk) if possible. If the block of
2134 // addresses is not big enough to contain a single page-aligned page, a
2135 // fresh chunk will be allocated.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002136 bool SetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00002137
2138 // Returns true if the space has been successfully set up and not
2139 // subsequently torn down.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002140 bool HasBeenSetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00002141
Steve Blocka7e24c12009-10-30 11:49:00 +00002142 // Checks whether an object/address is in this space.
2143 inline bool Contains(Address a);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002144 inline bool Contains(Object* o);
2145 bool ContainsSlow(Address addr);
Steve Blocka7e24c12009-10-30 11:49:00 +00002146
2147 // Given an address occupied by a live object, return that object if it is
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002148 // in this space, or a Smi if it is not. The implementation iterates over
  // objects in the page containing the address, so the cost is linear in the
  // number of objects in the page. It may be slow.
2151 Object* FindObject(Address addr);
2152
2153 // During boot the free_space_map is created, and afterwards we may need
2154 // to write it into the free list nodes that were already created.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002155 void RepairFreeListsAfterDeserialization();
Steve Blocka7e24c12009-10-30 11:49:00 +00002156
Ben Murdoch85b71792012-04-11 18:30:58 +01002157 // Prepares for a mark-compact GC.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002158 void PrepareForMarkCompact();
Ben Murdoch85b71792012-04-11 18:30:58 +01002159
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002160 // Current capacity without growing (Size() + Available()).
Ben Murdochf87a2032010-10-22 12:50:53 +01002161 intptr_t Capacity() { return accounting_stats_.Capacity(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002162
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002163 // Approximate amount of physical memory committed for this space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002164 size_t CommittedPhysicalMemory() override;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002165
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002166 void ResetFreeListStatistics();
2167
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002168 // Sets the capacity, the available space and the wasted space to zero.
2169 // The stats are rebuilt during sweeping by adding each page to the
2170 // capacity and the size when it is encountered. As free spaces are
2171 // discovered during the sweeping they are subtracted from the size and added
2172 // to the available and wasted totals.
2173 void ClearStats() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002174 accounting_stats_.ClearSize();
2175 free_list_.ResetStats();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002176 ResetFreeListStatistics();
2177 }
2178
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002179 // Available bytes without growing. These are the bytes on the free list.
2180 // The bytes in the linear allocation area are not included in this total
2181 // because updating the stats would slow down allocation. New pages are
2182 // immediately added to the free list so they show up here.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002183 intptr_t Available() override { return free_list_.Available(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002184
2185 // Allocated bytes in this space. Garbage bytes that were not found due to
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002186 // concurrent sweeping are counted as being allocated! The bytes in the
2187 // current linear allocation area (between top and limit) are also counted
2188 // here.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002189 intptr_t Size() override { return accounting_stats_.Size(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002190
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002191 // As size, but the bytes in lazily swept pages are estimated and the bytes
2192 // in the current linear allocation area are not included.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002193 intptr_t SizeOfObjects() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00002194
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002195 // Wasted bytes in this space. These are just the bytes that were thrown away
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002196 // due to being too small to use for allocation.
2197 virtual intptr_t Waste() { return free_list_.wasted_bytes(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002198
2199 // Returns the allocation pointer in this space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002200 Address top() { return allocation_info_.top(); }
2201 Address limit() { return allocation_info_.limit(); }
2202
2203 // The allocation top address.
2204 Address* allocation_top_address() { return allocation_info_.top_address(); }
2205
2206 // The allocation limit address.
2207 Address* allocation_limit_address() {
2208 return allocation_info_.limit_address();
2209 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002210
Ben Murdochda12d292016-06-02 14:46:10 +01002211 enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
2212
Steve Blocka7e24c12009-10-30 11:49:00 +00002213 // Allocate the requested number of bytes in the space if possible, return a
Ben Murdochda12d292016-06-02 14:46:10 +01002214 // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
2215 // to be manually updated later.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002216 MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
Ben Murdochda12d292016-06-02 14:46:10 +01002217 int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002218
2219 MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
2220 int size_in_bytes);
2221
2222 // Allocate the requested number of bytes in the space double aligned if
2223 // possible, return a failure object if not.
2224 MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
2225 int size_in_bytes, AllocationAlignment alignment);
2226
2227 // Allocate the requested number of bytes in the space and consider allocation
2228 // alignment if needed.
2229 MUST_USE_RESULT inline AllocationResult AllocateRaw(
2230 int size_in_bytes, AllocationAlignment alignment);
Leon Clarkee46be812010-01-19 14:06:41 +00002231
  // Give a block of memory to the space's free list. It might be added to
  // the free list or accounted as waste. Returns the number of bytes that
  // were actually added to the free list (size_in_bytes minus the waste).
2236 int Free(Address start, int size_in_bytes) {
Ben Murdochda12d292016-06-02 14:46:10 +01002237 int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002238 accounting_stats_.DeallocateBytes(size_in_bytes);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002239 return size_in_bytes - wasted;
2240 }
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002241
Ben Murdochda12d292016-06-02 14:46:10 +01002242 int UnaccountedFree(Address start, int size_in_bytes) {
2243 int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
2244 return size_in_bytes - wasted;
2245 }
2246
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002247 void ResetFreeList() { free_list_.Reset(); }
2248
Steve Block6ded16b2010-05-10 14:33:55 +01002249 // Set space allocation info.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002250 void SetTopAndLimit(Address top, Address limit) {
2251 DCHECK(top == limit ||
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002252 Page::FromAddress(top) == Page::FromAddress(limit - 1));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002253 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002254 allocation_info_.Reset(top, limit);
Steve Block6ded16b2010-05-10 14:33:55 +01002255 }
2256
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002257 // Empty space allocation info, returning unused area to free list.
2258 void EmptyAllocationInfo() {
2259 // Mark the old linear allocation area with a free space map so it can be
2260 // skipped when scanning the heap.
2261 int old_linear_size = static_cast<int>(limit() - top());
2262 Free(top(), old_linear_size);
2263 SetTopAndLimit(NULL, NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00002264 }
2265
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002266 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
2267
  void IncreaseCapacity(int size);

  // Releases an unused page and shrinks the space.
  void ReleasePage(Page* page);

  // The dummy page that anchors the linked list of pages.
  Page* anchor() { return &anchor_; }

  // Collect code size related statistics.
  void CollectCodeStatistics();

  // Reset code size related statistics.
  static void ResetCodeAndMetadataStatistics(Isolate* isolate);

#ifdef VERIFY_HEAP
  // Verify integrity of this space.
  virtual void Verify(ObjectVisitor* visitor);

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject* obj) {}
#endif

#ifdef DEBUG
  // Print meta info and objects in this space.
  void Print() override;

  // Reports statistics for the space.
  void ReportStatistics();

  // Report code object related statistics.
  static void ReportCodeStatistics(Isolate* isolate);
  static void ResetCodeStatistics(Isolate* isolate);
#endif

  Page* FirstPage() { return anchor_.next_page(); }
  Page* LastPage() { return anchor_.prev_page(); }

  void EvictEvacuationCandidatesFromLinearAllocationArea();

  bool CanExpand(size_t size);

  // Returns the number of total pages in this space.
  int CountTotalPages();

  // Return size of allocatable area on a page in this space.
  inline int AreaSize() { return area_size_; }

  virtual bool is_local() { return false; }

  // Merges {other} into the current space. Note that this modifies {other},
  // e.g., removes its bump pointer area and resets statistics.
  void MergeCompactionSpace(CompactionSpace* other);

  // Refills the free list from the corresponding free list filled by the
  // sweeper.
  virtual void RefillFreeList();

  FreeList* free_list() { return &free_list_; }

  base::Mutex* mutex() { return &space_mutex_; }

  inline void UnlinkFreeListCategories(Page* page);
  inline intptr_t RelinkFreeListCategories(Page* page);

  iterator begin() { return iterator(anchor_.next_page()); }
  iterator end() { return iterator(&anchor_); }

 protected:
  // PagedSpaces that should be included in snapshots have different, i.e.,
  // smaller, initial pages.
  virtual bool snapshotable() { return true; }

  bool HasPages() { return anchor_.next_page() != &anchor_; }

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Expands the space by allocating a fixed number of pages. Returns false if
  // it cannot allocate the requested number of pages from the OS, or if the
  // hard heap size limit has been hit.
  bool Expand();

  // Generic fast case allocation function that tries linear allocation at the
  // address denoted by top in allocation_info_.
  inline HeapObject* AllocateLinearly(int size_in_bytes);

  // Generic fast case allocation function that tries aligned linear allocation
  // at the address denoted by top in allocation_info_. Writes the aligned
  // allocation size, which includes the filler size, to size_in_bytes.
  inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
                                             AllocationAlignment alignment);

  // If sweeping is still in progress try to sweep unswept pages. If that is
  // not successful, wait for the sweeper threads and re-try free-list
  // allocation.
  MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
      int size_in_bytes);

  // Slow path of AllocateRaw. This function is space-dependent.
  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);

  int area_size_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // The dummy page that anchors the double linked list of pages.
  Page anchor_;

  // The space's free list.
  FreeList free_list_;

  // Normal allocation information.
  AllocationInfo allocation_info_;

  // Mutex guarding any concurrent access to the space.
  base::Mutex space_mutex_;

  friend class IncrementalMarking;
  friend class MarkCompactCollector;

  // Used in cctest.
  friend class HeapTester;
};


class NumberAndSizeInfo BASE_EMBEDDED {
 public:
  NumberAndSizeInfo() : number_(0), bytes_(0) {}

  int number() const { return number_; }
  void increment_number(int num) { number_ += num; }

  int bytes() const { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }

  void clear() {
    number_ = 0;
    bytes_ = 0;
  }

 private:
  int number_;
  int bytes_;
};


// HistogramInfo class for recording a single "bar" of a histogram. This
// class is used for collecting statistics to print to the log file.
class HistogramInfo : public NumberAndSizeInfo {
 public:
  HistogramInfo() : NumberAndSizeInfo() {}

  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

 private:
  const char* name_;
};

enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };

// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
// The mark-compact collector uses the memory of the first page in the from
// space as a marking stack when tracing live objects.
class SemiSpace : public Space {
 public:
  typedef PageIterator iterator;

  static void Swap(SemiSpace* from, SemiSpace* to);

  SemiSpace(Heap* heap, SemiSpaceId semispace)
      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
        current_capacity_(0),
        maximum_capacity_(0),
        minimum_capacity_(0),
        age_mark_(nullptr),
        committed_(false),
        id_(semispace),
        anchor_(this),
        current_page_(nullptr),
        pages_used_(0) {}

  inline bool Contains(HeapObject* o);
  inline bool Contains(Object* o);
  inline bool ContainsSlow(Address a);

  void SetUp(int initial_capacity, int maximum_capacity);
  void TearDown();
  bool HasBeenSetUp() { return maximum_capacity_ != 0; }

  bool Commit();
  bool Uncommit();
  bool is_committed() { return committed_; }

  // Grow the semispace to the new capacity. The new capacity requested must
  // be larger than the current capacity and less than the maximum capacity.
  bool GrowTo(int new_capacity);

  // Shrinks the semispace to the new capacity. The new capacity requested
  // must be more than the amount of used memory in the semispace and less
  // than the current capacity.
  bool ShrinkTo(int new_capacity);

  bool EnsureCurrentCapacity();

  // Returns the start address of the first page of the space.
  Address space_start() {
    DCHECK_NE(anchor_.next_page(), anchor());
    return anchor_.next_page()->area_start();
  }

  Page* first_page() { return anchor_.next_page(); }
  Page* current_page() { return current_page_; }
  int pages_used() { return pages_used_; }

  // Returns one past the end address of the space.
  Address space_end() { return anchor_.prev_page()->area_end(); }

  // Returns the start address of the current page of the space.
  Address page_low() { return current_page_->area_start(); }

  // Returns one past the end address of the current page of the space.
  Address page_high() { return current_page_->area_end(); }

  bool AdvancePage() {
    Page* next_page = current_page_->next_page();
    // We cannot expand if we reached the maximum number of pages already. Note
    // that we need to account for the next page already for this check as we
    // could potentially fill the whole page after advancing.
    const bool reached_max_pages = (pages_used_ + 1) == max_pages();
    if (next_page == anchor() || reached_max_pages) {
      return false;
    }
    current_page_ = next_page;
    pages_used_++;
    return true;
  }

  // Resets the space to using the first page.
  void Reset();

  void RemovePage(Page* page);
  void PrependPage(Page* page);

  // Age mark accessors.
  Address age_mark() { return age_mark_; }
  void set_age_mark(Address mark);

  // Returns the current capacity of the semispace.
  int current_capacity() { return current_capacity_; }

  // Returns the maximum capacity of the semispace.
  int maximum_capacity() { return maximum_capacity_; }

  // Returns the initial capacity of the semispace.
  int minimum_capacity() { return minimum_capacity_; }

  SemiSpaceId id() { return id_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  // If we don't have these here then SemiSpace will be abstract. However
  // they should never be called:

  intptr_t Size() override {
    UNREACHABLE();
    return 0;
  }

  intptr_t SizeOfObjects() override { return Size(); }

  intptr_t Available() override {
    UNREACHABLE();
    return 0;
  }

#ifdef DEBUG
  void Print() override;
  // Validate a range of addresses in a SemiSpace.
  // The "from" address must be on a page prior to the "to" address,
  // in the linked page order, or it must be earlier on the same page.
  static void AssertValidRange(Address from, Address to);
#else
  // Do nothing.
  inline static void AssertValidRange(Address from, Address to) {}
#endif

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

  iterator begin() { return iterator(anchor_.next_page()); }
  iterator end() { return iterator(anchor()); }

 private:
  void RewindPages(Page* start, int num_pages);

  inline Page* anchor() { return &anchor_; }
  inline int max_pages() { return current_capacity_ / Page::kPageSize; }

  // Copies the flags into the masked positions on all pages in the space.
  void FixPagesFlags(intptr_t flags, intptr_t flag_mask);

  // The currently committed space capacity.
  int current_capacity_;

  // The maximum capacity that can be used by this space. A space cannot grow
  // beyond that size.
  int maximum_capacity_;

  // The minimum capacity for the space. A space cannot shrink below this size.
  int minimum_capacity_;

  // Used to govern object promotion during mark-compact collection.
  Address age_mark_;

  bool committed_;
  SemiSpaceId id_;

  Page anchor_;
  Page* current_page_;
  int pages_used_;

  friend class NewSpace;
  friend class SemiSpaceIterator;
};
2601
2602
2603// A SemiSpaceIterator is an ObjectIterator that iterates over the active
2604// semispace of the heap's new space. It iterates over the objects in the
2605// semispace from a given start address (defaulting to the bottom of the
2606// semispace) to the top of the semispace. New objects allocated after the
2607// iterator is created are not iterated.
2608class SemiSpaceIterator : public ObjectIterator {
2609 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002610 // Create an iterator over the allocated objects in the given to-space.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002611 explicit SemiSpaceIterator(NewSpace* space);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002612
Ben Murdoch61f157c2016-09-16 13:49:30 +01002613 inline HeapObject* Next() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00002614
2615 private:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002616 void Initialize(Address start, Address end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002617
Steve Blocka7e24c12009-10-30 11:49:00 +00002618 // The current iteration point.
2619 Address current_;
2620 // The end of iteration.
2621 Address limit_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002622};
2623
// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.

class NewSpace : public Space {
 public:
  typedef PageIterator iterator;

  explicit NewSpace(Heap* heap)
      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
        to_space_(heap, kToSpace),
        from_space_(heap, kFromSpace),
        reservation_(),
        top_on_previous_step_(0),
        allocated_histogram_(nullptr),
        promoted_histogram_(nullptr) {}

  inline bool Contains(HeapObject* o);
  inline bool ContainsSlow(Address a);
  inline bool Contains(Object* o);

  bool SetUp(int initial_semispace_capacity, int max_semispace_capacity);

  // Tears down the space. Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetUp() {
    return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
  }

  // Flip the pair of spaces.
  void Flip();

  // Grow the capacity of the semispaces. Assumes that they are not at
  // their maximum capacity.
  void Grow();

  // Shrink the capacity of the semispaces.
  void Shrink();

  // Return the allocated bytes in the active semispace.
  intptr_t Size() override {
    return to_space_.pages_used() * Page::kAllocatableMemory +
           static_cast<int>(top() - to_space_.page_low());
  }

  // The same, but returning an int. We have to have the one that returns
  // intptr_t because it is inherited, but if we know we are dealing with the
  // new space, which can't get as big as the other spaces, then this is
  // useful:
  int SizeAsInt() { return static_cast<int>(Size()); }

  // Return the allocatable capacity of a semispace.
  intptr_t Capacity() {
    SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
    return (to_space_.current_capacity() / Page::kPageSize) *
           Page::kAllocatableMemory;
  }

  // Return the current size of a semispace, allocatable and non-allocatable
  // memory.
  intptr_t TotalCapacity() {
    DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
    return to_space_.current_capacity();
  }

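  // For illustration (hypothetical numbers): with a current semispace
  // capacity of 2 MB and 512 KB pages, Capacity() reports
  // 4 * Page::kAllocatableMemory usable bytes, whereas TotalCapacity()
  // reports the raw 2 MB including the per-page overhead that cannot hold
  // objects.
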
  // Committed memory for NewSpace is the committed memory of both semi-spaces
  // combined.
  intptr_t CommittedMemory() override {
    return from_space_.CommittedMemory() + to_space_.CommittedMemory();
  }

  intptr_t MaximumCommittedMemory() override {
    return from_space_.MaximumCommittedMemory() +
           to_space_.MaximumCommittedMemory();
  }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  // Return the available bytes without growing.
  intptr_t Available() override { return Capacity() - Size(); }

  size_t AllocatedSinceLastGC() {
    bool seen_age_mark = false;
    Address age_mark = to_space_.age_mark();
    Page* current_page = to_space_.first_page();
    Page* age_mark_page = Page::FromAddress(age_mark);
    Page* last_page = Page::FromAddress(top() - kPointerSize);
    if (age_mark_page == last_page) {
      if (top() - age_mark >= 0) {
        return top() - age_mark;
      }
      // Top was reset at some point, invalidating this metric.
      return 0;
    }
    while (current_page != last_page) {
      if (current_page == age_mark_page) {
        seen_age_mark = true;
        break;
      }
      current_page = current_page->next_page();
    }
    if (!seen_age_mark) {
      // Top was reset at some point, invalidating this metric.
      return 0;
    }
    intptr_t allocated = age_mark_page->area_end() - age_mark;
    DCHECK_EQ(current_page, age_mark_page);
    current_page = age_mark_page->next_page();
    while (current_page != last_page) {
      allocated += Page::kAllocatableMemory;
      current_page = current_page->next_page();
    }
    allocated += top() - current_page->area_start();
    DCHECK_LE(0, allocated);
    DCHECK_LE(allocated, Size());
    return static_cast<size_t>(allocated);
  }

  void MovePageFromSpaceToSpace(Page* page) {
    DCHECK(page->InFromSpace());
    from_space_.RemovePage(page);
    to_space_.PrependPage(page);
  }

  bool Rebalance();

  // Return the maximum capacity of a semispace.
  int MaximumCapacity() {
    DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
    return to_space_.maximum_capacity();
  }

  bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }

  // Returns the initial capacity of a semispace.
  int InitialTotalCapacity() {
    DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
    return to_space_.minimum_capacity();
  }

  // Return the address of the allocation pointer in the active semispace.
  Address top() {
    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
    return allocation_info_.top();
  }

  // Return the address of the allocation pointer limit in the active
  // semispace.
  Address limit() {
    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
    return allocation_info_.limit();
  }

  // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.space_start(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The allocation top address.
  Address* allocation_top_address() { return allocation_info_.top_address(); }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment));

  MUST_USE_RESULT INLINE(
      AllocationResult AllocateRawUnaligned(int size_in_bytes));

  MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
      int size_in_bytes, AllocationAlignment alignment));

  MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
      int size_in_bytes, AllocationAlignment alignment);

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();

  // When inline allocation stepping is active, either because of incremental
  // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
  // inline allocation every once in a while. This is done by setting
  // allocation_info_.limit to be lower than the actual limit and increasing
  // it in steps to guarantee that the observers are notified periodically.
  void UpdateInlineAllocationLimit(int size_in_bytes);

  void DisableInlineAllocationSteps() {
    top_on_previous_step_ = 0;
    UpdateInlineAllocationLimit(0);
  }

  // Allows observation of inline allocation. The observer->Step() method gets
  // called after every step_size bytes have been allocated (approximately).
  // This works by adjusting the allocation limit to a lower value and
  // adjusting it after each step.
  void AddAllocationObserver(AllocationObserver* observer) override;

  void RemoveAllocationObserver(AllocationObserver* observer) override;

  // Get the extent of the inactive semispace (for use as a marking stack,
  // or to zap it). Notice: space-addresses are not necessarily on the
  // same page, so FromSpaceStart() might be above FromSpaceEnd().
  Address FromSpacePageLow() { return from_space_.page_low(); }
  Address FromSpacePageHigh() { return from_space_.page_high(); }
  Address FromSpaceStart() { return from_space_.space_start(); }
  Address FromSpaceEnd() { return from_space_.space_end(); }

  // Get the extent of the active semispace's pages' memory.
  Address ToSpaceStart() { return to_space_.space_start(); }
  Address ToSpaceEnd() { return to_space_.space_end(); }

  inline bool ToSpaceContainsSlow(Address a);
  inline bool FromSpaceContainsSlow(Address a);
  inline bool ToSpaceContains(Object* o);
  inline bool FromSpaceContains(Object* o);

  // Try to switch the active semispace to a new, empty, page.
  // Returns false if this isn't possible or reasonable (i.e., there
  // are no pages, or the current page is already empty), or true
  // if successful.
  bool AddFreshPage();
  bool AddFreshPageSynchronized();

#ifdef VERIFY_HEAP
  // Verify the active semispace.
  virtual void Verify();
#endif

#ifdef DEBUG
  // Print the active semispace.
  void Print() override { to_space_.Print(); }
#endif

  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object. Note that we don't
  // record every single allocation, but only those that happen in the
  // to space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

  bool IsFromSpaceCommitted() { return from_space_.is_committed(); }

  SemiSpace* active_space() { return &to_space_; }

  void PauseAllocationObservers() override;
  void ResumeAllocationObservers() override;

  iterator begin() { return to_space_.begin(); }
  iterator end() { return to_space_.end(); }

 private:
  // Update allocation info to match the current to-space page.
  void UpdateAllocationInfo();

  base::Mutex mutex_;

  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;
  base::VirtualMemory reservation_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;

  Address top_on_previous_step_;

  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;

  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);

  // If we are doing inline allocation in steps, this method performs the
  // 'step' operation. top is the memory address of the bump pointer at the
  // last inline allocation (i.e., it determines the number of bytes actually
  // allocated since the last step). new_top is the address of the bump pointer
  // where the next byte is going to be allocated from. top and new_top may be
  // different when we cross a page boundary or reset the space.
  void InlineAllocationStep(Address top, Address new_top, Address soon_object,
                            size_t size);
  intptr_t GetNextInlineAllocationStepSize();
  void StartNextInlineAllocationStep();

  friend class SemiSpaceIterator;
};

class PauseAllocationObserversScope {
 public:
  explicit PauseAllocationObserversScope(Heap* heap);
  ~PauseAllocationObserversScope();

 private:
  Heap* heap_;
  DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
};

// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.

class CompactionSpace : public PagedSpace {
 public:
  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
      : PagedSpace(heap, id, executable) {}

  bool is_local() override { return true; }

 protected:
  // The space is temporary and not included in any snapshots.
  bool snapshotable() override { return false; }

  MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
      int size_in_bytes) override;
};


// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
 public:
  explicit CompactionSpaceCollection(Heap* heap)
      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}

  CompactionSpace* Get(AllocationSpace space) {
    switch (space) {
      case OLD_SPACE:
        return &old_space_;
      case CODE_SPACE:
        return &code_space_;
      default:
        UNREACHABLE();
    }
    UNREACHABLE();
    return nullptr;
  }

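  // Illustrative usage sketch: a compaction task owning one collection would
  // fetch the compaction space matching the kind of page it evacuates, e.g.:
  //
  //   CompactionSpaceCollection compaction_spaces(heap);
  //   CompactionSpace* target = compaction_spaces.Get(OLD_SPACE);
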
 private:
  CompactionSpace old_space_;
  CompactionSpace code_space_;
};


// -----------------------------------------------------------------------------
// Old object space (includes the old space of objects and code space)

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object. The constructor does not allocate pages
  // from the OS.
  OldSpace(Heap* heap, AllocationSpace id, Executability executable)
      : PagedSpace(heap, id, executable) {}
};


// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_DCHECK((space).page_low() <= (info).top() &&   \
              (info).top() <= (space).page_high() &&  \
              (info).limit() <= (space).page_high())

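// For illustration (an assumed, typical use; not code from this header): a
// NewSpace-style caller would check its bump-pointer state against the active
// semispace like
//
//   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
//
// which verifies that top lies within the current page and that limit does
// not exceed the page's end.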

// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public PagedSpace {
 public:
  // Creates a map space object.
  MapSpace(Heap* heap, AllocationSpace id)
      : PagedSpace(heap, id, NOT_EXECUTABLE) {}

  int RoundSizeDownToObjectAlignment(int size) override {
    if (base::bits::IsPowerOfTwo32(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }

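  // Worked example (hypothetical numbers): if Map::kSize were 88, which is not
  // a power of two, a request of 200 bytes would round down to
  // (200 / 88) * 88 = 176 bytes; if Map::kSize were a power of two such as 64,
  // RoundDown(200, 64) would yield 192 bytes.
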
#ifdef VERIFY_HEAP
  void VerifyObject(HeapObject* obj) override;
#endif
};


// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxRegularHeapObjectSize ) are allocated and
// managed by the large object space. A large object is allocated from the OS
// heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at Page::kObjectStartOffset within a page.
// Large objects do not move during garbage collections.

class LargeObjectSpace : public Space {
 public:
  typedef LargePageIterator iterator;

  LargeObjectSpace(Heap* heap, AllocationSpace id);
  virtual ~LargeObjectSpace();

  // Initializes internal data structures.
  bool SetUp();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  MUST_USE_RESULT AllocationResult
  AllocateRaw(int object_size, Executability executable);

  // Available bytes for objects in this space.
  inline intptr_t Available() override;

  intptr_t Size() override { return size_; }

  intptr_t SizeOfObjects() override { return objects_size_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  int PageCount() { return page_count_; }

  // Finds an object for a given address, returns a Smi if it is not found.
  // The function iterates through all objects in this space, may be slow.
  Object* FindObject(Address a);

  // Finds a large object page containing the given address, returns NULL
  // if such a page doesn't exist.
  LargePage* FindPage(Address a);

  // Clears the marking state of live objects.
  void ClearMarkingStateOfLiveObjects();

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);
  // Checks whether an address is in the object area in this space. Iterates
  // all objects in the space. May be slow.
  bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page_ == NULL; }

  void AdjustLiveBytes(int by) { objects_size_ += by; }

  LargePage* first_page() { return first_page_; }

  // Collect code statistics.
  void CollectCodeStatistics();

  iterator begin() { return iterator(first_page_); }
  iterator end() { return iterator(nullptr); }

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

#ifdef DEBUG
  void Print() override;
  void ReportStatistics();
#endif

 private:
  // The head of the linked list of large object chunks.
  LargePage* first_page_;
  intptr_t size_;          // allocated bytes
  int page_count_;         // number of chunks
  intptr_t objects_size_;  // size of objects
  // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
  base::HashMap chunk_map_;

  friend class LargeObjectIterator;
};


class LargeObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);

  HeapObject* Next() override;

 private:
  LargePage* current_;
};

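// Illustrative usage sketch (an assumption, not code from this file): given a
// LargeObjectSpace* lo_space, the iterator above walks every large object,
// assuming Next() returns NULL once all objects have been visited.
//
//   LargeObjectIterator it(lo_space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... inspect obj ...
//   }
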
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class MemoryChunkIterator BASE_EMBEDDED {
 public:
  inline explicit MemoryChunkIterator(Heap* heap);

  // Return NULL when the iterator is done.
  inline MemoryChunk* next();

 private:
  enum State {
    kOldSpaceState,
    kMapState,
    kCodeState,
    kLargeObjectState,
    kFinishedState
  };
  Heap* heap_;
  State state_;
  PageIterator old_iterator_;
  PageIterator code_iterator_;
  PageIterator map_iterator_;
  LargePageIterator lo_iterator_;
};

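// Illustrative usage sketch: visiting every chunk that may hold pointers to
// new space or to evacuation candidates, relying on next() returning NULL
// once the iterator is done (as documented above).
//
//   MemoryChunkIterator it(heap);
//   for (MemoryChunk* chunk = it.next(); chunk != NULL; chunk = it.next()) {
//     // ... process chunk ...
//   }
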
#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_H_