// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include <list>

#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
#include "src/flags.h"
#include "src/hashmap.h"
#include "src/list.h"
#include "src/objects.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

class AllocationInfo;
class AllocationObserver;
class CompactionSpace;
class CompactionSpaceCollection;
class FreeList;
class Isolate;
class MemoryAllocator;
class MemoryChunk;
class Page;
class PagedSpace;
class SemiSpace;
class SkipList;
class SlotsBuffer;
class SlotSet;
class TypedSlotSet;
class Space;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects, the rest of old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of intergenerational
// references. See heap/store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps so if the page belongs to old space or large object space
// it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old space and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space which is iterated in a special fashion. However we still
// require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored, they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.
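//
// A minimal illustrative sketch (not the actual allocator code) of the
// bump-pointer allocation that this special garbage section enables, where
// |top| and |limit| stand for the allocation pointers described above:
//
//   Address AllocateLinearly(Address* top, Address limit, int size_in_bytes) {
//     if (*top + size_in_bytes > limit) return nullptr;  // Section exhausted.
//     Address result = *top;  // Carve the object off the front...
//     *top += size_in_bytes;  // ...and bump the top pointer forward.
//     return result;
//   }
//
// The real fast path lives in the AllocationInfo/PagedSpace machinery
// (forward-declared above) and its inline implementations; the sketch only
// shows why no free-space map needs to be written for the area between top
// and limit.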

// Some assertion macros used in the debugging mode.

#define DCHECK_PAGE_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))

#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
  DCHECK((0 < size) && (size <= code_space->AreaSize()))

#define DCHECK_PAGE_OFFSET(offset) \
  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

 private:
  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1);
    } else {
      return MarkBit(cell_, new_mask);
    }
  }

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  CellType* cell_;
  CellType mask_;

  friend class Marking;
};


// Bitmap is a sequence of cells each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
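  // For example (illustrative numbers only; actual values depend on
  // kPageSizeBits and the target's pointer size): with kPageSizeBits == 20
  // (1 MB chunks) and kPointerSizeLog2 == 3 (64-bit), there is one mark bit
  // per pointer-sized word, so kLength = 2^20 >> 3 = 131072 mark bits and
  // kSize = 2^20 >> (3 + 3) = 16384 bytes of bitmap per chunk.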


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() { return CellsForLength(kLength); }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  V8_INLINE static uint32_t IndexInCell(uint32_t index) {
    return index & kBitIndexMask;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) { return reinterpret_cast<Address>(this); }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index) {
    MarkBit::CellType mask = 1u << IndexInCell(index);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask);
  }
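
  // Worked example (illustrative only): for index 37, IndexInCell(37) is
  // 37 & 31 = 5, so mask == 1u << 5, and the cell is cells()[37 >> 5], i.e.
  // cells()[1]; mark bit 37 is therefore bit 5 of the second 32-bit cell.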

  static inline void Clear(MemoryChunk* chunk);

  static inline void SetAllBits(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) {
        return false;
      }
    }
    return true;
  }

  // Clears all bits starting from {cell_base_index} up to and excluding
  // {index}. Note that {cell_base_index} is required to be cell aligned.
  void ClearRange(uint32_t cell_base_index, uint32_t index) {
    DCHECK_EQ(IndexInCell(cell_base_index), 0u);
    DCHECK_GE(index, cell_base_index);
    uint32_t start_cell_index = IndexToCell(cell_base_index);
    uint32_t end_cell_index = IndexToCell(index);
    DCHECK_GE(end_cell_index, start_cell_index);
    // Clear all cells till the cell containing the last index.
    for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
      cells()[i] = 0;
    }
    // Clear all bits in the last cell till the last bit before index.
    uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
    cells()[end_cell_index] &= clear_mask;
  }
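
  // Worked example (illustrative only): ClearRange(64, 70) computes
  // start_cell_index = 64 >> 5 = 2 and end_cell_index = 70 >> 5 = 2, so no
  // whole cell is zeroed; clear_mask = ~((1u << (70 & 31)) - 1) = ~0x3F then
  // clears bits 0..5 of cells()[2], i.e. mark bits 64..69, leaving bit 70 and
  // above untouched.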
};

enum FreeListCategoryType {
  kTiniest,
  kTiny,
  kSmall,
  kMedium,
  kLarge,
  kHuge,

  kFirstCategory = kTiniest,
  kLastCategory = kHuge,
  kNumberOfCategories = kLastCategory + 1,
  kInvalidCategory
};

enum FreeMode { kLinkCategory, kDoNotLinkCategory };

// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
 public:
  static const int kSize = kIntSize +      // FreeListCategoryType type_
                           kIntSize +      // int available_
                           kPointerSize +  // FreeSpace* top_
                           kPointerSize +  // FreeListCategory* prev_
                           kPointerSize;   // FreeListCategory* next_

  FreeListCategory()
      : type_(kInvalidCategory),
        available_(0),
        top_(nullptr),
        prev_(nullptr),
        next_(nullptr) {}

  void Initialize(FreeListCategoryType type) {
    type_ = type;
    available_ = 0;
    top_ = nullptr;
    prev_ = nullptr;
    next_ = nullptr;
  }

  void Invalidate();

  void Reset();

  void ResetStats() { Reset(); }

  void RepairFreeList(Heap* heap);

  // Relinks the category into the currently owning free list. Requires that
  // the category is currently unlinked.
  void Relink();

  bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode);

  // Picks a node from the list and stores its size in |node_size|. Returns
  // nullptr if the category is empty.
  FreeSpace* PickNodeFromList(int* node_size);

  // Performs a single try to pick a node of at least |minimum_size| from the
  // category. Stores the actual size in |node_size|. Returns nullptr if no
  // node is found.
  FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size);

  // Picks a node of at least |minimum_size| from the category. Stores the
  // actual size in |node_size|. Returns nullptr if no node is found.
  FreeSpace* SearchForNodeInList(int minimum_size, int* node_size);

  inline FreeList* owner();
  inline bool is_linked();
  bool is_empty() { return top() == nullptr; }
  int available() const { return available_; }

#ifdef DEBUG
  intptr_t SumFreeList();
  int FreeListLength();
#endif

 private:
  // For debug builds we accurately compute free list lengths up until
  // {kVeryLongFreeList} by manually walking the list.
  static const int kVeryLongFreeList = 500;

  inline Page* page();

  FreeSpace* top() { return top_; }
  void set_top(FreeSpace* top) { top_ = top; }
  FreeListCategory* prev() { return prev_; }
  void set_prev(FreeListCategory* prev) { prev_ = prev; }
  FreeListCategory* next() { return next_; }
  void set_next(FreeListCategory* next) { next_ = next; }

  // |type_|: The type of this free list category.
  FreeListCategoryType type_;

  // |available_|: Total available bytes in all blocks of this free list
  // category.
  int available_;

  // |top_|: Points to the top FreeSpace* in the free list category.
  FreeSpace* top_;

  FreeListCategory* prev_;
  FreeListCategory* next_;

  friend class FreeList;
  friend class PagedSpace;
};

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    EVACUATION_CANDIDATE,
    NEVER_EVACUATE,  // May contain immortal immutables.

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black and
    // a white to grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
    // from new to old space during evacuation.
    PAGE_NEW_OLD_PROMOTION,

    // A black page has all mark bits set to 1 (black). A black page currently
    // cannot be iterated because it is not swept. Moreover live bytes are also
    // not updated.
    BLACK_PAGE,

    // This flag is intended to be used for testing. Works only when both
    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
    // are set. It forces the page to become an evacuation candidate at the
    // next candidates selection cycle.
    FORCE_EVACUATION_CANDIDATE_FOR_TESTING,

    // This flag is intended to be used for testing.
    NEVER_ALLOCATE_ON_PAGE,

    // The memory chunk is already logically freed, however the actual freeing
    // still has to be performed.
    PRE_FREED,

    // |POOLED|: When actually freeing this chunk, only uncommit and do not
    // give up the reservation as we still reuse the chunk at some point.
    POOLED,

    // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
    // has been aborted and needs special handling by the sweeper.
    COMPACTION_WAS_ABORTED,

    // |ANCHOR|: Flag is set if page is an anchor.
    ANCHOR,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };

  // |kSweepingDone|: The page state when sweeping is complete or sweeping must
  //   not be performed on that page. Sweeper threads that are done with their
  //   work will set this value and not touch the page anymore.
  // |kSweepingPending|: This page is ready for parallel sweeping.
  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
  enum ConcurrentSweepingState {
    kSweepingDone,
    kSweepingPending,
    kSweepingInProgress,
  };

  // Every n write barrier invocations we go to runtime even though
  // we could have handled it in generated code. This lets us check
  // whether we have hit the limit and should do some more marking.
  static const int kWriteBarrierCounterGranularity = 500;

  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) | (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;

  static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize  // size_t size
      + kIntptrSize               // intptr_t flags_
      + kPointerSize              // Address area_start_
      + kPointerSize              // Address area_end_
      + 2 * kPointerSize          // base::VirtualMemory reservation_
      + kPointerSize              // Address owner_
      + kPointerSize              // Heap* heap_
      + kIntSize;                 // int progress_bar_

  static const size_t kOldToNewSlotsOffset =
      kLiveBytesOffset + kIntSize;  // int live_byte_count_

  static const size_t kWriteBarrierCounterOffset =
      kOldToNewSlotsOffset + kPointerSize  // SlotSet* old_to_new_slots_;
      + kPointerSize                       // SlotSet* old_to_old_slots_;
      + kPointerSize   // TypedSlotSet* typed_old_to_old_slots_;
      + kPointerSize;  // SkipList* skip_list_;

  static const size_t kMinHeaderSize =
      kWriteBarrierCounterOffset +
      kIntptrSize         // intptr_t write_barrier_counter_
      + kPointerSize      // AtomicValue high_water_mark_
      + kPointerSize      // base::Mutex* mutex_
      + kPointerSize      // base::AtomicWord concurrent_sweeping_
      + 2 * kPointerSize  // AtomicNumber free-list statistics
      + kPointerSize      // AtomicValue next_chunk_
      + kPointerSize      // AtomicValue prev_chunk_
      // FreeListCategory categories_[kNumberOfCategories]
      + FreeListCategory::kSize * kNumberOfCategories;

  // We add some more space to the computed header size to account for missing
  // alignment requirements in our computation.
  // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
  static const size_t kHeaderSize = kMinHeaderSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset =
      kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
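
  // The expression above rounds kBodyOffset up to the next multiple of
  // kObjectStartAlignment (and keeps it unchanged if it is already aligned).
  // Worked example with illustrative numbers (kObjectStartAlignment == 256 on
  // 64-bit targets): for kBodyOffset == 700 the result is
  // 699 + (256 - 699 % 256) = 768, and for an already aligned kBodyOffset of
  // 512 it stays 512.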

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;

  static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
  static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);

  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }
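
  // Because chunks are kAlignment-aligned, masking off the low bits of any
  // interior pointer yields the owning chunk's header. Illustrative example
  // only (assuming 1 MB alignment, i.e. kAlignmentMask == 0xFFFFF):
  //   MemoryChunk::FromAddress(0x7f0012345678) returns the chunk starting at
  //   0x7f0012300000.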

  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  static inline void UpdateHighWaterMark(Address mark) {
    if (mark == nullptr) return;
    // Need to subtract one from the mark because when a chunk is full the
    // top points to the next address after the chunk, which effectively
    // belongs to another chunk. See the comment to Page::FromTopOrLimit.
    MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
    intptr_t old_mark = 0;
    do {
      old_mark = chunk->high_water_mark_.Value();
    } while ((new_mark > old_mark) &&
             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
  }

  static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }

  Address address() { return reinterpret_cast<Address>(this); }

  base::Mutex* mutex() { return mutex_; }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether |addr| can be a limit of addresses in this page. It's a
  // limit if it's in the page, or if it's just after the last byte of the
  // page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
    return concurrent_sweeping_;
  }

  // Manage live byte count, i.e., count of bytes in black objects.
  inline void ResetLiveBytes();
  inline void IncrementLiveBytes(int by);

  int LiveBytes() {
    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
    DCHECK(!IsFlagSet(BLACK_PAGE) || live_byte_count_ == 0);
    return live_byte_count_;
  }

  void SetLiveBytes(int live_bytes) {
    if (IsFlagSet(BLACK_PAGE)) return;
    DCHECK_GE(live_bytes, 0);
    DCHECK_LE(static_cast<size_t>(live_bytes), size_);
    live_byte_count_ = live_bytes;
  }

  int write_barrier_counter() {
    return static_cast<int>(write_barrier_counter_);
  }

  void set_write_barrier_counter(int counter) {
    write_barrier_counter_ = counter;
  }

  size_t size() const { return size_; }

  inline Heap* heap() const { return heap_; }

  inline SkipList* skip_list() { return skip_list_; }

  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

  inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
  inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
  inline TypedSlotSet* typed_old_to_old_slots() {
    return typed_old_to_old_slots_;
  }

  void AllocateOldToNewSlots();
  void ReleaseOldToNewSlots();
  void AllocateOldToOldSlots();
  void ReleaseOldToOldSlots();
  void AllocateTypedOldToOldSlots();
  void ReleaseTypedOldToOldSlots();

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() { return static_cast<int>(area_end() - area_start()); }

  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }

  int progress_bar() {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    return progress_bar_;
  }

  void set_progress_bar(int progress_bar) {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
  }

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }

  void PrintMarkbits() { markbits()->Print(); }

  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }

  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask are set to
  // the value in "flags", the rest retain the current value in |flags_|.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }
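
  // Illustrative use only (a sketch, not a call site taken from this file):
  // to copy the "interesting pointers" flags from one page to another one can
  // write
  //   target->SetFlags(source->GetFlags(), Page::kCopyOnFlipFlagsMask);
  // which overwrites only the bits selected by the mask and leaves the rest
  // of |flags_| untouched.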

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }

  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }

  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }

  bool IsEvacuationCandidate() {
    DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
    return IsFlagSet(EVACUATION_CANDIDATE);
  }

  bool CanAllocate() {
    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
  }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }

  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

  MemoryChunk* next_chunk() { return next_chunk_.Value(); }

  MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }

  void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }

  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
        kPageHeaderTag) {
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                      kPageHeaderTag);
    } else {
      return nullptr;
    }
  }

  void set_owner(Space* space) {
    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
           kPageHeaderTag);
  }

  bool HasPageHeader() { return owner() != nullptr; }

  void InsertAfter(MemoryChunk* other);
  void Unlink();

 protected:
  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner,
                                 base::VirtualMemory* reservation);

  // Should be called when memory chunk is about to be freed.
  void ReleaseAllocatedMemory();

  base::VirtualMemory* reserved_memory() { return &reservation_; }

  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  base::VirtualMemory reservation_;

  // The identity of the owning space. This is tagged as a failure pointer,
  // but no failure can be in an object, so this can be distinguished from any
  // entry in a fixed array.
  Address owner_;

  Heap* heap_;

  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;

  // Count of bytes marked black on page.
  int live_byte_count_;

  // A single slot set for small pages (of size kPageSize) or an array of slot
  // sets for large pages. In the latter case the number of entries in the
  // array is ceil(size() / kPageSize).
  SlotSet* old_to_new_slots_;
  SlotSet* old_to_old_slots_;
  TypedSlotSet* typed_old_to_old_slots_;

  SkipList* skip_list_;

  intptr_t write_barrier_counter_;

  // Assuming the initial allocation on a page is sequential,
  // count highest number of bytes ever allocated on the page.
  base::AtomicValue<intptr_t> high_water_mark_;

  base::Mutex* mutex_;

  base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;

  // PagedSpace free-list statistics.
  base::AtomicNumber<intptr_t> available_in_free_list_;
  base::AtomicNumber<intptr_t> wasted_memory_;

  // next_chunk_ holds a pointer of type MemoryChunk
  base::AtomicValue<MemoryChunk*> next_chunk_;
  // prev_chunk_ holds a pointer of type MemoryChunk
  base::AtomicValue<MemoryChunk*> prev_chunk_;

  FreeListCategory categories_[kNumberOfCategories];

 private:
  void InitializeReservedMemory() { reservation_.Reset(); }

  friend class MemoryAllocator;
  friend class MemoryChunkValidator;
};

// -----------------------------------------------------------------------------
// A page is a memory chunk of a size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromTopOrLimit(top);
class Page : public MemoryChunk {
 public:
  static const intptr_t kCopyAllFlags = ~0;

  // Page flags copied from from-space to to-space when flipping semispaces.
  static const intptr_t kCopyOnFlipFlagsMask =
      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

  // Maximum object size that gets allocated into regular pages. Objects larger
  // than that size are allocated in large object space and are never moved in
  // memory. This also applies to new space allocation, since objects are never
  // migrated from new space to large object space. Takes double alignment into
  // account.
  // TODO(hpayer): This limit should be way smaller but we currently have
  // short living objects >256K.
  static const int kMaxRegularHeapObjectSize = 600 * KB;

  static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);

  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[. This only works if the object
  // is in fact in a page.
  static Page* FromAddress(Address addr) {
    return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
  }

  // Returns the page containing the address provided. The address can
  // potentially point right after the page. To be also safe for tagged values
  // we subtract a hole word. The valid address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
  static Page* FromAllocationAreaAddress(Address address) {
    return Page::FromAddress(address - kPointerSize);
  }
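
  // Illustrative example only: if an allocation top sits exactly at
  // page_addr + kPageSize, FromAddress(top) would map it to the next page;
  // subtracting one word first, as done above, maps such a limit-style
  // address back to the page that actually owns the allocation area.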

  // Checks if address1 and address2 are on the same new space page.
  static bool OnSamePage(Address address1, Address address2) {
    return Page::FromAddress(address1) == Page::FromAddress(address2);
  }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address addr) {
    return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
  }

  static bool IsAtObjectStart(Address addr) {
    return (reinterpret_cast<intptr_t>(addr) & kPageAlignmentMask) ==
           kObjectStartOffset;
  }

  inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);

  // Create a Page object that is only used as anchor for the doubly-linked
  // list of real pages.
  explicit Page(Space* owner) { InitializeAsAnchor(owner); }

  inline void MarkNeverAllocateForTesting();
  inline void MarkEvacuationCandidate();
  inline void ClearEvacuationCandidate();

  Page* next_page() { return static_cast<Page*>(next_chunk()); }
  Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
  void set_next_page(Page* page) { set_next_chunk(page); }
  void set_prev_page(Page* page) { set_prev_chunk(page); }

  template <typename Callback>
  inline void ForAllFreeListCategories(Callback callback) {
    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
      callback(&categories_[i]);
    }
  }

  // Returns the offset of a given address relative to this page.
  inline int Offset(Address a) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset in this page.
  Address OffsetToAddress(int offset) {
    DCHECK_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
  // progress. In particular, when we know that right before this call a
  // sweeper thread was sweeping this page.
  void WaitUntilSweepingCompleted() {
    mutex_->Lock();
    mutex_->Unlock();
    DCHECK(SweepingDone());
  }

  bool SweepingDone() {
    return concurrent_sweeping_state().Value() == kSweepingDone;
  }

  void ResetFreeListStatistics();

  int LiveBytesFromFreeList() {
    return static_cast<int>(area_size() - wasted_memory() -
                            available_in_free_list());
  }

  FreeListCategory* free_list_category(FreeListCategoryType type) {
    return &categories_[type];
  }

  bool is_anchor() { return IsFlagSet(Page::ANCHOR); }

  intptr_t wasted_memory() { return wasted_memory_.Value(); }
  void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); }
  intptr_t available_in_free_list() { return available_in_free_list_.Value(); }
  void add_available_in_free_list(intptr_t available) {
    available_in_free_list_.Increment(available);
  }

#ifdef DEBUG
  void Print();
#endif  // DEBUG

 private:
  enum InitializationMode { kFreeMemory, kDoNotFreeMemory };

  template <InitializationMode mode = kFreeMemory>
  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, PagedSpace* owner);
  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, SemiSpace* owner);

  inline void InitializeFreeListCategories();

  void InitializeAsAnchor(Space* owner);

  friend class MemoryAllocator;
};

class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

  inline LargePage* next_page() {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) { set_next_chunk(page); }

  // A limit to guarantee that we do not overflow typed slot offset in
  // the old to old remembered set.
  // Note that this limit is higher than what assembler already imposes on
  // x64 and ia32 architectures.
  static const int kMaxCodePageSize = 512 * MB;

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
                                      Executability executable, Space* owner);

  friend class MemoryAllocator;
};

// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : allocation_observers_(new List<AllocationObserver*>()),
        allocation_observers_paused_(false),
        heap_(heap),
        id_(id),
        executable_(executable),
        committed_(0),
        max_committed_(0) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  virtual void AddAllocationObserver(AllocationObserver* observer) {
    allocation_observers_->Add(observer);
  }

  virtual void RemoveAllocationObserver(AllocationObserver* observer) {
    bool removed = allocation_observers_->RemoveElement(observer);
    USE(removed);
    DCHECK(removed);
  }

  virtual void PauseAllocationObservers() {
    allocation_observers_paused_ = true;
  }

  virtual void ResumeAllocationObservers() {
    allocation_observers_paused_ = false;
  }

  void AllocationStep(Address soon_object, int size);

  // Return the total amount of committed memory for this space, i.e.,
  // allocatable memory and page headers.
  virtual intptr_t CommittedMemory() { return committed_; }

  virtual intptr_t MaximumCommittedMemory() { return max_committed_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  // Approximate amount of physical memory committed for this space.
  virtual size_t CommittedPhysicalMemory() = 0;

  // Return the available bytes without growing.
  virtual intptr_t Available() = 0;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

  void AccountCommitted(intptr_t bytes) {
    DCHECK_GE(bytes, 0);
    committed_ += bytes;
    if (committed_ > max_committed_) {
      max_committed_ = committed_;
    }
  }

  void AccountUncommitted(intptr_t bytes) {
    DCHECK_GE(bytes, 0);
    committed_ -= bytes;
    DCHECK_GE(committed_, 0);
  }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 protected:
  v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_;
  bool allocation_observers_paused_;

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;

  // Keeps track of committed memory in a space.
  intptr_t committed_;
  intptr_t max_committed_;
};


class MemoryChunkValidator {
  // Computed offsets should match the compiler generated ones.
  STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
  STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
                offsetof(MemoryChunk, live_byte_count_));
  STATIC_ASSERT(MemoryChunk::kOldToNewSlotsOffset ==
                offsetof(MemoryChunk, old_to_new_slots_));
  STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
                offsetof(MemoryChunk, write_barrier_counter_));

  // Validate our estimates on the header size.
  STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() { TearDown(); }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(size_t requested_size);

  bool valid() { return code_range_ != NULL; }
  Address start() {
    DCHECK(valid());
    return static_cast<Address>(code_range_->address());
  }
  size_t size() {
    DCHECK(valid());
    return code_range_->size();
  }
  bool contains(Address address) {
    if (!valid()) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
                                            const size_t commit_size,
                                            size_t* allocated);
  bool CommitRawMemory(Address start, size_t length);
  bool UncommitRawMemory(Address start, size_t length);
  void FreeRawMemory(Address buf, size_t length);

 private:
  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  base::VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock() : start(0), size(0) {}
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // The global mutex guards free_list_ and allocation_list_ as GC threads may
  // access both lists concurrently to the main thread.
  base::Mutex code_range_mutex_;

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;

  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, returns false.
  bool GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);
  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
  void ReleaseBlock(const FreeBlock* block);

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};

1212
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001213class SkipList {
1214 public:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001215 SkipList() { Clear(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001216
1217 void Clear() {
1218 for (int idx = 0; idx < kSize; idx++) {
1219 starts_[idx] = reinterpret_cast<Address>(-1);
1220 }
1221 }
1222
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001223 Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001224
1225 void AddObject(Address addr, int size) {
1226 int start_region = RegionNumber(addr);
1227 int end_region = RegionNumber(addr + size - kPointerSize);
1228 for (int idx = start_region; idx <= end_region; idx++) {
Ben Murdochda12d292016-06-02 14:46:10 +01001229 if (starts_[idx] > addr) {
1230 starts_[idx] = addr;
1231 } else {
1232 // In the first region, there may already be an object closer to the
1233 // start of the region. Do not change the start in that case. If this
1234 // is not the first region, you probably added overlapping objects.
1235 DCHECK_EQ(start_region, idx);
1236 }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001237 }
1238 }
1239
1240 static inline int RegionNumber(Address addr) {
1241 return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
1242 }
1243
1244 static void Update(Address addr, int size) {
1245 Page* page = Page::FromAddress(addr);
1246 SkipList* list = page->skip_list();
1247 if (list == NULL) {
1248 list = new SkipList();
1249 page->set_skip_list(list);
1250 }
1251
1252 list->AddObject(addr, size);
1253 }
1254
1255 private:
1256 static const int kRegionSizeLog2 = 13;
1257 static const int kRegionSize = 1 << kRegionSizeLog2;
1258 static const int kSize = Page::kPageSize / kRegionSize;
1259
1260 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1261
1262 Address starts_[kSize];
1263};
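
// Illustrative sketch (not part of the interface): keeping the skip list up to
// date and querying it; {object_address}, {object_size} and {addr} are
// hypothetical.
//
//   SkipList::Update(object_address, object_size);  // after placing an object
//   SkipList* list = Page::FromAddress(object_address)->skip_list();
//   Address start = list->StartFor(addr);  // addr on the same page
//   // start is an object start from which a linear scan of the 8KB region
//   // containing addr can safely begin.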
1264
1265
Steve Blocka7e24c12009-10-30 11:49:00 +00001266// ----------------------------------------------------------------------------
1267// A space acquires chunks of memory from the operating system. The memory
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001268// allocator allocates and deallocates pages for the paged heap spaces and large
1269// pages for large object space.
Steve Block44f0eee2011-05-26 01:26:41 +01001270class MemoryAllocator {
Steve Blocka7e24c12009-10-30 11:49:00 +00001271 public:
Ben Murdochc5610432016-08-08 18:44:38 +01001272 // Unmapper takes care of concurrently unmapping and uncommitting memory
1273 // chunks.
1274 class Unmapper {
1275 public:
1276 class UnmapFreeMemoryTask;
1277
1278 explicit Unmapper(MemoryAllocator* allocator)
1279 : allocator_(allocator),
1280 pending_unmapping_tasks_semaphore_(0),
1281 concurrent_unmapping_tasks_active_(0) {}
1282
1283 void AddMemoryChunkSafe(MemoryChunk* chunk) {
1284 if ((chunk->size() == Page::kPageSize) &&
1285 (chunk->executable() != EXECUTABLE)) {
1286 AddMemoryChunkSafe<kRegular>(chunk);
1287 } else {
1288 AddMemoryChunkSafe<kNonRegular>(chunk);
1289 }
1290 }
1291
1292 MemoryChunk* TryGetPooledMemoryChunkSafe() {
1293 // Procedure:
1294 // (1) Try to get a chunk that was declared as pooled and already has
1295 // been uncommitted.
1296 // (2) Try to steal any memory chunk of kPageSize that would've been
1297 // unmapped.
1298 MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
1299 if (chunk == nullptr) {
1300 chunk = GetMemoryChunkSafe<kRegular>();
1301 if (chunk != nullptr) {
1302 // For stolen chunks we need to manually free any allocated memory.
1303 chunk->ReleaseAllocatedMemory();
1304 }
1305 }
1306 return chunk;
1307 }
1308
1309 void FreeQueuedChunks();
1310 bool WaitUntilCompleted();
1311
1312 private:
1313 enum ChunkQueueType {
1314 kRegular, // Pages of kPageSize that do not live in a CodeRange and
1315 // can thus be used for stealing.
1316 kNonRegular, // Large chunks and executable chunks.
1317    kPooled,     // Pooled chunks, already uncommitted and ready for reuse.
1318 kNumberOfChunkQueues,
1319 };
1320
1321 template <ChunkQueueType type>
1322 void AddMemoryChunkSafe(MemoryChunk* chunk) {
1323 base::LockGuard<base::Mutex> guard(&mutex_);
1324 chunks_[type].push_back(chunk);
1325 }
1326
1327 template <ChunkQueueType type>
1328 MemoryChunk* GetMemoryChunkSafe() {
1329 base::LockGuard<base::Mutex> guard(&mutex_);
1330 if (chunks_[type].empty()) return nullptr;
1331 MemoryChunk* chunk = chunks_[type].front();
1332 chunks_[type].pop_front();
1333 return chunk;
1334 }
1335
1336 void PerformFreeMemoryOnQueuedChunks();
1337
1338 base::Mutex mutex_;
1339 MemoryAllocator* allocator_;
1340 std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
1341 base::Semaphore pending_unmapping_tasks_semaphore_;
1342 intptr_t concurrent_unmapping_tasks_active_;
1343
1344 friend class MemoryAllocator;
1345 };
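
  // Illustrative sketch (not part of the interface): typical Unmapper usage
  // through the unmapper() accessor declared further below.
  //
  //   allocator->unmapper()->AddMemoryChunkSafe(chunk);  // thread-safe enqueue
  //   allocator->unmapper()->FreeQueuedChunks();    // release queued chunks
  //   allocator->unmapper()->WaitUntilCompleted();  // barrier for pending tasks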
1346
Ben Murdochda12d292016-06-02 14:46:10 +01001347 enum AllocationMode {
1348 kRegular,
1349 kPooled,
1350 };
Ben Murdochc5610432016-08-08 18:44:38 +01001351 enum FreeMode {
1352 kFull,
1353 kPreFreeAndQueue,
1354 kPooledAndQueue,
1355 };
Ben Murdochda12d292016-06-02 14:46:10 +01001356
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001357 explicit MemoryAllocator(Isolate* isolate);
1358
Steve Blocka7e24c12009-10-30 11:49:00 +00001359 // Initializes its internal bookkeeping structures.
Russell Brenner90bac252010-11-18 13:33:46 -08001360 // Max capacity of the total space and executable memory limit.
Ben Murdochc5610432016-08-08 18:44:38 +01001361 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable,
1362 intptr_t code_range_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001363
Steve Block44f0eee2011-05-26 01:26:41 +01001364 void TearDown();
Steve Blocka7e24c12009-10-30 11:49:00 +00001365
Ben Murdochc5610432016-08-08 18:44:38 +01001366 // Allocates a Page from the allocator. AllocationMode is used to indicate
1367 // whether pooled allocation, which only works for MemoryChunk::kPageSize,
1368 // should be tried first.
1369 template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
Ben Murdochda12d292016-06-02 14:46:10 +01001370 typename SpaceType>
Ben Murdochc5610432016-08-08 18:44:38 +01001371 Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable);
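
  // Illustrative sketch (not part of the interface): allocating a page for a
  // paged space; {allocator} and {paged_space} are hypothetical. With kPooled,
  // a previously pooled chunk is reused if one is available.
  //
  //   Page* page = allocator->AllocatePage<kRegular>(
  //       paged_space->AreaSize(), paged_space, NOT_EXECUTABLE);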
Steve Blocka7e24c12009-10-30 11:49:00 +00001372
Ben Murdochc5610432016-08-08 18:44:38 +01001373 LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001374 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00001375
Ben Murdochc5610432016-08-08 18:44:38 +01001376 template <MemoryAllocator::FreeMode mode = kFull>
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001377 void Free(MemoryChunk* chunk);
Steve Blocka7e24c12009-10-30 11:49:00 +00001378
Steve Blocka7e24c12009-10-30 11:49:00 +00001379 // Returns allocated spaces in bytes.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001380 intptr_t Size() { return size_.Value(); }
1381
1382 // Returns allocated executable spaces in bytes.
1383 intptr_t SizeExecutable() { return size_executable_.Value(); }
1384
1385 // Returns the maximum available bytes of heaps.
1386 intptr_t Available() {
1387 intptr_t size = Size();
1388 return capacity_ < size ? 0 : capacity_ - size;
1389 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001390
Russell Brenner90bac252010-11-18 13:33:46 -08001391 // Returns the maximum available executable bytes of heaps.
Steve Block44f0eee2011-05-26 01:26:41 +01001392 intptr_t AvailableExecutable() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001393 intptr_t executable_size = SizeExecutable();
1394 if (capacity_executable_ < executable_size) return 0;
1395 return capacity_executable_ - executable_size;
Russell Brenner90bac252010-11-18 13:33:46 -08001396 }
1397
Steve Blocka7e24c12009-10-30 11:49:00 +00001398 // Returns maximum available bytes that the old space can have.
Steve Block44f0eee2011-05-26 01:26:41 +01001399 intptr_t MaxAvailable() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001400 return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001401 }
1402
1403 // Returns an indication of whether a pointer is in a space that has
1404 // been allocated by this MemoryAllocator.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001405 V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
1406 return address < lowest_ever_allocated_.Value() ||
1407 address >= highest_ever_allocated_.Value();
Steve Blocka7e24c12009-10-30 11:49:00 +00001408 }
1409
Steve Blocka7e24c12009-10-30 11:49:00 +00001410#ifdef DEBUG
1411 // Reports statistic info of the space.
Steve Block44f0eee2011-05-26 01:26:41 +01001412 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00001413#endif
1414
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001415 // Returns a MemoryChunk in which the memory region from commit_area_size to
1416  // reserve_area_size of the chunk area is reserved but not committed; it
1417  // can be committed later by calling MemoryChunk::CommitArea.
1418 MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
1419 intptr_t commit_area_size,
1420 Executability executable, Space* space);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001421
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001422 Address ReserveAlignedMemory(size_t requested, size_t alignment,
1423 base::VirtualMemory* controller);
1424 Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1425 size_t alignment, Executability executable,
1426 base::VirtualMemory* controller);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001427
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001428 bool CommitMemory(Address addr, size_t size, Executability executable);
1429
1430 void FreeMemory(base::VirtualMemory* reservation, Executability executable);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001431 void FreeMemory(Address addr, size_t size, Executability executable);
1432
1433 // Commit a contiguous block of memory from the initial chunk. Assumes that
1434 // the address is not NULL, the size is greater than zero, and that the
1435 // block is contained in the initial chunk. Returns true if it succeeded
1436 // and false otherwise.
1437 bool CommitBlock(Address start, size_t size, Executability executable);
1438
1439 // Uncommit a contiguous block of memory [start..(start+size)[.
1440 // start is not NULL, the size is greater than zero, and the
1441 // block is contained in the initial chunk. Returns true if it succeeded
1442 // and false otherwise.
1443 bool UncommitBlock(Address start, size_t size);
1444
1445 // Zaps a contiguous block of memory [start..(start+size)[ thus
1446 // filling it up with a recognizable non-NULL bit pattern.
1447 void ZapBlock(Address start, size_t size);
1448
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001449 void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001450 size_t size);
1451
1452 void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001453 ObjectSpace space, AllocationAction action);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001454
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001455 void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001456
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001457 bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001458
1459 static int CodePageGuardStartOffset();
1460
1461 static int CodePageGuardSize();
1462
1463 static int CodePageAreaStartOffset();
1464
1465 static int CodePageAreaEndOffset();
1466
1467 static int CodePageAreaSize() {
1468 return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1469 }
1470
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001471 static int PageAreaSize(AllocationSpace space) {
1472 DCHECK_NE(LO_SPACE, space);
1473 return (space == CODE_SPACE) ? CodePageAreaSize()
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001474 : Page::kAllocatableMemory;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001475 }
1476
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001477 MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
1478 Address start, size_t commit_size,
1479 size_t reserved_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001480
Ben Murdochc5610432016-08-08 18:44:38 +01001481 CodeRange* code_range() { return code_range_; }
1482 Unmapper* unmapper() { return &unmapper_; }
1483
Steve Blocka7e24c12009-10-30 11:49:00 +00001484 private:
Ben Murdochc5610432016-08-08 18:44:38 +01001485 // PreFree logically frees the object, i.e., it takes care of the size
1486 // bookkeeping and calls the allocation callback.
1487 void PreFreeMemory(MemoryChunk* chunk);
1488
1489 // FreeMemory can be called concurrently when PreFree was executed before.
1490 void PerformFreeMemory(MemoryChunk* chunk);
1491
Ben Murdochda12d292016-06-02 14:46:10 +01001492 // See AllocatePage for public interface. Note that currently we only support
1493 // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
1494 template <typename SpaceType>
1495 MemoryChunk* AllocatePagePooled(SpaceType* owner);
1496
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001497 Isolate* isolate_;
1498
Ben Murdochc5610432016-08-08 18:44:38 +01001499 CodeRange* code_range_;
1500
Steve Blocka7e24c12009-10-30 11:49:00 +00001501 // Maximum space size in bytes.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001502 intptr_t capacity_;
Russell Brenner90bac252010-11-18 13:33:46 -08001503 // Maximum subset of capacity_ that can be executable
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001504 intptr_t capacity_executable_;
Ben Murdochb0fe1622011-05-05 13:52:32 +01001505
Steve Blocka7e24c12009-10-30 11:49:00 +00001506 // Allocated space size in bytes.
Ben Murdochc5610432016-08-08 18:44:38 +01001507 base::AtomicNumber<intptr_t> size_;
Steve Block791712a2010-08-27 10:21:07 +01001508 // Allocated executable space size in bytes.
Ben Murdochc5610432016-08-08 18:44:38 +01001509 base::AtomicNumber<intptr_t> size_executable_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001510
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001511 // We keep the lowest and highest addresses allocated as a quick way
1512 // of determining that pointers are outside the heap. The estimate is
1513  // conservative, i.e. not all addresses in 'allocated' space are allocated
1514 // to our heap. The range is [lowest, highest[, inclusive on the low end
1515 // and exclusive on the high end.
Ben Murdochc5610432016-08-08 18:44:38 +01001516 base::AtomicValue<void*> lowest_ever_allocated_;
1517 base::AtomicValue<void*> highest_ever_allocated_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001518
Iain Merrick9ac36c92010-09-13 15:29:50 +01001519 struct MemoryAllocationCallbackRegistration {
1520 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
1521 ObjectSpace space,
1522 AllocationAction action)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001523 : callback(callback), space(space), action(action) {}
Iain Merrick9ac36c92010-09-13 15:29:50 +01001524 MemoryAllocationCallback callback;
1525 ObjectSpace space;
1526 AllocationAction action;
1527 };
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001528
Iain Merrick9ac36c92010-09-13 15:29:50 +01001529  // A list of callbacks that are triggered when memory is allocated or freed.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001530 List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;
Iain Merrick9ac36c92010-09-13 15:29:50 +01001531
Steve Blocka7e24c12009-10-30 11:49:00 +00001532 // Initializes pages in a chunk. Returns the first page address.
1533 // This function and GetChunkId() are provided for the mark-compact
1534 // collector to rebuild page headers in the from space, which is
1535 // used as a marking stack and its page headers are destroyed.
Steve Block44f0eee2011-05-26 01:26:41 +01001536 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1537 PagedSpace* owner);
Steve Block6ded16b2010-05-10 14:33:55 +01001538
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001539 void UpdateAllocatedSpaceLimits(void* low, void* high) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001540 // The use of atomic primitives does not guarantee correctness (wrt.
1541 // desired semantics) by default. The loop here ensures that we update the
1542 // values only if they did not change in between.
1543 void* ptr = nullptr;
1544 do {
1545 ptr = lowest_ever_allocated_.Value();
1546 } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
1547 do {
1548 ptr = highest_ever_allocated_.Value();
1549 } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001550 }
1551
Ben Murdochc5610432016-08-08 18:44:38 +01001552 base::VirtualMemory last_chunk_;
1553 Unmapper unmapper_;
1554
1555 friend class TestCodeRangeScope;
Ben Murdochda12d292016-06-02 14:46:10 +01001556
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001557 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
Steve Blocka7e24c12009-10-30 11:49:00 +00001558};
1559
1560
1561// -----------------------------------------------------------------------------
1562// Interface for heap object iterator to be implemented by all object space
1563// object iterators.
1564//
Leon Clarked91b9f72010-01-27 17:25:45 +00001565// NOTE: The space-specific object iterators also implement their own next()
1566// method, which is used to avoid virtual function calls when
Steve Blocka7e24c12009-10-30 11:49:00 +00001567// iterating a specific space.
1568
1569class ObjectIterator : public Malloced {
1570 public:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001571 virtual ~ObjectIterator() {}
Steve Blocka7e24c12009-10-30 11:49:00 +00001572
Steve Blocka7e24c12009-10-30 11:49:00 +00001573 virtual HeapObject* next_object() = 0;
1574};
1575
1576
1577// -----------------------------------------------------------------------------
1578// Heap object iterator in new/old/map spaces.
1579//
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001580// A HeapObjectIterator iterates objects from the bottom of the given space
1581// to its top or from the bottom of the given page to its top.
Steve Blocka7e24c12009-10-30 11:49:00 +00001582//
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001583// If objects are allocated in the page during iteration the iterator may
1584// or may not iterate over those objects. The caller must create a new
1585// iterator in order to be sure to visit these new objects.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001586class HeapObjectIterator : public ObjectIterator {
Steve Blocka7e24c12009-10-30 11:49:00 +00001587 public:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001588 // Creates a new object iterator in a given space.
Steve Blocka7e24c12009-10-30 11:49:00 +00001589 explicit HeapObjectIterator(PagedSpace* space);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001590 explicit HeapObjectIterator(Page* page);
Steve Blocka7e24c12009-10-30 11:49:00 +00001591
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001592 // Advance to the next object, skipping free spaces and other fillers and
1593 // skipping the special garbage section of which there is one per space.
1594 // Returns NULL when the iteration has ended.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001595 inline HeapObject* Next();
1596 inline HeapObject* next_object() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00001597
1598 private:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001599 enum PageMode { kOnePageOnly, kAllPagesInSpace };
Steve Blocka7e24c12009-10-30 11:49:00 +00001600
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001601 Address cur_addr_; // Current iteration point.
1602 Address cur_end_; // End iteration point.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001603 PagedSpace* space_;
1604 PageMode page_mode_;
Leon Clarked91b9f72010-01-27 17:25:45 +00001605
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001606 // Fast (inlined) path of next().
1607 inline HeapObject* FromCurrentPage();
Leon Clarked91b9f72010-01-27 17:25:45 +00001608
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001609 // Slow path of next(), goes into the next page. Returns false if the
1610 // iteration has ended.
1611 bool AdvanceToNextPage();
Steve Blocka7e24c12009-10-30 11:49:00 +00001612
1613 // Initializes fields.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001614 inline void Initialize(PagedSpace* owner, Address start, Address end,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001615 PageMode mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00001616};
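
// Illustrative sketch (not part of the interface): visiting every object in a
// paged space, assuming {heap} points to the owning Heap.
//
//   HeapObjectIterator it(heap->old_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... objects allocated during the loop may or may not be visited ...
//   }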
1617
1618
1619// -----------------------------------------------------------------------------
1620// A PageIterator iterates the pages in a paged space.
Steve Blocka7e24c12009-10-30 11:49:00 +00001621
1622class PageIterator BASE_EMBEDDED {
1623 public:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001624 explicit inline PageIterator(PagedSpace* space);
Steve Blocka7e24c12009-10-30 11:49:00 +00001625
1626 inline bool has_next();
1627 inline Page* next();
1628
1629 private:
1630 PagedSpace* space_;
1631 Page* prev_page_; // Previous page returned.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001632 // Next page that will be returned. Cached here so that we can use this
1633 // iterator for operations that deallocate pages.
1634 Page* next_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001635};
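
// Illustrative sketch (not part of the interface): walking the pages of a
// paged space; because the next page is cached, the current page may be
// released inside the loop.
//
//   PageIterator it(space);
//   while (it.has_next()) {
//     Page* page = it.next();
//     // ... inspect or release page ...
//   }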
1636
1637
1638// -----------------------------------------------------------------------------
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001639// A space has a circular list of pages. The next page can be accessed via
1640// Page::next_page() call.
Steve Blocka7e24c12009-10-30 11:49:00 +00001641
1642// An abstraction of allocation and relocation pointers in a page-structured
1643// space.
1644class AllocationInfo {
1645 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001646 AllocationInfo() : top_(nullptr), limit_(nullptr) {}
1647 AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
1648
1649 void Reset(Address top, Address limit) {
1650 set_top(top);
1651 set_limit(limit);
1652 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001653
1654 INLINE(void set_top(Address top)) {
1655 SLOW_DCHECK(top == NULL ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001656 (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001657 top_ = top;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001658 }
1659
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001660 INLINE(Address top()) const {
1661 SLOW_DCHECK(top_ == NULL ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001662 (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001663 return top_;
1664 }
1665
1666 Address* top_address() { return &top_; }
1667
1668 INLINE(void set_limit(Address limit)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001669 limit_ = limit;
1670 }
1671
1672 INLINE(Address limit()) const {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001673 return limit_;
1674 }
1675
1676 Address* limit_address() { return &limit_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001677
1678#ifdef DEBUG
1679 bool VerifyPagedAllocation() {
Ben Murdochc5610432016-08-08 18:44:38 +01001680 return (Page::FromAllocationAreaAddress(top_) ==
1681 Page::FromAllocationAreaAddress(limit_)) &&
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001682 (top_ <= limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001683 }
1684#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001685
1686 private:
1687 // Current allocation top.
1688 Address top_;
1689 // Current allocation limit.
1690 Address limit_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001691};
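
// Illustrative sketch (not part of the interface): bump-pointer allocation in
// the [top, limit) area tracked by an AllocationInfo; {area_start}, {area_end}
// and {size_in_bytes} are hypothetical.
//
//   AllocationInfo info(area_start, area_end);
//   if (info.top() + size_in_bytes <= info.limit()) {
//     Address object_address = info.top();
//     info.set_top(info.top() + size_in_bytes);  // bump past the new object
//   }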
1692
1693
1694// An abstraction of the accounting statistics of a page-structured space.
Steve Blocka7e24c12009-10-30 11:49:00 +00001695//
1696// The stats are only set by functions that ensure they stay balanced. These
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001697// functions increase or decrease one of the non-capacity stats in conjunction
1698// with capacity, or else they always balance increases and decreases to the
1699// non-capacity stats.
Steve Blocka7e24c12009-10-30 11:49:00 +00001700class AllocationStats BASE_EMBEDDED {
1701 public:
1702 AllocationStats() { Clear(); }
1703
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001704 // Zero out all the allocation statistics (i.e., no capacity).
Steve Blocka7e24c12009-10-30 11:49:00 +00001705 void Clear() {
1706 capacity_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001707 max_capacity_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001708 size_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001709 }
1710
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001711 void ClearSize() { size_ = capacity_; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001712
Steve Blocka7e24c12009-10-30 11:49:00 +00001713 // Accessors for the allocation statistics.
Ben Murdochf87a2032010-10-22 12:50:53 +01001714 intptr_t Capacity() { return capacity_; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001715 intptr_t MaxCapacity() { return max_capacity_; }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001716 intptr_t Size() {
1717 CHECK_GE(size_, 0);
1718 return size_;
1719 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001720
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001721 // Grow the space by adding available bytes. They are initially marked as
1722 // being in use (part of the size), but will normally be immediately freed,
1723 // putting them on the free list and removing them from size_.
Steve Blocka7e24c12009-10-30 11:49:00 +00001724 void ExpandSpace(int size_in_bytes) {
1725 capacity_ += size_in_bytes;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001726 size_ += size_in_bytes;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001727 if (capacity_ > max_capacity_) {
1728 max_capacity_ = capacity_;
1729 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001730 CHECK(size_ >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001731 }
1732
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001733 // Shrink the space by removing available bytes. Since shrinking is done
1734 // during sweeping, bytes have been marked as being in use (part of the size)
1735 // and are hereby freed.
Steve Blocka7e24c12009-10-30 11:49:00 +00001736 void ShrinkSpace(int size_in_bytes) {
1737 capacity_ -= size_in_bytes;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001738 size_ -= size_in_bytes;
Ben Murdochda12d292016-06-02 14:46:10 +01001739 CHECK_GE(size_, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001740 }
1741
1742 // Allocate from available bytes (available -> size).
Ben Murdochf87a2032010-10-22 12:50:53 +01001743 void AllocateBytes(intptr_t size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001744 size_ += size_in_bytes;
Ben Murdochda12d292016-06-02 14:46:10 +01001745 CHECK_GE(size_, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001746 }
1747
1748 // Free allocated bytes, making them available (size -> available).
Ben Murdochf87a2032010-10-22 12:50:53 +01001749 void DeallocateBytes(intptr_t size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001750 size_ -= size_in_bytes;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001751 CHECK_GE(size_, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001752 }
1753
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001754 // Merge {other} into {this}.
1755 void Merge(const AllocationStats& other) {
1756 capacity_ += other.capacity_;
1757 size_ += other.size_;
1758 if (other.max_capacity_ > max_capacity_) {
1759 max_capacity_ = other.max_capacity_;
1760 }
1761 CHECK_GE(size_, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001762 }
1763
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001764 void DecreaseCapacity(intptr_t size_in_bytes) {
1765 capacity_ -= size_in_bytes;
1766 CHECK_GE(capacity_, 0);
1767 CHECK_GE(capacity_, size_);
1768 }
1769
1770 void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
1771
Steve Blocka7e24c12009-10-30 11:49:00 +00001772 private:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001773 // |capacity_|: The number of object-area bytes (i.e., not including page
1774 // bookkeeping structures) currently in the space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001775 intptr_t capacity_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001776
1777 // |max_capacity_|: The maximum capacity ever observed.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001778 intptr_t max_capacity_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001779
1780 // |size_|: The number of allocated bytes.
Ben Murdochf87a2032010-10-22 12:50:53 +01001781 intptr_t size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001782};
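
// Illustrative sketch (not part of the interface): how the stats stay balanced
// when a page is added and its area is immediately handed to the free list.
//
//   AllocationStats stats;
//   stats.ExpandSpace(Page::kAllocatableMemory);      // capacity and size grow
//   stats.DeallocateBytes(Page::kAllocatableMemory);  // area goes to free list
//   stats.AllocateBytes(64);                          // a 64 byte object
//   // After each step in this sequence, 0 <= Size() <= Capacity() holds.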
1783
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001784// A free list maintaining free blocks of memory. The free list is organized in
1785// a way to encourage objects allocated around the same time to be near each
1786// other. The normal way to allocate is intended to be by bumping a 'top'
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001787// pointer until it hits a 'limit' pointer. When the limit is hit we need to
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001788// find a new space to allocate from. This is done with the free list, which is
1789// divided up into rough categories to cut down on waste. Having finer
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001790// categories would scatter allocation more.
1791
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001792// The free list is organized in categories as follows:
Ben Murdochda12d292016-06-02 14:46:10 +01001793// kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
1794//   allocation when the categories >= small do not have entries anymore.
1795// 11-31 words (tiny): The tiny blocks are only used for allocation when the
1796//   categories >= small do not have entries anymore.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001797// 32-255 words (small): Used for allocating free space between 1-31 words in
1798// size.
1799// 256-2047 words (medium): Used for allocating free space between 32-255 words
1800// in size.
1801// 2048-16383 words (large): Used for allocating free space between 256-2047
1802// words in size.
1803// At least 16384 words (huge): This list is for objects of 2048 words or
1804// larger. Empty pages are also added to this list.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001805class FreeList {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001806 public:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001807 // This method returns how much memory can be allocated after freeing
1808 // maximum_freed memory.
1809 static inline int GuaranteedAllocatable(int maximum_freed) {
Ben Murdochda12d292016-06-02 14:46:10 +01001810 if (maximum_freed <= kTiniestListMax) {
1811 // Since we are not iterating over all list entries, we cannot guarantee
1812 // that we can find the maximum freed block in that free list.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001813 return 0;
Ben Murdochda12d292016-06-02 14:46:10 +01001814 } else if (maximum_freed <= kTinyListMax) {
1815 return kTinyAllocationMax;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001816 } else if (maximum_freed <= kSmallListMax) {
1817 return kSmallAllocationMax;
1818 } else if (maximum_freed <= kMediumListMax) {
1819 return kMediumAllocationMax;
1820 } else if (maximum_freed <= kLargeListMax) {
1821 return kLargeAllocationMax;
1822 }
1823 return maximum_freed;
1824 }
1825
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001826 explicit FreeList(PagedSpace* owner);
1827
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001828 // Adds a node on the free list. The block of size {size_in_bytes} starting
1829 // at {start} is placed on the free list. The return value is the number of
1830  // bytes that were not added to the free list, because the freed memory block
1831 // was too small. Bookkeeping information will be written to the block, i.e.,
1832 // its contents will be destroyed. The start address should be word aligned,
1833 // and the size should be a non-zero multiple of the word size.
Ben Murdochda12d292016-06-02 14:46:10 +01001834 int Free(Address start, int size_in_bytes, FreeMode mode);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001835
1836 // Allocate a block of size {size_in_bytes} from the free list. The block is
1837  // uninitialized. A failure is returned if no block is available. The size
1838 // should be a non-zero multiple of the word size.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001839 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1840
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001841 // Clear the free list.
1842 void Reset();
1843
Ben Murdochda12d292016-06-02 14:46:10 +01001844 void ResetStats() {
1845 wasted_bytes_.SetValue(0);
1846 ForAllFreeListCategories(
1847 [](FreeListCategory* category) { category->ResetStats(); });
1848 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001849
1850 // Return the number of bytes available on the free list.
1851 intptr_t Available() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001852 intptr_t available = 0;
Ben Murdochda12d292016-06-02 14:46:10 +01001853 ForAllFreeListCategories([&available](FreeListCategory* category) {
1854 available += category->available();
1855 });
Ben Murdoch097c5b22016-05-18 11:27:45 +01001856 return available;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001857 }
1858
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001859 bool IsEmpty() {
Ben Murdochda12d292016-06-02 14:46:10 +01001860 bool empty = true;
1861 ForAllFreeListCategories([&empty](FreeListCategory* category) {
1862 if (!category->is_empty()) empty = false;
1863 });
1864 return empty;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001865 }
1866
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001867 // Used after booting the VM.
1868 void RepairLists(Heap* heap);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001869
Ben Murdochda12d292016-06-02 14:46:10 +01001870 intptr_t EvictFreeListItems(Page* page);
1871 bool ContainsPageFreeListItems(Page* page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001872
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001873 PagedSpace* owner() { return owner_; }
Ben Murdochda12d292016-06-02 14:46:10 +01001874 intptr_t wasted_bytes() { return wasted_bytes_.Value(); }
1875
1876 template <typename Callback>
1877 void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
1878 FreeListCategory* current = categories_[type];
1879 while (current != nullptr) {
1880 FreeListCategory* next = current->next();
1881 callback(current);
1882 current = next;
1883 }
1884 }
1885
1886 template <typename Callback>
1887 void ForAllFreeListCategories(Callback callback) {
1888 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1889 ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
1890 }
1891 }
1892
1893 bool AddCategory(FreeListCategory* category);
1894 void RemoveCategory(FreeListCategory* category);
1895 void PrintCategories(FreeListCategoryType type);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001896
1897#ifdef DEBUG
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001898 intptr_t SumFreeLists();
1899 bool IsVeryLong();
1900#endif
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001901
1902 private:
Ben Murdochda12d292016-06-02 14:46:10 +01001903 class FreeListCategoryIterator {
1904 public:
1905 FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
1906 : current_(free_list->categories_[type]) {}
1907
1908 bool HasNext() { return current_ != nullptr; }
1909
1910 FreeListCategory* Next() {
1911 DCHECK(HasNext());
1912 FreeListCategory* tmp = current_;
1913 current_ = current_->next();
1914 return tmp;
1915 }
1916
1917 private:
1918 FreeListCategory* current_;
1919 };
1920
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001921 // The size range of blocks, in bytes.
1922 static const int kMinBlockSize = 3 * kPointerSize;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001923 static const int kMaxBlockSize = Page::kAllocatableMemory;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001924
Ben Murdochda12d292016-06-02 14:46:10 +01001925 static const int kTiniestListMax = 0xa * kPointerSize;
1926 static const int kTinyListMax = 0x1f * kPointerSize;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001927 static const int kSmallListMax = 0xff * kPointerSize;
1928 static const int kMediumListMax = 0x7ff * kPointerSize;
1929 static const int kLargeListMax = 0x3fff * kPointerSize;
Ben Murdochda12d292016-06-02 14:46:10 +01001930 static const int kTinyAllocationMax = kTiniestListMax;
1931 static const int kSmallAllocationMax = kTinyListMax;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001932 static const int kMediumAllocationMax = kSmallListMax;
1933 static const int kLargeAllocationMax = kMediumListMax;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001934
1935 FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001936
Ben Murdochda12d292016-06-02 14:46:10 +01001937 // Walks all available categories for a given |type| and tries to retrieve
1938  // a node. Returns nullptr if no category of that |type| has a node.
1939 FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size);
1940
1941 // Tries to retrieve a node from the first category in a given |type|.
1942 // Returns nullptr if the category is empty.
1943 FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size,
1944 int minimum_size);
1945
1946 // Searches a given |type| for a node of at least |minimum_size|.
1947 FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size,
1948 int minimum_size);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001949
1950 FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
Ben Murdochda12d292016-06-02 14:46:10 +01001951 if (size_in_bytes <= kTiniestListMax) {
1952 return kTiniest;
1953 } else if (size_in_bytes <= kTinyListMax) {
1954 return kTiny;
1955 } else if (size_in_bytes <= kSmallListMax) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001956 return kSmall;
1957 } else if (size_in_bytes <= kMediumListMax) {
1958 return kMedium;
1959 } else if (size_in_bytes <= kLargeListMax) {
1960 return kLarge;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001961 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001962 return kHuge;
1963 }
1964
Ben Murdochda12d292016-06-02 14:46:10 +01001965 // The tiny categories are not used for fast allocation.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001966 FreeListCategoryType SelectFastAllocationFreeListCategoryType(
1967 size_t size_in_bytes) {
1968 if (size_in_bytes <= kSmallAllocationMax) {
1969 return kSmall;
1970 } else if (size_in_bytes <= kMediumAllocationMax) {
1971 return kMedium;
1972 } else if (size_in_bytes <= kLargeAllocationMax) {
1973 return kLarge;
1974 }
1975 return kHuge;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001976 }
1977
Ben Murdochda12d292016-06-02 14:46:10 +01001978 FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
1979
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001980 PagedSpace* owner_;
Ben Murdochc5610432016-08-08 18:44:38 +01001981 base::AtomicNumber<intptr_t> wasted_bytes_;
Ben Murdochda12d292016-06-02 14:46:10 +01001982 FreeListCategory* categories_[kNumberOfCategories];
1983
1984 friend class FreeListCategory;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001985
1986 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1987};
1988
1989
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001990class AllocationResult {
1991 public:
1992 // Implicit constructor from Object*.
1993 AllocationResult(Object* object) // NOLINT
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001994 : object_(object) {
1995 // AllocationResults can't return Smis, which are used to represent
1996 // failure and the space to retry in.
1997 CHECK(!object->IsSmi());
1998 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001999
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002000 AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002001
2002 static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
2003 return AllocationResult(space);
2004 }
2005
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002006 inline bool IsRetry() { return object_->IsSmi(); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002007
2008 template <typename T>
2009 bool To(T** obj) {
2010 if (IsRetry()) return false;
2011 *obj = T::cast(object_);
2012 return true;
2013 }
2014
2015 Object* ToObjectChecked() {
2016 CHECK(!IsRetry());
2017 return object_;
2018 }
2019
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002020 inline AllocationSpace RetrySpace();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002021
2022 private:
2023 explicit AllocationResult(AllocationSpace space)
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002024 : object_(Smi::FromInt(static_cast<int>(space))) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002025
2026 Object* object_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002027};
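
// Illustrative sketch (not part of the interface): the usual pattern for
// consuming an AllocationResult; {space}, {size_in_bytes} and the alignment
// are hypothetical.
//
//   HeapObject* object = nullptr;
//   AllocationResult allocation = space->AllocateRaw(size_in_bytes, kWordAligned);
//   if (!allocation.To(&object)) {
//     // Failed: allocation.RetrySpace() names the space to retry in,
//     // typically after a garbage collection.
//   }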
2028
2029
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002030STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
2031
2032
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002033// LocalAllocationBuffer represents a linear allocation area that is created
2034// from a given {AllocationResult} and can be used to allocate memory without
2035// synchronization.
2036//
2037// The buffer is properly closed upon destruction and reassignment.
2038// Example:
2039// {
2040// AllocationResult result = ...;
2041// LocalAllocationBuffer a(heap, result, size);
2042// LocalAllocationBuffer b = a;
2043// CHECK(!a.IsValid());
2044// CHECK(b.IsValid());
2045// // {a} is invalid now and cannot be used for further allocations.
2046// }
2047// // Since {b} went out of scope, the LAB is closed, resulting in creating a
2048// // filler object for the remaining area.
2049class LocalAllocationBuffer {
2050 public:
2051 // Indicates that a buffer cannot be used for allocations anymore. Can result
2052 // from either reassigning a buffer, or trying to construct it from an
2053 // invalid {AllocationResult}.
2054 static inline LocalAllocationBuffer InvalidBuffer();
2055
2056 // Creates a new LAB from a given {AllocationResult}. Results in
2057 // InvalidBuffer if the result indicates a retry.
2058 static inline LocalAllocationBuffer FromResult(Heap* heap,
2059 AllocationResult result,
2060 intptr_t size);
2061
2062 ~LocalAllocationBuffer() { Close(); }
2063
2064 // Convert to C++11 move-semantics once allowed by the style guide.
2065 LocalAllocationBuffer(const LocalAllocationBuffer& other);
2066 LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
2067
2068 MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
2069 int size_in_bytes, AllocationAlignment alignment);
2070
2071 inline bool IsValid() { return allocation_info_.top() != nullptr; }
2072
2073 // Try to merge LABs, which is only possible when they are adjacent in memory.
2074 // Returns true if the merge was successful, false otherwise.
2075 inline bool TryMerge(LocalAllocationBuffer* other);
2076
2077 private:
2078 LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
2079
2080 void Close();
2081
2082 Heap* heap_;
2083 AllocationInfo allocation_info_;
2084};
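
// Illustrative sketch (not part of the interface): carving objects out of a
// LAB, assuming {result} is an AllocationResult for a block of {kLabSize}
// bytes; both names are hypothetical.
//
//   LocalAllocationBuffer lab =
//       LocalAllocationBuffer::FromResult(heap, result, kLabSize);
//   if (lab.IsValid()) {
//     AllocationResult object = lab.AllocateRawAligned(object_size, kWordAligned);
//   }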
2085
Steve Blocka7e24c12009-10-30 11:49:00 +00002086class PagedSpace : public Space {
2087 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002088 static const intptr_t kCompactionMemoryWanted = 500 * KB;
Steve Blocka7e24c12009-10-30 11:49:00 +00002089
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002090 // Creates a space with an id.
2091 PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
2092
2093 ~PagedSpace() override { TearDown(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002094
2095 // Set up the space using the given address range of virtual memory (from
2096 // the memory allocator's initial chunk) if possible. If the block of
2097 // addresses is not big enough to contain a single page-aligned page, a
2098 // fresh chunk will be allocated.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002099 bool SetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00002100
2101 // Returns true if the space has been successfully set up and not
2102 // subsequently torn down.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002103 bool HasBeenSetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00002104
Steve Blocka7e24c12009-10-30 11:49:00 +00002105 // Checks whether an object/address is in this space.
2106 inline bool Contains(Address a);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002107 inline bool Contains(Object* o);
2108 bool ContainsSlow(Address addr);
Steve Blocka7e24c12009-10-30 11:49:00 +00002109
2110 // Given an address occupied by a live object, return that object if it is
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002111 // in this space, or a Smi if it is not. The implementation iterates over
2112  // objects in the page containing the address; the cost is linear in the
2113 // number of objects in the page. It may be slow.
2114 Object* FindObject(Address addr);
2115
2116 // During boot the free_space_map is created, and afterwards we may need
2117 // to write it into the free list nodes that were already created.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002118 void RepairFreeListsAfterDeserialization();
Steve Blocka7e24c12009-10-30 11:49:00 +00002119
Ben Murdoch85b71792012-04-11 18:30:58 +01002120 // Prepares for a mark-compact GC.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002121 void PrepareForMarkCompact();
Ben Murdoch85b71792012-04-11 18:30:58 +01002122
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002123 // Current capacity without growing (Size() + Available()).
Ben Murdochf87a2032010-10-22 12:50:53 +01002124 intptr_t Capacity() { return accounting_stats_.Capacity(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002125
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002126 // Approximate amount of physical memory committed for this space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002127 size_t CommittedPhysicalMemory() override;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002128
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002129 void ResetFreeListStatistics();
2130
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002131 // Sets the capacity, the available space and the wasted space to zero.
2132 // The stats are rebuilt during sweeping by adding each page to the
2133 // capacity and the size when it is encountered. As free spaces are
2134 // discovered during the sweeping they are subtracted from the size and added
2135 // to the available and wasted totals.
2136 void ClearStats() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002137 accounting_stats_.ClearSize();
2138 free_list_.ResetStats();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002139 ResetFreeListStatistics();
2140 }
2141
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002142 // Available bytes without growing. These are the bytes on the free list.
2143 // The bytes in the linear allocation area are not included in this total
2144 // because updating the stats would slow down allocation. New pages are
2145 // immediately added to the free list so they show up here.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002146 intptr_t Available() override { return free_list_.Available(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002147
2148 // Allocated bytes in this space. Garbage bytes that were not found due to
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002149 // concurrent sweeping are counted as being allocated! The bytes in the
2150 // current linear allocation area (between top and limit) are also counted
2151 // here.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002152 intptr_t Size() override { return accounting_stats_.Size(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002153
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002154  // As Size(), but the bytes in lazily swept pages are estimated and the bytes
2155 // in the current linear allocation area are not included.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002156 intptr_t SizeOfObjects() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00002157
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002158 // Wasted bytes in this space. These are just the bytes that were thrown away
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002159 // due to being too small to use for allocation.
2160 virtual intptr_t Waste() { return free_list_.wasted_bytes(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002161
2162 // Returns the allocation pointer in this space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002163 Address top() { return allocation_info_.top(); }
2164 Address limit() { return allocation_info_.limit(); }
2165
2166 // The allocation top address.
2167 Address* allocation_top_address() { return allocation_info_.top_address(); }
2168
2169 // The allocation limit address.
2170 Address* allocation_limit_address() {
2171 return allocation_info_.limit_address();
2172 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002173
Ben Murdochda12d292016-06-02 14:46:10 +01002174 enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
2175
Steve Blocka7e24c12009-10-30 11:49:00 +00002176 // Allocate the requested number of bytes in the space if possible, return a
Ben Murdochda12d292016-06-02 14:46:10 +01002177 // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
2178 // to be manually updated later.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002179 MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
Ben Murdochda12d292016-06-02 14:46:10 +01002180 int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002181
2182 MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
2183 int size_in_bytes);
2184
2185 // Allocate the requested number of bytes in the space double aligned if
2186 // possible, return a failure object if not.
2187 MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
2188 int size_in_bytes, AllocationAlignment alignment);
2189
2190 // Allocate the requested number of bytes in the space and consider allocation
2191 // alignment if needed.
2192 MUST_USE_RESULT inline AllocationResult AllocateRaw(
2193 int size_in_bytes, AllocationAlignment alignment);
Leon Clarkee46be812010-01-19 14:06:41 +00002194
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002195 // Give a block of memory to the space's free list. It might be added to
2196 // the free list or accounted as waste.
2197  // Free() updates the accounting stats; use UnaccountedFree() below when the
2198  // caller takes care of the accounting itself.
2199 int Free(Address start, int size_in_bytes) {
Ben Murdochda12d292016-06-02 14:46:10 +01002200 int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002201 accounting_stats_.DeallocateBytes(size_in_bytes);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002202 return size_in_bytes - wasted;
2203 }
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002204
Ben Murdochda12d292016-06-02 14:46:10 +01002205 int UnaccountedFree(Address start, int size_in_bytes) {
2206 int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
2207 return size_in_bytes - wasted;
2208 }
2209
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002210 void ResetFreeList() { free_list_.Reset(); }
2211
Steve Block6ded16b2010-05-10 14:33:55 +01002212 // Set space allocation info.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002213 void SetTopAndLimit(Address top, Address limit) {
2214 DCHECK(top == limit ||
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002215 Page::FromAddress(top) == Page::FromAddress(limit - 1));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002216 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002217 allocation_info_.Reset(top, limit);
Steve Block6ded16b2010-05-10 14:33:55 +01002218 }
2219
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002220 // Empty space allocation info, returning unused area to free list.
2221 void EmptyAllocationInfo() {
2222 // Mark the old linear allocation area with a free space map so it can be
2223 // skipped when scanning the heap.
2224 int old_linear_size = static_cast<int>(limit() - top());
2225 Free(top(), old_linear_size);
2226 SetTopAndLimit(NULL, NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00002227 }
2228
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002229 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
2230
2231 void IncreaseCapacity(int size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002232
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002233 // Releases an unused page and shrinks the space.
Ben Murdochda12d292016-06-02 14:46:10 +01002234 void ReleasePage(Page* page);
Steve Blocka7e24c12009-10-30 11:49:00 +00002235
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002236 // The dummy page that anchors the linked list of pages.
2237 Page* anchor() { return &anchor_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002238
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002239#ifdef VERIFY_HEAP
2240 // Verify integrity of this space.
2241 virtual void Verify(ObjectVisitor* visitor);
2242
2243 // Overridden by subclasses to verify space-specific object
2244 // properties (e.g., only maps or free-list nodes are in map space).
2245 virtual void VerifyObject(HeapObject* obj) {}
2246#endif
2247
Steve Blocka7e24c12009-10-30 11:49:00 +00002248#ifdef DEBUG
2249 // Print meta info and objects in this space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002250 void Print() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00002251
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002252 // Reports statistics for the space
2253 void ReportStatistics();
2254
Steve Blocka7e24c12009-10-30 11:49:00 +00002255 // Report code object related statistics
2256 void CollectCodeStatistics();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002257 static void ReportCodeStatistics(Isolate* isolate);
2258 static void ResetCodeStatistics(Isolate* isolate);
Steve Blocka7e24c12009-10-30 11:49:00 +00002259#endif
2260
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002261 Page* FirstPage() { return anchor_.next_page(); }
2262 Page* LastPage() { return anchor_.prev_page(); }
2263
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002264 void EvictEvacuationCandidatesFromLinearAllocationArea();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002265
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002266 bool CanExpand(size_t size);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002267
2268 // Returns the number of total pages in this space.
2269 int CountTotalPages();
2270
2271 // Return size of allocatable area on a page in this space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002272 inline int AreaSize() { return area_size_; }
2273
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002274 virtual bool is_local() { return false; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002275
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002276 // Merges {other} into the current space. Note that this modifies {other},
2277 // e.g., removes its bump pointer area and resets statistics.
2278 void MergeCompactionSpace(CompactionSpace* other);
2279
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002280 // Refills the free list from the corresponding free list filled by the
2281 // sweeper.
2282 virtual void RefillFreeList();
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002283
Ben Murdochda12d292016-06-02 14:46:10 +01002284 FreeList* free_list() { return &free_list_; }
2285
2286 base::Mutex* mutex() { return &space_mutex_; }
2287
2288 inline void UnlinkFreeListCategories(Page* page);
2289 inline intptr_t RelinkFreeListCategories(Page* page);
2290
Steve Blocka7e24c12009-10-30 11:49:00 +00002291 protected:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002292 // PagedSpaces that should be included in snapshots have different, i.e.,
2293 // smaller, initial pages.
2294 virtual bool snapshotable() { return true; }
2295
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002296 bool HasPages() { return anchor_.next_page() != &anchor_; }
2297
2298 // Cleans up the space, frees all pages in this space except those belonging
2299 // to the initial chunk, uncommits addresses in the initial chunk.
2300 void TearDown();
2301
2302 // Expands the space by allocating a fixed number of pages. Returns false if
2303  // it cannot allocate the requested number of pages from the OS, or if the hard heap
2304 // size limit has been hit.
2305 bool Expand();
2306
2307 // Generic fast case allocation function that tries linear allocation at the
2308 // address denoted by top in allocation_info_.
2309 inline HeapObject* AllocateLinearly(int size_in_bytes);
2310
2311 // Generic fast case allocation function that tries aligned linear allocation
2312 // at the address denoted by top in allocation_info_. Writes the aligned
2313 // allocation size, which includes the filler size, to size_in_bytes.
2314 inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
2315 AllocationAlignment alignment);
2316
2317 // If sweeping is still in progress try to sweep unswept pages. If that is
2318 // not successful, wait for the sweeper threads and re-try free-list
2319 // allocation.
2320 MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
2321 int size_in_bytes);
2322
2323 // Slow path of AllocateRaw. This function is space-dependent.
2324 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
2325
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002326 int area_size_;
2327
Steve Blocka7e24c12009-10-30 11:49:00 +00002328 // Accounting information for this space.
2329 AllocationStats accounting_stats_;
2330
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002331 // The dummy page that anchors the double linked list of pages.
2332 Page anchor_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002333
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002334 // The space's free list.
2335 FreeList free_list_;
Steve Block6ded16b2010-05-10 14:33:55 +01002336
Steve Blocka7e24c12009-10-30 11:49:00 +00002337 // Normal allocation information.
2338 AllocationInfo allocation_info_;
2339
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002340 // Mutex guarding any concurrent access to the space.
2341 base::Mutex space_mutex_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002342
Ben Murdochda12d292016-06-02 14:46:10 +01002343 friend class IncrementalMarking;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002344 friend class MarkCompactCollector;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002345 friend class PageIterator;
2346
2347 // Used in cctest.
2348 friend class HeapTester;
Steve Blocka7e24c12009-10-30 11:49:00 +00002349};
2350
2351
Steve Blocka7e24c12009-10-30 11:49:00 +00002352class NumberAndSizeInfo BASE_EMBEDDED {
2353 public:
2354 NumberAndSizeInfo() : number_(0), bytes_(0) {}
2355
2356 int number() const { return number_; }
2357 void increment_number(int num) { number_ += num; }
2358
2359 int bytes() const { return bytes_; }
2360 void increment_bytes(int size) { bytes_ += size; }
2361
2362 void clear() {
2363 number_ = 0;
2364 bytes_ = 0;
2365 }
2366
2367 private:
2368 int number_;
2369 int bytes_;
2370};
2371
2372
2373// HistogramInfo class for recording a single "bar" of a histogram. This
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002374// class is used for collecting statistics to print to the log file.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002375class HistogramInfo : public NumberAndSizeInfo {
Steve Blocka7e24c12009-10-30 11:49:00 +00002376 public:
2377 HistogramInfo() : NumberAndSizeInfo() {}
2378
2379 const char* name() { return name_; }
2380 void set_name(const char* name) { name_ = name; }
2381
2382 private:
2383 const char* name_;
2384};
Steve Blocka7e24c12009-10-30 11:49:00 +00002385
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002386enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002387
Steve Blocka7e24c12009-10-30 11:49:00 +00002388// -----------------------------------------------------------------------------
2389// SemiSpace in young generation
2390//
Ben Murdoch097c5b22016-05-18 11:27:45 +01002391// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
2392// The mark-compact collector uses the memory of the first page in the from
2393// space as a marking stack when tracing live objects.
Steve Blocka7e24c12009-10-30 11:49:00 +00002394class SemiSpace : public Space {
2395 public:
Ben Murdoch097c5b22016-05-18 11:27:45 +01002396 static void Swap(SemiSpace* from, SemiSpace* to);
2397
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002398 SemiSpace(Heap* heap, SemiSpaceId semispace)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002399 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
Ben Murdoch097c5b22016-05-18 11:27:45 +01002400 current_capacity_(0),
2401 maximum_capacity_(0),
2402 minimum_capacity_(0),
Ben Murdoch097c5b22016-05-18 11:27:45 +01002403 age_mark_(nullptr),
2404 committed_(false),
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002405 id_(semispace),
2406 anchor_(this),
Ben Murdoch097c5b22016-05-18 11:27:45 +01002407 current_page_(nullptr) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002408
Ben Murdoch097c5b22016-05-18 11:27:45 +01002409 inline bool Contains(HeapObject* o);
2410 inline bool Contains(Object* o);
2411 inline bool ContainsSlow(Address a);
2412
Ben Murdochda12d292016-06-02 14:46:10 +01002413 void SetUp(int initial_capacity, int maximum_capacity);
Steve Blocka7e24c12009-10-30 11:49:00 +00002414 void TearDown();
Ben Murdochda12d292016-06-02 14:46:10 +01002415 bool HasBeenSetUp() { return maximum_capacity_ != 0; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002416
Ben Murdochda12d292016-06-02 14:46:10 +01002417 bool Commit();
2418 bool Uncommit();
2419 bool is_committed() { return committed_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002420
Ben Murdochda12d292016-06-02 14:46:10 +01002421 // Grow the semispace to the new capacity. The new capacity requested must
2422 // be larger than the current capacity and less than the maximum capacity.
Steve Blocka7e24c12009-10-30 11:49:00 +00002423 bool GrowTo(int new_capacity);
2424
Ben Murdochda12d292016-06-02 14:46:10 +01002425 // Shrinks the semispace to the new capacity. The new capacity requested
2426 // must be more than the amount of used memory in the semispace and less
2427 // than the current capacity.
Steve Blocka7e24c12009-10-30 11:49:00 +00002428 bool ShrinkTo(int new_capacity);
2429
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002430 // Returns the start address of the first page of the space.
2431 Address space_start() {
Ben Murdochda12d292016-06-02 14:46:10 +01002432 DCHECK_NE(anchor_.next_page(), anchor());
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002433 return anchor_.next_page()->area_start();
2434 }
2435
Ben Murdochc5610432016-08-08 18:44:38 +01002436 Page* first_page() { return anchor_.next_page(); }
2437 Page* current_page() { return current_page_; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002438
Steve Blocka7e24c12009-10-30 11:49:00 +00002439 // Returns one past the end address of the space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002440 Address space_end() { return anchor_.prev_page()->area_end(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002441
Ben Murdochda12d292016-06-02 14:46:10 +01002442 // Returns the start address of the current page of the space.
2443 Address page_low() { return current_page_->area_start(); }
2444
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002445 // Returns one past the end address of the current page of the space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002446 Address page_high() { return current_page_->area_end(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002447
2448 bool AdvancePage() {
Ben Murdochc5610432016-08-08 18:44:38 +01002449 Page* next_page = current_page_->next_page();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002450 if (next_page == anchor()) return false;
2451 current_page_ = next_page;
2452 return true;
2453 }
2454
2455 // Resets the space to using the first page.
2456 void Reset();
Steve Blocka7e24c12009-10-30 11:49:00 +00002457
Ben Murdochc5610432016-08-08 18:44:38 +01002458 bool ReplaceWithEmptyPage(Page* page);
2459
Steve Blocka7e24c12009-10-30 11:49:00 +00002460 // Age mark accessors.
2461 Address age_mark() { return age_mark_; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002462 void set_age_mark(Address mark);
Steve Blocka7e24c12009-10-30 11:49:00 +00002463
Ben Murdochda12d292016-06-02 14:46:10 +01002464 // Returns the current capacity of the semispace.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002465 int current_capacity() { return current_capacity_; }
2466
Ben Murdochda12d292016-06-02 14:46:10 +01002467 // Returns the maximum capacity of the semispace.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002468 int maximum_capacity() { return maximum_capacity_; }
2469
2470 // Returns the initial capacity of the semispace.
2471 int minimum_capacity() { return minimum_capacity_; }
2472
2473 SemiSpaceId id() { return id_; }
2474
2475 // Approximate amount of physical memory committed for this space.
2476 size_t CommittedPhysicalMemory() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00002477
Leon Clarkee46be812010-01-19 14:06:41 +00002478 // If we don't have these here then SemiSpace will be abstract. However,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002479 // they should never be called:
2480
2481 intptr_t Size() override {
Steve Blocka7e24c12009-10-30 11:49:00 +00002482 UNREACHABLE();
2483 return 0;
2484 }
2485
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002486 intptr_t SizeOfObjects() override { return Size(); }
2487
2488 intptr_t Available() override {
2489 UNREACHABLE();
2490 return 0;
2491 }
2492
Steve Blocka7e24c12009-10-30 11:49:00 +00002493#ifdef DEBUG
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002494 void Print() override;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002495 // Validate a range of addresses in a SemiSpace.
2496 // The "from" address must be on a page prior to the "to" address,
2497 // in the linked page order, or it must be earlier on the same page.
2498 static void AssertValidRange(Address from, Address to);
2499#else
2500 // Do nothing.
2501 inline static void AssertValidRange(Address from, Address to) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002502#endif
2503
Ben Murdoch097c5b22016-05-18 11:27:45 +01002504#ifdef VERIFY_HEAP
2505 virtual void Verify();
2506#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002507
Steve Blocka7e24c12009-10-30 11:49:00 +00002508 private:
Ben Murdochc5610432016-08-08 18:44:38 +01002509 void RewindPages(Page* start, int num_pages);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002510
Ben Murdochc5610432016-08-08 18:44:38 +01002511 inline Page* anchor() { return &anchor_; }
Ben Murdoch097c5b22016-05-18 11:27:45 +01002512
2513 // Copies the flags into the masked positions on all pages in the space.
2514 void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
2515
2516 // The currently committed space capacity.
2517 int current_capacity_;
2518
2519 // The maximum capacity that can be used by this space.
2520 int maximum_capacity_;
2521
Ben Murdochda12d292016-06-02 14:46:10 +01002522 // The minimum capacity for the space. A space cannot shrink below this size.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002523 int minimum_capacity_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002524
Steve Blocka7e24c12009-10-30 11:49:00 +00002525 // Used to govern object promotion during mark-compact collection.
2526 Address age_mark_;
2527
Steve Blocka7e24c12009-10-30 11:49:00 +00002528 bool committed_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002529 SemiSpaceId id_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002530
Ben Murdochc5610432016-08-08 18:44:38 +01002531 Page anchor_;
2532 Page* current_page_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002533
2534 friend class SemiSpaceIterator;
2535 friend class NewSpacePageIterator;
Steve Blocka7e24c12009-10-30 11:49:00 +00002536};
2537
2538
2539// A SemiSpaceIterator is an ObjectIterator that iterates over the active
2540// semispace of the heap's new space. It iterates over the objects in the
2541// semispace from a given start address (defaulting to the bottom of the
2542// semispace) to the top of the semispace. New objects allocated after the
2543// iterator is created are not iterated.
2544class SemiSpaceIterator : public ObjectIterator {
2545 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002546 // Create an iterator over the allocated objects in the given to-space.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002547 explicit SemiSpaceIterator(NewSpace* space);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002548
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002549 inline HeapObject* Next();
Steve Blocka7e24c12009-10-30 11:49:00 +00002550
2551 // Implementation of the ObjectIterator functions.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002552 inline HeapObject* next_object() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00002553
2554 private:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002555 void Initialize(Address start, Address end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002556
Steve Blocka7e24c12009-10-30 11:49:00 +00002557 // The current iteration point.
2558 Address current_;
2559 // The end of iteration.
2560 Address limit_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002561};
2562
2563
2564// -----------------------------------------------------------------------------
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002565// A NewSpacePageIterator iterates the pages in a semi-space.
2566class NewSpacePageIterator BASE_EMBEDDED {
2567 public:
2568 // Make an iterator that runs over all pages in to-space.
2569 explicit inline NewSpacePageIterator(NewSpace* space);
2570
2571 // Make an iterator that runs over all pages in the given semispace,
2572 // even those not used in allocation.
2573 explicit inline NewSpacePageIterator(SemiSpace* space);
2574
2575 // Make an iterator that iterates from the page containing start
2576 // to the page that contains limit in the same semispace.
2577 inline NewSpacePageIterator(Address start, Address limit);
2578
2579 inline bool has_next();
Ben Murdochc5610432016-08-08 18:44:38 +01002580 inline Page* next();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002581
2582 private:
Ben Murdochc5610432016-08-08 18:44:38 +01002583 Page* prev_page_; // Previous page returned.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002584 // Next page that will be returned. Cached here so that we can use this
2585 // iterator for operations that deallocate pages.
Ben Murdochc5610432016-08-08 18:44:38 +01002586 Page* next_page_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002587 // Last page returned.
Ben Murdochc5610432016-08-08 18:44:38 +01002588 Page* last_page_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002589};
2590
2591
2592// -----------------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +00002593// The young generation space.
2594//
2595// The new space consists of a contiguous pair of semispaces. It simply
2596// forwards most functions to the appropriate semispace.
2597
2598class NewSpace : public Space {
2599 public:
Steve Block44f0eee2011-05-26 01:26:41 +01002600 explicit NewSpace(Heap* heap)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002601 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2602 to_space_(heap, kToSpace),
2603 from_space_(heap, kFromSpace),
2604 reservation_(),
Ben Murdochda12d292016-06-02 14:46:10 +01002605 pages_used_(0),
2606 top_on_previous_step_(0),
2607 allocated_histogram_(nullptr),
2608 promoted_histogram_(nullptr) {}
Ben Murdoch097c5b22016-05-18 11:27:45 +01002609
2610 inline bool Contains(HeapObject* o);
2611 inline bool ContainsSlow(Address a);
2612 inline bool Contains(Object* o);
Steve Blocka7e24c12009-10-30 11:49:00 +00002613
Ben Murdochda12d292016-06-02 14:46:10 +01002614 bool SetUp(int initial_semispace_capacity, int max_semispace_capacity);
Steve Blocka7e24c12009-10-30 11:49:00 +00002615
2616 // Tears down the space. Heap memory was not allocated by the space, so it
2617 // is not deallocated here.
2618 void TearDown();
2619
2620 // True if the space has been set up but not torn down.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002621 bool HasBeenSetUp() {
2622 return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00002623 }
2624
2625 // Flip the pair of spaces.
2626 void Flip();
2627
2628 // Grow the capacity of the semispaces. Assumes that they are not at
2629 // their maximum capacity.
2630 void Grow();
2631
2632 // Shrink the capacity of the semispaces.
2633 void Shrink();
2634
Steve Blocka7e24c12009-10-30 11:49:00 +00002635 // Return the allocated bytes in the active semispace.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002636 intptr_t Size() override {
Ben Murdochc5610432016-08-08 18:44:38 +01002637 return pages_used_ * Page::kAllocatableMemory +
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002638 static_cast<int>(top() - to_space_.page_low());
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002639 }
2640
Ben Murdochf87a2032010-10-22 12:50:53 +01002641 // The same, but returning an int. We have to have the one that returns
2642 // intptr_t because it is inherited, but if we know we are dealing with the
2643 // new space, which can't get as big as the other spaces, then this is useful:
2644 int SizeAsInt() { return static_cast<int>(Size()); }
Steve Block3ce2e202009-11-05 08:53:23 +00002645
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002646 // Return the allocatable capacity of a semispace.
2647 intptr_t Capacity() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002648 SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2649 return (to_space_.current_capacity() / Page::kPageSize) *
Ben Murdochc5610432016-08-08 18:44:38 +01002650 Page::kAllocatableMemory;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002651 }
2652
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002653 // Return the current size of a semispace, allocatable and non-allocatable
2654 // memory.
2655 intptr_t TotalCapacity() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002656 DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2657 return to_space_.current_capacity();
Steve Blocka7e24c12009-10-30 11:49:00 +00002658 }
Steve Block3ce2e202009-11-05 08:53:23 +00002659
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002660 // Committed memory for NewSpace is the committed memory of both semi-spaces
2661 // combined.
2662 intptr_t CommittedMemory() override {
2663 return from_space_.CommittedMemory() + to_space_.CommittedMemory();
Steve Block3ce2e202009-11-05 08:53:23 +00002664 }
2665
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002666 intptr_t MaximumCommittedMemory() override {
2667 return from_space_.MaximumCommittedMemory() +
2668 to_space_.MaximumCommittedMemory();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002669 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002670
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002671 // Approximate amount of physical memory committed for this space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002672 size_t CommittedPhysicalMemory() override;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002673
2674 // Return the available bytes without growing.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002675 intptr_t Available() override { return Capacity() - Size(); }
2676
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002677 size_t AllocatedSinceLastGC() {
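    // Walks the to-space page chain from the first page to the page
    // containing top(), summing the bytes allocated after the age mark: the
    // tail of the age-mark page, all full pages in between, and the used
    // part of the top page. Returns 0 if the age-mark page can no longer be
    // reached because top was reset.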
Ben Murdochda12d292016-06-02 14:46:10 +01002678 bool seen_age_mark = false;
2679 Address age_mark = to_space_.age_mark();
Ben Murdochc5610432016-08-08 18:44:38 +01002680 Page* current_page = to_space_.first_page();
2681 Page* age_mark_page = Page::FromAddress(age_mark);
2682 Page* last_page = Page::FromAddress(top() - kPointerSize);
Ben Murdochda12d292016-06-02 14:46:10 +01002683 if (age_mark_page == last_page) {
2684 if (top() - age_mark >= 0) {
2685 return top() - age_mark;
2686 }
2687 // Top was reset at some point, invalidating this metric.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002688 return 0;
2689 }
Ben Murdochda12d292016-06-02 14:46:10 +01002690 while (current_page != last_page) {
2691 if (current_page == age_mark_page) {
2692 seen_age_mark = true;
2693 break;
2694 }
2695 current_page = current_page->next_page();
2696 }
2697 if (!seen_age_mark) {
2698 // Top was reset at some point, invalidating this metric.
2699 return 0;
2700 }
2701 intptr_t allocated = age_mark_page->area_end() - age_mark;
2702 DCHECK_EQ(current_page, age_mark_page);
2703 current_page = age_mark_page->next_page();
2704 while (current_page != last_page) {
Ben Murdochc5610432016-08-08 18:44:38 +01002705 allocated += Page::kAllocatableMemory;
Ben Murdochda12d292016-06-02 14:46:10 +01002706 current_page = current_page->next_page();
2707 }
2708 allocated += top() - current_page->area_start();
2709 DCHECK_LE(0, allocated);
2710 DCHECK_LE(allocated, Size());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002711 return static_cast<size_t>(allocated);
2712 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002713
Ben Murdochc5610432016-08-08 18:44:38 +01002714 bool ReplaceWithEmptyPage(Page* page) {
2715 // This method is called after flipping the semispace.
2716 DCHECK(page->InFromSpace());
2717 return from_space_.ReplaceWithEmptyPage(page);
2718 }
2719
Steve Blocka7e24c12009-10-30 11:49:00 +00002720 // Return the maximum capacity of a semispace.
2721 int MaximumCapacity() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002722 DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
2723 return to_space_.maximum_capacity();
Steve Blocka7e24c12009-10-30 11:49:00 +00002724 }
2725
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002726 bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2727
Steve Blocka7e24c12009-10-30 11:49:00 +00002728 // Returns the initial capacity of a semispace.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002729 int InitialTotalCapacity() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002730 DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
2731 return to_space_.minimum_capacity();
Steve Blocka7e24c12009-10-30 11:49:00 +00002732 }
2733
2734 // Return the address of the allocation pointer in the active semispace.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002735 Address top() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002736 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2737 return allocation_info_.top();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002738 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002739
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002740 // Return the address of the allocation pointer limit in the active semispace.
2741 Address limit() {
2742 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2743 return allocation_info_.limit();
2744 }
2745
Steve Blocka7e24c12009-10-30 11:49:00 +00002746 // Return the address of the first object in the active semispace.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002747 Address bottom() { return to_space_.space_start(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002748
2749 // Get the age mark of the inactive semispace.
2750 Address age_mark() { return from_space_.age_mark(); }
2751 // Set the age mark in the active semispace.
2752 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2753
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002754 // The allocation top address.
2755 Address* allocation_top_address() { return allocation_info_.top_address(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002756
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002757 // The allocation limit address.
2758 Address* allocation_limit_address() {
2759 return allocation_info_.limit_address();
2760 }
2761
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002762 MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
2763 int size_in_bytes, AllocationAlignment alignment));
2764
2765 MUST_USE_RESULT INLINE(
2766 AllocationResult AllocateRawUnaligned(int size_in_bytes));
2767
2768 MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
2769 int size_in_bytes, AllocationAlignment alignment));
2770
2771 MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
2772 int size_in_bytes, AllocationAlignment alignment);
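
  // A minimal usage sketch (illustrative only; it assumes AllocationResult
  // provides To() for extracting the object and that kWordAligned is a valid
  // AllocationAlignment, and it omits the GC-and-retry path a real caller
  // needs on failure):
  //
  //   AllocationResult result =
  //       new_space->AllocateRaw(size_in_bytes, kWordAligned);
  //   HeapObject* object = nullptr;
  //   if (!result.To(&object)) {
  //     // Allocation failed; collect garbage and retry.
  //   }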
Steve Blocka7e24c12009-10-30 11:49:00 +00002773
2774 // Reset the allocation pointer to the beginning of the active semispace.
2775 void ResetAllocationInfo();
Steve Blocka7e24c12009-10-30 11:49:00 +00002776
Ben Murdoch097c5b22016-05-18 11:27:45 +01002777 // When inline allocation stepping is active, either because of incremental
2778 // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
2779 // inline allocation every once in a while. This is done by setting
2780 // allocation_info_.limit to be lower than the actual limit and increasing
2781 // it in steps to guarantee that the observers are notified periodically.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002782 void UpdateInlineAllocationLimit(int size_in_bytes);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002783
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002784 void DisableInlineAllocationSteps() {
2785 top_on_previous_step_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002786 UpdateInlineAllocationLimit(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002787 }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002788
Ben Murdoch097c5b22016-05-18 11:27:45 +01002789 // Allows observation of inline allocation. The observer->Step() method gets
2790 // called after every step_size bytes have been allocated (approximately).
2791 // This works by adjusting the allocation limit to a lower value and adjusting
2792 // it after each step.
2793 void AddAllocationObserver(AllocationObserver* observer) override;
2794
2795 void RemoveAllocationObserver(AllocationObserver* observer) override;
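
  // A rough sketch of how an observer might be wired up (illustrative only;
  // MyObserver is a hypothetical subclass, and the constructor/Step()
  // signatures below are assumptions about AllocationObserver):
  //
  //   class MyObserver : public AllocationObserver {
  //    public:
  //     explicit MyObserver(intptr_t step_size)
  //         : AllocationObserver(step_size) {}
  //     void Step(int bytes_allocated, Address soon_object, size_t size)
  //         override {
  //       // Called after roughly step_size bytes of new-space allocation.
  //     }
  //   };
  //
  //   MyObserver observer(128 * KB);
  //   new_space->AddAllocationObserver(&observer);
  //   // ... later ...
  //   new_space->RemoveAllocationObserver(&observer);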
2796
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002797 // Get the extent of the inactive semispace (for use as a marking stack,
2798 // or to zap it). Notice: space-addresses are not necessarily on the
2799 // same page, so FromSpaceStart() might be above FromSpaceEnd().
2800 Address FromSpacePageLow() { return from_space_.page_low(); }
2801 Address FromSpacePageHigh() { return from_space_.page_high(); }
2802 Address FromSpaceStart() { return from_space_.space_start(); }
2803 Address FromSpaceEnd() { return from_space_.space_end(); }
2804
2805 // Get the extent of the active semispace's pages' memory.
2806 Address ToSpaceStart() { return to_space_.space_start(); }
2807 Address ToSpaceEnd() { return to_space_.space_end(); }
2808
Ben Murdoch097c5b22016-05-18 11:27:45 +01002809 inline bool ToSpaceContainsSlow(Address a);
2810 inline bool FromSpaceContainsSlow(Address a);
2811 inline bool ToSpaceContains(Object* o);
2812 inline bool FromSpaceContains(Object* o);
Steve Blocka7e24c12009-10-30 11:49:00 +00002813
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002814 // Try to switch the active semispace to a new, empty page.
2815 // Returns false if this isn't possible or reasonable (i.e., there
2816 // are no pages, or the current page is already empty), or true
2817 // if successful.
2818 bool AddFreshPage();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002819 bool AddFreshPageSynchronized();
Steve Blocka7e24c12009-10-30 11:49:00 +00002820
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002821#ifdef VERIFY_HEAP
Steve Blocka7e24c12009-10-30 11:49:00 +00002822 // Verify the active semispace.
2823 virtual void Verify();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002824#endif
2825
2826#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00002827 // Print the active semispace.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002828 void Print() override { to_space_.Print(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002829#endif
2830
Steve Blocka7e24c12009-10-30 11:49:00 +00002831 // Iterates the active semispace to collect statistics.
2832 void CollectStatistics();
2833 // Reports previously collected statistics of the active semispace.
2834 void ReportStatistics();
2835 // Clears previously collected statistics.
2836 void ClearHistograms();
2837
2838 // Record the allocation or promotion of a heap object. Note that we don't
2839 // record every single allocation, but only those that happen in the
2840 // to space during a scavenge GC.
2841 void RecordAllocation(HeapObject* obj);
2842 void RecordPromotion(HeapObject* obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002843
2844 // Return whether the operation succeeded.
2845 bool CommitFromSpaceIfNeeded() {
2846 if (from_space_.is_committed()) return true;
2847 return from_space_.Commit();
2848 }
2849
2850 bool UncommitFromSpace() {
2851 if (!from_space_.is_committed()) return true;
2852 return from_space_.Uncommit();
2853 }
2854
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002855 bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002856
2857 SemiSpace* active_space() { return &to_space_; }
2858
Ben Murdoch097c5b22016-05-18 11:27:45 +01002859 void PauseAllocationObservers() override;
2860 void ResumeAllocationObservers() override;
2861
Steve Blocka7e24c12009-10-30 11:49:00 +00002862 private:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002863 // Update allocation info to match the current to-space page.
2864 void UpdateAllocationInfo();
2865
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002866 base::Mutex mutex_;
2867
Steve Blocka7e24c12009-10-30 11:49:00 +00002868 // The semispaces.
2869 SemiSpace to_space_;
2870 SemiSpace from_space_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002871 base::VirtualMemory reservation_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002872 int pages_used_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002873
Steve Blocka7e24c12009-10-30 11:49:00 +00002874 // Allocation pointer and limit for normal allocation and allocation during
2875 // mark-compact collection.
2876 AllocationInfo allocation_info_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002877
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002878 Address top_on_previous_step_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002879
Steve Blocka7e24c12009-10-30 11:49:00 +00002880 HistogramInfo* allocated_histogram_;
2881 HistogramInfo* promoted_histogram_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002882
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002883 bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
Steve Blocka7e24c12009-10-30 11:49:00 +00002884
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002885 // If we are doing inline allocation in steps, this method performs the 'step'
2886 // operation. top is the memory address of the bump pointer at the last
2887 // inline allocation (i.e. it determines the number of bytes actually
2888 // allocated since the last step). new_top is the address of the bump pointer
2889 // where the next byte is going to be allocated from. top and new_top may be
2890 // different when we cross a page boundary or reset the space.
2891 void InlineAllocationStep(Address top, Address new_top, Address soon_object,
2892 size_t size);
2893 intptr_t GetNextInlineAllocationStepSize();
2894 void StartNextInlineAllocationStep();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002895
Steve Blocka7e24c12009-10-30 11:49:00 +00002896 friend class SemiSpaceIterator;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002897};
Steve Blocka7e24c12009-10-30 11:49:00 +00002898
Ben Murdoch097c5b22016-05-18 11:27:45 +01002899class PauseAllocationObserversScope {
Steve Blocka7e24c12009-10-30 11:49:00 +00002900 public:
Ben Murdoch097c5b22016-05-18 11:27:45 +01002901 explicit PauseAllocationObserversScope(Heap* heap);
2902 ~PauseAllocationObserversScope();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002903
2904 private:
Ben Murdoch097c5b22016-05-18 11:27:45 +01002905 Heap* heap_;
2906 DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002907};
2908
2909// -----------------------------------------------------------------------------
2910// Compaction space that is used temporarily during compaction.
2911
2912class CompactionSpace : public PagedSpace {
2913 public:
2914 CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
2915 : PagedSpace(heap, id, executable) {}
2916
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002917 bool is_local() override { return true; }
2918
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002919 protected:
2920 // The space is temporary and not included in any snapshots.
2921 bool snapshotable() override { return false; }
2922
2923 MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
2924 int size_in_bytes) override;
2925};
2926
2927
2928// A collection of |CompactionSpace|s used by a single compaction task.
2929class CompactionSpaceCollection : public Malloced {
2930 public:
2931 explicit CompactionSpaceCollection(Heap* heap)
2932 : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
Ben Murdoch097c5b22016-05-18 11:27:45 +01002933 code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002934
2935 CompactionSpace* Get(AllocationSpace space) {
2936 switch (space) {
2937 case OLD_SPACE:
2938 return &old_space_;
2939 case CODE_SPACE:
2940 return &code_space_;
2941 default:
2942 UNREACHABLE();
2943 }
2944 UNREACHABLE();
2945 return nullptr;
2946 }
2947
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002948 private:
2949 CompactionSpace old_space_;
2950 CompactionSpace code_space_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002951};
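
// For illustration, a compaction task would typically fetch the space it
// evacuates into from its collection (sketch only; task setup and error
// handling are omitted):
//
//   CompactionSpaceCollection compaction_spaces(heap);
//   CompactionSpace* target = compaction_spaces.Get(OLD_SPACE);
//   // ... evacuate live objects into *target ...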
2952
2953
2954// -----------------------------------------------------------------------------
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002955// Old object space (includes the old space of objects and code space)
Steve Blocka7e24c12009-10-30 11:49:00 +00002956
2957class OldSpace : public PagedSpace {
2958 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002959 // Creates an old space object. The constructor does not allocate pages
2960 // from OS.
2961 OldSpace(Heap* heap, AllocationSpace id, Executability executable)
2962 : PagedSpace(heap, id, executable) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002963};
2964
2965
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002966// For contiguous spaces, top should be in the space (or at the end) and limit
2967// should be the end of the space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002968#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
2969 SLOW_DCHECK((space).page_low() <= (info).top() && \
2970 (info).top() <= (space).page_high() && \
2971 (info).limit() <= (space).page_high())
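
// Typical use (sketch): checking NewSpace's allocation_info_ against its
// active semispace, e.g.
//   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);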
Steve Blocka7e24c12009-10-30 11:49:00 +00002972
2973
2974// -----------------------------------------------------------------------------
2975// Old space for all map objects
2976
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002977class MapSpace : public PagedSpace {
Steve Blocka7e24c12009-10-30 11:49:00 +00002978 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002979 // Creates a map space object.
2980 MapSpace(Heap* heap, AllocationSpace id)
Ben Murdochda12d292016-06-02 14:46:10 +01002981 : PagedSpace(heap, id, NOT_EXECUTABLE) {}
Ben Murdoch85b71792012-04-11 18:30:58 +01002982
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002983 int RoundSizeDownToObjectAlignment(int size) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002984 if (base::bits::IsPowerOfTwo32(Map::kSize)) {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002985 return RoundDown(size, Map::kSize);
2986 } else {
2987 return (size / Map::kSize) * Map::kSize;
Leon Clarkee46be812010-01-19 14:06:41 +00002988 }
Leon Clarkee46be812010-01-19 14:06:41 +00002989 }
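
  // For illustration (hypothetical numbers): if Map::kSize were 88, a
  // request of 200 bytes would round down to (200 / 88) * 88 = 176 bytes;
  // if it were a power of two such as 64, the same request would round
  // down to 192.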
2990
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002991#ifdef VERIFY_HEAP
2992 void VerifyObject(HeapObject* obj) override;
2993#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002994};
2995
2996
2997// -----------------------------------------------------------------------------
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002998// Large objects ( > Page::kMaxRegularHeapObjectSize ) are allocated and
2999 // managed by the large object space. A large object is allocated from the OS
3000 // heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
Steve Blocka7e24c12009-10-30 11:49:00 +00003001// A large object always starts at offset Page::kObjectStartOffset in a page.
3002// Large objects do not move during garbage collections.
3003
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003004class LargeObjectSpace : public Space {
Steve Blocka7e24c12009-10-30 11:49:00 +00003005 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003006 LargeObjectSpace(Heap* heap, AllocationSpace id);
3007 virtual ~LargeObjectSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00003008
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003009 // Initializes internal data structures.
3010 bool SetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00003011
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003012 // Releases internal resources, frees objects in this space.
3013 void TearDown();
Steve Blocka7e24c12009-10-30 11:49:00 +00003014
Ben Murdoch592a9fc2012-03-05 11:04:45 +00003015 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
3016 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
3017 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
3018 }
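
  // For example, ObjectSizeFor(2 * Page::kPageSize) is
  // Page::kPageSize - Page::kObjectStartOffset, while any chunk no larger
  // than Page::kPageSize + Page::kObjectStartOffset yields 0.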
3019
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003020 // Shared implementation of AllocateRaw, AllocateRawCode and
3021 // AllocateRawFixedArray.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003022 MUST_USE_RESULT AllocationResult
3023 AllocateRaw(int object_size, Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00003024
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003025 // Available bytes for objects in this space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003026 inline intptr_t Available() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00003027
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003028 intptr_t Size() override { return size_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00003029
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003030 intptr_t SizeOfObjects() override { return objects_size_; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003031
3032 // Approximate amount of physical memory committed for this space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003033 size_t CommittedPhysicalMemory() override;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003034
3035 int PageCount() { return page_count_; }
3036
3037 // Finds an object for a given address, returns a Smi if it is not found.
3038 // The function iterates through all objects in this space, may be slow.
3039 Object* FindObject(Address a);
Steve Blocka7e24c12009-10-30 11:49:00 +00003040
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003041 // Finds a large object page containing the given address, returns NULL
Kristian Monsen80d68ea2010-09-08 11:05:35 +01003042 // if such a page doesn't exist.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003043 LargePage* FindPage(Address a);
Steve Blocka7e24c12009-10-30 11:49:00 +00003044
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003045 // Clears the marking state of live objects.
3046 void ClearMarkingStateOfLiveObjects();
3047
Steve Blocka7e24c12009-10-30 11:49:00 +00003048 // Frees unmarked objects.
3049 void FreeUnmarkedObjects();
3050
3051 // Checks whether a heap object is in this space; O(1).
3052 bool Contains(HeapObject* obj);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003053 // Checks whether an address is in the object area in this space. Iterates
3054 // all objects in the space. May be slow.
3055 bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00003056
3057 // Checks whether the space is empty.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003058 bool IsEmpty() { return first_page_ == NULL; }
Steve Blocka7e24c12009-10-30 11:49:00 +00003059
Ben Murdochda12d292016-06-02 14:46:10 +01003060 void AdjustLiveBytes(int by) { objects_size_ += by; }
3061
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003062 LargePage* first_page() { return first_page_; }
3063
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003064#ifdef VERIFY_HEAP
Steve Blocka7e24c12009-10-30 11:49:00 +00003065 virtual void Verify();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003066#endif
3067
3068#ifdef DEBUG
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003069 void Print() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00003070 void ReportStatistics();
3071 void CollectCodeStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00003072#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003073
3074 private:
3075 // The head of the linked list of large object chunks.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003076 LargePage* first_page_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003077 intptr_t size_; // allocated bytes
3078 int page_count_; // number of chunks
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08003079 intptr_t objects_size_; // size of objects
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003080 // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
3081 HashMap chunk_map_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003082
Steve Blocka7e24c12009-10-30 11:49:00 +00003083 friend class LargeObjectIterator;
Steve Blocka7e24c12009-10-30 11:49:00 +00003084};
3085
3086
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003087class LargeObjectIterator : public ObjectIterator {
Steve Blocka7e24c12009-10-30 11:49:00 +00003088 public:
3089 explicit LargeObjectIterator(LargeObjectSpace* space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003090
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003091 HeapObject* Next();
Steve Blocka7e24c12009-10-30 11:49:00 +00003092
3093 // implementation of ObjectIterator.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003094 virtual HeapObject* next_object() { return Next(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00003095
3096 private:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003097 LargePage* current_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003098};
3099
Ben Murdochda12d292016-06-02 14:46:10 +01003100class LargePageIterator BASE_EMBEDDED {
3101 public:
3102 explicit inline LargePageIterator(LargeObjectSpace* space);
3103
3104 inline LargePage* next();
3105
3106 private:
3107 LargePage* next_page_;
3108};
Steve Blocka7e24c12009-10-30 11:49:00 +00003109
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003110// Iterates over the chunks (pages and large object pages) that can contain
Ben Murdochda12d292016-06-02 14:46:10 +01003111// pointers to new space or to evacuation candidates.
3112class MemoryChunkIterator BASE_EMBEDDED {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003113 public:
Ben Murdochda12d292016-06-02 14:46:10 +01003114 enum Mode { ALL, ALL_BUT_MAP_SPACE, ALL_BUT_CODE_SPACE };
3115 inline explicit MemoryChunkIterator(Heap* heap, Mode mode);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003116
3117 // Return NULL when the iterator is done.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003118 inline MemoryChunk* next();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003119
3120 private:
Ben Murdochda12d292016-06-02 14:46:10 +01003121 enum State {
3122 kOldSpaceState,
3123 kMapState,
3124 kCodeState,
3125 kLargeObjectState,
3126 kFinishedState
3127 };
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003128 State state_;
Ben Murdochda12d292016-06-02 14:46:10 +01003129 const Mode mode_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003130 PageIterator old_iterator_;
Ben Murdochda12d292016-06-02 14:46:10 +01003131 PageIterator code_iterator_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003132 PageIterator map_iterator_;
Ben Murdochda12d292016-06-02 14:46:10 +01003133 LargePageIterator lo_iterator_;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01003134};
3135
Steve Block44f0eee2011-05-26 01:26:41 +01003136#ifdef DEBUG
3137struct CommentStatistic {
3138 const char* comment;
3139 int size;
3140 int count;
3141 void Clear() {
3142 comment = NULL;
3143 size = 0;
3144 count = 0;
3145 }
3146 // Must be small, since an iteration is used for lookup.
3147 static const int kMaxComments = 64;
3148};
3149#endif
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003150} // namespace internal
3151} // namespace v8
Steve Block44f0eee2011-05-26 01:26:41 +01003152
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003153#endif // V8_HEAP_SPACES_H_