// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include "src/allocation.h"
#include "src/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
#include "src/flags.h"
#include "src/hashmap.h"
#include "src/list.h"
#include "src/objects.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

class AllocationInfo;
class AllocationObserver;
class CompactionSpace;
class CompactionSpaceCollection;
class FreeList;
class Isolate;
class MemoryAllocator;
class MemoryChunk;
class NewSpacePage;
class Page;
class PagedSpace;
class SemiSpace;
class SkipList;
class SlotsBuffer;
class SlotSet;
class TypedSlotSet;
class Space;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of
// intergenerational references. See heap/store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old space or large object space
// it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old space and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However, we
// still require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.
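//
// Illustrative sketch (not part of the original comment): with the top/limit
// scheme described above, allocation in the special garbage section is a
// simple pointer bump. Assuming |top| and |limit| are the space's fields:
//
//   if (top + size_in_bytes <= limit) {
//     Address result = top;
//     top += size_in_bytes;  // carve the new object off the linear area
//     return HeapObject::FromAddress(result);
//   }
//   // Otherwise a new linear allocation area must be set up (or a GC run).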

// Some assertion macros used in the debugging mode.

#define DCHECK_PAGE_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))

#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
  DCHECK((0 < size) && (size <= code_space->AreaSize()))

#define DCHECK_PAGE_OFFSET(offset) \
  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

 private:
  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1);
    } else {
      return MarkBit(cell_, new_mask);
    }
  }

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  CellType* cell_;
  CellType mask_;

  friend class Marking;
};


// Bitmap is a sequence of cells, each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() { return CellsForLength(kLength); }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  V8_INLINE static uint32_t IndexInCell(uint32_t index) {
    return index & kBitIndexMask;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) { return reinterpret_cast<Address>(this); }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index) {
    MarkBit::CellType mask = 1u << IndexInCell(index);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask);
  }

  static inline void Clear(MemoryChunk* chunk);

  static inline void SetAllBits(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) {
        return false;
      }
    }
    return true;
  }

  // Clears all bits starting from {cell_base_index} up to and excluding
  // {index}. Note that {cell_base_index} is required to be cell aligned.
  void ClearRange(uint32_t cell_base_index, uint32_t index) {
    DCHECK_EQ(IndexInCell(cell_base_index), 0u);
    DCHECK_GE(index, cell_base_index);
    uint32_t start_cell_index = IndexToCell(cell_base_index);
    uint32_t end_cell_index = IndexToCell(index);
    DCHECK_GE(end_cell_index, start_cell_index);
    // Clear all cells till the cell containing the last index.
    for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
      cells()[i] = 0;
    }
    // Clear all bits in the last cell till the last bit before index.
    uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
    cells()[end_cell_index] &= clear_mask;
  }
};

enum FreeListCategoryType {
  kTiniest,
  kTiny,
  kSmall,
  kMedium,
  kLarge,
  kHuge,

  kFirstCategory = kTiniest,
  kLastCategory = kHuge,
  kNumberOfCategories = kLastCategory + 1,
  kInvalidCategory
};

enum FreeMode { kLinkCategory, kDoNotLinkCategory };

// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
 public:
  static const int kSize = kIntSize +      // FreeListCategoryType type_
                           kIntSize +      // int available_
                           kPointerSize +  // FreeSpace* top_
                           kPointerSize +  // FreeListCategory* prev_
                           kPointerSize;   // FreeListCategory* next_

  FreeListCategory()
      : type_(kInvalidCategory),
        available_(0),
        top_(nullptr),
        prev_(nullptr),
        next_(nullptr) {}

  void Initialize(FreeListCategoryType type) {
    type_ = type;
    available_ = 0;
    top_ = nullptr;
    prev_ = nullptr;
    next_ = nullptr;
  }

  void Invalidate();

  void Reset();

  void ResetStats() { Reset(); }

  void RepairFreeList(Heap* heap);

  // Relinks the category into the currently owning free list. Requires that
  // the category is currently unlinked.
  void Relink();

  bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode);

  // Picks a node from the list and stores its size in |node_size|. Returns
  // nullptr if the category is empty.
  FreeSpace* PickNodeFromList(int* node_size);

  // Performs a single try to pick a node of at least |minimum_size| from the
  // category. Stores the actual size in |node_size|. Returns nullptr if no
  // node is found.
  FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size);

  // Picks a node of at least |minimum_size| from the category. Stores the
  // actual size in |node_size|. Returns nullptr if no node is found.
  FreeSpace* SearchForNodeInList(int minimum_size, int* node_size);

  inline FreeList* owner();
  inline bool is_linked();
  bool is_empty() { return top() == nullptr; }
  int available() const { return available_; }

#ifdef DEBUG
  intptr_t SumFreeList();
  int FreeListLength();
#endif

 private:
  // For debug builds we accurately compute free list lengths up until
  // {kVeryLongFreeList} by manually walking the list.
  static const int kVeryLongFreeList = 500;

  inline Page* page();

  FreeSpace* top() { return top_; }
  void set_top(FreeSpace* top) { top_ = top; }
  FreeListCategory* prev() { return prev_; }
  void set_prev(FreeListCategory* prev) { prev_ = prev; }
  FreeListCategory* next() { return next_; }
  void set_next(FreeListCategory* next) { next_ = next; }

  // |type_|: The type of this free list category.
  FreeListCategoryType type_;

  // |available_|: Total available bytes in all blocks of this free list
  // category.
  int available_;

  // |top_|: Points to the top FreeSpace* in the free list category.
  FreeSpace* top_;

  FreeListCategory* prev_;
  FreeListCategory* next_;

  friend class FreeList;
  friend class PagedSpace;
};
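
// Illustrative sketch (hypothetical caller, not part of this header): how a
// free list typically drives the category API declared above, given a linked,
// non-empty FreeListCategory* |category| and a requested |minimum_size|:
//
//   int node_size = 0;
//   FreeSpace* node = category->PickNodeFromList(&node_size);
//   if (node == nullptr) {
//     // Fall back to searching for a block of at least |minimum_size|.
//     node = category->SearchForNodeInList(minimum_size, &node_size);
//   }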

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    EVACUATION_CANDIDATE,
    NEVER_EVACUATE,  // May contain immortal immutables.

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black and
    // a white to grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // A black page has all mark bits set to 1 (black). A black page currently
    // cannot be iterated because it is not swept. Moreover, live bytes are
    // also not updated.
    BLACK_PAGE,

    // This flag is intended to be used for testing. Works only when both
    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
    // are set. It forces the page to become an evacuation candidate at the
    // next candidate selection cycle.
    FORCE_EVACUATION_CANDIDATE_FOR_TESTING,

    // This flag is intended to be used for testing.
    NEVER_ALLOCATE_ON_PAGE,

    // The memory chunk is already logically freed, however the actual freeing
    // still has to be performed.
    PRE_FREED,

    // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
    // has been aborted and needs special handling by the sweeper.
    COMPACTION_WAS_ABORTED,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };

  // |kSweepingDone|: The page state when sweeping is complete or sweeping must
  // not be performed on that page. Sweeper threads that are done with their
  // work will set this value and not touch the page anymore.
  // |kSweepingPending|: This page is ready for parallel sweeping.
  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
  enum ConcurrentSweepingState {
    kSweepingDone,
    kSweepingPending,
    kSweepingInProgress,
  };

  // Every n write barrier invocations we go to runtime even though
  // we could have handled it in generated code. This lets us check
  // whether we have hit the limit and should do some more marking.
  static const int kWriteBarrierCounterGranularity = 500;

  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) | (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;

  static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize  // size_t size
      + kIntptrSize               // intptr_t flags_
      + kPointerSize              // Address area_start_
      + kPointerSize              // Address area_end_
      + 2 * kPointerSize          // base::VirtualMemory reservation_
      + kPointerSize              // Address owner_
      + kPointerSize              // Heap* heap_
      + kIntSize;                 // int progress_bar_

  static const size_t kOldToNewSlotsOffset =
      kLiveBytesOffset + kIntSize;  // int live_byte_count_

  static const size_t kWriteBarrierCounterOffset =
      kOldToNewSlotsOffset + kPointerSize  // SlotSet* old_to_new_slots_;
      + kPointerSize                       // SlotSet* old_to_old_slots_;
      + kPointerSize  // TypedSlotSet* typed_old_to_old_slots_;
      + kPointerSize;  // SkipList* skip_list_;

  static const size_t kMinHeaderSize =
      kWriteBarrierCounterOffset +
      kIntptrSize         // intptr_t write_barrier_counter_
      + kPointerSize      // AtomicValue high_water_mark_
      + kPointerSize      // base::Mutex* mutex_
      + kPointerSize      // base::AtomicWord concurrent_sweeping_
      + 2 * kPointerSize  // AtomicNumber free-list statistics
      + kPointerSize      // AtomicValue next_chunk_
      + kPointerSize      // AtomicValue prev_chunk_
      // FreeListCategory categories_[kNumberOfCategories]
      + FreeListCategory::kSize * kNumberOfCategories;

  // We add some more space to the computed header size to account for missing
  // alignment requirements in our computation.
  // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
  static const size_t kHeaderSize = kMinHeaderSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset =
      kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;

  static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
  static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);

  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }

  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  static inline void UpdateHighWaterMark(Address mark) {
    if (mark == nullptr) return;
    // Need to subtract one from the mark because when a chunk is full the
    // top points to the next address after the chunk, which effectively
    // belongs to another chunk. See the comment to Page::FromAllocationTop.
    MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
    intptr_t old_mark = 0;
    do {
      old_mark = chunk->high_water_mark_.Value();
    } while ((new_mark > old_mark) &&
             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
  }

  Address address() { return reinterpret_cast<Address>(this); }

  bool is_valid() { return address() != NULL; }

  base::Mutex* mutex() { return mutex_; }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether |addr| can be a limit of addresses in this page. It's a
  // limit if it's in the page, or if it's just after the last byte of the
  // page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
    return concurrent_sweeping_;
  }

  // Manage live byte count, i.e., count of bytes in black objects.
  inline void ResetLiveBytes();
  inline void IncrementLiveBytes(int by);

  int LiveBytes() {
    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
    DCHECK(!IsFlagSet(BLACK_PAGE) || live_byte_count_ == 0);
    return live_byte_count_;
  }

  void SetLiveBytes(int live_bytes) {
    if (IsFlagSet(BLACK_PAGE)) return;
    DCHECK_GE(live_bytes, 0);
    DCHECK_LE(static_cast<size_t>(live_bytes), size_);
    live_byte_count_ = live_bytes;
  }

  int write_barrier_counter() {
    return static_cast<int>(write_barrier_counter_);
  }

  void set_write_barrier_counter(int counter) {
    write_barrier_counter_ = counter;
  }

  size_t size() const { return size_; }

  inline Heap* heap() const { return heap_; }

  inline SkipList* skip_list() { return skip_list_; }

  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

  inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
  inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
  inline TypedSlotSet* typed_old_to_old_slots() {
    return typed_old_to_old_slots_;
  }

  void AllocateOldToNewSlots();
  void ReleaseOldToNewSlots();
  void AllocateOldToOldSlots();
  void ReleaseOldToOldSlots();
  void AllocateTypedOldToOldSlots();
  void ReleaseTypedOldToOldSlots();

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() { return static_cast<int>(area_end() - area_start()); }

  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }

  int progress_bar() {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    return progress_bar_;
  }

  void set_progress_bar(int progress_bar) {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
  }

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }

  void PrintMarkbits() { markbits()->Print(); }

  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }

  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask are set to
  // the value in "flags", the rest retain the current value in |flags_|.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }
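
  // Illustrative sketch: the accessors above are plain bit operations on
  // |flags_|. SetFlag(IN_TO_SPACE) ORs in (1 << IN_TO_SPACE), and a page is
  // in new space iff
  //
  //   (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0
  //
  // which is exactly the mask test that InNewSpace() below performs.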

  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }

  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }

  bool IsEvacuationCandidate() {
    DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
    return IsFlagSet(EVACUATION_CANDIDATE);
  }

  bool CanAllocate() {
    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
  }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }

  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

  MemoryChunk* next_chunk() { return next_chunk_.Value(); }

  MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }

  void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }

  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
        kPageHeaderTag) {
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                      kPageHeaderTag);
    } else {
      return nullptr;
    }
  }

  void set_owner(Space* space) {
    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
           kPageHeaderTag);
  }

  bool HasPageHeader() { return owner() != nullptr; }

  void InsertAfter(MemoryChunk* other);
  void Unlink();

 protected:
  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner,
                                 base::VirtualMemory* reservation);

  // Should be called when memory chunk is about to be freed.
  void ReleaseAllocatedMemory();

  base::VirtualMemory* reserved_memory() { return &reservation_; }

  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  base::VirtualMemory reservation_;

  // The identity of the owning space. This is tagged as a failure pointer,
  // but no failure can be in an object, so this can be distinguished from any
  // entry in a fixed array.
  Address owner_;

  Heap* heap_;

  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;

  // Count of bytes marked black on page.
  int live_byte_count_;

  // A single slot set for small pages (of size kPageSize) or an array of slot
  // sets for large pages. In the latter case the number of entries in the
  // array is ceil(size() / kPageSize).
  SlotSet* old_to_new_slots_;
  SlotSet* old_to_old_slots_;
  TypedSlotSet* typed_old_to_old_slots_;

  SkipList* skip_list_;

  intptr_t write_barrier_counter_;

  // Assuming the initial allocation on a page is sequential,
  // count highest number of bytes ever allocated on the page.
  AtomicValue<intptr_t> high_water_mark_;

  base::Mutex* mutex_;

  AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;

  // PagedSpace free-list statistics.
  AtomicNumber<intptr_t> available_in_free_list_;
  AtomicNumber<intptr_t> wasted_memory_;

  // next_chunk_ holds a pointer of type MemoryChunk
  AtomicValue<MemoryChunk*> next_chunk_;
  // prev_chunk_ holds a pointer of type MemoryChunk
  AtomicValue<MemoryChunk*> prev_chunk_;

  FreeListCategory categories_[kNumberOfCategories];

 private:
  void InitializeReservedMemory() { reservation_.Reset(); }

  friend class MemoryAllocator;
  friend class MemoryChunkValidator;
};

// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[
  // This only works if the object is in fact in a page. See also MemoryChunk::
  // FromAddress() and FromAnyAddress().
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Only works for addresses in pointer spaces, not code space.
  inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    return p;
  }

  // Returns the next page in the chain of pages owned by a space.
  inline Page* next_page() {
    DCHECK(next_chunk()->owner() == owner());
    return static_cast<Page*>(next_chunk());
  }
  inline Page* prev_page() {
    DCHECK(prev_chunk()->owner() == owner());
    return static_cast<Page*>(prev_chunk());
  }
  inline void set_next_page(Page* page);
  inline void set_prev_page(Page* page);

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset in this page.
  Address OffsetToAddress(int offset) {
    DCHECK_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------

  // Maximum object size that gets allocated into regular pages. Objects larger
  // than that size are allocated in large object space and are never moved in
  // memory. This also applies to new space allocation, since objects are never
  // migrated from new space to large object space. Takes double alignment into
  // account.
  // TODO(hpayer): This limit should be way smaller but we currently have
  // short living objects >256K.
  static const int kMaxRegularHeapObjectSize = 600 * KB;

  inline void ClearGCFields();

  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, PagedSpace* owner);

  void InitializeAsAnchor(PagedSpace* owner);

  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
  // progress. In particular, when we know that right before this call a
  // sweeper thread was sweeping this page.
  void WaitUntilSweepingCompleted() {
    mutex_->Lock();
    mutex_->Unlock();
    DCHECK(SweepingDone());
  }

  bool SweepingDone() {
    return concurrent_sweeping_state().Value() == kSweepingDone;
  }

  void ResetFreeListStatistics();

  int LiveBytesFromFreeList() {
    return static_cast<int>(area_size() - wasted_memory() -
                            available_in_free_list());
  }

  template <typename Callback>
  inline void ForAllFreeListCategories(Callback callback) {
    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
      callback(&categories_[i]);
    }
  }

  FreeListCategory* free_list_category(FreeListCategoryType type) {
    return &categories_[type];
  }

#define FRAGMENTATION_STATS_ACCESSORS(type, name)         \
  type name() { return name##_.Value(); }                 \
  void set_##name(type name) { name##_.SetValue(name); }  \
  void add_##name(type name) { name##_.Increment(name); }

  FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list)

#undef FRAGMENTATION_STATS_ACCESSORS

#ifdef DEBUG
  void Print();
#endif  // DEBUG

  inline void MarkNeverAllocateForTesting();
  inline void MarkEvacuationCandidate();
  inline void ClearEvacuationCandidate();

 private:
  inline void InitializeFreeListCategories();

  friend class MemoryAllocator;
};


class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

  inline LargePage* next_page() {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) { set_next_chunk(page); }

  // A limit to guarantee that we do not overflow typed slot offset in
  // the old to old remembered set.
  // Note that this limit is higher than what assembler already imposes on
  // x64 and ia32 architectures.
  static const int kMaxCodePageSize = 512 * MB;

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : allocation_observers_(new List<AllocationObserver*>()),
        allocation_observers_paused_(false),
        heap_(heap),
        id_(id),
        executable_(executable),
        committed_(0),
        max_committed_(0) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  virtual void AddAllocationObserver(AllocationObserver* observer) {
    allocation_observers_->Add(observer);
  }

  virtual void RemoveAllocationObserver(AllocationObserver* observer) {
    bool removed = allocation_observers_->RemoveElement(observer);
    USE(removed);
    DCHECK(removed);
  }

  virtual void PauseAllocationObservers() {
    allocation_observers_paused_ = true;
  }

  virtual void ResumeAllocationObservers() {
    allocation_observers_paused_ = false;
  }

  void AllocationStep(Address soon_object, int size);

  // Return the total amount of committed memory for this space, i.e.,
  // allocatable memory and page headers.
  virtual intptr_t CommittedMemory() { return committed_; }

  virtual intptr_t MaximumCommittedMemory() { return max_committed_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  // Approximate amount of physical memory committed for this space.
  virtual size_t CommittedPhysicalMemory() = 0;

  // Return the available bytes without growing.
  virtual intptr_t Available() = 0;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 protected:
  void AccountCommitted(intptr_t bytes) {
    DCHECK_GE(bytes, 0);
    committed_ += bytes;
    if (committed_ > max_committed_) {
      max_committed_ = committed_;
    }
  }

  void AccountUncommitted(intptr_t bytes) {
    DCHECK_GE(bytes, 0);
    committed_ -= bytes;
    DCHECK_GE(committed_, 0);
  }

  v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_;
  bool allocation_observers_paused_;

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;

  // Keeps track of committed memory in a space.
  intptr_t committed_;
  intptr_t max_committed_;
};
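
// Illustrative sketch (hypothetical caller, not part of this header): clients
// such as heap samplers attach and detach allocation observers around the
// code they want to watch:
//
//   space->AddAllocationObserver(observer);  // observer: AllocationObserver*
//   RunInstrumentedWorkload();               // hypothetical workload
//   space->RemoveAllocationObserver(observer);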


class MemoryChunkValidator {
  // Computed offsets should match the compiler generated ones.
  STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
  STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
                offsetof(MemoryChunk, live_byte_count_));
  STATIC_ASSERT(MemoryChunk::kOldToNewSlotsOffset ==
                offsetof(MemoryChunk, old_to_new_slots_));
  STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
                offsetof(MemoryChunk, write_barrier_counter_));

  // Validate our estimates on the header size.
  STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() { TearDown(); }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(size_t requested_size);

  bool valid() { return code_range_ != NULL; }
  Address start() {
    DCHECK(valid());
    return static_cast<Address>(code_range_->address());
  }
  size_t size() {
    DCHECK(valid());
    return code_range_->size();
  }
  bool contains(Address address) {
    if (!valid()) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
                                            const size_t commit_size,
                                            size_t* allocated);
  bool CommitRawMemory(Address start, size_t length);
  bool UncommitRawMemory(Address start, size_t length);
  void FreeRawMemory(Address buf, size_t length);

 private:
  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  base::VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock() : start(0), size(0) {}
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // The global mutex guards free_list_ and allocation_list_ as GC threads may
  // access both lists concurrently with the main thread.
  base::Mutex code_range_mutex_;

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;

  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, returns false.
  bool GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);
  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
  void ReleaseBlock(const FreeBlock* block);

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};
1186
1187
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001188class SkipList {
1189 public:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001190 SkipList() { Clear(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001191
1192 void Clear() {
1193 for (int idx = 0; idx < kSize; idx++) {
1194 starts_[idx] = reinterpret_cast<Address>(-1);
1195 }
1196 }
1197
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001198 Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001199
1200 void AddObject(Address addr, int size) {
1201 int start_region = RegionNumber(addr);
1202 int end_region = RegionNumber(addr + size - kPointerSize);
1203 for (int idx = start_region; idx <= end_region; idx++) {
Ben Murdochda12d292016-06-02 14:46:10 +01001204 if (starts_[idx] > addr) {
1205 starts_[idx] = addr;
1206 } else {
1207 // In the first region, there may already be an object closer to the
1208 // start of the region. Do not change the start in that case. If this
1209 // is not the first region, you probably added overlapping objects.
1210 DCHECK_EQ(start_region, idx);
1211 }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001212 }
1213 }
1214
1215 static inline int RegionNumber(Address addr) {
1216 return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
1217 }
1218
1219 static void Update(Address addr, int size) {
1220 Page* page = Page::FromAddress(addr);
1221 SkipList* list = page->skip_list();
1222 if (list == NULL) {
1223 list = new SkipList();
1224 page->set_skip_list(list);
1225 }
1226
1227 list->AddObject(addr, size);
1228 }
1229
1230 private:
1231 static const int kRegionSizeLog2 = 13;
1232 static const int kRegionSize = 1 << kRegionSizeLog2;
1233 static const int kSize = Page::kPageSize / kRegionSize;
1234
1235 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1236
1237 Address starts_[kSize];
1238};
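// Example (illustrative sketch, not part of the V8 API): how a skip list maps
// an address to a fixed-size region within its page. The page size below is
// an assumption made for the example; the real code uses Page::kPageSize and
// 8 KB regions (kRegionSizeLog2 == 13).
//
//   #include <cstdint>
//   #include <cstdio>
//
//   constexpr uintptr_t kExamplePageSize = uintptr_t{1} << 19;  // assumed
//   constexpr int kExampleRegionSizeLog2 = 13;                  // 8 KB regions
//
//   int ExampleRegionNumber(uintptr_t addr) {
//     return static_cast<int>((addr & (kExamplePageSize - 1)) >>
//                             kExampleRegionSizeLog2);
//   }
//
//   int main() {
//     uintptr_t page_start = uintptr_t{1} << 30;       // hypothetical page base
//     uintptr_t obj = page_start + 3 * 8192 + 16;      // lands in region 3
//     std::printf("%d\n", ExampleRegionNumber(obj));   // prints 3
//     return 0;
//   }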
1239
1240
Steve Blocka7e24c12009-10-30 11:49:00 +00001241// ----------------------------------------------------------------------------
1242// A space acquires chunks of memory from the operating system. The memory
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001243// allocator allocates and deallocates pages for the paged heap spaces and large
1244// pages for large object space.
Steve Blocka7e24c12009-10-30 11:49:00 +00001245//
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001246// Each space has to manage its own pages.
Steve Blocka7e24c12009-10-30 11:49:00 +00001247//
Steve Block44f0eee2011-05-26 01:26:41 +01001248class MemoryAllocator {
Steve Blocka7e24c12009-10-30 11:49:00 +00001249 public:
Ben Murdochda12d292016-06-02 14:46:10 +01001250 enum AllocationMode {
1251 kRegular,
1252 kPooled,
1253 };
1254
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001255 explicit MemoryAllocator(Isolate* isolate);
1256
Steve Blocka7e24c12009-10-30 11:49:00 +00001257 // Initializes its internal bookkeeping structures.
Russell Brenner90bac252010-11-18 13:33:46 -08001258 // Max capacity of the total space and executable memory limit.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001259 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00001260
Steve Block44f0eee2011-05-26 01:26:41 +01001261 void TearDown();
Steve Blocka7e24c12009-10-30 11:49:00 +00001262
Ben Murdochda12d292016-06-02 14:46:10 +01001263 // Allocates either Page or NewSpacePage from the allocator. AllocationMode
1264 // is used to indicate whether pooled allocation, which only works for
1265 // MemoryChunk::kPageSize, should be tried first.
1266 template <typename PageType, MemoryAllocator::AllocationMode mode = kRegular,
1267 typename SpaceType>
1268 PageType* AllocatePage(intptr_t size, SpaceType* owner,
1269 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00001270
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001271 LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
1272 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00001273
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001274 // PreFree logically frees the object, i.e., it takes care of the size
1275 // bookkeeping and calls the allocation callback.
1276 void PreFreeMemory(MemoryChunk* chunk);
1277
1278 // FreeMemory can be called concurrently when PreFree was executed before.
1279 void PerformFreeMemory(MemoryChunk* chunk);
1280
Ben Murdochda12d292016-06-02 14:46:10 +01001281 // Free is a wrapper method. For kRegular AllocationMode it calls PreFree and
1282 // PerformFreeMemory together. For kPooled it will dispatch to pooled free.
1283 template <MemoryAllocator::AllocationMode mode = kRegular>
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001284 void Free(MemoryChunk* chunk);
Steve Blocka7e24c12009-10-30 11:49:00 +00001285
Steve Blocka7e24c12009-10-30 11:49:00 +00001286 // Returns allocated spaces in bytes.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001287 intptr_t Size() { return size_.Value(); }
1288
1289 // Returns allocated executable spaces in bytes.
1290 intptr_t SizeExecutable() { return size_executable_.Value(); }
1291
1292 // Returns the maximum available bytes of heaps.
1293 intptr_t Available() {
1294 intptr_t size = Size();
1295 return capacity_ < size ? 0 : capacity_ - size;
1296 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001297
Russell Brenner90bac252010-11-18 13:33:46 -08001298 // Returns the maximum available executable bytes of heaps.
Steve Block44f0eee2011-05-26 01:26:41 +01001299 intptr_t AvailableExecutable() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001300 intptr_t executable_size = SizeExecutable();
1301 if (capacity_executable_ < executable_size) return 0;
1302 return capacity_executable_ - executable_size;
Russell Brenner90bac252010-11-18 13:33:46 -08001303 }
1304
Steve Blocka7e24c12009-10-30 11:49:00 +00001305 // Returns maximum available bytes that the old space can have.
Steve Block44f0eee2011-05-26 01:26:41 +01001306 intptr_t MaxAvailable() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001307 return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001308 }
1309
1310 // Returns an indication of whether a pointer is in a space that has
1311 // been allocated by this MemoryAllocator.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001312 V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
1313 return address < lowest_ever_allocated_.Value() ||
1314 address >= highest_ever_allocated_.Value();
Steve Blocka7e24c12009-10-30 11:49:00 +00001315 }
1316
Steve Blocka7e24c12009-10-30 11:49:00 +00001317#ifdef DEBUG
1318 // Reports statistic info of the space.
Steve Block44f0eee2011-05-26 01:26:41 +01001319 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00001320#endif
1321
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001322 // Returns a MemoryChunk in which the memory region from commit_area_size to
1323 // reserve_area_size of the chunk area is reserved but not committed; it
1324 // could be committed later by calling MemoryChunk::CommitArea.
1325 MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
1326 intptr_t commit_area_size,
1327 Executability executable, Space* space);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001328
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001329 Address ReserveAlignedMemory(size_t requested, size_t alignment,
1330 base::VirtualMemory* controller);
1331 Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1332 size_t alignment, Executability executable,
1333 base::VirtualMemory* controller);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001334
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001335 bool CommitMemory(Address addr, size_t size, Executability executable);
1336
1337 void FreeMemory(base::VirtualMemory* reservation, Executability executable);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001338 void FreeMemory(Address addr, size_t size, Executability executable);
1339
1340 // Commit a contiguous block of memory from the initial chunk. Assumes that
1341 // the address is not NULL, the size is greater than zero, and that the
1342 // block is contained in the initial chunk. Returns true if it succeeded
1343 // and false otherwise.
1344 bool CommitBlock(Address start, size_t size, Executability executable);
1345
1346 // Uncommit a contiguous block of memory [start..(start+size)[.
1347 // start is not NULL, the size is greater than zero, and the
1348 // block is contained in the initial chunk. Returns true if it succeeded
1349 // and false otherwise.
1350 bool UncommitBlock(Address start, size_t size);
1351
1352 // Zaps a contiguous block of memory [start..(start+size)[ thus
1353 // filling it up with a recognizable non-NULL bit pattern.
1354 void ZapBlock(Address start, size_t size);
1355
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001356 void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001357 size_t size);
1358
1359 void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001360 ObjectSpace space, AllocationAction action);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001361
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001362 void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001363
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001364 bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001365
1366 static int CodePageGuardStartOffset();
1367
1368 static int CodePageGuardSize();
1369
1370 static int CodePageAreaStartOffset();
1371
1372 static int CodePageAreaEndOffset();
1373
1374 static int CodePageAreaSize() {
1375 return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1376 }
1377
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001378 static int PageAreaSize(AllocationSpace space) {
1379 DCHECK_NE(LO_SPACE, space);
1380 return (space == CODE_SPACE) ? CodePageAreaSize()
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001381 : Page::kAllocatableMemory;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001382 }
1383
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001384 MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
1385 Address start, size_t commit_size,
1386 size_t reserved_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001387
1388 private:
Ben Murdochda12d292016-06-02 14:46:10 +01001389 // See AllocatePage for public interface. Note that currently we only support
1390 // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
1391 template <typename SpaceType>
1392 MemoryChunk* AllocatePagePooled(SpaceType* owner);
1393
1394 // Free that chunk into the pool.
1395 void FreePooled(MemoryChunk* chunk);
1396
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001397 Isolate* isolate_;
1398
Steve Blocka7e24c12009-10-30 11:49:00 +00001399 // Maximum space size in bytes.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001400 intptr_t capacity_;
Russell Brenner90bac252010-11-18 13:33:46 -08001401 // Maximum subset of capacity_ that can be executable
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001402 intptr_t capacity_executable_;
Ben Murdochb0fe1622011-05-05 13:52:32 +01001403
Steve Blocka7e24c12009-10-30 11:49:00 +00001404 // Allocated space size in bytes.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001405 AtomicNumber<intptr_t> size_;
Steve Block791712a2010-08-27 10:21:07 +01001406 // Allocated executable space size in bytes.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001407 AtomicNumber<intptr_t> size_executable_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001408
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001409 // We keep the lowest and highest addresses allocated as a quick way
1410 // of determining that pointers are outside the heap. The estimate is
1411 // conservative, i.e. not all addresses in 'allocated' space are allocated
1412 // to our heap. The range is [lowest, highest[, inclusive on the low end
1413 // and exclusive on the high end.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001414 AtomicValue<void*> lowest_ever_allocated_;
1415 AtomicValue<void*> highest_ever_allocated_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001416
Iain Merrick9ac36c92010-09-13 15:29:50 +01001417 struct MemoryAllocationCallbackRegistration {
1418 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
1419 ObjectSpace space,
1420 AllocationAction action)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001421 : callback(callback), space(space), action(action) {}
Iain Merrick9ac36c92010-09-13 15:29:50 +01001422 MemoryAllocationCallback callback;
1423 ObjectSpace space;
1424 AllocationAction action;
1425 };
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001426
Iain Merrick9ac36c92010-09-13 15:29:50 +01001427 // A list of callbacks that are triggered when memory is allocated or freed.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001428 List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;
Iain Merrick9ac36c92010-09-13 15:29:50 +01001429
Steve Blocka7e24c12009-10-30 11:49:00 +00001430 // Initializes pages in a chunk. Returns the first page address.
1431 // This function and GetChunkId() are provided for the mark-compact
1432 // collector to rebuild page headers in the from space, which is
1433 // used as a marking stack and its page headers are destroyed.
Steve Block44f0eee2011-05-26 01:26:41 +01001434 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1435 PagedSpace* owner);
Steve Block6ded16b2010-05-10 14:33:55 +01001436
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001437 void UpdateAllocatedSpaceLimits(void* low, void* high) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001438 // The use of atomic primitives does not guarantee correctness (wrt.
1439 // desired semantics) by default. The loop here ensures that we update the
1440 // values only if they did not change in between.
1441 void* ptr = nullptr;
1442 do {
1443 ptr = lowest_ever_allocated_.Value();
1444 } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
1445 do {
1446 ptr = highest_ever_allocated_.Value();
1447 } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001448 }
1449
Ben Murdochda12d292016-06-02 14:46:10 +01001450 List<MemoryChunk*> chunk_pool_;
1451
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001452 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
Steve Blocka7e24c12009-10-30 11:49:00 +00001453};
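// Example (illustrative sketch, not part of the V8 API): the lock-free
// min/max update that UpdateAllocatedSpaceLimits performs, written with
// std::atomic. The loop only publishes the new bound if no other thread
// installed a better one in the meantime.
//
//   #include <atomic>
//   #include <cstdint>
//
//   std::atomic<uintptr_t> lowest_ever{UINTPTR_MAX};
//
//   void UpdateLowest(uintptr_t low) {
//     uintptr_t current = lowest_ever.load();
//     while (low < current &&
//            !lowest_ever.compare_exchange_weak(current, low)) {
//       // |current| now holds the freshly observed value; retry.
//     }
//   }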
1454
1455
1456// -----------------------------------------------------------------------------
1457// Interface for heap object iterator to be implemented by all object space
1458// object iterators.
1459//
Leon Clarked91b9f72010-01-27 17:25:45 +00001460// NOTE: The space-specific object iterators also implement their own next()
1461// method, which is used to avoid virtual function calls when
Steve Blocka7e24c12009-10-30 11:49:00 +00001462// iterating a specific space.
1463
1464class ObjectIterator : public Malloced {
1465 public:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001466 virtual ~ObjectIterator() {}
Steve Blocka7e24c12009-10-30 11:49:00 +00001467
Steve Blocka7e24c12009-10-30 11:49:00 +00001468 virtual HeapObject* next_object() = 0;
1469};
1470
1471
1472// -----------------------------------------------------------------------------
1473// Heap object iterator in new/old/map spaces.
1474//
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001475// A HeapObjectIterator iterates objects from the bottom of the given space
1476// to its top or from the bottom of the given page to its top.
Steve Blocka7e24c12009-10-30 11:49:00 +00001477//
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001478// If objects are allocated in the page during iteration the iterator may
1479// or may not iterate over those objects. The caller must create a new
1480// iterator in order to be sure to visit these new objects.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001481class HeapObjectIterator : public ObjectIterator {
Steve Blocka7e24c12009-10-30 11:49:00 +00001482 public:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001483 // Creates a new object iterator in a given space.
Steve Blocka7e24c12009-10-30 11:49:00 +00001484 explicit HeapObjectIterator(PagedSpace* space);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001485 explicit HeapObjectIterator(Page* page);
Steve Blocka7e24c12009-10-30 11:49:00 +00001486
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001487 // Advance to the next object, skipping free spaces and other fillers and
1488 // skipping the special garbage section of which there is one per space.
1489 // Returns NULL when the iteration has ended.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001490 inline HeapObject* Next();
1491 inline HeapObject* next_object() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00001492
1493 private:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001494 enum PageMode { kOnePageOnly, kAllPagesInSpace };
Steve Blocka7e24c12009-10-30 11:49:00 +00001495
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001496 Address cur_addr_; // Current iteration point.
1497 Address cur_end_; // End iteration point.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001498 PagedSpace* space_;
1499 PageMode page_mode_;
Leon Clarked91b9f72010-01-27 17:25:45 +00001500
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001501 // Fast (inlined) path of next().
1502 inline HeapObject* FromCurrentPage();
Leon Clarked91b9f72010-01-27 17:25:45 +00001503
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001504 // Slow path of next(), goes into the next page. Returns false if the
1505 // iteration has ended.
1506 bool AdvanceToNextPage();
Steve Blocka7e24c12009-10-30 11:49:00 +00001507
1508 // Initializes fields.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001509 inline void Initialize(PagedSpace* owner, Address start, Address end,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001510 PageMode mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00001511};
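// Example (illustrative sketch, not part of the V8 API): walking all objects
// of a paged space. The heap accessor below is an assumption for the example;
// any PagedSpace works.
//
//   HeapObjectIterator it(heap->old_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // Visit |obj|. Objects allocated while iterating may or may not be
//     // returned by the iterator.
//   }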
1512
1513
1514// -----------------------------------------------------------------------------
1515// A PageIterator iterates the pages in a paged space.
Steve Blocka7e24c12009-10-30 11:49:00 +00001516
1517class PageIterator BASE_EMBEDDED {
1518 public:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001519 explicit inline PageIterator(PagedSpace* space);
Steve Blocka7e24c12009-10-30 11:49:00 +00001520
1521 inline bool has_next();
1522 inline Page* next();
1523
1524 private:
1525 PagedSpace* space_;
1526 Page* prev_page_; // Previous page returned.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001527 // Next page that will be returned. Cached here so that we can use this
1528 // iterator for operations that deallocate pages.
1529 Page* next_page_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001530};
1531
1532
1533// -----------------------------------------------------------------------------
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001534// A space has a circular list of pages. The next page can be accessed via a
1535// Page::next_page() call.
Steve Blocka7e24c12009-10-30 11:49:00 +00001536
1537// An abstraction of allocation and relocation pointers in a page-structured
1538// space.
1539class AllocationInfo {
1540 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001541 AllocationInfo() : top_(nullptr), limit_(nullptr) {}
1542 AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
1543
1544 void Reset(Address top, Address limit) {
1545 set_top(top);
1546 set_limit(limit);
1547 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001548
1549 INLINE(void set_top(Address top)) {
1550 SLOW_DCHECK(top == NULL ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001551 (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001552 top_ = top;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001553 }
1554
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001555 INLINE(Address top()) const {
1556 SLOW_DCHECK(top_ == NULL ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001557 (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001558 return top_;
1559 }
1560
1561 Address* top_address() { return &top_; }
1562
1563 INLINE(void set_limit(Address limit)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001564 limit_ = limit;
1565 }
1566
1567 INLINE(Address limit()) const {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001568 return limit_;
1569 }
1570
1571 Address* limit_address() { return &limit_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001572
1573#ifdef DEBUG
1574 bool VerifyPagedAllocation() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001575 return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
1576 (top_ <= limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001577 }
1578#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001579
1580 private:
1581 // Current allocation top.
1582 Address top_;
1583 // Current allocation limit.
1584 Address limit_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001585};
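// Example (illustrative sketch, not part of the V8 API): bump-pointer
// allocation over a top/limit pair like the one AllocationInfo models. All
// names below are invented for the example.
//
//   #include <cstddef>
//   #include <cstdint>
//
//   struct ExampleLinearArea {
//     uintptr_t top;    // next free byte
//     uintptr_t limit;  // one past the last usable byte
//   };
//
//   // Returns the start of the block, or 0 when the caller must take the
//   // slow path (e.g., refill from a free list).
//   uintptr_t BumpAllocate(ExampleLinearArea* area, size_t size_in_bytes) {
//     if (area->limit - area->top < size_in_bytes) return 0;
//     uintptr_t result = area->top;
//     area->top += size_in_bytes;
//     return result;
//   }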
1586
1587
1588// An abstraction of the accounting statistics of a page-structured space.
Steve Blocka7e24c12009-10-30 11:49:00 +00001589//
1590// The stats are only set by functions that ensure they stay balanced. These
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001591// functions increase or decrease one of the non-capacity stats in conjunction
1592// with capacity, or else they always balance increases and decreases to the
1593// non-capacity stats.
Steve Blocka7e24c12009-10-30 11:49:00 +00001594class AllocationStats BASE_EMBEDDED {
1595 public:
1596 AllocationStats() { Clear(); }
1597
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001598 // Zero out all the allocation statistics (i.e., no capacity).
Steve Blocka7e24c12009-10-30 11:49:00 +00001599 void Clear() {
1600 capacity_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001601 max_capacity_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001602 size_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001603 }
1604
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001605 void ClearSize() { size_ = capacity_; }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001606
Steve Blocka7e24c12009-10-30 11:49:00 +00001607 // Accessors for the allocation statistics.
Ben Murdochf87a2032010-10-22 12:50:53 +01001608 intptr_t Capacity() { return capacity_; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001609 intptr_t MaxCapacity() { return max_capacity_; }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001610 intptr_t Size() {
1611 CHECK_GE(size_, 0);
1612 return size_;
1613 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001614
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001615 // Grow the space by adding available bytes. They are initially marked as
1616 // being in use (part of the size), but will normally be immediately freed,
1617 // putting them on the free list and removing them from size_.
Steve Blocka7e24c12009-10-30 11:49:00 +00001618 void ExpandSpace(int size_in_bytes) {
1619 capacity_ += size_in_bytes;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001620 size_ += size_in_bytes;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001621 if (capacity_ > max_capacity_) {
1622 max_capacity_ = capacity_;
1623 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001624 CHECK(size_ >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001625 }
1626
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001627 // Shrink the space by removing available bytes. Since shrinking is done
1628 // during sweeping, bytes have been marked as being in use (part of the size)
1629 // and are hereby freed.
Steve Blocka7e24c12009-10-30 11:49:00 +00001630 void ShrinkSpace(int size_in_bytes) {
1631 capacity_ -= size_in_bytes;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001632 size_ -= size_in_bytes;
Ben Murdochda12d292016-06-02 14:46:10 +01001633 CHECK_GE(size_, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001634 }
1635
1636 // Allocate from available bytes (available -> size).
Ben Murdochf87a2032010-10-22 12:50:53 +01001637 void AllocateBytes(intptr_t size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001638 size_ += size_in_bytes;
Ben Murdochda12d292016-06-02 14:46:10 +01001639 CHECK_GE(size_, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001640 }
1641
1642 // Free allocated bytes, making them available (size -> available).
Ben Murdochf87a2032010-10-22 12:50:53 +01001643 void DeallocateBytes(intptr_t size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001644 size_ -= size_in_bytes;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001645 CHECK_GE(size_, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001646 }
1647
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001648 // Merge {other} into {this}.
1649 void Merge(const AllocationStats& other) {
1650 capacity_ += other.capacity_;
1651 size_ += other.size_;
1652 if (other.max_capacity_ > max_capacity_) {
1653 max_capacity_ = other.max_capacity_;
1654 }
1655 CHECK_GE(size_, 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001656 }
1657
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001658 void DecreaseCapacity(intptr_t size_in_bytes) {
1659 capacity_ -= size_in_bytes;
1660 CHECK_GE(capacity_, 0);
1661 CHECK_GE(capacity_, size_);
1662 }
1663
1664 void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
1665
Steve Blocka7e24c12009-10-30 11:49:00 +00001666 private:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001667 // |capacity_|: The number of object-area bytes (i.e., not including page
1668 // bookkeeping structures) currently in the space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001669 intptr_t capacity_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001670
1671 // |max_capacity_|: The maximum capacity ever observed.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001672 intptr_t max_capacity_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001673
1674 // |size_|: The number of allocated bytes.
Ben Murdochf87a2032010-10-22 12:50:53 +01001675 intptr_t size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001676};
1677
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001678// A free list maintaining free blocks of memory. The free list is organized in
1679// a way to encourage objects allocated around the same time to be near each
1680// other. The normal way to allocate is intended to be by bumping a 'top'
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001681// pointer until it hits a 'limit' pointer. When the limit is hit we need to
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001682// find a new space to allocate from. This is done with the free list, which is
1683// divided up into rough categories to cut down on waste. Having finer
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001684// categories would scatter allocation more.
1685
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001686// The free list is organized in categories as follows:
Ben Murdochda12d292016-06-02 14:46:10 +01001687// kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
1688// allocation when categories >= small do not have entries anymore.
1689// 11-31 words (tiny): The tiny blocks are only used for allocation when
1690// categories >= small do not have entries anymore.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001691// 32-255 words (small): Used for allocating free space between 1-31 words in
1692// size.
1693// 256-2047 words (medium): Used for allocating free space between 32-255 words
1694// in size.
1695// 2048-16383 words (large): Used for allocating free space between 256-2047
1696// words in size.
1697// At least 16384 words (huge): This list is for objects of 2048 words or
1698// larger. Empty pages are also added to this list.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001699class FreeList {
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001700 public:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001701 // This method returns how much memory can be allocated after freeing
1702 // maximum_freed memory.
1703 static inline int GuaranteedAllocatable(int maximum_freed) {
Ben Murdochda12d292016-06-02 14:46:10 +01001704 if (maximum_freed <= kTiniestListMax) {
1705 // Since we are not iterating over all list entries, we cannot guarantee
1706 // that we can find the maximum freed block in that free list.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001707 return 0;
Ben Murdochda12d292016-06-02 14:46:10 +01001708 } else if (maximum_freed <= kTinyListMax) {
1709 return kTinyAllocationMax;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001710 } else if (maximum_freed <= kSmallListMax) {
1711 return kSmallAllocationMax;
1712 } else if (maximum_freed <= kMediumListMax) {
1713 return kMediumAllocationMax;
1714 } else if (maximum_freed <= kLargeListMax) {
1715 return kLargeAllocationMax;
1716 }
1717 return maximum_freed;
1718 }
1719
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001720 explicit FreeList(PagedSpace* owner);
1721
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001722 // Adds a node on the free list. The block of size {size_in_bytes} starting
1723 // at {start} is placed on the free list. The return value is the number of
1724 // bytes that were not added to the free list, because the freed memory block
1725 // was too small. Bookkeeping information will be written to the block, i.e.,
1726 // its contents will be destroyed. The start address should be word aligned,
1727 // and the size should be a non-zero multiple of the word size.
Ben Murdochda12d292016-06-02 14:46:10 +01001728 int Free(Address start, int size_in_bytes, FreeMode mode);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001729
1730 // Allocate a block of size {size_in_bytes} from the free list. The block is
1731 // uninitialized. A failure is returned if no block is available. The size
1732 // should be a non-zero multiple of the word size.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001733 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1734
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001735 // Clear the free list.
1736 void Reset();
1737
Ben Murdochda12d292016-06-02 14:46:10 +01001738 void ResetStats() {
1739 wasted_bytes_.SetValue(0);
1740 ForAllFreeListCategories(
1741 [](FreeListCategory* category) { category->ResetStats(); });
1742 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001743
1744 // Return the number of bytes available on the free list.
1745 intptr_t Available() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001746 intptr_t available = 0;
Ben Murdochda12d292016-06-02 14:46:10 +01001747 ForAllFreeListCategories([&available](FreeListCategory* category) {
1748 available += category->available();
1749 });
Ben Murdoch097c5b22016-05-18 11:27:45 +01001750 return available;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001751 }
1752
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001753 bool IsEmpty() {
Ben Murdochda12d292016-06-02 14:46:10 +01001754 bool empty = true;
1755 ForAllFreeListCategories([&empty](FreeListCategory* category) {
1756 if (!category->is_empty()) empty = false;
1757 });
1758 return empty;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001759 }
1760
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001761 // Used after booting the VM.
1762 void RepairLists(Heap* heap);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001763
Ben Murdochda12d292016-06-02 14:46:10 +01001764 intptr_t EvictFreeListItems(Page* page);
1765 bool ContainsPageFreeListItems(Page* page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001766
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001767 PagedSpace* owner() { return owner_; }
Ben Murdochda12d292016-06-02 14:46:10 +01001768 intptr_t wasted_bytes() { return wasted_bytes_.Value(); }
1769
1770 template <typename Callback>
1771 void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
1772 FreeListCategory* current = categories_[type];
1773 while (current != nullptr) {
1774 FreeListCategory* next = current->next();
1775 callback(current);
1776 current = next;
1777 }
1778 }
1779
1780 template <typename Callback>
1781 void ForAllFreeListCategories(Callback callback) {
1782 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1783 ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
1784 }
1785 }
1786
1787 bool AddCategory(FreeListCategory* category);
1788 void RemoveCategory(FreeListCategory* category);
1789 void PrintCategories(FreeListCategoryType type);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001790
1791#ifdef DEBUG
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001792 intptr_t SumFreeLists();
1793 bool IsVeryLong();
1794#endif
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001795
1796 private:
Ben Murdochda12d292016-06-02 14:46:10 +01001797 class FreeListCategoryIterator {
1798 public:
1799 FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
1800 : current_(free_list->categories_[type]) {}
1801
1802 bool HasNext() { return current_ != nullptr; }
1803
1804 FreeListCategory* Next() {
1805 DCHECK(HasNext());
1806 FreeListCategory* tmp = current_;
1807 current_ = current_->next();
1808 return tmp;
1809 }
1810
1811 private:
1812 FreeListCategory* current_;
1813 };
1814
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001815 // The size range of blocks, in bytes.
1816 static const int kMinBlockSize = 3 * kPointerSize;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001817 static const int kMaxBlockSize = Page::kAllocatableMemory;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001818
Ben Murdochda12d292016-06-02 14:46:10 +01001819 static const int kTiniestListMax = 0xa * kPointerSize;
1820 static const int kTinyListMax = 0x1f * kPointerSize;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001821 static const int kSmallListMax = 0xff * kPointerSize;
1822 static const int kMediumListMax = 0x7ff * kPointerSize;
1823 static const int kLargeListMax = 0x3fff * kPointerSize;
Ben Murdochda12d292016-06-02 14:46:10 +01001824 static const int kTinyAllocationMax = kTiniestListMax;
1825 static const int kSmallAllocationMax = kTinyListMax;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001826 static const int kMediumAllocationMax = kSmallListMax;
1827 static const int kLargeAllocationMax = kMediumListMax;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001828
1829 FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001830
Ben Murdochda12d292016-06-02 14:46:10 +01001831 // Walks all available categories for a given |type| and tries to retrieve
1832 // a node. Returns nullptr if the category is empty.
1833 FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size);
1834
1835 // Tries to retrieve a node from the first category in a given |type|.
1836 // Returns nullptr if the category is empty.
1837 FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size,
1838 int minimum_size);
1839
1840 // Searches a given |type| for a node of at least |minimum_size|.
1841 FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size,
1842 int minimum_size);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001843
1844 FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
Ben Murdochda12d292016-06-02 14:46:10 +01001845 if (size_in_bytes <= kTiniestListMax) {
1846 return kTiniest;
1847 } else if (size_in_bytes <= kTinyListMax) {
1848 return kTiny;
1849 } else if (size_in_bytes <= kSmallListMax) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001850 return kSmall;
1851 } else if (size_in_bytes <= kMediumListMax) {
1852 return kMedium;
1853 } else if (size_in_bytes <= kLargeListMax) {
1854 return kLarge;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001855 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001856 return kHuge;
1857 }
1858
Ben Murdochda12d292016-06-02 14:46:10 +01001859 // The tiny categories are not used for fast allocation.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001860 FreeListCategoryType SelectFastAllocationFreeListCategoryType(
1861 size_t size_in_bytes) {
1862 if (size_in_bytes <= kSmallAllocationMax) {
1863 return kSmall;
1864 } else if (size_in_bytes <= kMediumAllocationMax) {
1865 return kMedium;
1866 } else if (size_in_bytes <= kLargeAllocationMax) {
1867 return kLarge;
1868 }
1869 return kHuge;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001870 }
1871
Ben Murdochda12d292016-06-02 14:46:10 +01001872 FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
1873
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001874 PagedSpace* owner_;
Ben Murdochda12d292016-06-02 14:46:10 +01001875 AtomicNumber<intptr_t> wasted_bytes_;
1876 FreeListCategory* categories_[kNumberOfCategories];
1877
1878 friend class FreeListCategory;
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001879
1880 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1881};
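// Example (illustrative sketch, not part of the V8 API): mapping a block size
// to a free-list category with the thresholds used above. The 8-byte word
// size is an assumption for the example; the real code uses kPointerSize.
//
//   #include <cstddef>
//
//   enum ExampleCategory { kTiniest, kTiny, kSmall, kMedium, kLarge, kHuge };
//
//   ExampleCategory SelectCategory(size_t size_in_bytes) {
//     const size_t kWord = 8;                                    // assumed
//     if (size_in_bytes <= 0xa * kWord) return kTiniest;         // <= 10 words
//     if (size_in_bytes <= 0x1f * kWord) return kTiny;           // <= 31 words
//     if (size_in_bytes <= 0xff * kWord) return kSmall;          // <= 255 words
//     if (size_in_bytes <= 0x7ff * kWord) return kMedium;        // <= 2047 words
//     if (size_in_bytes <= 0x3fff * kWord) return kLarge;        // <= 16383 words
//     return kHuge;
//   }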
1882
1883
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001884class AllocationResult {
1885 public:
1886 // Implicit constructor from Object*.
1887 AllocationResult(Object* object) // NOLINT
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001888 : object_(object) {
1889 // AllocationResults can't return Smis, which are used to represent
1890 // failure and the space to retry in.
1891 CHECK(!object->IsSmi());
1892 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001893
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001894 AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001895
1896 static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
1897 return AllocationResult(space);
1898 }
1899
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001900 inline bool IsRetry() { return object_->IsSmi(); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001901
1902 template <typename T>
1903 bool To(T** obj) {
1904 if (IsRetry()) return false;
1905 *obj = T::cast(object_);
1906 return true;
1907 }
1908
1909 Object* ToObjectChecked() {
1910 CHECK(!IsRetry());
1911 return object_;
1912 }
1913
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001914 inline AllocationSpace RetrySpace();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001915
1916 private:
1917 explicit AllocationResult(AllocationSpace space)
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001918 : object_(Smi::FromInt(static_cast<int>(space))) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001919
1920 Object* object_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001921};
1922
1923
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001924STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
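// Example (illustrative sketch, not part of the V8 API): the typical pattern
// for consuming an AllocationResult. AllocateRawUnaligned stands in for any
// space-specific allocation entry point.
//
//   AllocationResult result = space->AllocateRawUnaligned(size_in_bytes);
//   HeapObject* object = nullptr;
//   if (result.To(&object)) {
//     // Allocation succeeded; |object| points at uninitialized memory.
//   } else {
//     // Allocation failed; result.RetrySpace() names the space to retry in
//     // after a GC has freed memory.
//   }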
1925
1926
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001927// LocalAllocationBuffer represents a linear allocation area that is created
1928// from a given {AllocationResult} and can be used to allocate memory without
1929// synchronization.
1930//
1931// The buffer is properly closed upon destruction and reassignment.
1932// Example:
1933// {
1934// AllocationResult result = ...;
1935// LocalAllocationBuffer a(heap, result, size);
1936// LocalAllocationBuffer b = a;
1937// CHECK(!a.IsValid());
1938// CHECK(b.IsValid());
1939// // {a} is invalid now and cannot be used for further allocations.
1940// }
1941// // Since {b} went out of scope, the LAB is closed, resulting in creating a
1942// // filler object for the remaining area.
1943class LocalAllocationBuffer {
1944 public:
1945 // Indicates that a buffer cannot be used for allocations anymore. Can result
1946 // from either reassigning a buffer, or trying to construct it from an
1947 // invalid {AllocationResult}.
1948 static inline LocalAllocationBuffer InvalidBuffer();
1949
1950 // Creates a new LAB from a given {AllocationResult}. Results in
1951 // InvalidBuffer if the result indicates a retry.
1952 static inline LocalAllocationBuffer FromResult(Heap* heap,
1953 AllocationResult result,
1954 intptr_t size);
1955
1956 ~LocalAllocationBuffer() { Close(); }
1957
1958 // Convert to C++11 move-semantics once allowed by the style guide.
1959 LocalAllocationBuffer(const LocalAllocationBuffer& other);
1960 LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
1961
1962 MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
1963 int size_in_bytes, AllocationAlignment alignment);
1964
1965 inline bool IsValid() { return allocation_info_.top() != nullptr; }
1966
1967 // Try to merge LABs, which is only possible when they are adjacent in memory.
1968 // Returns true if the merge was successful, false otherwise.
1969 inline bool TryMerge(LocalAllocationBuffer* other);
1970
1971 private:
1972 LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
1973
1974 void Close();
1975
1976 Heap* heap_;
1977 AllocationInfo allocation_info_;
1978};
1979
Steve Blocka7e24c12009-10-30 11:49:00 +00001980class PagedSpace : public Space {
1981 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001982 static const intptr_t kCompactionMemoryWanted = 500 * KB;
Steve Blocka7e24c12009-10-30 11:49:00 +00001983
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001984 // Creates a space with an id.
1985 PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
1986
1987 ~PagedSpace() override { TearDown(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001988
1989 // Set up the space using the given address range of virtual memory (from
1990 // the memory allocator's initial chunk) if possible. If the block of
1991 // addresses is not big enough to contain a single page-aligned page, a
1992 // fresh chunk will be allocated.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001993 bool SetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00001994
1995 // Returns true if the space has been successfully set up and not
1996 // subsequently torn down.
Ben Murdoch3ef787d2012-04-12 10:51:47 +01001997 bool HasBeenSetUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00001998
Steve Blocka7e24c12009-10-30 11:49:00 +00001999 // Checks whether an object/address is in this space.
2000 inline bool Contains(Address a);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002001 inline bool Contains(Object* o);
2002 bool ContainsSlow(Address addr);
Steve Blocka7e24c12009-10-30 11:49:00 +00002003
2004 // Given an address occupied by a live object, return that object if it is
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002005 // in this space, or a Smi if it is not. The implementation iterates over
2006 // objects in the page containing the address; the cost is linear in the
2007 // number of objects in the page. It may be slow.
2008 Object* FindObject(Address addr);
2009
2010 // During boot the free_space_map is created, and afterwards we may need
2011 // to write it into the free list nodes that were already created.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002012 void RepairFreeListsAfterDeserialization();
Steve Blocka7e24c12009-10-30 11:49:00 +00002013
Ben Murdoch85b71792012-04-11 18:30:58 +01002014 // Prepares for a mark-compact GC.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002015 void PrepareForMarkCompact();
Ben Murdoch85b71792012-04-11 18:30:58 +01002016
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002017 // Current capacity without growing (Size() + Available()).
Ben Murdochf87a2032010-10-22 12:50:53 +01002018 intptr_t Capacity() { return accounting_stats_.Capacity(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002019
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002020 // Approximate amount of physical memory committed for this space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002021 size_t CommittedPhysicalMemory() override;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002022
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002023 void ResetFreeListStatistics();
2024
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002025 // Sets the capacity, the available space and the wasted space to zero.
2026 // The stats are rebuilt during sweeping by adding each page to the
2027 // capacity and the size when it is encountered. As free spaces are
2028 // discovered during the sweeping they are subtracted from the size and added
2029 // to the available and wasted totals.
2030 void ClearStats() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002031 accounting_stats_.ClearSize();
2032 free_list_.ResetStats();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002033 ResetFreeListStatistics();
2034 }
2035
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002036 // Available bytes without growing. These are the bytes on the free list.
2037 // The bytes in the linear allocation area are not included in this total
2038 // because updating the stats would slow down allocation. New pages are
2039 // immediately added to the free list so they show up here.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002040 intptr_t Available() override { return free_list_.Available(); }
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002041
2042 // Allocated bytes in this space. Garbage bytes that were not found due to
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002043 // concurrent sweeping are counted as being allocated! The bytes in the
2044 // current linear allocation area (between top and limit) are also counted
2045 // here.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002046 intptr_t Size() override { return accounting_stats_.Size(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002047
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002048 // As size, but the bytes in lazily swept pages are estimated and the bytes
2049 // in the current linear allocation area are not included.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002050 intptr_t SizeOfObjects() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00002051
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002052 // Wasted bytes in this space. These are just the bytes that were thrown away
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002053 // due to being too small to use for allocation.
2054 virtual intptr_t Waste() { return free_list_.wasted_bytes(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002055
2056 // Returns the allocation pointer in this space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002057 Address top() { return allocation_info_.top(); }
2058 Address limit() { return allocation_info_.limit(); }
2059
2060 // The allocation top address.
2061 Address* allocation_top_address() { return allocation_info_.top_address(); }
2062
2063 // The allocation limit address.
2064 Address* allocation_limit_address() {
2065 return allocation_info_.limit_address();
2066 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002067
Ben Murdochda12d292016-06-02 14:46:10 +01002068 enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
2069
Steve Blocka7e24c12009-10-30 11:49:00 +00002070 // Allocate the requested number of bytes in the space if possible, return a
Ben Murdochda12d292016-06-02 14:46:10 +01002071 // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
2072 // to be manually updated later.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002073 MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
Ben Murdochda12d292016-06-02 14:46:10 +01002074 int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002075
2076 MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
2077 int size_in_bytes);
2078
2079 // Allocate the requested number of bytes in the space double aligned if
2080 // possible, return a failure object if not.
2081 MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
2082 int size_in_bytes, AllocationAlignment alignment);
2083
2084 // Allocate the requested number of bytes in the space and consider allocation
2085 // alignment if needed.
2086 MUST_USE_RESULT inline AllocationResult AllocateRaw(
2087 int size_in_bytes, AllocationAlignment alignment);
Leon Clarkee46be812010-01-19 14:06:41 +00002088
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002089 // Give a block of memory to the space's free list. It might be added to
2090 // the free list or accounted as waste.
2091 // If add_to_freelist is false then just accounting stats are updated and
2092 // no attempt to add area to free list is made.
2093 int Free(Address start, int size_in_bytes) {
Ben Murdochda12d292016-06-02 14:46:10 +01002094 int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002095 accounting_stats_.DeallocateBytes(size_in_bytes);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002096 return size_in_bytes - wasted;
2097 }
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002098
Ben Murdochda12d292016-06-02 14:46:10 +01002099 int UnaccountedFree(Address start, int size_in_bytes) {
2100 int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
2101 return size_in_bytes - wasted;
2102 }
2103
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002104 void ResetFreeList() { free_list_.Reset(); }
2105
Steve Block6ded16b2010-05-10 14:33:55 +01002106 // Set space allocation info.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002107 void SetTopAndLimit(Address top, Address limit) {
2108 DCHECK(top == limit ||
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002109 Page::FromAddress(top) == Page::FromAddress(limit - 1));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002110 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002111 allocation_info_.Reset(top, limit);
Steve Block6ded16b2010-05-10 14:33:55 +01002112 }
2113
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002114 // Empty space allocation info, returning unused area to free list.
2115 void EmptyAllocationInfo() {
2116 // Mark the old linear allocation area with a free space map so it can be
2117 // skipped when scanning the heap.
2118 int old_linear_size = static_cast<int>(limit() - top());
2119 Free(top(), old_linear_size);
2120 SetTopAndLimit(NULL, NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00002121 }
2122
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002123 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
2124
2125 void IncreaseCapacity(int size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002126
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002127 // Releases an unused page and shrinks the space.
Ben Murdochda12d292016-06-02 14:46:10 +01002128 void ReleasePage(Page* page);
Steve Blocka7e24c12009-10-30 11:49:00 +00002129
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002130 // The dummy page that anchors the linked list of pages.
2131 Page* anchor() { return &anchor_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002132
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002133#ifdef VERIFY_HEAP
2134 // Verify integrity of this space.
2135 virtual void Verify(ObjectVisitor* visitor);
2136
2137 // Overridden by subclasses to verify space-specific object
2138 // properties (e.g., only maps or free-list nodes are in map space).
2139 virtual void VerifyObject(HeapObject* obj) {}
2140#endif
2141
Steve Blocka7e24c12009-10-30 11:49:00 +00002142#ifdef DEBUG
2143 // Print meta info and objects in this space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002144 void Print() override;
Steve Blocka7e24c12009-10-30 11:49:00 +00002145
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002146 // Reports statistics for the space
2147 void ReportStatistics();
2148
Steve Blocka7e24c12009-10-30 11:49:00 +00002149 // Report code object related statistics
2150 void CollectCodeStatistics();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002151 static void ReportCodeStatistics(Isolate* isolate);
2152 static void ResetCodeStatistics(Isolate* isolate);
Steve Blocka7e24c12009-10-30 11:49:00 +00002153#endif
2154
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002155 // This function tries to steal size_in_bytes memory from the sweeper threads'
2156 // free-lists. If it does not succeed in stealing enough memory, it will wait
2157 // for the sweeper threads to finish sweeping.
2158 // It returns true when sweeping is completed and false otherwise.
2159 bool EnsureSweeperProgress(intptr_t size_in_bytes);
2160
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002161 Page* FirstPage() { return anchor_.next_page(); }
2162 Page* LastPage() { return anchor_.prev_page(); }
2163
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002164 void EvictEvacuationCandidatesFromLinearAllocationArea();
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002165
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002166 bool CanExpand(size_t size);
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002167
2168 // Returns the number of total pages in this space.
2169 int CountTotalPages();
2170
2171 // Return size of allocatable area on a page in this space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002172 inline int AreaSize() { return area_size_; }
2173
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002174 virtual bool is_local() { return false; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002175
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002176 // Merges {other} into the current space. Note that this modifies {other},
2177 // e.g., removes its bump pointer area and resets statistics.
2178 void MergeCompactionSpace(CompactionSpace* other);
2179
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002180 // Refills the free list from the corresponding free list filled by the
2181 // sweeper.
2182 virtual void RefillFreeList();
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002183
Ben Murdochda12d292016-06-02 14:46:10 +01002184 FreeList* free_list() { return &free_list_; }
2185
2186 base::Mutex* mutex() { return &space_mutex_; }
2187
2188 inline void UnlinkFreeListCategories(Page* page);
2189 inline intptr_t RelinkFreeListCategories(Page* page);
2190
Steve Blocka7e24c12009-10-30 11:49:00 +00002191 protected:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002192 // PagedSpaces that should be included in snapshots have different, i.e.,
2193 // smaller, initial pages.
2194 virtual bool snapshotable() { return true; }
2195
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002196 bool HasPages() { return anchor_.next_page() != &anchor_; }
2197
2198 // Cleans up the space, frees all pages in this space except those belonging
2199 // to the initial chunk, uncommits addresses in the initial chunk.
2200 void TearDown();
2201
2202 // Expands the space by allocating a fixed number of pages. Returns false if
2203 // it cannot allocate requested number of pages from OS, or if the hard heap
2204 // size limit has been hit.
2205 bool Expand();
2206
2207 // Generic fast case allocation function that tries linear allocation at the
2208 // address denoted by top in allocation_info_.
2209 inline HeapObject* AllocateLinearly(int size_in_bytes);
2210
2211 // Generic fast case allocation function that tries aligned linear allocation
2212 // at the address denoted by top in allocation_info_. Writes the aligned
2213 // allocation size, which includes the filler size, to size_in_bytes.
2214 inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
2215 AllocationAlignment alignment);
2216
2217 // If sweeping is still in progress try to sweep unswept pages. If that is
2218 // not successful, wait for the sweeper threads and re-try free-list
2219 // allocation.
2220 MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
2221 int size_in_bytes);
2222
2223 // Slow path of AllocateRaw. This function is space-dependent.
2224 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
2225
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002226 int area_size_;
2227
Steve Blocka7e24c12009-10-30 11:49:00 +00002228 // Accounting information for this space.
2229 AllocationStats accounting_stats_;
2230
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002231 // The dummy page that anchors the double linked list of pages.
2232 Page anchor_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002233
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002234 // The space's free list.
2235 FreeList free_list_;
Steve Block6ded16b2010-05-10 14:33:55 +01002236
Steve Blocka7e24c12009-10-30 11:49:00 +00002237 // Normal allocation information.
2238 AllocationInfo allocation_info_;
2239
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002240 // Mutex guarding any concurrent access to the space.
2241 base::Mutex space_mutex_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002242
Ben Murdochda12d292016-06-02 14:46:10 +01002243 friend class IncrementalMarking;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002244 friend class MarkCompactCollector;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002245 friend class PageIterator;
2246
2247 // Used in cctest.
2248 friend class HeapTester;
Steve Blocka7e24c12009-10-30 11:49:00 +00002249};
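// Example (illustrative sketch, not part of the V8 API): the accounting that
// PagedSpace::Free performs. Blocks smaller than the minimum free-list block
// size are treated as waste instead of being linked into a category. The
// constant below is an assumption for the example.
//
//   const int kExampleMinBlockSize = 3 * 8;  // three 8-byte words, assumed
//
//   // Returns the number of bytes that became reusable for later allocation.
//   int ExampleFree(int size_in_bytes) {
//     int wasted = (size_in_bytes < kExampleMinBlockSize) ? size_in_bytes : 0;
//     return size_in_bytes - wasted;
//   }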
2250
2251
Steve Blocka7e24c12009-10-30 11:49:00 +00002252class NumberAndSizeInfo BASE_EMBEDDED {
2253 public:
2254 NumberAndSizeInfo() : number_(0), bytes_(0) {}
2255
2256 int number() const { return number_; }
2257 void increment_number(int num) { number_ += num; }
2258
2259 int bytes() const { return bytes_; }
2260 void increment_bytes(int size) { bytes_ += size; }
2261
2262 void clear() {
2263 number_ = 0;
2264 bytes_ = 0;
2265 }
2266
2267 private:
2268 int number_;
2269 int bytes_;
2270};
2271
2272
2273// HistogramInfo class for recording a single "bar" of a histogram. This
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002274// class is used for collecting statistics to print to the log file.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002275class HistogramInfo : public NumberAndSizeInfo {
Steve Blocka7e24c12009-10-30 11:49:00 +00002276 public:
2277 HistogramInfo() : NumberAndSizeInfo() {}
2278
2279 const char* name() { return name_; }
2280 void set_name(const char* name) { name_ = name; }
2281
2282 private:
2283 const char* name_;
2284};


enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };


class NewSpacePage : public MemoryChunk {
 public:
  static inline NewSpacePage* Initialize(Heap* heap, MemoryChunk* chunk,
                                         Executability executable,
                                         SemiSpace* owner);

  static bool IsAtStart(Address addr) {
    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
           kObjectStartOffset;
  }

  static bool IsAtEnd(Address addr) {
    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
  }

  // Finds the NewSpacePage containing the given address.
  static inline NewSpacePage* FromAddress(Address address_in_page) {
    Address page_start =
        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
                                  ~Page::kPageAlignmentMask);
    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
    return page;
  }

  // Find the page for a limit address. A limit address is either an address
  // inside a page, or the address right after the last byte of a page.
  static inline NewSpacePage* FromLimit(Address address_limit) {
    return NewSpacePage::FromAddress(address_limit - 1);
  }

  // Checks if address1 and address2 are on the same new space page.
  static inline bool OnSamePage(Address address1, Address address2) {
    return NewSpacePage::FromAddress(address1) ==
           NewSpacePage::FromAddress(address2);
  }
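
  // Illustrative sketch (comment only): FromAddress() and IsAtStart() rely on
  // new-space pages being aligned to Page::kPageSize, so the page header can
  // be recovered from any interior address by masking off the low bits:
  //
  //   //  addr & ~Page::kPageAlignmentMask -> start of the page
  //   //  addr &  Page::kPageAlignmentMask -> offset within the page
  //   NewSpacePage* p = NewSpacePage::FromAddress(addr);
  //   bool same = NewSpacePage::OnSamePage(addr, p->area_start());  // true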

  inline NewSpacePage* next_page() {
    return static_cast<NewSpacePage*>(next_chunk());
  }

  inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }

  inline NewSpacePage* prev_page() {
    return static_cast<NewSpacePage*>(prev_chunk());
  }

  inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }

  SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }

  bool is_anchor() { return !this->InNewSpace(); }

 private:
  // GC related flags copied from from-space to to-space when
  // flipping semispaces.
  static const intptr_t kCopyOnFlipFlagsMask =
      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

  // Create a NewSpacePage object that is only used as anchor
  // for the doubly-linked list of real pages.
  explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }

  // Initialize a fake NewSpacePage used as a sentinel at the ends
  // of a doubly-linked list of real NewSpacePages.
  // Only uses the prev/next links, and sets flags to not be in new-space.
  void InitializeAsAnchor(SemiSpace* owner);

  friend class SemiSpace;
  friend class SemiSpaceIterator;
};


// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
// The mark-compact collector uses the memory of the first page in the from
// space as a marking stack when tracing live objects.
class SemiSpace : public Space {
 public:
  static void Swap(SemiSpace* from, SemiSpace* to);

  SemiSpace(Heap* heap, SemiSpaceId semispace)
      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
        current_capacity_(0),
        maximum_capacity_(0),
        minimum_capacity_(0),
        age_mark_(nullptr),
        committed_(false),
        id_(semispace),
        anchor_(this),
        current_page_(nullptr) {}

  inline bool Contains(HeapObject* o);
  inline bool Contains(Object* o);
  inline bool ContainsSlow(Address a);

  void SetUp(int initial_capacity, int maximum_capacity);
  void TearDown();
  bool HasBeenSetUp() { return maximum_capacity_ != 0; }

  bool Commit();
  bool Uncommit();
  bool is_committed() { return committed_; }

  // Grows the semispace to the new capacity. The new capacity requested must
  // be larger than the current capacity and less than the maximum capacity.
  bool GrowTo(int new_capacity);

  // Shrinks the semispace to the new capacity. The new capacity requested
  // must be more than the amount of used memory in the semispace and less
  // than the current capacity.
  bool ShrinkTo(int new_capacity);

  // Returns the start address of the first page of the space.
  Address space_start() {
    DCHECK_NE(anchor_.next_page(), anchor());
    return anchor_.next_page()->area_start();
  }

  NewSpacePage* first_page() { return anchor_.next_page(); }
  NewSpacePage* current_page() { return current_page_; }

  // Returns one past the end address of the space.
  Address space_end() { return anchor_.prev_page()->area_end(); }

  // Returns the start address of the current page of the space.
  Address page_low() { return current_page_->area_start(); }

  // Returns one past the end address of the current page of the space.
  Address page_high() { return current_page_->area_end(); }

  bool AdvancePage() {
    NewSpacePage* next_page = current_page_->next_page();
    if (next_page == anchor()) return false;
    current_page_ = next_page;
    return true;
  }

  // Resets the space to using the first page.
  void Reset();
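
  // Illustrative sketch (comment only): how allocation code typically walks a
  // committed semispace page by page. `space` is assumed to point at a set-up
  // SemiSpace.
  //
  //   space->Reset();                      // back to the first page
  //   Address top = space->page_low();     // bump-pointer start
  //   Address limit = space->page_high();  // bump-pointer limit
  //   // When the current page fills up, move on to the next one:
  //   if (space->AdvancePage()) {
  //     top = space->page_low();
  //     limit = space->page_high();
  //   }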

  // Age mark accessors.
  Address age_mark() { return age_mark_; }
  void set_age_mark(Address mark);

  // Returns the current capacity of the semispace.
  int current_capacity() { return current_capacity_; }

  // Returns the maximum capacity of the semispace.
  int maximum_capacity() { return maximum_capacity_; }

  // Returns the initial capacity of the semispace.
  int minimum_capacity() { return minimum_capacity_; }

  SemiSpaceId id() { return id_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  // If we don't have these here then SemiSpace will be abstract. However
  // they should never be called:

  intptr_t Size() override {
    UNREACHABLE();
    return 0;
  }

  intptr_t SizeOfObjects() override { return Size(); }

  intptr_t Available() override {
    UNREACHABLE();
    return 0;
  }

#ifdef DEBUG
  void Print() override;
  // Validate a range of addresses in a SemiSpace.
  // The "from" address must be on a page prior to the "to" address,
  // in the linked page order, or it must be earlier on the same page.
  static void AssertValidRange(Address from, Address to);
#else
  // Do nothing.
  inline static void AssertValidRange(Address from, Address to) {}
#endif

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

 private:
  void RewindPages(NewSpacePage* start, int num_pages);

  inline NewSpacePage* anchor() { return &anchor_; }

  // Copies the flags into the masked positions on all pages in the space.
  void FixPagesFlags(intptr_t flags, intptr_t flag_mask);

  // The currently committed space capacity.
  int current_capacity_;

  // The maximum capacity that can be used by this space.
  int maximum_capacity_;

  // The minimum capacity for the space. A space cannot shrink below this size.
  int minimum_capacity_;

  // Used to govern object promotion during mark-compact collection.
  Address age_mark_;

  bool committed_;
  SemiSpaceId id_;

  NewSpacePage anchor_;
  NewSpacePage* current_page_;

  friend class SemiSpaceIterator;
  friend class NewSpacePageIterator;
};


// A SemiSpaceIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space. It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace. New objects allocated after the
// iterator is created are not iterated.
class SemiSpaceIterator : public ObjectIterator {
 public:
  // Create an iterator over the allocated objects in the given to-space.
  explicit SemiSpaceIterator(NewSpace* space);

  inline HeapObject* Next();

  // Implementation of the ObjectIterator functions.
  inline HeapObject* next_object() override;

 private:
  void Initialize(Address start, Address end);

  // The current iteration point.
  Address current_;
  // The end of iteration.
  Address limit_;
};
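
// Illustrative sketch (comment only): walking every object currently allocated
// in to-space, assuming Next() returns nullptr once the end of the space is
// reached and that `heap` is a fully initialized Heap.
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     // ... inspect obj ...
//   }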


// -----------------------------------------------------------------------------
// A NewSpacePageIterator iterates over the pages in a semispace.
class NewSpacePageIterator BASE_EMBEDDED {
 public:
  // Make an iterator that runs over all pages in to-space.
  explicit inline NewSpacePageIterator(NewSpace* space);

  // Make an iterator that runs over all pages in the given semispace,
  // even those not used in allocation.
  explicit inline NewSpacePageIterator(SemiSpace* space);

  // Make an iterator that iterates from the page containing start
  // to the page that contains limit in the same semispace.
  inline NewSpacePageIterator(Address start, Address limit);

  inline bool has_next();
  inline NewSpacePage* next();

 private:
  NewSpacePage* prev_page_;  // Previous page returned.
  // Next page that will be returned. Cached here so that we can use this
  // iterator for operations that deallocate pages.
  NewSpacePage* next_page_;
  // Last page returned.
  NewSpacePage* last_page_;
};
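
// Illustrative sketch (comment only): summing the usable area of all pages of
// a semispace. `semi_space` is assumed to point at a set-up SemiSpace.
//
//   intptr_t area = 0;
//   NewSpacePageIterator it(semi_space);
//   while (it.has_next()) {
//     NewSpacePage* page = it.next();
//     area += page->area_end() - page->area_start();
//   }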


// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.

class NewSpace : public Space {
 public:
  explicit NewSpace(Heap* heap)
      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
        to_space_(heap, kToSpace),
        from_space_(heap, kFromSpace),
        reservation_(),
        pages_used_(0),
        top_on_previous_step_(0),
        allocated_histogram_(nullptr),
        promoted_histogram_(nullptr) {}

  inline bool Contains(HeapObject* o);
  inline bool ContainsSlow(Address a);
  inline bool Contains(Object* o);

  bool SetUp(int initial_semispace_capacity, int max_semispace_capacity);

  // Tears down the space. Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetUp() {
    return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
  }

  // Flip the pair of spaces.
  void Flip();

  // Grow the capacity of the semispaces. Assumes that they are not at
  // their maximum capacity.
  void Grow();

  // Shrink the capacity of the semispaces.
  void Shrink();

  // Return the allocated bytes in the active semispace.
  intptr_t Size() override {
    return pages_used_ * NewSpacePage::kAllocatableMemory +
           static_cast<int>(top() - to_space_.page_low());
  }

  // The same, but returning an int. We have to keep the intptr_t version
  // because it is inherited, but when we know we are dealing with the new
  // space, which cannot grow as large as the other spaces, the int version
  // is more convenient:
  int SizeAsInt() { return static_cast<int>(Size()); }

  // Return the allocatable capacity of a semispace.
  intptr_t Capacity() {
    SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
    return (to_space_.current_capacity() / Page::kPageSize) *
           NewSpacePage::kAllocatableMemory;
  }

  // Return the current size of a semispace, allocatable and non-allocatable
  // memory.
  intptr_t TotalCapacity() {
    DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
    return to_space_.current_capacity();
  }

  // Committed memory for NewSpace is the committed memory of both semi-spaces
  // combined.
  intptr_t CommittedMemory() override {
    return from_space_.CommittedMemory() + to_space_.CommittedMemory();
  }

  intptr_t MaximumCommittedMemory() override {
    return from_space_.MaximumCommittedMemory() +
           to_space_.MaximumCommittedMemory();
  }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  // Return the available bytes without growing.
  intptr_t Available() override { return Capacity() - Size(); }

  size_t AllocatedSinceLastGC() {
    bool seen_age_mark = false;
    Address age_mark = to_space_.age_mark();
    NewSpacePage* current_page = to_space_.first_page();
    NewSpacePage* age_mark_page = NewSpacePage::FromAddress(age_mark);
    NewSpacePage* last_page = NewSpacePage::FromAddress(top() - kPointerSize);
    if (age_mark_page == last_page) {
      if (top() - age_mark >= 0) {
        return top() - age_mark;
      }
      // Top was reset at some point, invalidating this metric.
      return 0;
    }
    while (current_page != last_page) {
      if (current_page == age_mark_page) {
        seen_age_mark = true;
        break;
      }
      current_page = current_page->next_page();
    }
    if (!seen_age_mark) {
      // Top was reset at some point, invalidating this metric.
      return 0;
    }
    intptr_t allocated = age_mark_page->area_end() - age_mark;
    DCHECK_EQ(current_page, age_mark_page);
    current_page = age_mark_page->next_page();
    while (current_page != last_page) {
      allocated += NewSpacePage::kAllocatableMemory;
      current_page = current_page->next_page();
    }
    allocated += top() - current_page->area_start();
    DCHECK_LE(0, allocated);
    DCHECK_LE(allocated, Size());
    return static_cast<size_t>(allocated);
  }

  // Return the maximum capacity of a semispace.
  int MaximumCapacity() {
    DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
    return to_space_.maximum_capacity();
  }

  bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }

  // Returns the initial capacity of a semispace.
  int InitialTotalCapacity() {
    DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
    return to_space_.minimum_capacity();
  }

  // Return the address of the allocation pointer in the active semispace.
  Address top() {
    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
    return allocation_info_.top();
  }

  // Return the address of the allocation pointer limit in the active semispace.
  Address limit() {
    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
    return allocation_info_.limit();
  }

  // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.space_start(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The allocation top address.
  Address* allocation_top_address() { return allocation_info_.top_address(); }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment));

  MUST_USE_RESULT INLINE(
      AllocationResult AllocateRawUnaligned(int size_in_bytes));

  MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
      int size_in_bytes, AllocationAlignment alignment));

  MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
      int size_in_bytes, AllocationAlignment alignment);

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();

  // When inline allocation stepping is active, either because of incremental
  // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
  // inline allocation every once in a while. This is done by setting
  // allocation_info_.limit to be lower than the actual limit and increasing
  // it in steps to guarantee that the observers are notified periodically.
  void UpdateInlineAllocationLimit(int size_in_bytes);

  void DisableInlineAllocationSteps() {
    top_on_previous_step_ = 0;
    UpdateInlineAllocationLimit(0);
  }

  // Allows observation of inline allocation. The observer->Step() method gets
  // called after every step_size bytes have been allocated (approximately).
  // This works by adjusting the allocation limit to a lower value and adjusting
  // it after each step.
  void AddAllocationObserver(AllocationObserver* observer) override;

  void RemoveAllocationObserver(AllocationObserver* observer) override;
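
  // Illustrative sketch (comment only): a hypothetical observer that tallies
  // new-space allocation. The AllocationObserver interface is declared
  // elsewhere; this assumes a constructor taking the step size and a Step()
  // callback matching the arguments of InlineAllocationStep() below.
  //
  //   class TallyObserver : public AllocationObserver {
  //    public:
  //     explicit TallyObserver(intptr_t step_size)
  //         : AllocationObserver(step_size) {}
  //     void Step(int bytes_allocated, Address soon_object,
  //               size_t size) override {
  //       total_ += bytes_allocated;
  //     }
  //     intptr_t total_ = 0;
  //   };
  //
  //   TallyObserver observer(128 * KB);
  //   heap->new_space()->AddAllocationObserver(&observer);
  //   // ... allocate ...
  //   heap->new_space()->RemoveAllocationObserver(&observer);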

  // Get the extent of the inactive semispace (for use as a marking stack,
  // or to zap it). Notice: space-addresses are not necessarily on the
  // same page, so FromSpaceStart() might be above FromSpaceEnd().
  Address FromSpacePageLow() { return from_space_.page_low(); }
  Address FromSpacePageHigh() { return from_space_.page_high(); }
  Address FromSpaceStart() { return from_space_.space_start(); }
  Address FromSpaceEnd() { return from_space_.space_end(); }

  // Get the extent of the active semispace's pages' memory.
  Address ToSpaceStart() { return to_space_.space_start(); }
  Address ToSpaceEnd() { return to_space_.space_end(); }

  inline bool ToSpaceContainsSlow(Address a);
  inline bool FromSpaceContainsSlow(Address a);
  inline bool ToSpaceContains(Object* o);
  inline bool FromSpaceContains(Object* o);

  // Try to switch the active semispace to a new, empty, page.
  // Returns false if this isn't possible or reasonable (i.e., there
  // are no pages, or the current page is already empty), or true
  // if successful.
  bool AddFreshPage();
  bool AddFreshPageSynchronized();

#ifdef VERIFY_HEAP
  // Verify the active semispace.
  virtual void Verify();
#endif

#ifdef DEBUG
  // Print the active semispace.
  void Print() override { to_space_.Print(); }
#endif

  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object. Note that we don't
  // record every single allocation, but only those that happen in the
  // to-space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

  bool IsFromSpaceCommitted() { return from_space_.is_committed(); }

  SemiSpace* active_space() { return &to_space_; }

  void PauseAllocationObservers() override;
  void ResumeAllocationObservers() override;

 private:
  // Update allocation info to match the current to-space page.
  void UpdateAllocationInfo();

  base::Mutex mutex_;

  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;
  base::VirtualMemory reservation_;
  int pages_used_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;

  Address top_on_previous_step_;

  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;

  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);

  // If we are doing inline allocation in steps, this method performs the
  // 'step' operation. top is the memory address of the bump pointer at the
  // last inline allocation (i.e. it determines the number of bytes actually
  // allocated since the last step.) new_top is the address of the bump pointer
  // where the next byte is going to be allocated from. top and new_top may be
  // different when we cross a page boundary or reset the space.
  void InlineAllocationStep(Address top, Address new_top, Address soon_object,
                            size_t size);
  intptr_t GetNextInlineAllocationStepSize();
  void StartNextInlineAllocationStep();

  friend class SemiSpaceIterator;
};

class PauseAllocationObserversScope {
 public:
  explicit PauseAllocationObserversScope(Heap* heap);
  ~PauseAllocationObserversScope();

 private:
  Heap* heap_;
  DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
};
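
// Illustrative sketch (comment only): the scope pauses allocation observers on
// all spaces for its lifetime, RAII-style. `heap` is assumed to be a fully
// initialized Heap.
//
//   {
//     PauseAllocationObserversScope pause(heap);
//     // ... work that must not trigger observer steps ...
//   }  // observers resume here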

// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.

class CompactionSpace : public PagedSpace {
 public:
  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
      : PagedSpace(heap, id, executable) {}

  bool is_local() override { return true; }

 protected:
  // The space is temporary and not included in any snapshots.
  bool snapshotable() override { return false; }

  MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
      int size_in_bytes) override;
};


// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
 public:
  explicit CompactionSpaceCollection(Heap* heap)
      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}

  CompactionSpace* Get(AllocationSpace space) {
    switch (space) {
      case OLD_SPACE:
        return &old_space_;
      case CODE_SPACE:
        return &code_space_;
      default:
        UNREACHABLE();
    }
    UNREACHABLE();
    return nullptr;
  }

 private:
  CompactionSpace old_space_;
  CompactionSpace code_space_;
};
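
// Illustrative sketch (comment only): each compaction task owns one collection
// and selects the destination space by identity while evacuating pages.
// `heap` is assumed to be a fully initialized Heap.
//
//   CompactionSpaceCollection compaction_spaces(heap);
//   CompactionSpace* old_space = compaction_spaces.Get(OLD_SPACE);
//   CompactionSpace* code_space = compaction_spaces.Get(CODE_SPACE);
//   // Allocate evacuated objects into old_space / code_space, then merge the
//   // local spaces back into the main heap spaces when the task finishes.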


// -----------------------------------------------------------------------------
// Old object space (includes the old space of objects and code space)

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object. The constructor does not allocate pages
  // from the OS.
  OldSpace(Heap* heap, AllocationSpace id, Executability executable)
      : PagedSpace(heap, id, executable) {}
};


// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_DCHECK((space).page_low() <= (info).top() &&   \
              (info).top() <= (space).page_high() &&  \
              (info).limit() <= (space).page_high())


// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public PagedSpace {
 public:
  // Creates a map space object.
  MapSpace(Heap* heap, AllocationSpace id)
      : PagedSpace(heap, id, NOT_EXECUTABLE) {}

  int RoundSizeDownToObjectAlignment(int size) override {
    if (base::bits::IsPowerOfTwo32(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }
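
  // Illustrative arithmetic (comment only), using a hypothetical Map::kSize of
  // 88 bytes (not a power of two): a request of 200 bytes rounds down to
  // (200 / 88) * 88 = 176, i.e. room for exactly two maps. If Map::kSize were
  // a power of two, RoundDown() would produce the same result via masking.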

#ifdef VERIFY_HEAP
  void VerifyObject(HeapObject* obj) override;
#endif
};


// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxRegularHeapObjectSize ) are allocated and
// managed by the large object space. A large object is allocated from the OS
// heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at Page::kObjectStartOffset into a page.
// Large objects do not move during garbage collections.

class LargeObjectSpace : public Space {
 public:
  LargeObjectSpace(Heap* heap, AllocationSpace id);
  virtual ~LargeObjectSpace();

  // Initializes internal data structures.
  bool SetUp();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }
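
  // Illustrative arithmetic (comment only): ObjectSizeFor() answers "how big
  // an object can a chunk of this size hold?". With hypothetical values
  // Page::kPageSize = 1 MB and Page::kObjectStartOffset = 64, a 2 MB chunk
  // leaves 1 MB - 64 bytes of object payload, and any chunk of at most
  // 1 MB + 64 bytes cannot hold a large object at all (the result is 0).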

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  MUST_USE_RESULT AllocationResult
      AllocateRaw(int object_size, Executability executable);

  // Available bytes for objects in this space.
  inline intptr_t Available() override;

  intptr_t Size() override { return size_; }

  intptr_t SizeOfObjects() override { return objects_size_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  int PageCount() { return page_count_; }

  // Finds an object for a given address, returns a Smi if it is not found.
  // The function iterates through all objects in this space, may be slow.
  Object* FindObject(Address a);

  // Finds a large object page containing the given address, returns NULL
  // if such a page doesn't exist.
  LargePage* FindPage(Address a);

  // Clears the marking state of live objects.
  void ClearMarkingStateOfLiveObjects();

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);
  // Checks whether an address is in the object area in this space. Iterates
  // all objects in the space. May be slow.
  bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page_ == NULL; }

  void AdjustLiveBytes(int by) { objects_size_ += by; }

  LargePage* first_page() { return first_page_; }

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

#ifdef DEBUG
  void Print() override;
  void ReportStatistics();
  void CollectCodeStatistics();
#endif

 private:
  // The head of the linked list of large object chunks.
  LargePage* first_page_;
  intptr_t size_;          // allocated bytes
  int page_count_;         // number of chunks
  intptr_t objects_size_;  // size of objects
  // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
  HashMap chunk_map_;

  friend class LargeObjectIterator;
};


class LargeObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);

  HeapObject* Next();

  // implementation of ObjectIterator.
  virtual HeapObject* next_object() { return Next(); }

 private:
  LargePage* current_;
};

class LargePageIterator BASE_EMBEDDED {
 public:
  explicit inline LargePageIterator(LargeObjectSpace* space);

  inline LargePage* next();

 private:
  LargePage* next_page_;
};

// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class MemoryChunkIterator BASE_EMBEDDED {
 public:
  enum Mode { ALL, ALL_BUT_MAP_SPACE, ALL_BUT_CODE_SPACE };
  inline explicit MemoryChunkIterator(Heap* heap, Mode mode);

  // Return NULL when the iterator is done.
  inline MemoryChunk* next();

 private:
  enum State {
    kOldSpaceState,
    kMapState,
    kCodeState,
    kLargeObjectState,
    kFinishedState
  };
  State state_;
  const Mode mode_;
  PageIterator old_iterator_;
  PageIterator code_iterator_;
  PageIterator map_iterator_;
  LargePageIterator lo_iterator_;
};
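
// Illustrative sketch (comment only): visiting every chunk that may hold
// pointers into new space, skipping code space. `heap` is assumed to be a
// fully initialized Heap.
//
//   MemoryChunkIterator it(heap, MemoryChunkIterator::ALL_BUT_CODE_SPACE);
//   for (MemoryChunk* chunk = it.next(); chunk != NULL; chunk = it.next()) {
//     // ... process chunk ...
//   }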

#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_H_