// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include <list>

#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
#include "src/flags.h"
#include "src/hashmap.h"
#include "src/list.h"
#include "src/objects.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

class AllocationInfo;
class AllocationObserver;
class CompactionSpace;
class CompactionSpaceCollection;
class FreeList;
class Isolate;
class MemoryAllocator;
class MemoryChunk;
class Page;
class PagedSpace;
class SemiSpace;
class SkipList;
class SlotsBuffer;
class SlotSet;
class TypedSlotSet;
class Space;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of
// intergenerational references. See heap/store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old space or large object space
// it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old space and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to the map space, which is iterated in a special fashion. However, we
// still require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.
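//
// As an illustration only (a simplified sketch, not the allocation code V8
// actually uses), linear allocation from this special garbage section is a
// bump-pointer check against the space's top and limit:
//
//   Address TryLinearAllocate(Address* top, Address limit, int size_in_bytes) {
//     if (*top + size_in_bytes > limit) return nullptr;  // take the slow path
//     Address result = *top;  // start of the new object
//     *top += size_in_bytes;
//     return result;
//   }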

// Some assertion macros used in debug mode.

#define DCHECK_PAGE_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))

#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
  DCHECK((0 < size) && (size <= code_space->AreaSize()))

#define DCHECK_PAGE_OFFSET(offset) \
  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

 private:
  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1);
    } else {
      return MarkBit(cell_, new_mask);
    }
  }

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  CellType* cell_;
  CellType mask_;

  friend class Marking;
};
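// Note on MarkBit::Next() (illustrative, derived from the code above): mark
// bits are packed 32 to a cell, so advancing past the highest bit of a cell
// moves on to the lowest bit of the following cell. For example, a MarkBit
// with mask 0x80000000 in cell N yields Next() == MarkBit(N + 1, 0x1).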


// Bitmap is a sequence of cells, each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() { return CellsForLength(kLength); }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  V8_INLINE static uint32_t IndexInCell(uint32_t index) {
    return index & kBitIndexMask;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) { return reinterpret_cast<Address>(this); }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index) {
    MarkBit::CellType mask = 1u << IndexInCell(index);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask);
  }
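  // Worked example (illustrative only): for index 37, the mark bit lives in
  // cell 37 >> 5 == 1, at bit position 37 & 31 == 5, so MarkBitFromIndex(37)
  // returns MarkBit(cells() + 1, 1u << 5).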

  static inline void Clear(MemoryChunk* chunk);

  static inline void SetAllBits(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) {
        return false;
      }
    }
    return true;
  }

  // Clears all bits starting from {cell_base_index} up to and excluding
  // {index}. Note that {cell_base_index} is required to be cell aligned.
  void ClearRange(uint32_t cell_base_index, uint32_t index) {
    DCHECK_EQ(IndexInCell(cell_base_index), 0u);
    DCHECK_GE(index, cell_base_index);
    uint32_t start_cell_index = IndexToCell(cell_base_index);
    uint32_t end_cell_index = IndexToCell(index);
    DCHECK_GE(end_cell_index, start_cell_index);
    // Clear all cells till the cell containing the last index.
    for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
      cells()[i] = 0;
    }
    // Clear all bits in the last cell till the last bit before index.
    uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
    cells()[end_cell_index] &= clear_mask;
  }
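  // Worked example (illustrative only): ClearRange(0, 70) zeroes cells 0 and 1
  // entirely (mark bits 0..63) and then clears bits 0..5 of cell 2 (mark bits
  // 64..69), leaving bit 70 and everything above it untouched.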
};

enum FreeListCategoryType {
  kTiniest,
  kTiny,
  kSmall,
  kMedium,
  kLarge,
  kHuge,

  kFirstCategory = kTiniest,
  kLastCategory = kHuge,
  kNumberOfCategories = kLastCategory + 1,
  kInvalidCategory
};

enum FreeMode { kLinkCategory, kDoNotLinkCategory };

// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
 public:
  static const int kSize = kIntSize +      // FreeListCategoryType type_
                           kIntSize +      // int available_
                           kPointerSize +  // FreeSpace* top_
                           kPointerSize +  // FreeListCategory* prev_
                           kPointerSize;   // FreeListCategory* next_

  FreeListCategory()
      : type_(kInvalidCategory),
        available_(0),
        top_(nullptr),
        prev_(nullptr),
        next_(nullptr) {}

  void Initialize(FreeListCategoryType type) {
    type_ = type;
    available_ = 0;
    top_ = nullptr;
    prev_ = nullptr;
    next_ = nullptr;
  }

  void Invalidate();

  void Reset();

  void ResetStats() { Reset(); }

  void RepairFreeList(Heap* heap);

  // Relinks the category into the currently owning free list. Requires that
  // the category is currently unlinked.
  void Relink();

  bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode);

  // Picks a node from the list and stores its size in |node_size|. Returns
  // nullptr if the category is empty.
  FreeSpace* PickNodeFromList(int* node_size);

  // Performs a single try to pick a node of at least |minimum_size| from the
  // category. Stores the actual size in |node_size|. Returns nullptr if no
  // node is found.
  FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size);

  // Picks a node of at least |minimum_size| from the category. Stores the
  // actual size in |node_size|. Returns nullptr if no node is found.
  FreeSpace* SearchForNodeInList(int minimum_size, int* node_size);

  inline FreeList* owner();
  inline bool is_linked();
  bool is_empty() { return top() == nullptr; }
  int available() const { return available_; }

#ifdef DEBUG
  intptr_t SumFreeList();
  int FreeListLength();
#endif

 private:
  // For debug builds we accurately compute free list lengths up until
  // {kVeryLongFreeList} by manually walking the list.
  static const int kVeryLongFreeList = 500;

  inline Page* page();

  FreeSpace* top() { return top_; }
  void set_top(FreeSpace* top) { top_ = top; }
  FreeListCategory* prev() { return prev_; }
  void set_prev(FreeListCategory* prev) { prev_ = prev; }
  FreeListCategory* next() { return next_; }
  void set_next(FreeListCategory* next) { next_ = next; }

  // |type_|: The type of this free list category.
  FreeListCategoryType type_;

  // |available_|: Total available bytes in all blocks of this free list
  // category.
  int available_;

  // |top_|: Points to the top FreeSpace* in the free list category.
  FreeSpace* top_;

  FreeListCategory* prev_;
  FreeListCategory* next_;

  friend class FreeList;
  friend class PagedSpace;
};

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    EVACUATION_CANDIDATE,
    NEVER_EVACUATE,  // May contain immortal immutables.

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them, they will be kept black,
    // and a white-to-grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
    // from new to old space during evacuation.
    PAGE_NEW_OLD_PROMOTION,

    // A black page has all mark bits set to 1 (black). A black page currently
    // cannot be iterated because it is not swept. Moreover, live bytes are
    // not updated.
    BLACK_PAGE,

    // This flag is intended to be used for testing. Works only when both
    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
    // are set. It forces the page to become an evacuation candidate at the
    // next candidates selection cycle.
    FORCE_EVACUATION_CANDIDATE_FOR_TESTING,

    // This flag is intended to be used for testing.
    NEVER_ALLOCATE_ON_PAGE,

    // The memory chunk is already logically freed; however, the actual
    // freeing still has to be performed.
    PRE_FREED,

    // |POOLED|: When actually freeing this chunk, only uncommit and do not
    // give up the reservation as we still reuse the chunk at some point.
    POOLED,

    // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
    // has been aborted and needs special handling by the sweeper.
    COMPACTION_WAS_ABORTED,

    // |ANCHOR|: Flag is set if page is an anchor.
    ANCHOR,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };

  // |kSweepingDone|: The page state when sweeping is complete or sweeping must
  //   not be performed on that page. Sweeper threads that are done with their
  //   work will set this value and not touch the page anymore.
  // |kSweepingPending|: This page is ready for parallel sweeping.
  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
  enum ConcurrentSweepingState {
    kSweepingDone,
    kSweepingPending,
    kSweepingInProgress,
  };

  // Every n write barrier invocations we go to runtime even though
  // we could have handled it in generated code. This lets us check
  // whether we have hit the limit and should do some more marking.
  static const int kWriteBarrierCounterGranularity = 500;

  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) | (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;

  static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize  // size_t size
      + kIntptrSize               // intptr_t flags_
      + kPointerSize              // Address area_start_
      + kPointerSize              // Address area_end_
      + 2 * kPointerSize          // base::VirtualMemory reservation_
      + kPointerSize              // Address owner_
      + kPointerSize              // Heap* heap_
      + kIntSize;                 // int progress_bar_

  static const size_t kOldToNewSlotsOffset =
      kLiveBytesOffset + kIntSize;  // int live_byte_count_

  static const size_t kWriteBarrierCounterOffset =
      kOldToNewSlotsOffset + kPointerSize  // SlotSet* old_to_new_slots_;
      + kPointerSize                       // SlotSet* old_to_old_slots_;
      + kPointerSize   // TypedSlotSet* typed_old_to_old_slots_;
      + kPointerSize;  // SkipList* skip_list_;

  static const size_t kMinHeaderSize =
      kWriteBarrierCounterOffset +
      kIntptrSize         // intptr_t write_barrier_counter_
      + kPointerSize      // AtomicValue high_water_mark_
      + kPointerSize      // base::Mutex* mutex_
      + kPointerSize      // base::AtomicWord concurrent_sweeping_
      + 2 * kPointerSize  // AtomicNumber free-list statistics
      + kPointerSize      // AtomicValue next_chunk_
      + kPointerSize      // AtomicValue prev_chunk_
      // FreeListCategory categories_[kNumberOfCategories]
      + FreeListCategory::kSize * kNumberOfCategories;

  // We add some more space to the computed header size to account for missing
  // alignment requirements in our computation.
  // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
  static const size_t kHeaderSize = kMinHeaderSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset =
      kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
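  // The expression above rounds kBodyOffset up to the next multiple of
  // kObjectStartAlignment. Worked example (illustrative values only): on a
  // 64-bit build kObjectStartAlignment is 32 * 8 == 256, so a kBodyOffset of
  // 300 yields 299 + (256 - 299 % 256) == 512.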

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;

  static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
  static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);

  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }

  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  static inline void UpdateHighWaterMark(Address mark) {
    if (mark == nullptr) return;
    // Need to subtract one from the mark because when a chunk is full the
    // top points to the next address after the chunk, which effectively
    // belongs to another chunk. See the comment to Page::FromTopOrLimit.
    MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
    intptr_t old_mark = 0;
    do {
      old_mark = chunk->high_water_mark_.Value();
    } while ((new_mark > old_mark) &&
             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
  }

  static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }

  Address address() { return reinterpret_cast<Address>(this); }

  base::Mutex* mutex() { return mutex_; }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether |addr| can be a limit of addresses in this page. It's a
  // limit if it's in the page, or if it's just after the last byte of the
  // page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
    return concurrent_sweeping_;
  }

  // Manage live byte count, i.e., count of bytes in black objects.
  inline void ResetLiveBytes();
  inline void IncrementLiveBytes(int by);

  int LiveBytes() {
    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
    DCHECK(!IsFlagSet(BLACK_PAGE) || live_byte_count_ == 0);
    return live_byte_count_;
  }

  void SetLiveBytes(int live_bytes) {
    if (IsFlagSet(BLACK_PAGE)) return;
    DCHECK_GE(live_bytes, 0);
    DCHECK_LE(static_cast<size_t>(live_bytes), size_);
    live_byte_count_ = live_bytes;
  }

  int write_barrier_counter() {
    return static_cast<int>(write_barrier_counter_);
  }

  void set_write_barrier_counter(int counter) {
    write_barrier_counter_ = counter;
  }

  size_t size() const { return size_; }

  inline Heap* heap() const { return heap_; }

  inline SkipList* skip_list() { return skip_list_; }

  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

  inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
  inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
  inline TypedSlotSet* typed_old_to_old_slots() {
    return typed_old_to_old_slots_;
  }

  void AllocateOldToNewSlots();
  void ReleaseOldToNewSlots();
  void AllocateOldToOldSlots();
  void ReleaseOldToOldSlots();
  void AllocateTypedOldToOldSlots();
  void ReleaseTypedOldToOldSlots();

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() { return static_cast<int>(area_end() - area_start()); }

  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }

  int progress_bar() {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    return progress_bar_;
  }

  void set_progress_bar(int progress_bar) {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
  }

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }

  void PrintMarkbits() { markbits()->Print(); }

  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }

  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask are set to
  // the value in "flags", the rest retain the current value in |flags_|.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }

  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }

  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }

  bool IsEvacuationCandidate() {
    DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
    return IsFlagSet(EVACUATION_CANDIDATE);
  }

  bool CanAllocate() {
    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
  }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }

  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

  MemoryChunk* next_chunk() { return next_chunk_.Value(); }

  MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }

  void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }

  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
        kPageHeaderTag) {
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                      kPageHeaderTag);
    } else {
      return nullptr;
    }
  }

  void set_owner(Space* space) {
    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
           kPageHeaderTag);
  }

  bool HasPageHeader() { return owner() != nullptr; }

  void InsertAfter(MemoryChunk* other);
  void Unlink();

 protected:
  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner,
                                 base::VirtualMemory* reservation);

  // Should be called when memory chunk is about to be freed.
  void ReleaseAllocatedMemory();

  base::VirtualMemory* reserved_memory() { return &reservation_; }

  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  base::VirtualMemory reservation_;

  // The identity of the owning space. This is tagged as a failure pointer, but
  // no failure can be in an object, so this can be distinguished from any
  // entry in a fixed array.
  Address owner_;

  Heap* heap_;

  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;

  // Count of bytes marked black on page.
  int live_byte_count_;

  // A single slot set for small pages (of size kPageSize) or an array of slot
  // sets for large pages. In the latter case the number of entries in the
  // array is ceil(size() / kPageSize).
  SlotSet* old_to_new_slots_;
  SlotSet* old_to_old_slots_;
  TypedSlotSet* typed_old_to_old_slots_;

  SkipList* skip_list_;

  intptr_t write_barrier_counter_;

  // Assuming the initial allocation on a page is sequential, this counts the
  // highest number of bytes ever allocated on the page.
  base::AtomicValue<intptr_t> high_water_mark_;

  base::Mutex* mutex_;

  base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;

  // PagedSpace free-list statistics.
  base::AtomicNumber<intptr_t> available_in_free_list_;
  base::AtomicNumber<intptr_t> wasted_memory_;

  // next_chunk_ holds a pointer of type MemoryChunk
  base::AtomicValue<MemoryChunk*> next_chunk_;
  // prev_chunk_ holds a pointer of type MemoryChunk
  base::AtomicValue<MemoryChunk*> prev_chunk_;

  FreeListCategory categories_[kNumberOfCategories];

 private:
  void InitializeReservedMemory() { reservation_.Reset(); }

  friend class MemoryAllocator;
  friend class MemoryChunkValidator;
};
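
// Illustrative sketch only (not the code V8 emits): the flag masks above let
// a write barrier for a store "host.field = value" be reduced to two chunk
// header checks, roughly:
//
//   MemoryChunk* host_chunk = MemoryChunk::FromAddress(host_address);
//   MemoryChunk* value_chunk = MemoryChunk::FromAddress(value_address);
//   if ((host_chunk->GetFlags() &
//        MemoryChunk::kPointersFromHereAreInterestingMask) &&
//       (value_chunk->GetFlags() &
//        MemoryChunk::kPointersToHereAreInterestingMask)) {
//     // Record the slot in the remembered set / store buffer.
//   }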
| 826 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 827 | // ----------------------------------------------------------------------------- |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 828 | // A page is a memory chunk of a size 1MB. Large object pages may be larger. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 829 | // |
| 830 | // The only way to get a page pointer is by calling factory methods: |
| 831 | // Page* p = Page::FromAddress(addr); or |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 832 | // Page* p = Page::FromTopOrLimit(top); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 833 | class Page : public MemoryChunk { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 834 | public: |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 835 | static const intptr_t kCopyAllFlags = ~0; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 836 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 837 | // Page flags copied from from-space to to-space when flipping semispaces. |
| 838 | static const intptr_t kCopyOnFlipFlagsMask = |
| 839 | (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | |
| 840 | (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 841 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 842 | // Maximum object size that gets allocated into regular pages. Objects larger |
| 843 | // than that size are allocated in large object space and are never moved in |
| 844 | // memory. This also applies to new space allocation, since objects are never |
| 845 | // migrated from new space to large object space. Takes double alignment into |
| 846 | // account. |
| 847 | // TODO(hpayer): This limit should be way smaller but we currently have |
| 848 | // short living objects >256K. |
| 849 | static const int kMaxRegularHeapObjectSize = 600 * KB; |
| 850 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 851 | static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner); |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 852 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 853 | // Returns the page containing a given address. The address ranges |
| 854 | // from [page_addr .. page_addr + kPageSize[. This only works if the object |
| 855 | // is in fact in a page. |
| 856 | static Page* FromAddress(Address addr) { |
| 857 | return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask); |
| 858 | } |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 859 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 860 | // Returns the page containing the address provided. The address can |
| 861 | // potentially point righter after the page. To be also safe for tagged values |
| 862 | // we subtract a hole word. The valid address ranges from |
| 863 | // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize]. |
| 864 | static Page* FromAllocationAreaAddress(Address address) { |
| 865 | return Page::FromAddress(address - kPointerSize); |
| 866 | } |

  // Checks if address1 and address2 are on the same new space page.
  static bool OnSamePage(Address address1, Address address2) {
    return Page::FromAddress(address1) == Page::FromAddress(address2);
  }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address addr) {
    return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
  }

  static bool IsAtObjectStart(Address addr) {
    return (reinterpret_cast<intptr_t>(addr) & kPageAlignmentMask) ==
           kObjectStartOffset;
  }
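
  // Worked example (illustrative only): with 1MB pages, FromAddress masks off
  // the low 20 address bits, so an address such as 0x42345678 maps to the
  // Page starting at 0x42300000, and IsAlignedToPageSize(0x42300000) holds.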

  inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);

  // Create a Page object that is only used as anchor for the doubly-linked
  // list of real pages.
  explicit Page(Space* owner) { InitializeAsAnchor(owner); }

  inline void MarkNeverAllocateForTesting();
  inline void MarkEvacuationCandidate();
  inline void ClearEvacuationCandidate();

  Page* next_page() { return static_cast<Page*>(next_chunk()); }
  Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
  void set_next_page(Page* page) { set_next_chunk(page); }
  void set_prev_page(Page* page) { set_prev_chunk(page); }

  template <typename Callback>
  inline void ForAllFreeListCategories(Callback callback) {
    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
      callback(&categories_[i]);
    }
  }

  // Returns the offset of a given address within this page.
  inline int Offset(Address a) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset into this page.
  Address OffsetToAddress(int offset) {
    DCHECK_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
  // progress. In particular, when we know that right before this call a
  // sweeper thread was sweeping this page.
  void WaitUntilSweepingCompleted() {
    mutex_->Lock();
    mutex_->Unlock();
    DCHECK(SweepingDone());
  }

  bool SweepingDone() {
    return concurrent_sweeping_state().Value() == kSweepingDone;
  }

  void ResetFreeListStatistics();

  int LiveBytesFromFreeList() {
    return static_cast<int>(area_size() - wasted_memory() -
                            available_in_free_list());
  }

  FreeListCategory* free_list_category(FreeListCategoryType type) {
    return &categories_[type];
  }

  bool is_anchor() { return IsFlagSet(Page::ANCHOR); }

  intptr_t wasted_memory() { return wasted_memory_.Value(); }
  void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); }
  intptr_t available_in_free_list() { return available_in_free_list_.Value(); }
  void add_available_in_free_list(intptr_t available) {
    available_in_free_list_.Increment(available);
  }

#ifdef DEBUG
  void Print();
#endif  // DEBUG

 private:
  enum InitializationMode { kFreeMemory, kDoNotFreeMemory };

  template <InitializationMode mode = kFreeMemory>
  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, PagedSpace* owner);
  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, SemiSpace* owner);

  inline void InitializeFreeListCategories();

  void InitializeAsAnchor(Space* owner);

  friend class MemoryAllocator;
};
| 969 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 970 | class LargePage : public MemoryChunk { |
| 971 | public: |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 972 | HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 973 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 974 | inline LargePage* next_page() { |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 975 | return static_cast<LargePage*>(next_chunk()); |
| 976 | } |
| 977 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 978 | inline void set_next_page(LargePage* page) { set_next_chunk(page); } |
| 979 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 980 | // A limit to guarantee that we do not overflow the typed slot offset in |
| 981 | // the old-to-old remembered set. |
| 982 | // Note that this limit is higher than what the assembler already imposes |
| 983 | // on x64 and ia32 architectures. |
| 984 | static const int kMaxCodePageSize = 512 * MB; |
| 985 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 986 | private: |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 987 | static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk, |
| 988 | Executability executable, Space* owner); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 989 | |
| 990 | friend class MemoryAllocator; |
| 991 | }; |
| 992 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 993 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 994 | // ---------------------------------------------------------------------------- |
| 995 | // Space is the abstract superclass for all allocation spaces. |
| 996 | class Space : public Malloced { |
| 997 | public: |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 998 | Space(Heap* heap, AllocationSpace id, Executability executable) |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 999 | : allocation_observers_(new List<AllocationObserver*>()), |
| 1000 | allocation_observers_paused_(false), |
| 1001 | heap_(heap), |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1002 | id_(id), |
| 1003 | executable_(executable), |
| 1004 | committed_(0), |
| 1005 | max_committed_(0) {} |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1006 | |
| 1007 | virtual ~Space() {} |
| 1008 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1009 | Heap* heap() const { return heap_; } |
| 1010 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1011 | // Does the space need executable memory? |
| 1012 | Executability executable() { return executable_; } |
| 1013 | |
| 1014 | // Identity used in error reporting. |
| 1015 | AllocationSpace identity() { return id_; } |
| 1016 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 1017 | virtual void AddAllocationObserver(AllocationObserver* observer) { |
| 1018 | allocation_observers_->Add(observer); |
| 1019 | } |
| 1020 | |
| 1021 | virtual void RemoveAllocationObserver(AllocationObserver* observer) { |
| 1022 | bool removed = allocation_observers_->RemoveElement(observer); |
| 1023 | USE(removed); |
| 1024 | DCHECK(removed); |
| 1025 | } |
| 1026 | |
| 1027 | virtual void PauseAllocationObservers() { |
| 1028 | allocation_observers_paused_ = true; |
| 1029 | } |
| 1030 | |
| 1031 | virtual void ResumeAllocationObservers() { |
| 1032 | allocation_observers_paused_ = false; |
| 1033 | } |
| 1034 | |
| 1035 | void AllocationStep(Address soon_object, int size); |
| 1036 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1037 | // Returns the total amount of committed memory for this space, i.e., |
| 1038 | // allocatable memory and page headers. |
| 1039 | virtual intptr_t CommittedMemory() { return committed_; } |
| 1040 | |
| 1041 | virtual intptr_t MaximumCommittedMemory() { return max_committed_; } |
| 1042 | |
Shimeng (Simon) Wang | 8a31eba | 2010-12-06 19:01:33 -0800 | [diff] [blame] | 1043 | // Returns allocated size. |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 1044 | virtual intptr_t Size() = 0; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1045 | |
Shimeng (Simon) Wang | 8a31eba | 2010-12-06 19:01:33 -0800 | [diff] [blame] | 1046 | // Returns size of objects. Can differ from the allocated size |
| 1047 | // (e.g. see LargeObjectSpace). |
| 1048 | virtual intptr_t SizeOfObjects() { return Size(); } |
| 1049 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1050 | // Approximate amount of physical memory committed for this space. |
| 1051 | virtual size_t CommittedPhysicalMemory() = 0; |
| 1052 | |
| 1053 | // Return the available bytes without growing. |
| 1054 | virtual intptr_t Available() = 0; |
| 1055 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1056 | virtual int RoundSizeDownToObjectAlignment(int size) { |
| 1057 | if (id_ == CODE_SPACE) { |
| 1058 | return RoundDown(size, kCodeAlignment); |
| 1059 | } else { |
| 1060 | return RoundDown(size, kPointerSize); |
| 1061 | } |
| 1062 | } |
| 1063 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1064 | void AccountCommitted(intptr_t bytes) { |
| 1065 | DCHECK_GE(bytes, 0); |
| 1066 | committed_ += bytes; |
| 1067 | if (committed_ > max_committed_) { |
| 1068 | max_committed_ = committed_; |
| 1069 | } |
| 1070 | } |
| 1071 | |
| 1072 | void AccountUncommitted(intptr_t bytes) { |
| 1073 | DCHECK_GE(bytes, 0); |
| 1074 | committed_ -= bytes; |
| 1075 | DCHECK_GE(committed_, 0); |
| 1076 | } |
| 1077 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1078 | #ifdef DEBUG |
| 1079 | virtual void Print() = 0; |
| 1080 | #endif |
| 1081 | |
| 1082 | protected: |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 1083 | v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_; |
| 1084 | bool allocation_observers_paused_; |
| 1085 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1086 | private: |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1087 | Heap* heap_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1088 | AllocationSpace id_; |
| 1089 | Executability executable_; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1090 | |
| 1091 | // Keeps track of committed memory in a space. |
| 1092 | intptr_t committed_; |
| 1093 | intptr_t max_committed_; |
| 1094 | }; |
| 1095 | |
| 1096 | |
| 1097 | class MemoryChunkValidator { |
| 1098 | // Computed offsets should match the compiler generated ones. |
| 1099 | STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_)); |
| 1100 | STATIC_ASSERT(MemoryChunk::kLiveBytesOffset == |
| 1101 | offsetof(MemoryChunk, live_byte_count_)); |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1102 | STATIC_ASSERT(MemoryChunk::kOldToNewSlotsOffset == |
| 1103 | offsetof(MemoryChunk, old_to_new_slots_)); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1104 | STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset == |
| 1105 | offsetof(MemoryChunk, write_barrier_counter_)); |
| 1106 | |
| 1107 | // Validate our estimates on the header size. |
| 1108 | STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); |
| 1109 | STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize); |
| 1110 | STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1111 | }; |
| 1112 | |
| 1113 | |
| 1114 | // ---------------------------------------------------------------------------- |
| 1115 | // All heap objects containing executable code (code objects) must be allocated |
| 1116 | // from a 2 GB range of memory, so that they can call each other using 32-bit |
| 1117 | // displacements. This happens automatically on 32-bit platforms, where 32-bit |
| 1118 | // displacements cover the entire 4GB virtual address space. On 64-bit |
| 1119 | // platforms, we support this using the CodeRange object, which reserves and |
| 1120 | // manages a range of virtual memory. |
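// A minimal usage sketch (hypothetical sizes and variables, error handling
// omitted); it only uses the members declared below:
//
//   CodeRange code_range(isolate);
//   if (code_range.SetUp(512 * MB)) {
//     size_t allocated = 0;
//     Address base = code_range.AllocateRawMemory(2 * MB, 2 * MB, &allocated);
//     // ... use [base, base + allocated) for code pages ...
//     code_range.FreeRawMemory(base, allocated);
//   }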
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1121 | class CodeRange { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1122 | public: |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 1123 | explicit CodeRange(Isolate* isolate); |
| 1124 | ~CodeRange() { TearDown(); } |
| 1125 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1126 | // Reserves a range of virtual memory, but does not commit any of it. |
| 1127 | // Can only be called once, at heap initialization time. |
| 1128 | // Returns false on failure. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1129 | bool SetUp(size_t requested_size); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1130 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1131 | bool valid() { return code_range_ != NULL; } |
| 1132 | Address start() { |
| 1133 | DCHECK(valid()); |
| 1134 | return static_cast<Address>(code_range_->address()); |
| 1135 | } |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1136 | size_t size() { |
| 1137 | DCHECK(valid()); |
| 1138 | return code_range_->size(); |
| 1139 | } |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1140 | bool contains(Address address) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1141 | if (!valid()) return false; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1142 | Address start = static_cast<Address>(code_range_->address()); |
| 1143 | return start <= address && address < start + code_range_->size(); |
| 1144 | } |
| 1145 | |
| 1146 | // Allocates a chunk of memory from the large-object portion of |
| 1147 | // the code range. On platforms with no separate code range, this should |
| 1148 | // not be called. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1149 | MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size, |
| 1150 | const size_t commit_size, |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1151 | size_t* allocated); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1152 | bool CommitRawMemory(Address start, size_t length); |
| 1153 | bool UncommitRawMemory(Address start, size_t length); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1154 | void FreeRawMemory(Address buf, size_t length); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1155 | |
| 1156 | private: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1157 | // Frees the range of virtual memory, and frees the data structures used to |
| 1158 | // manage it. |
| 1159 | void TearDown(); |
| 1160 | |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 1161 | Isolate* isolate_; |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1162 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1163 | // The reserved range of virtual memory that all code objects are put in. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1164 | base::VirtualMemory* code_range_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1165 | // Plain old data class, just a struct plus a constructor. |
| 1166 | class FreeBlock { |
| 1167 | public: |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1168 | FreeBlock() : start(0), size(0) {} |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1169 | FreeBlock(Address start_arg, size_t size_arg) |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1170 | : start(start_arg), size(size_arg) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1171 | DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment)); |
| 1172 | DCHECK(size >= static_cast<size_t>(Page::kPageSize)); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1173 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1174 | FreeBlock(void* start_arg, size_t size_arg) |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1175 | : start(static_cast<Address>(start_arg)), size(size_arg) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1176 | DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment)); |
| 1177 | DCHECK(size >= static_cast<size_t>(Page::kPageSize)); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1178 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1179 | |
| 1180 | Address start; |
| 1181 | size_t size; |
| 1182 | }; |
| 1183 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1184 | // The global mutex guards free_list_ and allocation_list_ as GC threads may |
| 1185 | // access both lists concurrently with the main thread. |
| 1186 | base::Mutex code_range_mutex_; |
| 1187 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1188 | // Freed blocks of memory are added to the free list. When the allocation |
| 1189 | // list is exhausted, the free list is sorted and merged to make the new |
| 1190 | // allocation list. |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1191 | List<FreeBlock> free_list_; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1192 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1193 | // Memory is allocated from the free blocks on the allocation list. |
| 1194 | // The block at current_allocation_block_index_ is the current block. |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1195 | List<FreeBlock> allocation_list_; |
| 1196 | int current_allocation_block_index_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1197 | |
| 1198 | // Finds a block on the allocation list that contains at least the |
| 1199 | // requested amount of memory. If none is found, sorts and merges |
| 1200 | // the existing free memory blocks, and searches again. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1201 | // If none can be found, returns false. |
| 1202 | bool GetNextAllocationBlock(size_t requested); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1203 | // Compares the start addresses of two free blocks. |
| 1204 | static int CompareFreeBlockAddress(const FreeBlock* left, |
| 1205 | const FreeBlock* right); |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1206 | bool ReserveBlock(const size_t requested_size, FreeBlock* block); |
| 1207 | void ReleaseBlock(const FreeBlock* block); |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1208 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1209 | DISALLOW_COPY_AND_ASSIGN(CodeRange); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1210 | }; |
| 1211 | |
| 1212 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1213 | class SkipList { |
| 1214 | public: |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1215 | SkipList() { Clear(); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1216 | |
| 1217 | void Clear() { |
| 1218 | for (int idx = 0; idx < kSize; idx++) { |
| 1219 | starts_[idx] = reinterpret_cast<Address>(-1); |
| 1220 | } |
| 1221 | } |
| 1222 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1223 | Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1224 | |
| 1225 | void AddObject(Address addr, int size) { |
| 1226 | int start_region = RegionNumber(addr); |
| 1227 | int end_region = RegionNumber(addr + size - kPointerSize); |
| 1228 | for (int idx = start_region; idx <= end_region; idx++) { |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1229 | if (starts_[idx] > addr) { |
| 1230 | starts_[idx] = addr; |
| 1231 | } else { |
| 1232 | // In the first region, there may already be an object closer to the |
| 1233 | // start of the region. Do not change the start in that case. If this |
| 1234 | // is not the first region, you probably added overlapping objects. |
| 1235 | DCHECK_EQ(start_region, idx); |
| 1236 | } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1237 | } |
| 1238 | } |
| 1239 | |
| 1240 | static inline int RegionNumber(Address addr) { |
| 1241 | return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2; |
| 1242 | } |
| 1243 | |
| 1244 | static void Update(Address addr, int size) { |
| 1245 | Page* page = Page::FromAddress(addr); |
| 1246 | SkipList* list = page->skip_list(); |
| 1247 | if (list == NULL) { |
| 1248 | list = new SkipList(); |
| 1249 | page->set_skip_list(list); |
| 1250 | } |
| 1251 | |
| 1252 | list->AddObject(addr, size); |
| 1253 | } |
| 1254 | |
| 1255 | private: |
| 1256 | static const int kRegionSizeLog2 = 13; |
| 1257 | static const int kRegionSize = 1 << kRegionSizeLog2; |
| 1258 | static const int kSize = Page::kPageSize / kRegionSize; |
| 1259 | |
| 1260 | STATIC_ASSERT(Page::kPageSize % kRegionSize == 0); |
| 1261 | |
| 1262 | Address starts_[kSize]; |
| 1263 | }; |
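// Illustrative sketch (hypothetical |addr| and |size|): after an object is
// allocated, recording it lets code iteration skip directly to the first
// object start known for the containing region:
//
//   SkipList::Update(addr, size);
//   Address first = Page::FromAddress(addr)->skip_list()->StartFor(addr);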
| 1264 | |
| 1265 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1266 | // ---------------------------------------------------------------------------- |
| 1267 | // A space acquires chunks of memory from the operating system. The memory |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1268 | // allocator allocates and deallocates pages for the paged heap spaces and large |
| 1269 | // pages for large object space. |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1270 | class MemoryAllocator { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1271 | public: |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1272 | // Unmapper takes care of concurrently unmapping and uncommitting memory |
| 1273 | // chunks. |
| 1274 | class Unmapper { |
| 1275 | public: |
| 1276 | class UnmapFreeMemoryTask; |
| 1277 | |
| 1278 | explicit Unmapper(MemoryAllocator* allocator) |
| 1279 | : allocator_(allocator), |
| 1280 | pending_unmapping_tasks_semaphore_(0), |
| 1281 | concurrent_unmapping_tasks_active_(0) {} |
| 1282 | |
| 1283 | void AddMemoryChunkSafe(MemoryChunk* chunk) { |
| 1284 | if ((chunk->size() == Page::kPageSize) && |
| 1285 | (chunk->executable() != EXECUTABLE)) { |
| 1286 | AddMemoryChunkSafe<kRegular>(chunk); |
| 1287 | } else { |
| 1288 | AddMemoryChunkSafe<kNonRegular>(chunk); |
| 1289 | } |
| 1290 | } |
| 1291 | |
| 1292 | MemoryChunk* TryGetPooledMemoryChunkSafe() { |
| 1293 | // Procedure: |
| 1294 | // (1) Try to get a chunk that was declared as pooled and already has |
| 1295 | // been uncommitted. |
| 1296 | // (2) Try to steal any memory chunk of kPageSize that would've been |
| 1297 | // unmapped. |
| 1298 | MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>(); |
| 1299 | if (chunk == nullptr) { |
| 1300 | chunk = GetMemoryChunkSafe<kRegular>(); |
| 1301 | if (chunk != nullptr) { |
| 1302 | // For stolen chunks we need to manually free any allocated memory. |
| 1303 | chunk->ReleaseAllocatedMemory(); |
| 1304 | } |
| 1305 | } |
| 1306 | return chunk; |
| 1307 | } |
| 1308 | |
| 1309 | void FreeQueuedChunks(); |
| 1310 | bool WaitUntilCompleted(); |
| 1311 | |
| 1312 | private: |
| 1313 | enum ChunkQueueType { |
| 1314 | kRegular, // Pages of kPageSize that do not live in a CodeRange and |
| 1315 | // can thus be used for stealing. |
| 1316 | kNonRegular, // Large chunks and executable chunks. |
| 1317 | kPooled, // Pooled chunks, already uncommitted and ready for reuse. |
| 1318 | kNumberOfChunkQueues, |
| 1319 | }; |
| 1320 | |
| 1321 | template <ChunkQueueType type> |
| 1322 | void AddMemoryChunkSafe(MemoryChunk* chunk) { |
| 1323 | base::LockGuard<base::Mutex> guard(&mutex_); |
| 1324 | chunks_[type].push_back(chunk); |
| 1325 | } |
| 1326 | |
| 1327 | template <ChunkQueueType type> |
| 1328 | MemoryChunk* GetMemoryChunkSafe() { |
| 1329 | base::LockGuard<base::Mutex> guard(&mutex_); |
| 1330 | if (chunks_[type].empty()) return nullptr; |
| 1331 | MemoryChunk* chunk = chunks_[type].front(); |
| 1332 | chunks_[type].pop_front(); |
| 1333 | return chunk; |
| 1334 | } |
| 1335 | |
| 1336 | void PerformFreeMemoryOnQueuedChunks(); |
| 1337 | |
| 1338 | base::Mutex mutex_; |
| 1339 | MemoryAllocator* allocator_; |
| 1340 | std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues]; |
| 1341 | base::Semaphore pending_unmapping_tasks_semaphore_; |
| 1342 | intptr_t concurrent_unmapping_tasks_active_; |
| 1343 | |
| 1344 | friend class MemoryAllocator; |
| 1345 | }; |
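// Illustrative sketch: a chunk queued here is freed either synchronously or by
// a background task (assuming |allocator| is the heap's MemoryAllocator):
//
//   allocator->unmapper()->AddMemoryChunkSafe(chunk);
//   allocator->unmapper()->FreeQueuedChunks();      // may spawn a task
//   allocator->unmapper()->WaitUntilCompleted();    // blocks until done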
| 1346 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1347 | enum AllocationMode { |
| 1348 | kRegular, |
| 1349 | kPooled, |
| 1350 | }; |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1351 | enum FreeMode { |
| 1352 | kFull, |
| 1353 | kPreFreeAndQueue, |
| 1354 | kPooledAndQueue, |
| 1355 | }; |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1356 | |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 1357 | explicit MemoryAllocator(Isolate* isolate); |
| 1358 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1359 | // Initializes its internal bookkeeping structures. |
Russell Brenner | 90bac25 | 2010-11-18 13:33:46 -0800 | [diff] [blame] | 1360 | // Max capacity of the total space and executable memory limit. |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1361 | bool SetUp(intptr_t max_capacity, intptr_t capacity_executable, |
| 1362 | intptr_t code_range_size); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1363 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1364 | void TearDown(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1365 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1366 | // Allocates a Page from the allocator. AllocationMode is used to indicate |
| 1367 | // whether pooled allocation, which only works for MemoryChunk::kPageSize, |
| 1368 | // should be tried first. |
| 1369 | template <MemoryAllocator::AllocationMode alloc_mode = kRegular, |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1370 | typename SpaceType> |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1371 | Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1372 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1373 | LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner, |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1374 | Executability executable); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1375 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1376 | template <MemoryAllocator::FreeMode mode = kFull> |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1377 | void Free(MemoryChunk* chunk); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1378 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1379 | // Returns allocated spaces in bytes. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1380 | intptr_t Size() { return size_.Value(); } |
| 1381 | |
| 1382 | // Returns allocated executable spaces in bytes. |
| 1383 | intptr_t SizeExecutable() { return size_executable_.Value(); } |
| 1384 | |
| 1385 | // Returns the maximum available bytes of heaps. |
| 1386 | intptr_t Available() { |
| 1387 | intptr_t size = Size(); |
| 1388 | return capacity_ < size ? 0 : capacity_ - size; |
| 1389 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1390 | |
Russell Brenner | 90bac25 | 2010-11-18 13:33:46 -0800 | [diff] [blame] | 1391 | // Returns the maximum available executable bytes of heaps. |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1392 | intptr_t AvailableExecutable() { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1393 | intptr_t executable_size = SizeExecutable(); |
| 1394 | if (capacity_executable_ < executable_size) return 0; |
| 1395 | return capacity_executable_ - executable_size; |
Russell Brenner | 90bac25 | 2010-11-18 13:33:46 -0800 | [diff] [blame] | 1396 | } |
| 1397 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1398 | // Returns maximum available bytes that the old space can have. |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1399 | intptr_t MaxAvailable() { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1400 | return (Available() / Page::kPageSize) * Page::kAllocatableMemory; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1401 | } |
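// For example (hypothetical numbers): if Available() amounts to ten
// page-sized chunks of memory, MaxAvailable() is 10 * Page::kAllocatableMemory,
// i.e. the allocatable payload of those pages excluding their headers.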
| 1402 | |
| 1403 | // Returns an indication of whether a pointer is in a space that has |
| 1404 | // been allocated by this MemoryAllocator. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1405 | V8_INLINE bool IsOutsideAllocatedSpace(const void* address) { |
| 1406 | return address < lowest_ever_allocated_.Value() || |
| 1407 | address >= highest_ever_allocated_.Value(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1408 | } |
| 1409 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1410 | #ifdef DEBUG |
| 1411 | // Reports statistic info of the space. |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1412 | void ReportStatistics(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1413 | #endif |
| 1414 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1415 | // Returns a MemoryChunk in which the memory region from commit_area_size to |
| 1416 | // reserve_area_size of the chunk area is reserved but not committed; it |
| 1417 | // could be committed later by calling MemoryChunk::CommitArea. |
| 1418 | MemoryChunk* AllocateChunk(intptr_t reserve_area_size, |
| 1419 | intptr_t commit_area_size, |
| 1420 | Executability executable, Space* space); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1421 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1422 | Address ReserveAlignedMemory(size_t requested, size_t alignment, |
| 1423 | base::VirtualMemory* controller); |
| 1424 | Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size, |
| 1425 | size_t alignment, Executability executable, |
| 1426 | base::VirtualMemory* controller); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1427 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1428 | bool CommitMemory(Address addr, size_t size, Executability executable); |
| 1429 | |
| 1430 | void FreeMemory(base::VirtualMemory* reservation, Executability executable); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1431 | void FreeMemory(Address addr, size_t size, Executability executable); |
| 1432 | |
| 1433 | // Commit a contiguous block of memory from the initial chunk. Assumes that |
| 1434 | // the address is not NULL, the size is greater than zero, and that the |
| 1435 | // block is contained in the initial chunk. Returns true if it succeeded |
| 1436 | // and false otherwise. |
| 1437 | bool CommitBlock(Address start, size_t size, Executability executable); |
| 1438 | |
| 1439 | // Uncommit a contiguous block of memory [start..(start+size)[. |
| 1440 | // start is not NULL, the size is greater than zero, and the |
| 1441 | // block is contained in the initial chunk. Returns true if it succeeded |
| 1442 | // and false otherwise. |
| 1443 | bool UncommitBlock(Address start, size_t size); |
| 1444 | |
| 1445 | // Zaps a contiguous block of memory [start..(start+size)[ thus |
| 1446 | // filling it up with a recognizable non-NULL bit pattern. |
| 1447 | void ZapBlock(Address start, size_t size); |
| 1448 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1449 | void PerformAllocationCallback(ObjectSpace space, AllocationAction action, |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1450 | size_t size); |
| 1451 | |
| 1452 | void AddMemoryAllocationCallback(MemoryAllocationCallback callback, |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1453 | ObjectSpace space, AllocationAction action); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1454 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1455 | void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1456 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1457 | bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1458 | |
| 1459 | static int CodePageGuardStartOffset(); |
| 1460 | |
| 1461 | static int CodePageGuardSize(); |
| 1462 | |
| 1463 | static int CodePageAreaStartOffset(); |
| 1464 | |
| 1465 | static int CodePageAreaEndOffset(); |
| 1466 | |
| 1467 | static int CodePageAreaSize() { |
| 1468 | return CodePageAreaEndOffset() - CodePageAreaStartOffset(); |
| 1469 | } |
| 1470 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1471 | static int PageAreaSize(AllocationSpace space) { |
| 1472 | DCHECK_NE(LO_SPACE, space); |
| 1473 | return (space == CODE_SPACE) ? CodePageAreaSize() |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1474 | : Page::kAllocatableMemory; |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1475 | } |
| 1476 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1477 | MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm, |
| 1478 | Address start, size_t commit_size, |
| 1479 | size_t reserved_size); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1480 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1481 | CodeRange* code_range() { return code_range_; } |
| 1482 | Unmapper* unmapper() { return &unmapper_; } |
| 1483 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1484 | private: |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1485 | // PreFree logically frees the object, i.e., it takes care of the size |
| 1486 | // bookkeeping and calls the allocation callback. |
| 1487 | void PreFreeMemory(MemoryChunk* chunk); |
| 1488 | |
| 1489 | // FreeMemory can be called concurrently when PreFree was executed before. |
| 1490 | void PerformFreeMemory(MemoryChunk* chunk); |
| 1491 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1492 | // See AllocatePage for public interface. Note that currently we only support |
| 1493 | // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize. |
| 1494 | template <typename SpaceType> |
| 1495 | MemoryChunk* AllocatePagePooled(SpaceType* owner); |
| 1496 | |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 1497 | Isolate* isolate_; |
| 1498 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1499 | CodeRange* code_range_; |
| 1500 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1501 | // Maximum space size in bytes. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1502 | intptr_t capacity_; |
Russell Brenner | 90bac25 | 2010-11-18 13:33:46 -0800 | [diff] [blame] | 1503 | // Maximum subset of capacity_ that can be executable |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1504 | intptr_t capacity_executable_; |
Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 1505 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1506 | // Allocated space size in bytes. |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1507 | base::AtomicNumber<intptr_t> size_; |
Steve Block | 791712a | 2010-08-27 10:21:07 +0100 | [diff] [blame] | 1508 | // Allocated executable space size in bytes. |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1509 | base::AtomicNumber<intptr_t> size_executable_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1510 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1511 | // We keep the lowest and highest addresses allocated as a quick way |
| 1512 | // of determining that pointers are outside the heap. The estimate is |
| 1513 | // conservative, i.e. not all addresses in 'allocated' space are allocated |
| 1514 | // to our heap. The range is [lowest, highest[, inclusive on the low end |
| 1515 | // and exclusive on the high end. |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1516 | base::AtomicValue<void*> lowest_ever_allocated_; |
| 1517 | base::AtomicValue<void*> highest_ever_allocated_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1518 | |
Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 1519 | struct MemoryAllocationCallbackRegistration { |
| 1520 | MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, |
| 1521 | ObjectSpace space, |
| 1522 | AllocationAction action) |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1523 | : callback(callback), space(space), action(action) {} |
Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 1524 | MemoryAllocationCallback callback; |
| 1525 | ObjectSpace space; |
| 1526 | AllocationAction action; |
| 1527 | }; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1528 | |
Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 1529 | // A list of callbacks that are triggered when memory is allocated or freed. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1530 | List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_; |
Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 1531 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1532 | // Initializes pages in a chunk. Returns the first page address. |
| 1533 | // This function and GetChunkId() are provided for the mark-compact |
| 1534 | // collector to rebuild page headers in the from space, which is |
| 1535 | // used as a marking stack and its page headers are destroyed. |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1536 | Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, |
| 1537 | PagedSpace* owner); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 1538 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1539 | void UpdateAllocatedSpaceLimits(void* low, void* high) { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1540 | // The use of atomic primitives does not guarantee correctness (wrt. |
| 1541 | // desired semantics) by default. The loop here ensures that we update the |
| 1542 | // values only if they did not change in between. |
| 1543 | void* ptr = nullptr; |
| 1544 | do { |
| 1545 | ptr = lowest_ever_allocated_.Value(); |
| 1546 | } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low)); |
| 1547 | do { |
| 1548 | ptr = highest_ever_allocated_.Value(); |
| 1549 | } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high)); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1550 | } |
| 1551 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1552 | base::VirtualMemory last_chunk_; |
| 1553 | Unmapper unmapper_; |
| 1554 | |
| 1555 | friend class TestCodeRangeScope; |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1556 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1557 | DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1558 | }; |
| 1559 | |
| 1560 | |
| 1561 | // ----------------------------------------------------------------------------- |
| 1562 | // Interface for heap object iterator to be implemented by all object space |
| 1563 | // object iterators. |
| 1564 | // |
Leon Clarke | d91b9f7 | 2010-01-27 17:25:45 +0000 | [diff] [blame] | 1565 | // NOTE: The space-specific object iterators also implement their own next() |
| 1566 | // method, which is used to avoid virtual function calls when |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1567 | // iterating over a specific space. |
| 1568 | |
| 1569 | class ObjectIterator : public Malloced { |
| 1570 | public: |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1571 | virtual ~ObjectIterator() {} |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1572 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1573 | virtual HeapObject* next_object() = 0; |
| 1574 | }; |
| 1575 | |
| 1576 | |
| 1577 | // ----------------------------------------------------------------------------- |
| 1578 | // Heap object iterator in new/old/map spaces. |
| 1579 | // |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1580 | // A HeapObjectIterator iterates objects from the bottom of the given space |
| 1581 | // to its top or from the bottom of the given page to its top. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1582 | // |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1583 | // If objects are allocated in the page during iteration the iterator may |
| 1584 | // or may not iterate over those objects. The caller must create a new |
| 1585 | // iterator in order to be sure to visit these new objects. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1586 | class HeapObjectIterator : public ObjectIterator { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1587 | public: |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1588 | // Creates a new object iterator in a given space. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1589 | explicit HeapObjectIterator(PagedSpace* space); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1590 | explicit HeapObjectIterator(Page* page); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1591 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1592 | // Advance to the next object, skipping free spaces and other fillers and |
| 1593 | // skipping the special garbage section of which there is one per space. |
| 1594 | // Returns NULL when the iteration has ended. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1595 | inline HeapObject* Next(); |
| 1596 | inline HeapObject* next_object() override; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1597 | |
| 1598 | private: |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1599 | enum PageMode { kOnePageOnly, kAllPagesInSpace }; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1600 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1601 | Address cur_addr_; // Current iteration point. |
| 1602 | Address cur_end_; // End iteration point. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1603 | PagedSpace* space_; |
| 1604 | PageMode page_mode_; |
Leon Clarke | d91b9f7 | 2010-01-27 17:25:45 +0000 | [diff] [blame] | 1605 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1606 | // Fast (inlined) path of next(). |
| 1607 | inline HeapObject* FromCurrentPage(); |
Leon Clarke | d91b9f7 | 2010-01-27 17:25:45 +0000 | [diff] [blame] | 1608 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1609 | // Slow path of next(), goes into the next page. Returns false if the |
| 1610 | // iteration has ended. |
| 1611 | bool AdvanceToNextPage(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1612 | |
| 1613 | // Initializes fields. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1614 | inline void Initialize(PagedSpace* owner, Address start, Address end, |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1615 | PageMode mode); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1616 | }; |
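// Illustrative usage sketch (assuming |space| is a PagedSpace*): walking all
// objects in the space until Next() signals the end with NULL:
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // visit |obj|
//   }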
| 1617 | |
| 1618 | |
| 1619 | // ----------------------------------------------------------------------------- |
| 1620 | // A PageIterator iterates the pages in a paged space. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1621 | |
| 1622 | class PageIterator BASE_EMBEDDED { |
| 1623 | public: |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1624 | explicit inline PageIterator(PagedSpace* space); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1625 | |
| 1626 | inline bool has_next(); |
| 1627 | inline Page* next(); |
| 1628 | |
| 1629 | private: |
| 1630 | PagedSpace* space_; |
| 1631 | Page* prev_page_; // Previous page returned. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1632 | // Next page that will be returned. Cached here so that we can use this |
| 1633 | // iterator for operations that deallocate pages. |
| 1634 | Page* next_page_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1635 | }; |
| 1636 | |
| 1637 | |
| 1638 | // ----------------------------------------------------------------------------- |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1639 | // A space has a circular list of pages. The next page can be accessed via |
| 1640 | // Page::next_page() call. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1641 | |
| 1642 | // An abstraction of allocation and relocation pointers in a page-structured |
| 1643 | // space. |
| 1644 | class AllocationInfo { |
| 1645 | public: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1646 | AllocationInfo() : top_(nullptr), limit_(nullptr) {} |
| 1647 | AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {} |
| 1648 | |
| 1649 | void Reset(Address top, Address limit) { |
| 1650 | set_top(top); |
| 1651 | set_limit(limit); |
| 1652 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1653 | |
| 1654 | INLINE(void set_top(Address top)) { |
| 1655 | SLOW_DCHECK(top == NULL || |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1656 | (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1657 | top_ = top; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1658 | } |
| 1659 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1660 | INLINE(Address top()) const { |
| 1661 | SLOW_DCHECK(top_ == NULL || |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1662 | (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1663 | return top_; |
| 1664 | } |
| 1665 | |
| 1666 | Address* top_address() { return &top_; } |
| 1667 | |
| 1668 | INLINE(void set_limit(Address limit)) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1669 | limit_ = limit; |
| 1670 | } |
| 1671 | |
| 1672 | INLINE(Address limit()) const { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1673 | return limit_; |
| 1674 | } |
| 1675 | |
| 1676 | Address* limit_address() { return &limit_; } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1677 | |
| 1678 | #ifdef DEBUG |
| 1679 | bool VerifyPagedAllocation() { |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1680 | return (Page::FromAllocationAreaAddress(top_) == |
| 1681 | Page::FromAllocationAreaAddress(limit_)) && |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1682 | (top_ <= limit_); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1683 | } |
| 1684 | #endif |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1685 | |
| 1686 | private: |
| 1687 | // Current allocation top. |
| 1688 | Address top_; |
| 1689 | // Current allocation limit. |
| 1690 | Address limit_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1691 | }; |
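// Illustrative sketch (simplified; real allocations go through the owning
// space, and |area_start|, |area_end|, |size_in_bytes| are hypothetical):
// bump-pointer allocation advances top() towards limit():
//
//   AllocationInfo info(area_start, area_end);
//   Address object = info.top();
//   if (object + size_in_bytes <= info.limit()) {
//     info.set_top(object + size_in_bytes);  // |object| is now allocated
//   }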
| 1692 | |
| 1693 | |
| 1694 | // An abstraction of the accounting statistics of a page-structured space. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1695 | // |
| 1696 | // The stats are only set by functions that ensure they stay balanced. These |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1697 | // functions increase or decrease one of the non-capacity stats in conjunction |
| 1698 | // with capacity, or else they always balance increases and decreases to the |
| 1699 | // non-capacity stats. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1700 | class AllocationStats BASE_EMBEDDED { |
| 1701 | public: |
| 1702 | AllocationStats() { Clear(); } |
| 1703 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1704 | // Zero out all the allocation statistics (i.e., no capacity). |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1705 | void Clear() { |
| 1706 | capacity_ = 0; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1707 | max_capacity_ = 0; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1708 | size_ = 0; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1709 | } |
| 1710 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1711 | void ClearSize() { size_ = capacity_; } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1712 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1713 | // Accessors for the allocation statistics. |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 1714 | intptr_t Capacity() { return capacity_; } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1715 | intptr_t MaxCapacity() { return max_capacity_; } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1716 | intptr_t Size() { |
| 1717 | CHECK_GE(size_, 0); |
| 1718 | return size_; |
| 1719 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1720 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1721 | // Grow the space by adding available bytes. They are initially marked as |
| 1722 | // being in use (part of the size), but will normally be immediately freed, |
| 1723 | // putting them on the free list and removing them from size_. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1724 | void ExpandSpace(int size_in_bytes) { |
| 1725 | capacity_ += size_in_bytes; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1726 | size_ += size_in_bytes; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1727 | if (capacity_ > max_capacity_) { |
| 1728 | max_capacity_ = capacity_; |
| 1729 | } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1730 | CHECK(size_ >= 0); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1731 | } |
| 1732 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1733 | // Shrink the space by removing available bytes. Since shrinking is done |
| 1734 | // during sweeping, bytes have been marked as being in use (part of the size) |
| 1735 | // and are hereby freed. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1736 | void ShrinkSpace(int size_in_bytes) { |
| 1737 | capacity_ -= size_in_bytes; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1738 | size_ -= size_in_bytes; |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1739 | CHECK_GE(size_, 0); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1740 | } |
| 1741 | |
| 1742 | // Allocate from available bytes (available -> size). |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 1743 | void AllocateBytes(intptr_t size_in_bytes) { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1744 | size_ += size_in_bytes; |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1745 | CHECK_GE(size_, 0); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1746 | } |
| 1747 | |
| 1748 | // Free allocated bytes, making them available (size -> available). |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 1749 | void DeallocateBytes(intptr_t size_in_bytes) { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1750 | size_ -= size_in_bytes; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1751 | CHECK_GE(size_, 0); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1752 | } |
| 1753 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1754 | // Merge {other} into {this}. |
| 1755 | void Merge(const AllocationStats& other) { |
| 1756 | capacity_ += other.capacity_; |
| 1757 | size_ += other.size_; |
| 1758 | if (other.max_capacity_ > max_capacity_) { |
| 1759 | max_capacity_ = other.max_capacity_; |
| 1760 | } |
| 1761 | CHECK_GE(size_, 0); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1762 | } |
| 1763 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1764 | void DecreaseCapacity(intptr_t size_in_bytes) { |
| 1765 | capacity_ -= size_in_bytes; |
| 1766 | CHECK_GE(capacity_, 0); |
| 1767 | CHECK_GE(capacity_, size_); |
| 1768 | } |
| 1769 | |
| 1770 | void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; } |
| 1771 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1772 | private: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1773 | // |capacity_|: The number of object-area bytes (i.e., not including page |
| 1774 | // bookkeeping structures) currently in the space. |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 1775 | intptr_t capacity_; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1776 | |
| 1777 | // |max_capacity_|: The maximum capacity ever observed. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1778 | intptr_t max_capacity_; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1779 | |
| 1780 | // |size_|: The number of allocated bytes. |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 1781 | intptr_t size_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1782 | }; |
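// Illustrative sketch (hypothetical numbers): growing a space by one page and
// then freeing and reusing part of it keeps the stats balanced:
//
//   AllocationStats stats;
//   stats.ExpandSpace(Page::kAllocatableMemory);  // capacity_ == size_
//   stats.DeallocateBytes(4096);                  // 4 KB goes back to the free list
//   stats.AllocateBytes(4096);                    // and is allocated again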
| 1783 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1784 | // A free list maintaining free blocks of memory. The free list is organized in |
| 1785 | // a way to encourage objects allocated around the same time to be near each |
| 1786 | // other. The normal way to allocate is intended to be by bumping a 'top' |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1787 | // pointer until it hits a 'limit' pointer. When the limit is hit we need to |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1788 | // find a new space to allocate from. This is done with the free list, which is |
| 1789 | // divided up into rough categories to cut down on waste. Having finer |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1790 | // categories would scatter allocation more. |
| 1791 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1792 | // The free list is organized in categories as follows: |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1793 | // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for |
| 1794 | // allocation when the categories >= small do not have entries anymore. |
| 1795 | // 11-31 words (tiny): The tiny blocks are only used for allocation when the |
| 1796 | // categories >= small do not have entries anymore. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1797 | // 32-255 words (small): Used for allocating free space between 1-31 words in |
| 1798 | // size. |
| 1799 | // 256-2047 words (medium): Used for allocating free space between 32-255 words |
| 1800 | // in size. |
| 1801 | // 2048-16383 words (large): Used for allocating free space between 256-2047 |
| 1802 | // words in size. |
| 1803 | // At least 16384 words (huge): This list is for objects of 2048 words or |
| 1804 | // larger. Empty pages are also added to this list. |
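// For example, a freed block of 100 words is put on the small list (32-255
// words) and serves allocations of up to 31 words, while a freed block of
// 3000 words goes on the large list and serves allocations of up to 2047
// words.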
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1805 | class FreeList { |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1806 | public: |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1807 | // This method returns how much memory can be allocated after freeing |
| 1808 | // maximum_freed memory. |
| 1809 | static inline int GuaranteedAllocatable(int maximum_freed) { |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1810 | if (maximum_freed <= kTiniestListMax) { |
| 1811 | // Since we are not iterating over all list entries, we cannot guarantee |
| 1812 | // that we can find the maximum freed block in that free list. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1813 | return 0; |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1814 | } else if (maximum_freed <= kTinyListMax) { |
| 1815 | return kTinyAllocationMax; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1816 | } else if (maximum_freed <= kSmallListMax) { |
| 1817 | return kSmallAllocationMax; |
| 1818 | } else if (maximum_freed <= kMediumListMax) { |
| 1819 | return kMediumAllocationMax; |
| 1820 | } else if (maximum_freed <= kLargeListMax) { |
| 1821 | return kLargeAllocationMax; |
| 1822 | } |
| 1823 | return maximum_freed; |
| 1824 | } |
| 1825 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1826 | explicit FreeList(PagedSpace* owner); |
| 1827 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1828 | // Adds a node on the free list. The block of size {size_in_bytes} starting |
| 1829 | // at {start} is placed on the free list. The return value is the number of |
| 1830 | // bytes that were not added to the free list because the freed memory block |
| 1831 | // was too small. Bookkeeping information will be written to the block, i.e., |
| 1832 | // its contents will be destroyed. The start address should be word aligned, |
| 1833 | // and the size should be a non-zero multiple of the word size. |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1834 | int Free(Address start, int size_in_bytes, FreeMode mode); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1835 | |
| 1836 | // Allocate a block of size {size_in_bytes} from the free list. The block is |
| 1837 | // uninitialized. A failure is returned if no block is available. The size
| 1838 | // should be a non-zero multiple of the word size. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1839 | MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); |
| 1840 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1841 | // Clear the free list. |
| 1842 | void Reset(); |
| 1843 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1844 | void ResetStats() { |
| 1845 | wasted_bytes_.SetValue(0); |
| 1846 | ForAllFreeListCategories( |
| 1847 | [](FreeListCategory* category) { category->ResetStats(); }); |
| 1848 | } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1849 | |
| 1850 | // Return the number of bytes available on the free list. |
| 1851 | intptr_t Available() { |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 1852 | intptr_t available = 0; |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1853 | ForAllFreeListCategories([&available](FreeListCategory* category) { |
| 1854 | available += category->available(); |
| 1855 | }); |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 1856 | return available; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1857 | } |
| 1858 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1859 | bool IsEmpty() { |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1860 | bool empty = true; |
| 1861 | ForAllFreeListCategories([&empty](FreeListCategory* category) { |
| 1862 | if (!category->is_empty()) empty = false; |
| 1863 | }); |
| 1864 | return empty; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1865 | } |
| 1866 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1867 | // Used after booting the VM. |
| 1868 | void RepairLists(Heap* heap); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1869 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1870 | intptr_t EvictFreeListItems(Page* page); |
| 1871 | bool ContainsPageFreeListItems(Page* page); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1872 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1873 | PagedSpace* owner() { return owner_; } |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1874 | intptr_t wasted_bytes() { return wasted_bytes_.Value(); } |
| 1875 | |
| 1876 | template <typename Callback> |
| 1877 | void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) { |
| 1878 | FreeListCategory* current = categories_[type]; |
| 1879 | while (current != nullptr) { |
| 1880 | FreeListCategory* next = current->next(); |
| 1881 | callback(current); |
| 1882 | current = next; |
| 1883 | } |
| 1884 | } |
| 1885 | |
| 1886 | template <typename Callback> |
| 1887 | void ForAllFreeListCategories(Callback callback) { |
| 1888 | for (int i = kFirstCategory; i < kNumberOfCategories; i++) { |
| 1889 | ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback); |
| 1890 | } |
| 1891 | } |
| 1892 | |
| 1893 | bool AddCategory(FreeListCategory* category); |
| 1894 | void RemoveCategory(FreeListCategory* category); |
| 1895 | void PrintCategories(FreeListCategoryType type); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1896 | |
| 1897 | #ifdef DEBUG |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1898 | intptr_t SumFreeLists(); |
| 1899 | bool IsVeryLong(); |
| 1900 | #endif |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1901 | |
| 1902 | private: |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1903 | class FreeListCategoryIterator { |
| 1904 | public: |
| 1905 | FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type) |
| 1906 | : current_(free_list->categories_[type]) {} |
| 1907 | |
| 1908 | bool HasNext() { return current_ != nullptr; } |
| 1909 | |
| 1910 | FreeListCategory* Next() { |
| 1911 | DCHECK(HasNext()); |
| 1912 | FreeListCategory* tmp = current_; |
| 1913 | current_ = current_->next(); |
| 1914 | return tmp; |
| 1915 | } |
| 1916 | |
| 1917 | private: |
| 1918 | FreeListCategory* current_; |
| 1919 | }; |
| 1920 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1921 | // The size range of blocks, in bytes. |
| 1922 | static const int kMinBlockSize = 3 * kPointerSize; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1923 | static const int kMaxBlockSize = Page::kAllocatableMemory; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1924 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1925 | static const int kTiniestListMax = 0xa * kPointerSize; |
| 1926 | static const int kTinyListMax = 0x1f * kPointerSize; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1927 | static const int kSmallListMax = 0xff * kPointerSize; |
| 1928 | static const int kMediumListMax = 0x7ff * kPointerSize; |
| 1929 | static const int kLargeListMax = 0x3fff * kPointerSize; |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1930 | static const int kTinyAllocationMax = kTiniestListMax; |
| 1931 | static const int kSmallAllocationMax = kTinyListMax; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1932 | static const int kMediumAllocationMax = kSmallListMax; |
| 1933 | static const int kLargeAllocationMax = kMediumListMax; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1934 | |
| 1935 | FreeSpace* FindNodeFor(int size_in_bytes, int* node_size); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1936 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1937 | // Walks all available categories for a given |type| and tries to retrieve |
| 1938 | // a node. Returns nullptr if the category is empty. |
| 1939 | FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size); |
| 1940 | |
| 1941 | // Tries to retrieve a node from the first category in a given |type|. |
| 1942 | // Returns nullptr if the category is empty. |
| 1943 | FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size, |
| 1944 | int minimum_size); |
| 1945 | |
| 1946 | // Searches a given |type| for a node of at least |minimum_size|. |
| 1947 | FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size, |
| 1948 | int minimum_size); |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 1949 | |
| 1950 | FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) { |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1951 | if (size_in_bytes <= kTiniestListMax) { |
| 1952 | return kTiniest; |
| 1953 | } else if (size_in_bytes <= kTinyListMax) { |
| 1954 | return kTiny; |
| 1955 | } else if (size_in_bytes <= kSmallListMax) { |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 1956 | return kSmall; |
| 1957 | } else if (size_in_bytes <= kMediumListMax) { |
| 1958 | return kMedium; |
| 1959 | } else if (size_in_bytes <= kLargeListMax) { |
| 1960 | return kLarge; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1961 | } |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 1962 | return kHuge; |
| 1963 | } |
| 1964 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1965 | // The tiny categories are not used for fast allocation. |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 1966 | FreeListCategoryType SelectFastAllocationFreeListCategoryType( |
| 1967 | size_t size_in_bytes) { |
| 1968 | if (size_in_bytes <= kSmallAllocationMax) { |
| 1969 | return kSmall; |
| 1970 | } else if (size_in_bytes <= kMediumAllocationMax) { |
| 1971 | return kMedium; |
| 1972 | } else if (size_in_bytes <= kLargeAllocationMax) { |
| 1973 | return kLarge; |
| 1974 | } |
| 1975 | return kHuge; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1976 | } |
| 1977 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1978 | FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; } |
| 1979 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1980 | PagedSpace* owner_; |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 1981 | base::AtomicNumber<intptr_t> wasted_bytes_; |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 1982 | FreeListCategory* categories_[kNumberOfCategories]; |
| 1983 | |
| 1984 | friend class FreeListCategory; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1985 | |
| 1986 | DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); |
| 1987 | }; |
| 1988 | |
| 1989 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1990 | class AllocationResult { |
| 1991 | public: |
| 1992 | // Implicit constructor from Object*. |
| 1993 | AllocationResult(Object* object) // NOLINT |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1994 | : object_(object) { |
| 1995 | // AllocationResults can't return Smis, which are used to represent |
| 1996 | // failure and the space to retry in. |
| 1997 | CHECK(!object->IsSmi()); |
| 1998 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1999 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 2000 | AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {} |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2001 | |
| 2002 | static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) { |
| 2003 | return AllocationResult(space); |
| 2004 | } |
| 2005 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 2006 | inline bool IsRetry() { return object_->IsSmi(); } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2007 | |
| 2008 | template <typename T> |
| 2009 | bool To(T** obj) { |
| 2010 | if (IsRetry()) return false; |
| 2011 | *obj = T::cast(object_); |
| 2012 | return true; |
| 2013 | } |
| 2014 | |
| 2015 | Object* ToObjectChecked() { |
| 2016 | CHECK(!IsRetry()); |
| 2017 | return object_; |
| 2018 | } |
| 2019 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2020 | inline AllocationSpace RetrySpace(); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2021 | |
| 2022 | private: |
| 2023 | explicit AllocationResult(AllocationSpace space) |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 2024 | : object_(Smi::FromInt(static_cast<int>(space))) {} |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2025 | |
| 2026 | Object* object_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2027 | }; |
| 2028 | |
| 2029 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 2030 | STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize); |
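| |
| | // A minimal usage sketch (the caller, {space}, and {size_in_bytes} are
| | // hypothetical; only the AllocationResult API shown here is defined in this
| | // file):
| | //
| | //   AllocationResult result = space->AllocateRawUnaligned(size_in_bytes);
| | //   HeapObject* object = nullptr;
| | //   if (result.To(&object)) {
| | //     // Success: {object} points to uninitialized memory of the requested
| | //     // size.
| | //   } else {
| | //     // Failure: a GC in result.RetrySpace() is needed before retrying.
| | //   }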
| 2031 | |
| 2032 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2033 | // LocalAllocationBuffer represents a linear allocation area that is created |
| 2034 | // from a given {AllocationResult} and can be used to allocate memory without |
| 2035 | // synchronization. |
| 2036 | // |
| 2037 | // The buffer is properly closed upon destruction and reassignment. |
| 2038 | // Example: |
| 2039 | // { |
| 2040 | // AllocationResult result = ...; |
| 2041 | //   LocalAllocationBuffer a = LocalAllocationBuffer::FromResult(heap, result, size);
| 2042 | // LocalAllocationBuffer b = a; |
| 2043 | // CHECK(!a.IsValid()); |
| 2044 | // CHECK(b.IsValid()); |
| 2045 | // // {a} is invalid now and cannot be used for further allocations. |
| 2046 | // } |
| 2047 | // // Since {b} went out of scope, the LAB is closed, resulting in creating a |
| 2048 | // // filler object for the remaining area. |
| 2049 | class LocalAllocationBuffer { |
| 2050 | public: |
| 2051 | // Indicates that a buffer cannot be used for allocations anymore. Can result |
| 2052 | // from either reassigning a buffer, or trying to construct it from an |
| 2053 | // invalid {AllocationResult}. |
| 2054 | static inline LocalAllocationBuffer InvalidBuffer(); |
| 2055 | |
| 2056 | // Creates a new LAB from a given {AllocationResult}. Results in |
| 2057 | // InvalidBuffer if the result indicates a retry. |
| 2058 | static inline LocalAllocationBuffer FromResult(Heap* heap, |
| 2059 | AllocationResult result, |
| 2060 | intptr_t size); |
| 2061 | |
| 2062 | ~LocalAllocationBuffer() { Close(); } |
| 2063 | |
| 2064 | // Convert to C++11 move-semantics once allowed by the style guide. |
| 2065 | LocalAllocationBuffer(const LocalAllocationBuffer& other); |
| 2066 | LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other); |
| 2067 | |
| 2068 | MUST_USE_RESULT inline AllocationResult AllocateRawAligned( |
| 2069 | int size_in_bytes, AllocationAlignment alignment); |
| 2070 | |
| 2071 | inline bool IsValid() { return allocation_info_.top() != nullptr; } |
| 2072 | |
| 2073 | // Try to merge LABs, which is only possible when they are adjacent in memory. |
| 2074 | // Returns true if the merge was successful, false otherwise. |
| 2075 | inline bool TryMerge(LocalAllocationBuffer* other); |
| 2076 | |
| 2077 | private: |
| 2078 | LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info); |
| 2079 | |
| 2080 | void Close(); |
| 2081 | |
| 2082 | Heap* heap_; |
| 2083 | AllocationInfo allocation_info_; |
| 2084 | }; |
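| |
| | // A sketch of allocating through a LAB (the surrounding values {heap},
| | // {space}, {kLabSize}, and {object_size} are hypothetical; kWordAligned is
| | // the default word alignment):
| | //
| | //   AllocationResult result = space->AllocateRawUnaligned(kLabSize);
| | //   LocalAllocationBuffer lab =
| | //       LocalAllocationBuffer::FromResult(heap, result, kLabSize);
| | //   if (lab.IsValid()) {
| | //     AllocationResult object =
| | //         lab.AllocateRawAligned(object_size, kWordAligned);
| | //   }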
| 2085 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2086 | class PagedSpace : public Space { |
| 2087 | public: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2088 | static const intptr_t kCompactionMemoryWanted = 500 * KB; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2089 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2090 | // Creates a space with an id. |
| 2091 | PagedSpace(Heap* heap, AllocationSpace id, Executability executable); |
| 2092 | |
| 2093 | ~PagedSpace() override { TearDown(); } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2094 | |
| 2095 | // Set up the space using the given address range of virtual memory (from |
| 2096 | // the memory allocator's initial chunk) if possible. If the block of |
| 2097 | // addresses is not big enough to contain a single page-aligned page, a |
| 2098 | // fresh chunk will be allocated. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2099 | bool SetUp(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2100 | |
| 2101 | // Returns true if the space has been successfully set up and not |
| 2102 | // subsequently torn down. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2103 | bool HasBeenSetUp(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2104 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2105 | // Checks whether an object/address is in this space. |
| 2106 | inline bool Contains(Address a); |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2107 | inline bool Contains(Object* o); |
| 2108 | bool ContainsSlow(Address addr); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2109 | |
| 2110 | // Given an address occupied by a live object, return that object if it is |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2111 | // in this space, or a Smi if it is not. The implementation iterates over |
| 2112 | // objects in the page containing the address, so the cost is linear in the
| 2113 | // number of objects in the page. It may be slow. |
| 2114 | Object* FindObject(Address addr); |
| 2115 | |
| 2116 | // During boot the free_space_map is created, and afterwards we may need |
| 2117 | // to write it into the free list nodes that were already created. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2118 | void RepairFreeListsAfterDeserialization(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2119 | |
Ben Murdoch | 85b7179 | 2012-04-11 18:30:58 +0100 | [diff] [blame] | 2120 | // Prepares for a mark-compact GC. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2121 | void PrepareForMarkCompact(); |
Ben Murdoch | 85b7179 | 2012-04-11 18:30:58 +0100 | [diff] [blame] | 2122 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2123 | // Current capacity without growing (Size() + Available()). |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 2124 | intptr_t Capacity() { return accounting_stats_.Capacity(); } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2125 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2126 | // Approximate amount of physical memory committed for this space. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2127 | size_t CommittedPhysicalMemory() override; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2128 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2129 | void ResetFreeListStatistics(); |
| 2130 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2131 | // Sets the capacity, the available space and the wasted space to zero. |
| 2132 | // The stats are rebuilt during sweeping by adding each page to the |
| 2133 | // capacity and the size when it is encountered. As free spaces are |
| 2134 | // discovered during the sweeping they are subtracted from the size and added |
| 2135 | // to the available and wasted totals. |
| 2136 | void ClearStats() { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2137 | accounting_stats_.ClearSize(); |
| 2138 | free_list_.ResetStats(); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2139 | ResetFreeListStatistics(); |
| 2140 | } |
| 2141 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2142 | // Available bytes without growing. These are the bytes on the free list. |
| 2143 | // The bytes in the linear allocation area are not included in this total |
| 2144 | // because updating the stats would slow down allocation. New pages are |
| 2145 | // immediately added to the free list so they show up here. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2146 | intptr_t Available() override { return free_list_.Available(); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2147 | |
| 2148 | // Allocated bytes in this space. Garbage bytes that were not found due to |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2149 | // concurrent sweeping are counted as being allocated! The bytes in the |
| 2150 | // current linear allocation area (between top and limit) are also counted |
| 2151 | // here. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2152 | intptr_t Size() override { return accounting_stats_.Size(); } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2153 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2154 | // As size, but the bytes in lazily swept pages are estimated and the bytes |
| 2155 | // in the current linear allocation area are not included. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2156 | intptr_t SizeOfObjects() override; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2157 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2158 | // Wasted bytes in this space. These are just the bytes that were thrown away |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2159 | // due to being too small to use for allocation. |
| 2160 | virtual intptr_t Waste() { return free_list_.wasted_bytes(); } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2161 | |
| 2162 | // Returns the allocation pointer in this space. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2163 | Address top() { return allocation_info_.top(); } |
| 2164 | Address limit() { return allocation_info_.limit(); } |
| 2165 | |
| 2166 | // The allocation top address. |
| 2167 | Address* allocation_top_address() { return allocation_info_.top_address(); } |
| 2168 | |
| 2169 | // The allocation limit address. |
| 2170 | Address* allocation_limit_address() { |
| 2171 | return allocation_info_.limit_address(); |
| 2172 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2173 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2174 | enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST }; |
| 2175 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2176 | // Allocate the requested number of bytes in the space if possible, return a |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2177 | // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going |
| 2178 | // to be manually updated later. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2179 | MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned( |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2180 | int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2181 | |
| 2182 | MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized( |
| 2183 | int size_in_bytes); |
| 2184 | |
| 2185 | // Allocate the requested number of bytes in the space with the given
| 2186 | // alignment if possible, return a failure object if not.
| 2187 | MUST_USE_RESULT inline AllocationResult AllocateRawAligned( |
| 2188 | int size_in_bytes, AllocationAlignment alignment); |
| 2189 | |
| 2190 | // Allocate the requested number of bytes in the space and consider allocation |
| 2191 | // alignment if needed. |
| 2192 | MUST_USE_RESULT inline AllocationResult AllocateRaw( |
| 2193 | int size_in_bytes, AllocationAlignment alignment); |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 2194 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2195 | // Give a block of memory to the space's free list. It might be added to |
| 2196 | // the free list or accounted as waste. |
| 2197 | // Returns the number of bytes that were added to the free list; bytes that
| 2198 | // were too small to reuse are accounted as waste.
| 2199 | int Free(Address start, int size_in_bytes) { |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2200 | int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2201 | accounting_stats_.DeallocateBytes(size_in_bytes); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2202 | return size_in_bytes - wasted; |
| 2203 | } |
Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 2204 | |
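| | // Same as Free(), but does not update the space's accounting stats; the
| | // block is freed with kDoNotLinkCategory instead of kLinkCategory.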
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2205 | int UnaccountedFree(Address start, int size_in_bytes) { |
| 2206 | int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory); |
| 2207 | return size_in_bytes - wasted; |
| 2208 | } |
| 2209 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2210 | void ResetFreeList() { free_list_.Reset(); } |
| 2211 | |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2212 | // Set space allocation info. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2213 | void SetTopAndLimit(Address top, Address limit) { |
| 2214 | DCHECK(top == limit || |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2215 | Page::FromAddress(top) == Page::FromAddress(limit - 1)); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2216 | MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2217 | allocation_info_.Reset(top, limit); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2218 | } |
| 2219 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2220 | // Empty the allocation info, returning the unused area to the free list.
| 2221 | void EmptyAllocationInfo() { |
| 2222 | // Mark the old linear allocation area with a free space map so it can be |
| 2223 | // skipped when scanning the heap. |
| 2224 | int old_linear_size = static_cast<int>(limit() - top()); |
| 2225 | Free(top(), old_linear_size); |
| 2226 | SetTopAndLimit(NULL, NULL); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2227 | } |
| 2228 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2229 | void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); } |
| 2230 | |
| 2231 | void IncreaseCapacity(int size); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2232 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2233 | // Releases an unused page and shrinks the space. |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2234 | void ReleasePage(Page* page); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2235 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2236 | // The dummy page that anchors the linked list of pages. |
| 2237 | Page* anchor() { return &anchor_; } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2238 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2239 | #ifdef VERIFY_HEAP |
| 2240 | // Verify integrity of this space. |
| 2241 | virtual void Verify(ObjectVisitor* visitor); |
| 2242 | |
| 2243 | // Overridden by subclasses to verify space-specific object |
| 2244 | // properties (e.g., only maps or free-list nodes are in map space). |
| 2245 | virtual void VerifyObject(HeapObject* obj) {} |
| 2246 | #endif |
| 2247 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2248 | #ifdef DEBUG |
| 2249 | // Print meta info and objects in this space. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2250 | void Print() override; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2251 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2252 | // Reports statistics for the space |
| 2253 | void ReportStatistics(); |
| 2254 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2255 | // Report code object related statistics |
| 2256 | void CollectCodeStatistics(); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2257 | static void ReportCodeStatistics(Isolate* isolate); |
| 2258 | static void ResetCodeStatistics(Isolate* isolate); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2259 | #endif |
| 2260 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2261 | Page* FirstPage() { return anchor_.next_page(); } |
| 2262 | Page* LastPage() { return anchor_.prev_page(); } |
| 2263 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2264 | void EvictEvacuationCandidatesFromLinearAllocationArea(); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2265 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2266 | bool CanExpand(size_t size); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2267 | |
| 2268 | // Returns the number of total pages in this space. |
| 2269 | int CountTotalPages(); |
| 2270 | |
| 2271 | // Return size of allocatable area on a page in this space. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2272 | inline int AreaSize() { return area_size_; } |
| 2273 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2274 | virtual bool is_local() { return false; } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2275 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2276 | // Merges {other} into the current space. Note that this modifies {other}, |
| 2277 | // e.g., removes its bump pointer area and resets statistics. |
| 2278 | void MergeCompactionSpace(CompactionSpace* other); |
| 2279 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2280 | // Refills the free list from the corresponding free list filled by the |
| 2281 | // sweeper. |
| 2282 | virtual void RefillFreeList(); |
Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 2283 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2284 | FreeList* free_list() { return &free_list_; } |
| 2285 | |
| 2286 | base::Mutex* mutex() { return &space_mutex_; } |
| 2287 | |
| 2288 | inline void UnlinkFreeListCategories(Page* page); |
| 2289 | inline intptr_t RelinkFreeListCategories(Page* page); |
| 2290 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2291 | protected: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2292 | // PagedSpaces that should be included in snapshots have different, i.e., |
| 2293 | // smaller, initial pages. |
| 2294 | virtual bool snapshotable() { return true; } |
| 2295 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2296 | bool HasPages() { return anchor_.next_page() != &anchor_; } |
| 2297 | |
| 2298 | // Cleans up the space, frees all pages in this space except those belonging |
| 2299 | // to the initial chunk, uncommits addresses in the initial chunk. |
| 2300 | void TearDown(); |
| 2301 | |
| 2302 | // Expands the space by allocating a fixed number of pages. Returns false if |
| 2303 | // it cannot allocate the requested number of pages from the OS, or if the
| 2304 | // hard heap size limit has been hit.
| 2305 | bool Expand(); |
| 2306 | |
| 2307 | // Generic fast case allocation function that tries linear allocation at the |
| 2308 | // address denoted by top in allocation_info_. |
| 2309 | inline HeapObject* AllocateLinearly(int size_in_bytes); |
| 2310 | |
| 2311 | // Generic fast case allocation function that tries aligned linear allocation |
| 2312 | // at the address denoted by top in allocation_info_. Writes the aligned |
| 2313 | // allocation size, which includes the filler size, to size_in_bytes. |
| 2314 | inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes, |
| 2315 | AllocationAlignment alignment); |
| 2316 | |
| 2317 | // If sweeping is still in progress try to sweep unswept pages. If that is |
| 2318 | // not successful, wait for the sweeper threads and re-try free-list |
| 2319 | // allocation. |
| 2320 | MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation( |
| 2321 | int size_in_bytes); |
| 2322 | |
| 2323 | // Slow path of AllocateRaw. This function is space-dependent. |
| 2324 | MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); |
| 2325 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2326 | int area_size_; |
| 2327 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2328 | // Accounting information for this space. |
| 2329 | AllocationStats accounting_stats_; |
| 2330 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2331 | // The dummy page that anchors the doubly linked list of pages.
| 2332 | Page anchor_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2333 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2334 | // The space's free list. |
| 2335 | FreeList free_list_; |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2336 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2337 | // Normal allocation information. |
| 2338 | AllocationInfo allocation_info_; |
| 2339 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2340 | // Mutex guarding any concurrent access to the space. |
| 2341 | base::Mutex space_mutex_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2342 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2343 | friend class IncrementalMarking; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2344 | friend class MarkCompactCollector; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2345 | friend class PageIterator; |
| 2346 | |
| 2347 | // Used in cctest. |
| 2348 | friend class HeapTester; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2349 | }; |
| 2350 | |
| 2351 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2352 | class NumberAndSizeInfo BASE_EMBEDDED { |
| 2353 | public: |
| 2354 | NumberAndSizeInfo() : number_(0), bytes_(0) {} |
| 2355 | |
| 2356 | int number() const { return number_; } |
| 2357 | void increment_number(int num) { number_ += num; } |
| 2358 | |
| 2359 | int bytes() const { return bytes_; } |
| 2360 | void increment_bytes(int size) { bytes_ += size; } |
| 2361 | |
| 2362 | void clear() { |
| 2363 | number_ = 0; |
| 2364 | bytes_ = 0; |
| 2365 | } |
| 2366 | |
| 2367 | private: |
| 2368 | int number_; |
| 2369 | int bytes_; |
| 2370 | }; |
| 2371 | |
| 2372 | |
| 2373 | // HistogramInfo class for recording a single "bar" of a histogram. This |
Ben Murdoch | 3fb3ca8 | 2011-12-02 17:19:32 +0000 | [diff] [blame] | 2374 | // class is used for collecting statistics to print to the log file. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2375 | class HistogramInfo : public NumberAndSizeInfo { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2376 | public: |
| 2377 | HistogramInfo() : NumberAndSizeInfo() {} |
| 2378 | |
| 2379 | const char* name() { return name_; } |
| 2380 | void set_name(const char* name) { name_ = name; } |
| 2381 | |
| 2382 | private: |
| 2383 | const char* name_; |
| 2384 | }; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2385 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2386 | enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 }; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2387 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2388 | // ----------------------------------------------------------------------------- |
| 2389 | // SemiSpace in young generation |
| 2390 | // |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2391 | // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks. |
| 2392 | // The mark-compact collector uses the memory of the first page in the from |
| 2393 | // space as a marking stack when tracing live objects. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2394 | class SemiSpace : public Space { |
| 2395 | public: |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2396 | static void Swap(SemiSpace* from, SemiSpace* to); |
| 2397 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2398 | SemiSpace(Heap* heap, SemiSpaceId semispace) |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2399 | : Space(heap, NEW_SPACE, NOT_EXECUTABLE), |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2400 | current_capacity_(0), |
| 2401 | maximum_capacity_(0), |
| 2402 | minimum_capacity_(0), |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2403 | age_mark_(nullptr), |
| 2404 | committed_(false), |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2405 | id_(semispace), |
| 2406 | anchor_(this), |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2407 | current_page_(nullptr) {} |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2408 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2409 | inline bool Contains(HeapObject* o); |
| 2410 | inline bool Contains(Object* o); |
| 2411 | inline bool ContainsSlow(Address a); |
| 2412 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2413 | void SetUp(int initial_capacity, int maximum_capacity); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2414 | void TearDown(); |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2415 | bool HasBeenSetUp() { return maximum_capacity_ != 0; } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2416 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2417 | bool Commit(); |
| 2418 | bool Uncommit(); |
| 2419 | bool is_committed() { return committed_; } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2420 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2421 | // Grow the semispace to the new capacity. The new capacity requested must |
| 2422 | // be larger than the current capacity and less than the maximum capacity. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2423 | bool GrowTo(int new_capacity); |
| 2424 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2425 | // Shrinks the semispace to the new capacity. The new capacity requested |
| 2426 | // must be more than the amount of used memory in the semispace and less |
| 2427 | // than the current capacity. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2428 | bool ShrinkTo(int new_capacity); |
| 2429 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2430 | // Returns the start address of the first page of the space. |
| 2431 | Address space_start() { |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2432 | DCHECK_NE(anchor_.next_page(), anchor()); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2433 | return anchor_.next_page()->area_start(); |
| 2434 | } |
| 2435 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 2436 | Page* first_page() { return anchor_.next_page(); } |
| 2437 | Page* current_page() { return current_page_; } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2438 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2439 | // Returns one past the end address of the space. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2440 | Address space_end() { return anchor_.prev_page()->area_end(); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2441 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2442 | // Returns the start address of the current page of the space. |
| 2443 | Address page_low() { return current_page_->area_start(); } |
| 2444 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2445 | // Returns one past the end address of the current page of the space. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2446 | Address page_high() { return current_page_->area_end(); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2447 | |
| 2448 | bool AdvancePage() { |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 2449 | Page* next_page = current_page_->next_page(); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2450 | if (next_page == anchor()) return false; |
| 2451 | current_page_ = next_page; |
| 2452 | return true; |
| 2453 | } |
| 2454 | |
| 2455 | // Resets the space to using the first page. |
| 2456 | void Reset(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2457 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 2458 | bool ReplaceWithEmptyPage(Page* page); |
| 2459 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2460 | // Age mark accessors. |
| 2461 | Address age_mark() { return age_mark_; } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2462 | void set_age_mark(Address mark); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2463 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2464 | // Returns the current capacity of the semispace. |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2465 | int current_capacity() { return current_capacity_; } |
| 2466 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2467 | // Returns the maximum capacity of the semispace. |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2468 | int maximum_capacity() { return maximum_capacity_; } |
| 2469 | |
| 2470 | // Returns the initial capacity of the semispace. |
| 2471 | int minimum_capacity() { return minimum_capacity_; } |
| 2472 | |
| 2473 | SemiSpaceId id() { return id_; } |
| 2474 | |
| 2475 | // Approximate amount of physical memory committed for this space. |
| 2476 | size_t CommittedPhysicalMemory() override; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2477 | |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 2478 | // If we don't have these here then SemiSpace will be abstract. However,
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2479 | // they should never be called: |
| 2480 | |
| 2481 | intptr_t Size() override { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2482 | UNREACHABLE(); |
| 2483 | return 0; |
| 2484 | } |
| 2485 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2486 | intptr_t SizeOfObjects() override { return Size(); } |
| 2487 | |
| 2488 | intptr_t Available() override { |
| 2489 | UNREACHABLE(); |
| 2490 | return 0; |
| 2491 | } |
| 2492 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2493 | #ifdef DEBUG |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2494 | void Print() override; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2495 | // Validate a range of addresses in a SemiSpace.
| 2496 | // The "from" address must be on a page prior to the "to" address, |
| 2497 | // in the linked page order, or it must be earlier on the same page. |
| 2498 | static void AssertValidRange(Address from, Address to); |
| 2499 | #else |
| 2500 | // Do nothing. |
| 2501 | inline static void AssertValidRange(Address from, Address to) {} |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2502 | #endif |
| 2503 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2504 | #ifdef VERIFY_HEAP |
| 2505 | virtual void Verify(); |
| 2506 | #endif |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2507 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2508 | private: |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 2509 | void RewindPages(Page* start, int num_pages); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2510 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 2511 | inline Page* anchor() { return &anchor_; } |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2512 | |
| 2513 | // Copies the flags into the masked positions on all pages in the space. |
| 2514 | void FixPagesFlags(intptr_t flags, intptr_t flag_mask); |
| 2515 | |
| 2516 | // The currently committed space capacity. |
| 2517 | int current_capacity_; |
| 2518 | |
| 2519 | // The maximum capacity that can be used by this space. |
| 2520 | int maximum_capacity_; |
| 2521 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2522 | // The minimum capacity for the space. A space cannot shrink below this size. |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2523 | int minimum_capacity_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2524 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2525 | // Used to govern object promotion during mark-compact collection. |
| 2526 | Address age_mark_; |
| 2527 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2528 | bool committed_; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2529 | SemiSpaceId id_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2530 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 2531 | Page anchor_; |
| 2532 | Page* current_page_; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2533 | |
| 2534 | friend class SemiSpaceIterator; |
| 2535 | friend class NewSpacePageIterator; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2536 | }; |
| 2537 | |
| 2538 | |
| 2539 | // A SemiSpaceIterator is an ObjectIterator that iterates over the active |
| 2540 | // semispace of the heap's new space. It iterates over the objects in the |
| 2541 | // semispace from a given start address (defaulting to the bottom of the |
| 2542 | // semispace) to the top of the semispace. New objects allocated after the |
| 2543 | // iterator is created are not iterated. |
| 2544 | class SemiSpaceIterator : public ObjectIterator { |
| 2545 | public: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2546 | // Create an iterator over the allocated objects in the given to-space. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2547 | explicit SemiSpaceIterator(NewSpace* space); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2548 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2549 | inline HeapObject* Next(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2550 | |
| 2551 | // Implementation of the ObjectIterator functions. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2552 | inline HeapObject* next_object() override; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2553 | |
| 2554 | private: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2555 | void Initialize(Address start, Address end); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2556 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2557 | // The current iteration point. |
| 2558 | Address current_; |
| 2559 | // The end of iteration. |
| 2560 | Address limit_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2561 | }; |
| 2562 | |
| 2563 | |
| 2564 | // ----------------------------------------------------------------------------- |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2565 | // A NewSpacePageIterator iterates the pages in a semi-space.
| 2566 | class NewSpacePageIterator BASE_EMBEDDED { |
| 2567 | public: |
| 2568 | // Make an iterator that runs over all pages in to-space. |
| 2569 | explicit inline NewSpacePageIterator(NewSpace* space); |
| 2570 | |
| 2571 | // Make an iterator that runs over all pages in the given semispace, |
| 2572 | // even those not used in allocation. |
| 2573 | explicit inline NewSpacePageIterator(SemiSpace* space); |
| 2574 | |
| 2575 | // Make an iterator that iterates from the page containing start
| 2576 | // to the page that contains limit in the same semispace. |
| 2577 | inline NewSpacePageIterator(Address start, Address limit); |
| 2578 | |
| 2579 | inline bool has_next(); |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 2580 | inline Page* next(); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2581 | |
| 2582 | private: |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 2583 | Page* prev_page_; // Previous page returned. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2584 | // Next page that will be returned. Cached here so that we can use this |
| 2585 | // iterator for operations that deallocate pages. |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 2586 | Page* next_page_; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2587 | // Last page returned. |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 2588 | Page* last_page_; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2589 | }; |
| 2590 | |
| 2591 | |
| 2592 | // ----------------------------------------------------------------------------- |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2593 | // The young generation space. |
| 2594 | // |
| 2595 | // The new space consists of a contiguous pair of semispaces. It simply |
| 2596 | // forwards most functions to the appropriate semispace. |
| 2597 | |
| 2598 | class NewSpace : public Space { |
| 2599 | public: |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2600 | explicit NewSpace(Heap* heap) |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2601 | : Space(heap, NEW_SPACE, NOT_EXECUTABLE), |
| 2602 | to_space_(heap, kToSpace), |
| 2603 | from_space_(heap, kFromSpace), |
| 2604 | reservation_(), |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2605 | pages_used_(0), |
| 2606 | top_on_previous_step_(0), |
| 2607 | allocated_histogram_(nullptr), |
| 2608 | promoted_histogram_(nullptr) {} |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2609 | |
| 2610 | inline bool Contains(HeapObject* o); |
| 2611 | inline bool ContainsSlow(Address a); |
| 2612 | inline bool Contains(Object* o); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2613 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2614 | bool SetUp(int initial_semispace_capacity, int max_semispace_capacity); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2615 | |
| 2616 | // Tears down the space. Heap memory was not allocated by the space, so it |
| 2617 | // is not deallocated here. |
| 2618 | void TearDown(); |
| 2619 | |
| 2620 | // True if the space has been set up but not torn down. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2621 | bool HasBeenSetUp() { |
| 2622 | return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2623 | } |
| 2624 | |
| 2625 | // Flip the pair of spaces. |
| 2626 | void Flip(); |
| 2627 | |
| 2628 | // Grow the capacity of the semispaces. Assumes that they are not at |
| 2629 | // their maximum capacity. |
| 2630 | void Grow(); |
| 2631 | |
| 2632 | // Shrink the capacity of the semispaces. |
| 2633 | void Shrink(); |
| 2634 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2635 | // Return the allocated bytes in the active semispace. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2636 | intptr_t Size() override { |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 2637 | return pages_used_ * Page::kAllocatableMemory + |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2638 | static_cast<int>(top() - to_space_.page_low()); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2639 | } |
| 2640 | |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 2641 | // The same, but returning an int. We have to have the one that returns |
| 2642 | // intptr_t because it is inherited, but if we know we are dealing with the |
| 2643 | // new space, which can't get as big as the other spaces, then this is useful:
| 2644 | int SizeAsInt() { return static_cast<int>(Size()); } |
Steve Block | 3ce2e20 | 2009-11-05 08:53:23 +0000 | [diff] [blame] | 2645 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2646 | // Return the allocatable capacity of a semispace. |
| 2647 | intptr_t Capacity() { |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2648 | SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity()); |
| 2649 | return (to_space_.current_capacity() / Page::kPageSize) * |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 2650 | Page::kAllocatableMemory; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2651 | } |
| 2652 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2653 | // Return the current size of a semispace, allocatable and non-allocatable |
| 2654 | // memory. |
| 2655 | intptr_t TotalCapacity() { |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2656 | DCHECK(to_space_.current_capacity() == from_space_.current_capacity()); |
| 2657 | return to_space_.current_capacity(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2658 | } |
Steve Block | 3ce2e20 | 2009-11-05 08:53:23 +0000 | [diff] [blame] | 2659 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2660 | // Committed memory for NewSpace is the committed memory of both semi-spaces |
| 2661 | // combined. |
| 2662 | intptr_t CommittedMemory() override { |
| 2663 | return from_space_.CommittedMemory() + to_space_.CommittedMemory(); |
Steve Block | 3ce2e20 | 2009-11-05 08:53:23 +0000 | [diff] [blame] | 2664 | } |
| 2665 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2666 | intptr_t MaximumCommittedMemory() override { |
| 2667 | return from_space_.MaximumCommittedMemory() + |
| 2668 | to_space_.MaximumCommittedMemory(); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2669 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2670 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2671 | // Approximate amount of physical memory committed for this space. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2672 | size_t CommittedPhysicalMemory() override; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2673 | |
| 2674 | // Return the available bytes without growing. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2675 | intptr_t Available() override { return Capacity() - Size(); } |
| 2676 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2677 | size_t AllocatedSinceLastGC() { |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2678 | bool seen_age_mark = false; |
| 2679 | Address age_mark = to_space_.age_mark(); |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 2680 | Page* current_page = to_space_.first_page(); |
| 2681 | Page* age_mark_page = Page::FromAddress(age_mark); |
| 2682 | Page* last_page = Page::FromAddress(top() - kPointerSize); |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2683 | if (age_mark_page == last_page) { |
| 2684 | if (top() - age_mark >= 0) { |
| 2685 | return top() - age_mark; |
| 2686 | } |
| 2687 | // Top was reset at some point, invalidating this metric. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2688 | return 0; |
| 2689 | } |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2690 | while (current_page != last_page) { |
| 2691 | if (current_page == age_mark_page) { |
| 2692 | seen_age_mark = true; |
| 2693 | break; |
| 2694 | } |
| 2695 | current_page = current_page->next_page(); |
| 2696 | } |
| 2697 | if (!seen_age_mark) { |
| 2698 | // Top was reset at some point, invalidating this metric. |
| 2699 | return 0; |
| 2700 | } |
| 2701 | intptr_t allocated = age_mark_page->area_end() - age_mark; |
| 2702 | DCHECK_EQ(current_page, age_mark_page); |
| 2703 | current_page = age_mark_page->next_page(); |
| 2704 | while (current_page != last_page) { |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 2705 | allocated += Page::kAllocatableMemory; |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2706 | current_page = current_page->next_page(); |
| 2707 | } |
| 2708 | allocated += top() - current_page->area_start(); |
| 2709 | DCHECK_LE(0, allocated); |
| 2710 | DCHECK_LE(allocated, Size()); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2711 | return static_cast<size_t>(allocated); |
| 2712 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2713 | |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame^] | 2714 | bool ReplaceWithEmptyPage(Page* page) { |
| 2715 | // This method is called after flipping the semispace. |
| 2716 | DCHECK(page->InFromSpace()); |
| 2717 | return from_space_.ReplaceWithEmptyPage(page); |
| 2718 | } |
| 2719 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2720 | // Return the maximum capacity of a semispace. |
| 2721 | int MaximumCapacity() { |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2722 | DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity()); |
| 2723 | return to_space_.maximum_capacity(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2724 | } |
| 2725 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2726 | bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); } |
| 2727 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2728 | // Returns the initial capacity of a semispace. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2729 | int InitialTotalCapacity() { |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2730 | DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity()); |
| 2731 | return to_space_.minimum_capacity(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2732 | } |
| 2733 | |
| 2734 | // Return the address of the allocation pointer in the active semispace. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2735 | Address top() { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2736 | DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top())); |
| 2737 | return allocation_info_.top(); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2738 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2739 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2740 | // Return the address of the allocation pointer limit in the active semispace. |
| 2741 | Address limit() { |
| 2742 | DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit())); |
| 2743 | return allocation_info_.limit(); |
| 2744 | } |
| 2745 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2746 | // Return the address of the first object in the active semispace. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2747 | Address bottom() { return to_space_.space_start(); } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2748 | |
| 2749 | // Get the age mark of the inactive semispace. |
| 2750 | Address age_mark() { return from_space_.age_mark(); } |
| 2751 | // Set the age mark in the active semispace. |
| 2752 | void set_age_mark(Address mark) { to_space_.set_age_mark(mark); } |
| 2753 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2754 | // The allocation top and limit address. |
| 2755 | Address* allocation_top_address() { return allocation_info_.top_address(); } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2756 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2757 | // The allocation limit address. |
| 2758 | Address* allocation_limit_address() { |
| 2759 | return allocation_info_.limit_address(); |
| 2760 | } |
| 2761 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2762 | MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned( |
| 2763 | int size_in_bytes, AllocationAlignment alignment)); |
| 2764 | |
| 2765 | MUST_USE_RESULT INLINE( |
| 2766 | AllocationResult AllocateRawUnaligned(int size_in_bytes)); |
| 2767 | |
| 2768 | MUST_USE_RESULT INLINE(AllocationResult AllocateRaw( |
| 2769 | int size_in_bytes, AllocationAlignment alignment)); |
| 2770 | |
| 2771 | MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized( |
| 2772 | int size_in_bytes, AllocationAlignment alignment); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2773 | |
| 2774 | // Reset the allocation pointer to the beginning of the active semispace. |
| 2775 | void ResetAllocationInfo(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2776 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2777 | // When inline allocation stepping is active, either because of incremental |
| 2778 | // marking, idle scavenge, or allocation statistics gathering, we 'interrupt' |
| 2779 | // inline allocation every once in a while. This is done by setting |
 | 2780 |   // allocation_info_.limit to be lower than the actual limit and increasing
| 2781 | // it in steps to guarantee that the observers are notified periodically. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2782 | void UpdateInlineAllocationLimit(int size_in_bytes); |
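  // A minimal sketch of the limit lowering (illustrative only, not code from
  // this header): with |step_size| bytes remaining until the next observer
  // step,
  //   Address new_limit = Min(actual_limit, top + step_size);
  // so an allocation that would cross |new_limit| takes the slow path, the
  // observers are notified, and the limit is raised again.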
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2783 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2784 | void DisableInlineAllocationSteps() { |
| 2785 | top_on_previous_step_ = 0; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2786 | UpdateInlineAllocationLimit(0); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2787 | } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2788 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2789 | // Allows observation of inline allocation. The observer->Step() method gets |
| 2790 | // called after every step_size bytes have been allocated (approximately). |
| 2791 | // This works by adjusting the allocation limit to a lower value and adjusting |
| 2792 | // it after each step. |
| 2793 | void AddAllocationObserver(AllocationObserver* observer) override; |
| 2794 | |
| 2795 | void RemoveAllocationObserver(AllocationObserver* observer) override; |
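  // Illustrative sketch of an observer (hypothetical subclass; the exact
  // AllocationObserver interface is assumed here, not declared in this class):
  //   class SamplingObserver : public AllocationObserver {
  //    public:
  //     explicit SamplingObserver(intptr_t step_size)
  //         : AllocationObserver(step_size) {}
  //     void Step(int bytes_allocated, Address soon_object,
  //               size_t size) override {
  //       // Record a sample roughly every step_size allocated bytes.
  //     }
  //   };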
| 2796 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2797 | // Get the extent of the inactive semispace (for use as a marking stack, |
| 2798 | // or to zap it). Notice: space-addresses are not necessarily on the |
| 2799 | // same page, so FromSpaceStart() might be above FromSpaceEnd(). |
| 2800 | Address FromSpacePageLow() { return from_space_.page_low(); } |
| 2801 | Address FromSpacePageHigh() { return from_space_.page_high(); } |
| 2802 | Address FromSpaceStart() { return from_space_.space_start(); } |
| 2803 | Address FromSpaceEnd() { return from_space_.space_end(); } |
| 2804 | |
| 2805 | // Get the extent of the active semispace's pages' memory. |
| 2806 | Address ToSpaceStart() { return to_space_.space_start(); } |
| 2807 | Address ToSpaceEnd() { return to_space_.space_end(); } |
| 2808 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2809 | inline bool ToSpaceContainsSlow(Address a); |
| 2810 | inline bool FromSpaceContainsSlow(Address a); |
| 2811 | inline bool ToSpaceContains(Object* o); |
| 2812 | inline bool FromSpaceContains(Object* o); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2813 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2814 |   // Try to switch the active semispace to a new, empty page.
| 2815 | // Returns false if this isn't possible or reasonable (i.e., there |
| 2816 | // are no pages, or the current page is already empty), or true |
| 2817 | // if successful. |
| 2818 | bool AddFreshPage(); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2819 | bool AddFreshPageSynchronized(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2820 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2821 | #ifdef VERIFY_HEAP |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2822 | // Verify the active semispace. |
| 2823 | virtual void Verify(); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2824 | #endif |
| 2825 | |
| 2826 | #ifdef DEBUG |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2827 | // Print the active semispace. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2828 | void Print() override { to_space_.Print(); } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2829 | #endif |
| 2830 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2831 | // Iterates the active semispace to collect statistics. |
| 2832 | void CollectStatistics(); |
| 2833 | // Reports previously collected statistics of the active semispace. |
| 2834 | void ReportStatistics(); |
| 2835 | // Clears previously collected statistics. |
| 2836 | void ClearHistograms(); |
| 2837 | |
| 2838 | // Record the allocation or promotion of a heap object. Note that we don't |
| 2839 | // record every single allocation, but only those that happen in the |
| 2840 | // to space during a scavenge GC. |
| 2841 | void RecordAllocation(HeapObject* obj); |
| 2842 | void RecordPromotion(HeapObject* obj); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2843 | |
 | 2844 |   // Return whether the operation succeeded.
| 2845 | bool CommitFromSpaceIfNeeded() { |
| 2846 | if (from_space_.is_committed()) return true; |
| 2847 | return from_space_.Commit(); |
| 2848 | } |
| 2849 | |
| 2850 | bool UncommitFromSpace() { |
| 2851 | if (!from_space_.is_committed()) return true; |
| 2852 | return from_space_.Uncommit(); |
| 2853 | } |
| 2854 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2855 | bool IsFromSpaceCommitted() { return from_space_.is_committed(); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2856 | |
| 2857 | SemiSpace* active_space() { return &to_space_; } |
| 2858 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2859 | void PauseAllocationObservers() override; |
| 2860 | void ResumeAllocationObservers() override; |
| 2861 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2862 | private: |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2863 | // Update allocation info to match the current to-space page. |
| 2864 | void UpdateAllocationInfo(); |
| 2865 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2866 | base::Mutex mutex_; |
| 2867 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2868 | // The semispaces. |
| 2869 | SemiSpace to_space_; |
| 2870 | SemiSpace from_space_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2871 | base::VirtualMemory reservation_; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2872 | int pages_used_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2873 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2874 | // Allocation pointer and limit for normal allocation and allocation during |
| 2875 | // mark-compact collection. |
| 2876 | AllocationInfo allocation_info_; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2877 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2878 | Address top_on_previous_step_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2879 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2880 | HistogramInfo* allocated_histogram_; |
| 2881 | HistogramInfo* promoted_histogram_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2882 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2883 | bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2884 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2885 | // If we are doing inline allocation in steps, this method performs the 'step' |
| 2886 | // operation. top is the memory address of the bump pointer at the last |
 | 2887 |   // inline allocation (i.e. it determines the number of bytes actually
| 2888 | // allocated since the last step.) new_top is the address of the bump pointer |
| 2889 | // where the next byte is going to be allocated from. top and new_top may be |
| 2890 | // different when we cross a page boundary or reset the space. |
| 2891 | void InlineAllocationStep(Address top, Address new_top, Address soon_object, |
| 2892 | size_t size); |
| 2893 | intptr_t GetNextInlineAllocationStepSize(); |
| 2894 | void StartNextInlineAllocationStep(); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2895 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2896 | friend class SemiSpaceIterator; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2897 | }; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2898 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2899 | class PauseAllocationObserversScope { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2900 | public: |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2901 | explicit PauseAllocationObserversScope(Heap* heap); |
| 2902 | ~PauseAllocationObserversScope(); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2903 | |
| 2904 | private: |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2905 | Heap* heap_; |
| 2906 | DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2907 | }; |
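// Illustrative usage (hypothetical call site, not from this header): pause
// observer notifications for the duration of a scope:
//   {
//     PauseAllocationObserversScope pause_observers(heap);
//     // Allocations here do not trigger AllocationObserver steps.
//   }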
| 2908 | |
| 2909 | // ----------------------------------------------------------------------------- |
| 2910 | // Compaction space that is used temporarily during compaction. |
| 2911 | |
| 2912 | class CompactionSpace : public PagedSpace { |
| 2913 | public: |
| 2914 | CompactionSpace(Heap* heap, AllocationSpace id, Executability executable) |
| 2915 | : PagedSpace(heap, id, executable) {} |
| 2916 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2917 | bool is_local() override { return true; } |
| 2918 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2919 | protected: |
| 2920 | // The space is temporary and not included in any snapshots. |
| 2921 | bool snapshotable() override { return false; } |
| 2922 | |
| 2923 | MUST_USE_RESULT HeapObject* SweepAndRetryAllocation( |
| 2924 | int size_in_bytes) override; |
| 2925 | }; |
| 2926 | |
| 2927 | |
| 2928 | // A collection of |CompactionSpace|s used by a single compaction task. |
| 2929 | class CompactionSpaceCollection : public Malloced { |
| 2930 | public: |
| 2931 | explicit CompactionSpaceCollection(Heap* heap) |
| 2932 | : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE), |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 2933 | code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {} |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2934 | |
| 2935 | CompactionSpace* Get(AllocationSpace space) { |
| 2936 | switch (space) { |
| 2937 | case OLD_SPACE: |
| 2938 | return &old_space_; |
| 2939 | case CODE_SPACE: |
| 2940 | return &code_space_; |
| 2941 | default: |
| 2942 | UNREACHABLE(); |
| 2943 | } |
| 2944 | UNREACHABLE(); |
| 2945 | return nullptr; |
| 2946 | } |
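  // Illustrative usage by a single compaction task (hypothetical call site,
  // not from this header):
  //   CompactionSpaceCollection compaction_spaces(heap);
  //   CompactionSpace* old_space = compaction_spaces.Get(OLD_SPACE);
  //   CompactionSpace* code_space = compaction_spaces.Get(CODE_SPACE);
  //   // Evacuate live objects into |old_space| / |code_space| ...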
| 2947 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2948 | private: |
| 2949 | CompactionSpace old_space_; |
| 2950 | CompactionSpace code_space_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2951 | }; |
| 2952 | |
| 2953 | |
| 2954 | // ----------------------------------------------------------------------------- |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2955 | // Old object space (includes both the old space for regular objects and the code space)
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2956 | |
| 2957 | class OldSpace : public PagedSpace { |
| 2958 | public: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2959 | // Creates an old space object. The constructor does not allocate pages |
 | 2960 |   // from the OS.
| 2961 | OldSpace(Heap* heap, AllocationSpace id, Executability executable) |
| 2962 | : PagedSpace(heap, id, executable) {} |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2963 | }; |
| 2964 | |
| 2965 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2966 | // For contiguous spaces, top should be in the space (or at the end) and limit |
| 2967 | // should be the end of the space. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2968 | #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \ |
| 2969 | SLOW_DCHECK((space).page_low() <= (info).top() && \ |
| 2970 | (info).top() <= (space).page_high() && \ |
| 2971 | (info).limit() <= (space).page_high()) |
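// Illustrative use (hypothetical call site, not from this header):
//   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
// asserts that the current top and limit lie within the active to-space page.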
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2972 | |
| 2973 | |
| 2974 | // ----------------------------------------------------------------------------- |
| 2975 | // Old space for all map objects |
| 2976 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2977 | class MapSpace : public PagedSpace { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2978 | public: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2979 | // Creates a map space object. |
| 2980 | MapSpace(Heap* heap, AllocationSpace id) |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 2981 | : PagedSpace(heap, id, NOT_EXECUTABLE) {} |
Ben Murdoch | 85b7179 | 2012-04-11 18:30:58 +0100 | [diff] [blame] | 2982 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2983 | int RoundSizeDownToObjectAlignment(int size) override { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2984 | if (base::bits::IsPowerOfTwo32(Map::kSize)) { |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2985 | return RoundDown(size, Map::kSize); |
| 2986 | } else { |
| 2987 | return (size / Map::kSize) * Map::kSize; |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 2988 | } |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 2989 | } |
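  // Worked example (values assumed for illustration only): if Map::kSize were
  // 88 bytes (not a power of two), RoundSizeDownToObjectAlignment(200) would
  // return (200 / 88) * 88 = 176; if it were 128 (a power of two), a size of
  // 200 would round down to 128.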
| 2990 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2991 | #ifdef VERIFY_HEAP |
| 2992 | void VerifyObject(HeapObject* obj) override; |
| 2993 | #endif |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2994 | }; |
| 2995 | |
| 2996 | |
| 2997 | // ----------------------------------------------------------------------------- |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2998 | // Large objects ( > Page::kMaxRegularHeapObjectSize ) are allocated and |
| 2999 | // managed by the large object space. A large object is allocated from OS |
| 3000 | // heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3001 | // A large object always starts at offset Page::kObjectStartOffset into its page.
| 3002 | // Large objects do not move during garbage collections. |
| 3003 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3004 | class LargeObjectSpace : public Space { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3005 | public: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 3006 | LargeObjectSpace(Heap* heap, AllocationSpace id); |
| 3007 | virtual ~LargeObjectSpace(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3008 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3009 | // Initializes internal data structures. |
| 3010 | bool SetUp(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3011 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3012 | // Releases internal resources, frees objects in this space. |
| 3013 | void TearDown(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3014 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3015 | static intptr_t ObjectSizeFor(intptr_t chunk_size) { |
| 3016 | if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; |
| 3017 | return chunk_size - Page::kPageSize - Page::kObjectStartOffset; |
| 3018 | } |
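  // For example, a chunk of Page::kPageSize + Page::kObjectStartOffset + 4096
  // bytes yields ObjectSizeFor(...) == 4096, i.e. the usable object payload
  // excludes one page of overhead plus the object start offset.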
| 3019 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3020 | // Shared implementation of AllocateRaw, AllocateRawCode and |
| 3021 | // AllocateRawFixedArray. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 3022 | MUST_USE_RESULT AllocationResult |
| 3023 | AllocateRaw(int object_size, Executability executable); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3024 | |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 3025 | // Available bytes for objects in this space. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 3026 | inline intptr_t Available() override; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3027 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 3028 | intptr_t Size() override { return size_; } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3029 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 3030 | intptr_t SizeOfObjects() override { return objects_size_; } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 3031 | |
| 3032 | // Approximate amount of physical memory committed for this space. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 3033 | size_t CommittedPhysicalMemory() override; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 3034 | |
| 3035 | int PageCount() { return page_count_; } |
| 3036 | |
 | 3037 |   // Finds an object for a given address; returns a Smi if it is not found.
 | 3038 |   // The function iterates through all objects in this space and may be slow.
| 3039 | Object* FindObject(Address a); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3040 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3041 |   // Finds the large object page containing the given address; returns NULL
Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3042 |   // if no such page exists.
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3043 | LargePage* FindPage(Address a); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3044 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 3045 | // Clears the marking state of live objects. |
| 3046 | void ClearMarkingStateOfLiveObjects(); |
| 3047 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3048 | // Frees unmarked objects. |
| 3049 | void FreeUnmarkedObjects(); |
| 3050 | |
| 3051 | // Checks whether a heap object is in this space; O(1). |
| 3052 | bool Contains(HeapObject* obj); |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 3053 | // Checks whether an address is in the object area in this space. Iterates |
| 3054 | // all objects in the space. May be slow. |
| 3055 | bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3056 | |
| 3057 | // Checks whether the space is empty. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3058 | bool IsEmpty() { return first_page_ == NULL; } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3059 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 3060 | void AdjustLiveBytes(int by) { objects_size_ += by; } |
| 3061 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3062 | LargePage* first_page() { return first_page_; } |
| 3063 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 3064 | #ifdef VERIFY_HEAP |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3065 | virtual void Verify(); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 3066 | #endif |
| 3067 | |
| 3068 | #ifdef DEBUG |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 3069 | void Print() override; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3070 | void ReportStatistics(); |
| 3071 | void CollectCodeStatistics(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3072 | #endif |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3073 | |
| 3074 | private: |
| 3075 | // The head of the linked list of large object chunks. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3076 | LargePage* first_page_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 3077 | intptr_t size_; // allocated bytes |
| 3078 | int page_count_; // number of chunks |
Shimeng (Simon) Wang | 8a31eba | 2010-12-06 19:01:33 -0800 | [diff] [blame] | 3079 | intptr_t objects_size_; // size of objects |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3080 |   // Maps MemoryChunk::kAlignment-aligned chunks to the large pages covering them.
| 3081 | HashMap chunk_map_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3082 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3083 | friend class LargeObjectIterator; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3084 | }; |
| 3085 | |
| 3086 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 3087 | class LargeObjectIterator : public ObjectIterator { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3088 | public: |
| 3089 | explicit LargeObjectIterator(LargeObjectSpace* space); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3090 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3091 | HeapObject* Next(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3092 | |
 | 3093 |   // Implementation of ObjectIterator.
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3094 | virtual HeapObject* next_object() { return Next(); } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3095 | |
| 3096 | private: |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3097 | LargePage* current_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3098 | }; |
| 3099 | |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 3100 | class LargePageIterator BASE_EMBEDDED { |
| 3101 | public: |
| 3102 | explicit inline LargePageIterator(LargeObjectSpace* space); |
| 3103 | |
| 3104 | inline LargePage* next(); |
| 3105 | |
| 3106 | private: |
| 3107 | LargePage* next_page_; |
| 3108 | }; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3109 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3110 | // Iterates over the chunks (pages and large object pages) that can contain |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 3111 | // pointers to new space or to evacuation candidates. |
| 3112 | class MemoryChunkIterator BASE_EMBEDDED { |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3113 | public: |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 3114 | enum Mode { ALL, ALL_BUT_MAP_SPACE, ALL_BUT_CODE_SPACE }; |
| 3115 | inline explicit MemoryChunkIterator(Heap* heap, Mode mode); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3116 | |
| 3117 | // Return NULL when the iterator is done. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 3118 | inline MemoryChunk* next(); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3119 | |
| 3120 | private: |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 3121 | enum State { |
| 3122 | kOldSpaceState, |
| 3123 | kMapState, |
| 3124 | kCodeState, |
| 3125 | kLargeObjectState, |
| 3126 | kFinishedState |
| 3127 | }; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3128 | State state_; |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 3129 | const Mode mode_; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 3130 | PageIterator old_iterator_; |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 3131 | PageIterator code_iterator_; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3132 | PageIterator map_iterator_; |
Ben Murdoch | da12d29 | 2016-06-02 14:46:10 +0100 | [diff] [blame] | 3133 | LargePageIterator lo_iterator_; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3134 | }; |
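// Illustrative usage (hypothetical call site, not from this header):
//   MemoryChunkIterator it(heap, MemoryChunkIterator::ALL);
//   while (MemoryChunk* chunk = it.next()) {
//     // Visit |chunk|; next() returns NULL once all chunks have been seen.
//   }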
| 3135 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 3136 | #ifdef DEBUG |
| 3137 | struct CommentStatistic { |
| 3138 | const char* comment; |
| 3139 | int size; |
| 3140 | int count; |
| 3141 | void Clear() { |
| 3142 | comment = NULL; |
| 3143 | size = 0; |
| 3144 | count = 0; |
| 3145 | } |
| 3146 | // Must be small, since an iteration is used for lookup. |
| 3147 | static const int kMaxComments = 64; |
| 3148 | }; |
| 3149 | #endif |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 3150 | } // namespace internal |
| 3151 | } // namespace v8 |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 3152 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 3153 | #endif // V8_HEAP_SPACES_H_ |