// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include "src/allocation.h"
#include "src/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
#include "src/flags.h"
#include "src/hashmap.h"
#include "src/list.h"
#include "src/objects.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

class CompactionSpaceCollection;
class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer-based write barrier is used to keep track of
// intergenerational references. See heap/store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old space or large object space
// it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old space and large object space should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However, we
// still require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.
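
// Illustrative sketch (not part of this header): linear allocation against
// the top/limit pair described above amounts to bump-pointer allocation. The
// names AllocationInfo::top()/limit()/set_top() are assumed here purely for
// illustration:
//
//   Address TryAllocateLinearly(AllocationInfo* info, int size_in_bytes) {
//     if (info->limit() - info->top() < size_in_bytes) return nullptr;
//     Address result = info->top();
//     info->set_top(result + size_in_bytes);
//     return result;
//   }
//
// Only on failure does the allocator fall back to the free list, which is why
// the special garbage section needs no free-space map of its own.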

// Some assertion macros used in debug mode.

#define DCHECK_PAGE_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))

#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
  DCHECK((0 < size) && (size <= code_space->AreaSize()))

#define DCHECK_PAGE_OFFSET(offset) \
  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))

#define DCHECK_MAP_PAGE_INDEX(index) \
  DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
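
// Example usage (illustrative): a regular allocation path would assert
// DCHECK_OBJECT_SIZE(size_in_bytes) before bump-pointer allocation, while a
// code-object path would assert
// DCHECK_CODEOBJECT_SIZE(size_in_bytes, heap->code_space()), since code
// objects are bounded by the code space's usable area rather than by
// Page::kMaxRegularHeapObjectSize.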

class AllocationInfo;
class CompactionSpace;
class FreeList;
class MemoryAllocator;
class MemoryChunk;
class PagedSpace;
class Space;

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

 private:
  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1);
    } else {
      return MarkBit(cell_, new_mask);
    }
  }

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  CellType* cell_;
  CellType mask_;

  friend class Marking;
};
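
// Illustrative note: Next() (private, reachable only through the Marking
// friend class) advances one bit at a time by shifting the mask, and hops to
// the following cell when the shift overflows to zero:
//
//   MarkBit::CellType cells[2] = {0, 0};
//   MarkBit last_of_cell0(&cells[0], 1u << 31);    // last bit of cell 0
//   MarkBit first_of_cell1 = last_of_cell0.Next(); // cell_ == &cells[1],
//                                                  // mask_ == 1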


// Bitmap is a sequence of cells, each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() { return CellsForLength(kLength); }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  V8_INLINE static uint32_t IndexInCell(uint32_t index) {
    return index & kBitIndexMask;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) { return reinterpret_cast<Address>(this); }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index) {
    MarkBit::CellType mask = 1u << IndexInCell(index);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask);
  }

  static inline void Clear(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };
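
  // Example output (illustrative): feeding CellPrinter three all-zero cells
  // followed by the cell 0x5 prints
  //   0: 0x96        (a run of 96 zero bits starting at cell 0)
  //   3: 1010000...  (the mixed cell, printed bit by bit, LSB first)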

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) {
        return false;
      }
    }
    return true;
  }

  // Clears all bits starting from {cell_base_index} up to and excluding
  // {index}. Note that {cell_base_index} is required to be cell aligned.
  void ClearRange(uint32_t cell_base_index, uint32_t index) {
    DCHECK_EQ(IndexInCell(cell_base_index), 0u);
    DCHECK_GE(index, cell_base_index);
    uint32_t start_cell_index = IndexToCell(cell_base_index);
    uint32_t end_cell_index = IndexToCell(index);
    DCHECK_GE(end_cell_index, start_cell_index);
    // Clear all cells till the cell containing the last index.
    for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
      cells()[i] = 0;
    }
    // Clear all bits in the last cell till the last bit before index.
    uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
    cells()[end_cell_index] &= clear_mask;
  }
};
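
// Worked example of the index arithmetic above (illustrative): with 32-bit
// cells, mark bit index 37 lives in cell IndexToCell(37) == 1 at bit
// position IndexInCell(37) == 5, so MarkBitFromIndex(37) yields the mask
// 1u << 5 over cells()[1].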


class SkipList;
class SlotsBuffer;

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    ABOUT_TO_BE_FREED,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    SCAN_ON_SCAVENGE,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    EVACUATION_CANDIDATE,
    RESCAN_ON_EVACUATION,
    NEVER_EVACUATE,  // May contain immortal immutables.
    POPULAR_PAGE,    // Slots buffer of this page overflowed on the previous GC.

    // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
    // otherwise marking bits are still intact.
    WAS_SWEPT,

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black and
    // a white-to-grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // This flag is intended to be used for testing. Works only when both
    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
    // are set. It forces the page to become an evacuation candidate at the
    // next candidate selection cycle.
    FORCE_EVACUATION_CANDIDATE_FOR_TESTING,

    // This flag is intended to be used for testing.
    NEVER_ALLOCATE_ON_PAGE,

    // The memory chunk is already logically freed, however the actual freeing
    // still has to be performed.
    PRE_FREED,

    // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
    // has been aborted and needs special handling by the sweeper.
    COMPACTION_WAS_ABORTED,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };

  // |kCompactingDone|: Initial compaction state of a |MemoryChunk|.
  // |kCompactingInProgress|: Parallel compaction is currently in progress.
  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
  //   be finalized.
  // |kCompactingAborted|: Parallel compaction has been aborted, which should
  //   for now only happen in OOM scenarios.
  enum ParallelCompactingState {
    kCompactingDone,
    kCompactingInProgress,
    kCompactingFinalize,
    kCompactingAborted,
  };

  // |kSweepingDone|: The page state when sweeping is complete or sweeping must
  //   not be performed on that page.
  // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will
  //   not touch the page memory anymore.
  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
  // |kSweepingPending|: This page is ready for parallel sweeping.
  enum ParallelSweepingState {
    kSweepingDone,
    kSweepingFinalize,
    kSweepingInProgress,
    kSweepingPending
  };

  // Every n write barrier invocations we go to runtime even though
  // we could have handled it in generated code. This lets us check
  // whether we have hit the limit and should do some more marking.
  static const int kWriteBarrierCounterGranularity = 500;

  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
      (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize  // size_t size
      + kIntptrSize               // intptr_t flags_
      + kPointerSize              // Address area_start_
      + kPointerSize              // Address area_end_
      + 2 * kPointerSize          // base::VirtualMemory reservation_
      + kPointerSize              // Address owner_
      + kPointerSize              // Heap* heap_
      + kIntSize;                 // int store_buffer_counter_

  static const size_t kSlotsBufferOffset =
      kLiveBytesOffset + kIntSize;  // int live_byte_count_

  static const size_t kWriteBarrierCounterOffset =
      kSlotsBufferOffset + kPointerSize  // SlotsBuffer* slots_buffer_;
      + kPointerSize;                    // SkipList* skip_list_;

  static const size_t kMinHeaderSize =
      kWriteBarrierCounterOffset +
      kIntptrSize         // intptr_t write_barrier_counter_
      + kIntSize          // int progress_bar_
      + kPointerSize      // AtomicValue high_water_mark_
      + kPointerSize      // base::Mutex* mutex_
      + kPointerSize      // base::AtomicWord parallel_sweeping_
      + kPointerSize      // AtomicValue parallel_compaction_
      + 5 * kPointerSize  // AtomicNumber free-list statistics
      + kPointerSize      // AtomicValue next_chunk_
      + kPointerSize;     // AtomicValue prev_chunk_

  // We add some more space to the computed header size to account for missing
  // alignment requirements in our computation.
  // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
  static const size_t kHeaderSize = kMinHeaderSize + kIntSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset =
      kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);

  static const int kFlagsOffset = kPointerSize;

  static void IncrementLiveBytesFromMutator(HeapObject* object, int by);

  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }
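
  // Example (illustrative): with the usual kPageSizeBits of 20, kAlignment is
  // 1 MB, so an interior pointer such as 0x12345678 maps back to its chunk
  // header at 0x12345678 & ~0xFFFFF == 0x12300000.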

  static const MemoryChunk* FromAddress(const byte* a) {
    return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
                                                ~kAlignmentMask);
  }

  static void IncrementLiveBytesFromGC(HeapObject* object, int by) {
    MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
  }

  // Only works for addresses in pointer spaces, not data or code spaces.
  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  static inline uint32_t FastAddressToMarkbitIndex(Address addr) {
    const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
  }

  static inline void UpdateHighWaterMark(Address mark) {
    if (mark == nullptr) return;
    // Need to subtract one from the mark because when a chunk is full the
    // top points to the next address after the chunk, which effectively
    // belongs to another chunk. See the comment to Page::FromAllocationTop.
    MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
    intptr_t old_mark = 0;
    do {
      old_mark = chunk->high_water_mark_.Value();
    } while ((new_mark > old_mark) &&
             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
  }
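
  // UpdateHighWaterMark's retry loop implements a lock-free maximum: the
  // compare-and-swap is retried only while the new mark still exceeds the
  // stored one, so racing writers can only ever raise the high-water mark,
  // never lower it.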

  Address address() { return reinterpret_cast<Address>(this); }

  bool is_valid() { return address() != NULL; }

  MemoryChunk* next_chunk() { return next_chunk_.Value(); }

  MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }

  void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }

  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
        kPageHeaderTag) {
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                      kPageHeaderTag);
    } else {
      return NULL;
    }
  }

  void set_owner(Space* space) {
    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
           kPageHeaderTag);
  }

  base::VirtualMemory* reserved_memory() { return &reservation_; }

  void set_reserved_memory(base::VirtualMemory* reservation) {
    DCHECK_NOT_NULL(reservation);
    reservation_.TakeControl(reservation);
  }

  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
  void initialize_scan_on_scavenge(bool scan) {
    if (scan) {
      SetFlag(SCAN_ON_SCAVENGE);
    } else {
      ClearFlag(SCAN_ON_SCAVENGE);
    }
  }
  inline void set_scan_on_scavenge(bool scan);

  int store_buffer_counter() { return store_buffer_counter_; }
  void set_store_buffer_counter(int counter) {
    store_buffer_counter_ = counter;
  }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether addr can be a limit of addresses in this page.
  // It's a limit if it's in the page, or if it's just after the
  // last byte of the page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }

  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }

  void SetFlagTo(int flag, bool value) {
    if (value) {
      SetFlag(flag);
    } else {
      ClearFlag(flag);
    }
  }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask
  // are set to the value in "flags", the rest retain the current value
  // in flags_.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }
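
  // Example (illustrative): to flip a page from from-space to to-space in a
  // single store, set IN_TO_SPACE and clear IN_FROM_SPACE together:
  //   SetFlags(1 << IN_TO_SPACE, (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE));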

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }

  AtomicValue<ParallelSweepingState>& parallel_sweeping_state() {
    return parallel_sweeping_;
  }

  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
    return parallel_compaction_;
  }

  bool TryLock() { return mutex_->TryLock(); }

  base::Mutex* mutex() { return mutex_; }

  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
  // progress. In particular, it assumes that right before this call a
  // sweeper thread was sweeping this page.
  void WaitUntilSweepingCompleted() {
    mutex_->Lock();
    mutex_->Unlock();
    DCHECK(SweepingCompleted());
  }

  bool SweepingCompleted() {
    return parallel_sweeping_state().Value() <= kSweepingFinalize;
  }

  // Manage live byte count (count of bytes known to be live,
  // because they are marked black).
  void ResetLiveBytes() {
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
             live_byte_count_);
    }
    live_byte_count_ = 0;
  }

  void IncrementLiveBytes(int by) {
    if (FLAG_gc_verbose) {
      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
             live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
             live_byte_count_ + by);
    }
    live_byte_count_ += by;
    DCHECK_GE(live_byte_count_, 0);
    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
  }

  int LiveBytes() {
    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
    return live_byte_count_;
  }

  void SetLiveBytes(int live_bytes) {
    DCHECK_GE(live_bytes, 0);
    DCHECK_LE(static_cast<unsigned>(live_bytes), size_);
    live_byte_count_ = live_bytes;
  }

  int write_barrier_counter() {
    return static_cast<int>(write_barrier_counter_);
  }

  void set_write_barrier_counter(int counter) {
    write_barrier_counter_ = counter;
  }

  int progress_bar() {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    return progress_bar_;
  }

  void set_progress_bar(int progress_bar) {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
  }

  size_t size() const { return size_; }

  void set_size(size_t size) { size_ = size; }

  void SetArea(Address area_start, Address area_end) {
    area_start_ = area_start;
    area_end_ = area_end;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }

  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

  // Markbits support

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  void PrintMarkbits() { markbits()->Print(); }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }

  void InsertAfter(MemoryChunk* other);
  void Unlink();

  inline Heap* heap() const { return heap_; }

  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }

  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }

  bool IsEvacuationCandidate() {
    DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
    return IsFlagSet(EVACUATION_CANDIDATE);
  }

  bool CanAllocate() {
    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
  }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  inline SkipList* skip_list() { return skip_list_; }

  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

  inline SlotsBuffer* slots_buffer() { return slots_buffer_; }

  inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }

  void MarkEvacuationCandidate() {
    DCHECK(!IsFlagSet(NEVER_EVACUATE));
    DCHECK(slots_buffer_ == NULL);
    SetFlag(EVACUATION_CANDIDATE);
  }

  void ClearEvacuationCandidate() {
    DCHECK(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() { return static_cast<int>(area_end() - area_start()); }
  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }

  // Should be called when the memory chunk is about to be freed.
  void ReleaseAllocatedMemory();

 protected:
  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner);

  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  base::VirtualMemory reservation_;
  // The identity of the owning space. This is tagged as a failure pointer,
  // but no failure can be in an object, so this can be distinguished from
  // any entry in a fixed array.
  Address owner_;
  Heap* heap_;
  // Used by the store buffer to keep track of which pages to mark
  // scan-on-scavenge.
  int store_buffer_counter_;
  // Count of bytes marked black on page.
  int live_byte_count_;
  SlotsBuffer* slots_buffer_;
  SkipList* skip_list_;
  intptr_t write_barrier_counter_;
  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;
  // Assuming the initial allocation on a page is sequential, this tracks the
  // highest number of bytes ever allocated on the page.
  AtomicValue<intptr_t> high_water_mark_;

  base::Mutex* mutex_;
  AtomicValue<ParallelSweepingState> parallel_sweeping_;
  AtomicValue<ParallelCompactingState> parallel_compaction_;

  // PagedSpace free-list statistics.
  AtomicNumber<intptr_t> available_in_small_free_list_;
  AtomicNumber<intptr_t> available_in_medium_free_list_;
  AtomicNumber<intptr_t> available_in_large_free_list_;
  AtomicNumber<intptr_t> available_in_huge_free_list_;
  AtomicNumber<intptr_t> non_available_small_blocks_;

  // next_chunk_ holds a pointer of type MemoryChunk.
  AtomicValue<MemoryChunk*> next_chunk_;
  // prev_chunk_ holds a pointer of type MemoryChunk.
  AtomicValue<MemoryChunk*> prev_chunk_;

 private:
  void InitializeReservedMemory() { reservation_.Reset(); }

  friend class MemoryAllocator;
  friend class MemoryChunkValidator;
};


enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };

// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[.
  // This only works if the object is in fact in a page. See also
  // MemoryChunk::FromAddress() and MemoryChunk::FromAnyPointerAddress().
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    return p;
  }

  // Returns the next page in the chain of pages owned by a space.
  inline Page* next_page() {
    DCHECK(next_chunk()->owner() == owner());
    return static_cast<Page*>(next_chunk());
  }
  inline Page* prev_page() {
    DCHECK(prev_chunk()->owner() == owner());
    return static_cast<Page*>(prev_chunk());
  }
  inline void set_next_page(Page* page);
  inline void set_prev_page(Page* page);

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset into this page.
  Address OffsetToAddress(int offset) {
    DCHECK_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Maximum object size that gets allocated into regular pages. Objects
  // larger than that size are allocated in large object space and are never
  // moved in memory. This also applies to new space allocation, since
  // objects are never migrated from new space to large object space. Takes
  // double alignment into account.
  // TODO(hpayer): This limit should be way smaller but we currently have
  // short living objects >256K.
  static const int kMaxRegularHeapObjectSize = 600 * KB;

  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  inline void ClearGCFields();

  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, PagedSpace* owner);

  void InitializeAsAnchor(PagedSpace* owner);

  bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
  void SetWasSwept() { SetFlag(WAS_SWEPT); }
  void ClearWasSwept() { ClearFlag(WAS_SWEPT); }

  void ResetFreeListStatistics();

  int LiveBytesFromFreeList() {
    return static_cast<int>(
        area_size() - non_available_small_blocks() -
        available_in_small_free_list() - available_in_medium_free_list() -
        available_in_large_free_list() - available_in_huge_free_list());
  }

#define FRAGMENTATION_STATS_ACCESSORS(type, name)         \
  type name() { return name##_.Value(); }                 \
  void set_##name(type name) { name##_.SetValue(name); }  \
  void add_##name(type name) { name##_.Increment(name); }

  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)

#undef FRAGMENTATION_STATS_ACCESSORS

  void add_available_in_free_list(FreeListCategoryType type, intptr_t bytes) {
    switch (type) {
      case kSmall:
        add_available_in_small_free_list(bytes);
        break;
      case kMedium:
        add_available_in_medium_free_list(bytes);
        break;
      case kLarge:
        add_available_in_large_free_list(bytes);
        break;
      case kHuge:
        add_available_in_huge_free_list(bytes);
        break;
      default:
        UNREACHABLE();
    }
  }

  intptr_t available_in_free_list(FreeListCategoryType type) {
    switch (type) {
      case kSmall:
        return available_in_small_free_list();
      case kMedium:
        return available_in_medium_free_list();
      case kLarge:
        return available_in_large_free_list();
      case kHuge:
        return available_in_huge_free_list();
      default:
        UNREACHABLE();
    }
    UNREACHABLE();
    return 0;
  }

#ifdef DEBUG
  void Print();
#endif  // DEBUG

  friend class MemoryAllocator;
};


class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

  inline LargePage* next_page() {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) { set_next_chunk(page); }

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap),
        id_(id),
        executable_(executable),
        committed_(0),
        max_committed_(0) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns the total amount of committed memory for this space, i.e.,
  // allocatable memory and page headers.
  virtual intptr_t CommittedMemory() { return committed_; }

  virtual intptr_t MaximumCommittedMemory() { return max_committed_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  // Approximate amount of physical memory committed for this space.
  virtual size_t CommittedPhysicalMemory() = 0;

  // Returns the available bytes without growing.
  virtual intptr_t Available() = 0;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }
| 1010 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1011 | #ifdef DEBUG |
| 1012 | virtual void Print() = 0; |
| 1013 | #endif |
| 1014 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1015 | protected: |
| 1016 | void AccountCommitted(intptr_t bytes) { |
| 1017 | DCHECK_GE(bytes, 0); |
| 1018 | committed_ += bytes; |
| 1019 | if (committed_ > max_committed_) { |
| 1020 | max_committed_ = committed_; |
| 1021 | } |
| 1022 | } |
| 1023 | |
| 1024 | void AccountUncommitted(intptr_t bytes) { |
| 1025 | DCHECK_GE(bytes, 0); |
| 1026 | committed_ -= bytes; |
| 1027 | DCHECK_GE(committed_, 0); |
| 1028 | } |
| 1029 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1030 | private: |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1031 | Heap* heap_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1032 | AllocationSpace id_; |
| 1033 | Executability executable_; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1034 | |
| 1035 | // Keeps track of committed memory in a space. |
| 1036 | intptr_t committed_; |
| 1037 | intptr_t max_committed_; |
| 1038 | }; |
| 1039 | |
| 1040 | |
| 1041 | class MemoryChunkValidator { |
| 1042 | // Computed offsets should match the compiler generated ones. |
| 1043 | STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_)); |
| 1044 | STATIC_ASSERT(MemoryChunk::kLiveBytesOffset == |
| 1045 | offsetof(MemoryChunk, live_byte_count_)); |
| 1046 | STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset == |
| 1047 | offsetof(MemoryChunk, slots_buffer_)); |
| 1048 | STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset == |
| 1049 | offsetof(MemoryChunk, write_barrier_counter_)); |
| 1050 | |
| 1051 | // Validate our estimates on the header size. |
| 1052 | STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); |
| 1053 | STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize); |
| 1054 | STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1055 | }; |
| 1056 | |
| 1057 | |
| 1058 | // ---------------------------------------------------------------------------- |
| 1059 | // All heap objects containing executable code (code objects) must be allocated |
| 1060 | // from a 2 GB range of memory, so that they can call each other using 32-bit |
| 1061 | // displacements. This happens automatically on 32-bit platforms, where 32-bit |
 | 1062 | // displacements cover the entire 4 GB virtual address space. On 64-bit
| 1063 | // platforms, we support this using the CodeRange object, which reserves and |
| 1064 | // manages a range of virtual memory. |
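 | | // For illustration (a sketch; 'pc' and 'target' are illustrative names, not
 | | // part of this interface): a signed 32-bit pc-relative displacement reaches
 | | // +/- 2^31 bytes (about 2 GB), so keeping every code object inside a single
 | | // reservation of at most 2 GB guarantees any intra-range target is encodable:
 | | //
 | | //   int64_t distance = target - pc;  // both addresses inside the code range
 | | //   bool encodable = distance >= -(1LL << 31) && distance < (1LL << 31);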
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1065 | class CodeRange { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1066 | public: |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 1067 | explicit CodeRange(Isolate* isolate); |
| 1068 | ~CodeRange() { TearDown(); } |
| 1069 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1070 | // Reserves a range of virtual memory, but does not commit any of it. |
| 1071 | // Can only be called once, at heap initialization time. |
| 1072 | // Returns false on failure. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1073 | bool SetUp(size_t requested_size); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1074 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1075 | bool valid() { return code_range_ != NULL; } |
| 1076 | Address start() { |
| 1077 | DCHECK(valid()); |
| 1078 | return static_cast<Address>(code_range_->address()); |
| 1079 | } |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1080 | size_t size() { |
| 1081 | DCHECK(valid()); |
| 1082 | return code_range_->size(); |
| 1083 | } |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1084 | bool contains(Address address) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1085 | if (!valid()) return false; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1086 | Address start = static_cast<Address>(code_range_->address()); |
| 1087 | return start <= address && address < start + code_range_->size(); |
| 1088 | } |
| 1089 | |
| 1090 | // Allocates a chunk of memory from the large-object portion of |
| 1091 | // the code range. On platforms with no separate code range, should |
| 1092 | // not be called. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1093 | MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size, |
| 1094 | const size_t commit_size, |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1095 | size_t* allocated); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1096 | bool CommitRawMemory(Address start, size_t length); |
| 1097 | bool UncommitRawMemory(Address start, size_t length); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1098 | void FreeRawMemory(Address buf, size_t length); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1099 | |
| 1100 | private: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1101 | // Frees the range of virtual memory, and frees the data structures used to |
| 1102 | // manage it. |
| 1103 | void TearDown(); |
| 1104 | |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 1105 | Isolate* isolate_; |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1106 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1107 | // The reserved range of virtual memory that all code objects are put in. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1108 | base::VirtualMemory* code_range_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1109 | // Plain old data class, just a struct plus a constructor. |
| 1110 | class FreeBlock { |
| 1111 | public: |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1112 | FreeBlock() : start(0), size(0) {} |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1113 | FreeBlock(Address start_arg, size_t size_arg) |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1114 | : start(start_arg), size(size_arg) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1115 | DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment)); |
| 1116 | DCHECK(size >= static_cast<size_t>(Page::kPageSize)); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1117 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1118 | FreeBlock(void* start_arg, size_t size_arg) |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1119 | : start(static_cast<Address>(start_arg)), size(size_arg) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1120 | DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment)); |
| 1121 | DCHECK(size >= static_cast<size_t>(Page::kPageSize)); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1122 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1123 | |
| 1124 | Address start; |
| 1125 | size_t size; |
| 1126 | }; |
| 1127 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1128 | // The global mutex guards free_list_ and allocation_list_ as GC threads may |
 | 1129 |   // access both lists concurrently with the main thread.
| 1130 | base::Mutex code_range_mutex_; |
| 1131 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1132 | // Freed blocks of memory are added to the free list. When the allocation |
| 1133 | // list is exhausted, the free list is sorted and merged to make the new |
| 1134 | // allocation list. |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1135 | List<FreeBlock> free_list_; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1136 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1137 | // Memory is allocated from the free blocks on the allocation list. |
| 1138 | // The block at current_allocation_block_index_ is the current block. |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1139 | List<FreeBlock> allocation_list_; |
| 1140 | int current_allocation_block_index_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1141 | |
| 1142 | // Finds a block on the allocation list that contains at least the |
| 1143 | // requested amount of memory. If none is found, sorts and merges |
| 1144 | // the existing free memory blocks, and searches again. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1145 | // If none can be found, returns false. |
| 1146 | bool GetNextAllocationBlock(size_t requested); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1147 | // Compares the start addresses of two free blocks. |
| 1148 | static int CompareFreeBlockAddress(const FreeBlock* left, |
| 1149 | const FreeBlock* right); |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1150 | bool ReserveBlock(const size_t requested_size, FreeBlock* block); |
| 1151 | void ReleaseBlock(const FreeBlock* block); |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1152 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1153 | DISALLOW_COPY_AND_ASSIGN(CodeRange); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1154 | }; |
| 1155 | |
| 1156 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1157 | class SkipList { |
| 1158 | public: |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1159 | SkipList() { Clear(); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1160 | |
| 1161 | void Clear() { |
| 1162 | for (int idx = 0; idx < kSize; idx++) { |
| 1163 | starts_[idx] = reinterpret_cast<Address>(-1); |
| 1164 | } |
| 1165 | } |
| 1166 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1167 | Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1168 | |
| 1169 | void AddObject(Address addr, int size) { |
| 1170 | int start_region = RegionNumber(addr); |
| 1171 | int end_region = RegionNumber(addr + size - kPointerSize); |
| 1172 | for (int idx = start_region; idx <= end_region; idx++) { |
| 1173 | if (starts_[idx] > addr) starts_[idx] = addr; |
| 1174 | } |
| 1175 | } |
| 1176 | |
| 1177 | static inline int RegionNumber(Address addr) { |
| 1178 | return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2; |
| 1179 | } |
| 1180 | |
| 1181 | static void Update(Address addr, int size) { |
| 1182 | Page* page = Page::FromAddress(addr); |
| 1183 | SkipList* list = page->skip_list(); |
| 1184 | if (list == NULL) { |
| 1185 | list = new SkipList(); |
| 1186 | page->set_skip_list(list); |
| 1187 | } |
| 1188 | |
| 1189 | list->AddObject(addr, size); |
| 1190 | } |
| 1191 | |
| 1192 | private: |
| 1193 | static const int kRegionSizeLog2 = 13; |
| 1194 | static const int kRegionSize = 1 << kRegionSizeLog2; |
| 1195 | static const int kSize = Page::kPageSize / kRegionSize; |
| 1196 | |
| 1197 | STATIC_ASSERT(Page::kPageSize % kRegionSize == 0); |
| 1198 | |
| 1199 | Address starts_[kSize]; |
| 1200 | }; |
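 | | // Example (a sketch): RegionNumber() maps an address to one of the
 | | // Page::kPageSize / kRegionSize regions of its page. After
 | | //
 | | //   SkipList::Update(addr, size);  // typically called after an allocation
 | | //
 | | // every region the object overlaps remembers the lowest object start seen so
 | | // far. For an address inside a recorded object, StartFor() therefore returns
 | | // an object start at or before that address, giving a nearby point from
 | | // which a linear scan of the page can begin.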
| 1201 | |
| 1202 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1203 | // ---------------------------------------------------------------------------- |
| 1204 | // A space acquires chunks of memory from the operating system. The memory |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1205 | // allocator allocates and deallocates pages for the paged heap spaces and large
| 1206 | // pages for large object space. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1207 | // |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1208 | // Each space has to manage its own pages.
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1209 | // |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1210 | class MemoryAllocator { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1211 | public: |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 1212 | explicit MemoryAllocator(Isolate* isolate); |
| 1213 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1214 | // Initializes its internal bookkeeping structures. |
Russell Brenner | 90bac25 | 2010-11-18 13:33:46 -0800 | [diff] [blame] | 1215 | // Max capacity of the total space and executable memory limit. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1216 | bool SetUp(intptr_t max_capacity, intptr_t capacity_executable); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1217 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1218 | void TearDown(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1219 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1220 | Page* AllocatePage(intptr_t size, PagedSpace* owner, |
| 1221 | Executability executable); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1222 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1223 | LargePage* AllocateLargePage(intptr_t object_size, Space* owner, |
| 1224 | Executability executable); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1225 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1226 | // PreFree logically frees the object, i.e., it takes care of the size |
| 1227 | // bookkeeping and calls the allocation callback. |
| 1228 | void PreFreeMemory(MemoryChunk* chunk); |
| 1229 | |
| 1230 | // FreeMemory can be called concurrently when PreFree was executed before. |
| 1231 | void PerformFreeMemory(MemoryChunk* chunk); |
| 1232 | |
| 1233 | // Free is a wrapper method, which calls PreFree and PerformFreeMemory |
| 1234 | // together. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1235 | void Free(MemoryChunk* chunk); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1236 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1237 | // Returns allocated spaces in bytes. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1238 | intptr_t Size() { return size_.Value(); } |
| 1239 | |
| 1240 | // Returns allocated executable spaces in bytes. |
| 1241 | intptr_t SizeExecutable() { return size_executable_.Value(); } |
| 1242 | |
| 1243 | // Returns the maximum available bytes of heaps. |
| 1244 | intptr_t Available() { |
| 1245 | intptr_t size = Size(); |
| 1246 | return capacity_ < size ? 0 : capacity_ - size; |
| 1247 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1248 | |
Russell Brenner | 90bac25 | 2010-11-18 13:33:46 -0800 | [diff] [blame] | 1249 | // Returns the maximum available executable bytes of heaps. |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1250 | intptr_t AvailableExecutable() { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1251 | intptr_t executable_size = SizeExecutable(); |
| 1252 | if (capacity_executable_ < executable_size) return 0; |
| 1253 | return capacity_executable_ - executable_size; |
Russell Brenner | 90bac25 | 2010-11-18 13:33:46 -0800 | [diff] [blame] | 1254 | } |
| 1255 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1256 | // Returns maximum available bytes that the old space can have. |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1257 | intptr_t MaxAvailable() { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1258 | return (Available() / Page::kPageSize) * Page::kAllocatableMemory; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1259 | } |
| 1260 | |
| 1261 | // Returns an indication of whether a pointer is in a space that has |
| 1262 | // been allocated by this MemoryAllocator. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1263 | V8_INLINE bool IsOutsideAllocatedSpace(const void* address) { |
| 1264 | return address < lowest_ever_allocated_.Value() || |
| 1265 | address >= highest_ever_allocated_.Value(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1266 | } |
| 1267 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1268 | #ifdef DEBUG |
| 1269 | // Reports statistic info of the space. |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1270 | void ReportStatistics(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1271 | #endif |
| 1272 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1273 | // Returns a MemoryChunk in which the memory region from commit_area_size to |
 | 1274 |   // reserve_area_size of the chunk area is reserved but not committed; it
 | 1275 |   // can be committed later by calling MemoryChunk::CommitArea.
| 1276 | MemoryChunk* AllocateChunk(intptr_t reserve_area_size, |
| 1277 | intptr_t commit_area_size, |
| 1278 | Executability executable, Space* space); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1279 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1280 | Address ReserveAlignedMemory(size_t requested, size_t alignment, |
| 1281 | base::VirtualMemory* controller); |
| 1282 | Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size, |
| 1283 | size_t alignment, Executability executable, |
| 1284 | base::VirtualMemory* controller); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1285 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1286 | bool CommitMemory(Address addr, size_t size, Executability executable); |
| 1287 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1288 | void FreeNewSpaceMemory(Address addr, base::VirtualMemory* reservation, |
| 1289 | Executability executable); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1290 | void FreeMemory(base::VirtualMemory* reservation, Executability executable); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1291 | void FreeMemory(Address addr, size_t size, Executability executable); |
| 1292 | |
| 1293 | // Commit a contiguous block of memory from the initial chunk. Assumes that |
| 1294 | // the address is not NULL, the size is greater than zero, and that the |
| 1295 | // block is contained in the initial chunk. Returns true if it succeeded |
| 1296 | // and false otherwise. |
| 1297 | bool CommitBlock(Address start, size_t size, Executability executable); |
| 1298 | |
| 1299 | // Uncommit a contiguous block of memory [start..(start+size)[. |
 | 1300 |   // Assumes that start is not NULL, the size is greater than zero, and the
| 1301 | // block is contained in the initial chunk. Returns true if it succeeded |
| 1302 | // and false otherwise. |
| 1303 | bool UncommitBlock(Address start, size_t size); |
| 1304 | |
 | 1305 |   // Zaps a contiguous block of memory [start..(start+size)[, thus
 | 1306 |   // filling it with a recognizable non-NULL bit pattern.
| 1307 | void ZapBlock(Address start, size_t size); |
| 1308 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1309 | void PerformAllocationCallback(ObjectSpace space, AllocationAction action, |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1310 | size_t size); |
| 1311 | |
| 1312 | void AddMemoryAllocationCallback(MemoryAllocationCallback callback, |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1313 | ObjectSpace space, AllocationAction action); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1314 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1315 | void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1316 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1317 | bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1318 | |
| 1319 | static int CodePageGuardStartOffset(); |
| 1320 | |
| 1321 | static int CodePageGuardSize(); |
| 1322 | |
| 1323 | static int CodePageAreaStartOffset(); |
| 1324 | |
| 1325 | static int CodePageAreaEndOffset(); |
| 1326 | |
| 1327 | static int CodePageAreaSize() { |
| 1328 | return CodePageAreaEndOffset() - CodePageAreaStartOffset(); |
| 1329 | } |
| 1330 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1331 | static int PageAreaSize(AllocationSpace space) { |
| 1332 | DCHECK_NE(LO_SPACE, space); |
| 1333 | return (space == CODE_SPACE) ? CodePageAreaSize() |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1334 | : Page::kAllocatableMemory; |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1335 | } |
| 1336 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1337 | MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm, |
| 1338 | Address start, size_t commit_size, |
| 1339 | size_t reserved_size); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1340 | |
| 1341 | private: |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 1342 | Isolate* isolate_; |
| 1343 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1344 | // Maximum space size in bytes. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1345 | intptr_t capacity_; |
Russell Brenner | 90bac25 | 2010-11-18 13:33:46 -0800 | [diff] [blame] | 1346 | // Maximum subset of capacity_ that can be executable |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1347 | intptr_t capacity_executable_; |
Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 1348 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1349 | // Allocated space size in bytes. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1350 | AtomicNumber<intptr_t> size_; |
Steve Block | 791712a | 2010-08-27 10:21:07 +0100 | [diff] [blame] | 1351 | // Allocated executable space size in bytes. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1352 | AtomicNumber<intptr_t> size_executable_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1353 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1354 | // We keep the lowest and highest addresses allocated as a quick way |
| 1355 | // of determining that pointers are outside the heap. The estimate is |
 | 1356 |   // conservative, i.e. not all addresses in 'allocated' space are allocated
| 1357 | // to our heap. The range is [lowest, highest[, inclusive on the low end |
| 1358 | // and exclusive on the high end. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1359 | AtomicValue<void*> lowest_ever_allocated_; |
| 1360 | AtomicValue<void*> highest_ever_allocated_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1361 | |
Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 1362 | struct MemoryAllocationCallbackRegistration { |
| 1363 | MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, |
| 1364 | ObjectSpace space, |
| 1365 | AllocationAction action) |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1366 | : callback(callback), space(space), action(action) {} |
Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 1367 | MemoryAllocationCallback callback; |
| 1368 | ObjectSpace space; |
| 1369 | AllocationAction action; |
| 1370 | }; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1371 | |
Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 1372 |   // A list of callbacks that are triggered when memory is allocated or freed.
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1373 | List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_; |
Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 1374 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1375 | // Initializes pages in a chunk. Returns the first page address. |
| 1376 | // This function and GetChunkId() are provided for the mark-compact |
| 1377 | // collector to rebuild page headers in the from space, which is |
| 1378 | // used as a marking stack and its page headers are destroyed. |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1379 | Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, |
| 1380 | PagedSpace* owner); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 1381 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1382 | void UpdateAllocatedSpaceLimits(void* low, void* high) { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1383 | // The use of atomic primitives does not guarantee correctness (wrt. |
| 1384 | // desired semantics) by default. The loop here ensures that we update the |
| 1385 | // values only if they did not change in between. |
| 1386 | void* ptr = nullptr; |
| 1387 | do { |
| 1388 | ptr = lowest_ever_allocated_.Value(); |
| 1389 | } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low)); |
| 1390 | do { |
| 1391 | ptr = highest_ever_allocated_.Value(); |
| 1392 | } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high)); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1393 | } |
| 1394 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1395 | DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1396 | }; |
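 | | // The two-phase release described above can be driven as follows (a sketch;
 | | // assumes {chunk} was allocated by this MemoryAllocator):
 | | //
 | | //   allocator->PreFreeMemory(chunk);      // size bookkeeping + callbacks,
 | | //                                         // must happen first
 | | //   allocator->PerformFreeMemory(chunk);  // actual release; may run
 | | //                                         // concurrently once PreFree ran
 | | //
 | | // or in a single step via allocator->Free(chunk).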
| 1397 | |
| 1398 | |
| 1399 | // ----------------------------------------------------------------------------- |
| 1400 | // Interface for heap object iterator to be implemented by all object space |
| 1401 | // object iterators. |
| 1402 | // |
Leon Clarke | d91b9f7 | 2010-01-27 17:25:45 +0000 | [diff] [blame] | 1403 | // NOTE: The space-specific object iterators also implement their own next()
 | 1404 | // method, which is used to avoid virtual function calls when
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1405 | // iterating a specific space.
| 1406 | |
| 1407 | class ObjectIterator : public Malloced { |
| 1408 | public: |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1409 | virtual ~ObjectIterator() {} |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1410 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1411 | virtual HeapObject* next_object() = 0; |
| 1412 | }; |
| 1413 | |
| 1414 | |
| 1415 | // ----------------------------------------------------------------------------- |
| 1416 | // Heap object iterator in new/old/map spaces. |
| 1417 | // |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1418 | // A HeapObjectIterator iterates objects from the bottom of the given space |
| 1419 | // to its top or from the bottom of the given page to its top. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1420 | // |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1421 | // If objects are allocated in the page during iteration, the iterator may
 | 1422 | // or may not iterate over those objects. The caller must create a new
 | 1423 | // iterator to be sure of visiting these new objects.
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1424 | class HeapObjectIterator : public ObjectIterator { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1425 | public: |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1426 | // Creates a new object iterator in a given space. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1427 | explicit HeapObjectIterator(PagedSpace* space); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1428 | explicit HeapObjectIterator(Page* page); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1429 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1430 | // Advance to the next object, skipping free spaces and other fillers and |
| 1431 | // skipping the special garbage section of which there is one per space. |
| 1432 | // Returns NULL when the iteration has ended. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1433 | inline HeapObject* Next(); |
| 1434 | inline HeapObject* next_object() override; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1435 | |
| 1436 | private: |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1437 | enum PageMode { kOnePageOnly, kAllPagesInSpace }; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1438 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1439 | Address cur_addr_; // Current iteration point. |
| 1440 | Address cur_end_; // End iteration point. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1441 | PagedSpace* space_; |
| 1442 | PageMode page_mode_; |
Leon Clarke | d91b9f7 | 2010-01-27 17:25:45 +0000 | [diff] [blame] | 1443 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1444 | // Fast (inlined) path of next(). |
| 1445 | inline HeapObject* FromCurrentPage(); |
Leon Clarke | d91b9f7 | 2010-01-27 17:25:45 +0000 | [diff] [blame] | 1446 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1447 | // Slow path of next(), goes into the next page. Returns false if the |
| 1448 | // iteration has ended. |
| 1449 | bool AdvanceToNextPage(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1450 | |
| 1451 | // Initializes fields. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1452 | inline void Initialize(PagedSpace* owner, Address start, Address end, |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1453 | PageMode mode); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1454 | }; |
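 | | // Typical use (a sketch; {space} stands for any PagedSpace):
 | | //
 | | //   HeapObjectIterator it(space);
 | | //   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
 | | //     // ... visit obj; objects allocated after the iterator was created
 | | //     // may or may not be seen (see above) ...
 | | //   }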
| 1455 | |
| 1456 | |
| 1457 | // ----------------------------------------------------------------------------- |
| 1458 | // A PageIterator iterates the pages in a paged space. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1459 | |
| 1460 | class PageIterator BASE_EMBEDDED { |
| 1461 | public: |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1462 | explicit inline PageIterator(PagedSpace* space); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1463 | |
| 1464 | inline bool has_next(); |
| 1465 | inline Page* next(); |
| 1466 | |
| 1467 | private: |
| 1468 | PagedSpace* space_; |
| 1469 | Page* prev_page_; // Previous page returned. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1470 | // Next page that will be returned. Cached here so that we can use this |
| 1471 | // iterator for operations that deallocate pages. |
| 1472 | Page* next_page_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1473 | }; |
| 1474 | |
| 1475 | |
| 1476 | // ----------------------------------------------------------------------------- |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1477 | // A space has a circular list of pages. The next page can be accessed via |
| 1478 | // Page::next_page() call. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1479 | |
| 1480 | // An abstraction of allocation and relocation pointers in a page-structured |
| 1481 | // space. |
| 1482 | class AllocationInfo { |
| 1483 | public: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1484 | AllocationInfo() : top_(nullptr), limit_(nullptr) {} |
| 1485 | AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {} |
| 1486 | |
| 1487 | void Reset(Address top, Address limit) { |
| 1488 | set_top(top); |
| 1489 | set_limit(limit); |
| 1490 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1491 | |
| 1492 | INLINE(void set_top(Address top)) { |
| 1493 | SLOW_DCHECK(top == NULL || |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1494 | (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1495 | top_ = top; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1496 | } |
| 1497 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1498 | INLINE(Address top()) const { |
| 1499 | SLOW_DCHECK(top_ == NULL || |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1500 | (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1501 | return top_; |
| 1502 | } |
| 1503 | |
| 1504 | Address* top_address() { return &top_; } |
| 1505 | |
| 1506 | INLINE(void set_limit(Address limit)) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1507 | limit_ = limit; |
| 1508 | } |
| 1509 | |
| 1510 | INLINE(Address limit()) const { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1511 | return limit_; |
| 1512 | } |
| 1513 | |
| 1514 | Address* limit_address() { return &limit_; } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1515 | |
| 1516 | #ifdef DEBUG |
| 1517 | bool VerifyPagedAllocation() { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1518 | return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) && |
| 1519 | (top_ <= limit_); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1520 | } |
| 1521 | #endif |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1522 | |
| 1523 | private: |
| 1524 | // Current allocation top. |
| 1525 | Address top_; |
| 1526 | // Current allocation limit. |
| 1527 | Address limit_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1528 | }; |
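 | | // AllocationInfo backs the bump-pointer fast path (a sketch, not the actual
 | | // allocation routine):
 | | //
 | | //   Address top = info.top();
 | | //   if (info.limit() - top >= size_in_bytes) {
 | | //     info.set_top(top + size_in_bytes);  // object occupies
 | | //                                         // [top, top + size_in_bytes)
 | | //   }  // else: fall back to the free list (see FreeList below).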
| 1529 | |
| 1530 | |
| 1531 | // An abstraction of the accounting statistics of a page-structured space. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1532 | // |
| 1533 | // The stats are only set by functions that ensure they stay balanced. These |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1534 | // functions increase or decrease one of the non-capacity stats in conjunction |
| 1535 | // with capacity, or else they always balance increases and decreases to the |
| 1536 | // non-capacity stats. |
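 | | //
 | | // For example, growing a space by one page keeps the stats balanced
 | | // (sketch): ExpandSpace() raises capacity_ and size_ together, and the
 | | // immediately following free of the fresh page moves those bytes out of
 | | // size_ again:
 | | //
 | | //   stats.ExpandSpace(Page::kAllocatableMemory);      // capacity_, size_ up
 | | //   stats.DeallocateBytes(Page::kAllocatableMemory);  // size_ back down
 | | //   stats.AllocateBytes(64);                          // a real allocation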
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1537 | class AllocationStats BASE_EMBEDDED { |
| 1538 | public: |
| 1539 | AllocationStats() { Clear(); } |
| 1540 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1541 | // Zero out all the allocation statistics (i.e., no capacity). |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1542 | void Clear() { |
| 1543 | capacity_ = 0; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1544 | max_capacity_ = 0; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1545 | size_ = 0; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1546 | } |
| 1547 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1548 | void ClearSize() { size_ = capacity_; } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1549 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1550 | // Reset the allocation statistics (i.e., available = capacity with no wasted |
| 1551 | // or allocated bytes). |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1552 | void Reset() { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1553 | size_ = 0; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1554 | } |
| 1555 | |
| 1556 | // Accessors for the allocation statistics. |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 1557 | intptr_t Capacity() { return capacity_; } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1558 | intptr_t MaxCapacity() { return max_capacity_; } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1559 | intptr_t Size() { |
| 1560 | CHECK_GE(size_, 0); |
| 1561 | return size_; |
| 1562 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1563 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1564 | // Grow the space by adding available bytes. They are initially marked as |
| 1565 | // being in use (part of the size), but will normally be immediately freed, |
| 1566 | // putting them on the free list and removing them from size_. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1567 | void ExpandSpace(int size_in_bytes) { |
| 1568 | capacity_ += size_in_bytes; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1569 | size_ += size_in_bytes; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1570 | if (capacity_ > max_capacity_) { |
| 1571 | max_capacity_ = capacity_; |
| 1572 | } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1573 | CHECK(size_ >= 0); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1574 | } |
| 1575 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1576 | // Shrink the space by removing available bytes. Since shrinking is done |
| 1577 | // during sweeping, bytes have been marked as being in use (part of the size) |
| 1578 | // and are hereby freed. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1579 | void ShrinkSpace(int size_in_bytes) { |
| 1580 | capacity_ -= size_in_bytes; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1581 | size_ -= size_in_bytes; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1582 | CHECK(size_ >= 0); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1583 | } |
| 1584 | |
| 1585 | // Allocate from available bytes (available -> size). |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 1586 | void AllocateBytes(intptr_t size_in_bytes) { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1587 | size_ += size_in_bytes; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1588 | CHECK(size_ >= 0); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1589 | } |
| 1590 | |
| 1591 | // Free allocated bytes, making them available (size -> available). |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 1592 | void DeallocateBytes(intptr_t size_in_bytes) { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1593 | size_ -= size_in_bytes; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1594 | CHECK_GE(size_, 0); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1595 | } |
| 1596 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1597 | // Merge {other} into {this}. |
| 1598 | void Merge(const AllocationStats& other) { |
| 1599 | capacity_ += other.capacity_; |
| 1600 | size_ += other.size_; |
| 1601 | if (other.max_capacity_ > max_capacity_) { |
| 1602 | max_capacity_ = other.max_capacity_; |
| 1603 | } |
| 1604 | CHECK_GE(size_, 0); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1605 | } |
| 1606 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1607 | void DecreaseCapacity(intptr_t size_in_bytes) { |
| 1608 | capacity_ -= size_in_bytes; |
| 1609 | CHECK_GE(capacity_, 0); |
| 1610 | CHECK_GE(capacity_, size_); |
| 1611 | } |
| 1612 | |
| 1613 | void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; } |
| 1614 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1615 | private: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1616 | // |capacity_|: The number of object-area bytes (i.e., not including page |
| 1617 | // bookkeeping structures) currently in the space. |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 1618 | intptr_t capacity_; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1619 | |
| 1620 | // |max_capacity_|: The maximum capacity ever observed. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1621 | intptr_t max_capacity_; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1622 | |
| 1623 | // |size_|: The number of allocated bytes. |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 1624 | intptr_t size_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1625 | }; |
| 1626 | |
| 1627 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1628 | // A free list category maintains a linked list of free memory blocks. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1629 | class FreeListCategory { |
| 1630 | public: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1631 | explicit FreeListCategory(FreeList* owner, FreeListCategoryType type) |
| 1632 | : type_(type), |
| 1633 | top_(nullptr), |
| 1634 | end_(nullptr), |
| 1635 | available_(0), |
| 1636 | owner_(owner) {} |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1637 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1638 | // Concatenates {category} into {this}. |
| 1639 | // |
| 1640 | // Note: Thread-safe. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1641 | intptr_t Concatenate(FreeListCategory* category); |
| 1642 | |
| 1643 | void Reset(); |
| 1644 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1645 | void Free(FreeSpace* node, int size_in_bytes); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1646 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1647 | // Pick a node from the list. |
| 1648 | FreeSpace* PickNodeFromList(int* node_size); |
| 1649 | |
| 1650 | // Pick a node from the list and compare it against {size_in_bytes}. If the |
 | 1651 |   // node's size is greater than or equal, return the node; otherwise return null.
| 1652 | FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size); |
| 1653 | |
| 1654 | // Search for a node of size {size_in_bytes}. |
| 1655 | FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1656 | |
| 1657 | intptr_t EvictFreeListItemsInList(Page* p); |
| 1658 | bool ContainsPageFreeListItemsInList(Page* p); |
| 1659 | |
| 1660 | void RepairFreeList(Heap* heap); |
| 1661 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1662 | bool IsEmpty() { return top() == nullptr; } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1663 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1664 | FreeList* owner() { return owner_; } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1665 | int available() const { return available_; } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1666 | |
| 1667 | #ifdef DEBUG |
| 1668 | intptr_t SumFreeList(); |
| 1669 | int FreeListLength(); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1670 | bool IsVeryLong(); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1671 | #endif |
| 1672 | |
| 1673 | private: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1674 | // For debug builds we accurately compute free lists lengths up until |
| 1675 | // {kVeryLongFreeList} by manually walking the list. |
| 1676 | static const int kVeryLongFreeList = 500; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1677 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1678 | FreeSpace* top() { return top_.Value(); } |
| 1679 | void set_top(FreeSpace* top) { top_.SetValue(top); } |
| 1680 | |
| 1681 | FreeSpace* end() const { return end_; } |
| 1682 | void set_end(FreeSpace* end) { end_ = end; } |
| 1683 | |
| 1684 | // |type_|: The type of this free list category. |
| 1685 | FreeListCategoryType type_; |
| 1686 | |
| 1687 | // |top_|: Points to the top FreeSpace* in the free list category. |
| 1688 | AtomicValue<FreeSpace*> top_; |
| 1689 | |
| 1690 | // |end_|: Points to the end FreeSpace* in the free list category. |
| 1691 | FreeSpace* end_; |
| 1692 | |
| 1693 | // |available_|: Total available bytes in all blocks of this free list |
| 1694 | // category. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1695 | int available_; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1696 | |
| 1697 | // |owner_|: The owning free list of this category. |
| 1698 | FreeList* owner_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1699 | }; |
| 1700 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1701 | // A free list maintaining free blocks of memory. The free list is organized in |
| 1702 | // a way to encourage objects allocated around the same time to be near each |
| 1703 | // other. The normal way to allocate is intended to be by bumping a 'top' |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1704 | // pointer until it hits a 'limit' pointer. When the limit is hit we need to |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1705 | // find a new space to allocate from. This is done with the free list, which is |
| 1706 | // divided up into rough categories to cut down on waste. Having finer |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1707 | // categories would scatter allocation more. |
| 1708 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1709 | // The free list is organized in categories as follows: |
| 1710 | // 1-31 words (too small): Such small free areas are discarded for efficiency |
| 1711 | // reasons. They can be reclaimed by the compactor. However the distance |
| 1712 | // between top and limit may be this small. |
| 1713 | // 32-255 words (small): Used for allocating free space between 1-31 words in |
| 1714 | // size. |
| 1715 | // 256-2047 words (medium): Used for allocating free space between 32-255 words |
| 1716 | // in size. |
 | 1717 | // 2048-16383 words (large): Used for allocating free space between 256-2047
| 1718 | // words in size. |
| 1719 | // At least 16384 words (huge): This list is for objects of 2048 words or |
| 1720 | // larger. Empty pages are also added to this list. |
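 | | //
 | | // A worked example of the categories above: a freed block of 100 words is
 | | // stored on the small list (it falls in the 32-255 word range), while an
 | | // allocation request of 100 words is served from the medium list. Each of
 | | // the small, medium, and large categories only serves requests smaller than
 | | // its smallest stored block, so the first block found is guaranteed to fit.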
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1721 | class FreeList { |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1722 | public: |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1723 | // This method returns how much memory can be allocated after freeing |
| 1724 | // maximum_freed memory. |
| 1725 | static inline int GuaranteedAllocatable(int maximum_freed) { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1726 | if (maximum_freed <= kSmallListMin) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1727 | return 0; |
| 1728 | } else if (maximum_freed <= kSmallListMax) { |
| 1729 | return kSmallAllocationMax; |
| 1730 | } else if (maximum_freed <= kMediumListMax) { |
| 1731 | return kMediumAllocationMax; |
| 1732 | } else if (maximum_freed <= kLargeListMax) { |
| 1733 | return kLargeAllocationMax; |
| 1734 | } |
| 1735 | return maximum_freed; |
| 1736 | } |
| 1737 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1738 | explicit FreeList(PagedSpace* owner); |
| 1739 | |
| 1740 | // The method concatenates {other} into {this} and returns the added bytes, |
| 1741 | // including waste. |
| 1742 | // |
| 1743 | // Note: Thread-safe. |
| 1744 | intptr_t Concatenate(FreeList* other); |
| 1745 | |
| 1746 | // Adds a node on the free list. The block of size {size_in_bytes} starting |
| 1747 | // at {start} is placed on the free list. The return value is the number of |
| 1748 | // bytes that were not added to the free list, because they freed memory block |
 | 1749 |   // bytes that were not added to the free list, because the freed memory block
| 1750 | // its contents will be destroyed. The start address should be word aligned, |
| 1751 | // and the size should be a non-zero multiple of the word size. |
| 1752 | int Free(Address start, int size_in_bytes); |
| 1753 | |
| 1754 | // Allocate a block of size {size_in_bytes} from the free list. The block is |
 | 1755 |   // uninitialized. A failure is returned if no block is available. The size
| 1756 | // should be a non-zero multiple of the word size. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1757 | MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); |
| 1758 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1759 | // Clear the free list. |
| 1760 | void Reset(); |
| 1761 | |
| 1762 | void ResetStats() { wasted_bytes_ = 0; } |
| 1763 | |
| 1764 | // Return the number of bytes available on the free list. |
| 1765 | intptr_t Available() { |
| 1766 | return small_list_.available() + medium_list_.available() + |
| 1767 | large_list_.available() + huge_list_.available(); |
| 1768 | } |
| 1769 | |
 | 1770 |   // The method tries to find a {FreeSpace} node of at least {size_in_bytes}
 | 1771 |   // in the free list category matching that size. If no suitable
| 1772 | // node could be found, the method falls back to retrieving a {FreeSpace} |
| 1773 | // from the large or huge free list category. |
| 1774 | // |
| 1775 | // Can be used concurrently. |
| 1776 | MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes); |
| 1777 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1778 | bool IsEmpty() { |
| 1779 | return small_list_.IsEmpty() && medium_list_.IsEmpty() && |
| 1780 | large_list_.IsEmpty() && huge_list_.IsEmpty(); |
| 1781 | } |
| 1782 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1783 | // Used after booting the VM. |
| 1784 | void RepairLists(Heap* heap); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1785 | |
| 1786 | intptr_t EvictFreeListItems(Page* p); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1787 | bool ContainsPageFreeListItems(Page* p); |
| 1788 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1789 | PagedSpace* owner() { return owner_; } |
| 1790 | intptr_t wasted_bytes() { return wasted_bytes_; } |
| 1791 | base::Mutex* mutex() { return &mutex_; } |
| 1792 | |
| 1793 | #ifdef DEBUG |
| 1794 | void Zap(); |
| 1795 | intptr_t SumFreeLists(); |
| 1796 | bool IsVeryLong(); |
| 1797 | #endif |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1798 | |
| 1799 | private: |
| 1800 | // The size range of blocks, in bytes. |
| 1801 | static const int kMinBlockSize = 3 * kPointerSize; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1802 | static const int kMaxBlockSize = Page::kAllocatableMemory; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1803 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1804 | static const int kSmallListMin = 0x1f * kPointerSize; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1805 | static const int kSmallListMax = 0xff * kPointerSize; |
| 1806 | static const int kMediumListMax = 0x7ff * kPointerSize; |
| 1807 | static const int kLargeListMax = 0x3fff * kPointerSize; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1808 | static const int kSmallAllocationMax = kSmallListMin; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1809 | static const int kMediumAllocationMax = kSmallListMax; |
| 1810 | static const int kLargeAllocationMax = kMediumListMax; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1811 | |
| 1812 | FreeSpace* FindNodeFor(int size_in_bytes, int* node_size); |
| 1813 | FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size); |
| 1814 | |
| 1815 | FreeListCategory* GetFreeListCategory(FreeListCategoryType category) { |
| 1816 | switch (category) { |
| 1817 | case kSmall: |
| 1818 | return &small_list_; |
| 1819 | case kMedium: |
| 1820 | return &medium_list_; |
| 1821 | case kLarge: |
| 1822 | return &large_list_; |
| 1823 | case kHuge: |
| 1824 | return &huge_list_; |
| 1825 | default: |
| 1826 | UNREACHABLE(); |
| 1827 | } |
| 1828 | UNREACHABLE(); |
| 1829 | return nullptr; |
| 1830 | } |
| 1831 | |
| 1832 | PagedSpace* owner_; |
| 1833 | base::Mutex mutex_; |
| 1834 | intptr_t wasted_bytes_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1835 | FreeListCategory small_list_; |
| 1836 | FreeListCategory medium_list_; |
| 1837 | FreeListCategory large_list_; |
| 1838 | FreeListCategory huge_list_; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1839 | |
| 1840 | DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); |
| 1841 | }; |
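 | | // Per the category comment above, freed areas of 1-31 words are discarded
 | | // rather than linked into any list; Free() reports them through its return
 | | // value (sketch):
 | | //
 | | //   int wasted = free_list->Free(start, size_in_bytes);
 | | //   // wasted == size_in_bytes for a too-small block, 0 once it is listed.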
| 1842 | |
| 1843 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1844 | class AllocationResult { |
| 1845 | public: |
| 1846 | // Implicit constructor from Object*. |
| 1847 | AllocationResult(Object* object) // NOLINT |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1848 | : object_(object) { |
| 1849 | // AllocationResults can't return Smis, which are used to represent |
| 1850 | // failure and the space to retry in. |
| 1851 | CHECK(!object->IsSmi()); |
| 1852 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1853 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1854 | AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {} |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1855 | |
| 1856 | static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) { |
| 1857 | return AllocationResult(space); |
| 1858 | } |
| 1859 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1860 | inline bool IsRetry() { return object_->IsSmi(); } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1861 | |
| 1862 | template <typename T> |
| 1863 | bool To(T** obj) { |
| 1864 | if (IsRetry()) return false; |
| 1865 | *obj = T::cast(object_); |
| 1866 | return true; |
| 1867 | } |
| 1868 | |
| 1869 | Object* ToObjectChecked() { |
| 1870 | CHECK(!IsRetry()); |
| 1871 | return object_; |
| 1872 | } |
| 1873 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1874 | inline AllocationSpace RetrySpace(); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1875 | |
| 1876 | private: |
| 1877 | explicit AllocationResult(AllocationSpace space) |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1878 | : object_(Smi::FromInt(static_cast<int>(space))) {} |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1879 | |
| 1880 | Object* object_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1881 | }; |
| 1882 | |
| 1883 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1884 | STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize); |
| 1885 | |
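// A typical consumer of an AllocationResult, as an illustrative sketch (not
// code from this header; any raw allocator declared below could produce the
// result, and `space` and `size` are placeholders):
//
//   AllocationResult allocation = space->AllocateRaw(size, kWordAligned);
//   HeapObject* object = nullptr;
//   if (!allocation.To(&object)) {
//     // Failed; allocation.RetrySpace() names the space to collect before
//     // retrying.
//   }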
| 1886 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1887 | // LocalAllocationBuffer represents a linear allocation area that is created |
| 1888 | // from a given {AllocationResult} and can be used to allocate memory without |
| 1889 | // synchronization. |
| 1890 | // |
| 1891 | // The buffer is properly closed upon destruction and reassignment. |
| 1892 | // Example: |
| 1893 | // { |
| 1894 | // AllocationResult result = ...; |
| 1895 | // LocalAllocationBuffer a(heap, result, size); |
| 1896 | // LocalAllocationBuffer b = a; |
| 1897 | // CHECK(!a.IsValid()); |
| 1898 | // CHECK(b.IsValid()); |
| 1899 | // // {a} is invalid now and cannot be used for further allocations. |
| 1900 | // } |
| 1901 | // // Since {b} went out of scope, the LAB is closed, resulting in creating a |
| 1902 | // // filler object for the remaining area. |
| 1903 | class LocalAllocationBuffer { |
| 1904 | public: |
| 1905 | // Indicates that a buffer cannot be used for allocations anymore. Can result |
 | 1906 | // from either reassigning a buffer or trying to construct it from an
| 1907 | // invalid {AllocationResult}. |
| 1908 | static inline LocalAllocationBuffer InvalidBuffer(); |
| 1909 | |
| 1910 | // Creates a new LAB from a given {AllocationResult}. Results in |
| 1911 | // InvalidBuffer if the result indicates a retry. |
| 1912 | static inline LocalAllocationBuffer FromResult(Heap* heap, |
| 1913 | AllocationResult result, |
| 1914 | intptr_t size); |
| 1915 | |
| 1916 | ~LocalAllocationBuffer() { Close(); } |
| 1917 | |
| 1918 | // Convert to C++11 move-semantics once allowed by the style guide. |
| 1919 | LocalAllocationBuffer(const LocalAllocationBuffer& other); |
| 1920 | LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other); |
| 1921 | |
| 1922 | MUST_USE_RESULT inline AllocationResult AllocateRawAligned( |
| 1923 | int size_in_bytes, AllocationAlignment alignment); |
| 1924 | |
| 1925 | inline bool IsValid() { return allocation_info_.top() != nullptr; } |
| 1926 | |
| 1927 | // Try to merge LABs, which is only possible when they are adjacent in memory. |
| 1928 | // Returns true if the merge was successful, false otherwise. |
| 1929 | inline bool TryMerge(LocalAllocationBuffer* other); |
| 1930 | |
| 1931 | private: |
| 1932 | LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info); |
| 1933 | |
| 1934 | void Close(); |
| 1935 | |
| 1936 | Heap* heap_; |
| 1937 | AllocationInfo allocation_info_; |
| 1938 | }; |
| 1939 | |
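// A minimal allocation sketch for a LAB (illustrative only; `lab_size` and
// `size` are placeholder values, and the result would come from a real
// new-space request):
//
//   AllocationResult result =
//       new_space->AllocateRawSynchronized(lab_size, kWordAligned);
//   LocalAllocationBuffer lab =
//       LocalAllocationBuffer::FromResult(heap, result, lab_size);
//   if (lab.IsValid()) {
//     AllocationResult object = lab.AllocateRawAligned(size, kWordAligned);
//   }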
| 1940 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1941 | class PagedSpace : public Space { |
| 1942 | public: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1943 | static const intptr_t kCompactionMemoryWanted = 500 * KB; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1944 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1945 | // Creates a space with an id. |
| 1946 | PagedSpace(Heap* heap, AllocationSpace id, Executability executable); |
| 1947 | |
| 1948 | ~PagedSpace() override { TearDown(); } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1949 | |
 | 1950 | // Set up the space using the memory allocator's initial chunk of virtual
 | 1951 | // memory if possible. If that block of addresses is not big enough to
 | 1952 | // contain a single page-aligned page, a fresh chunk will be
 | 1953 | // allocated.
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1954 | bool SetUp(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1955 | |
| 1956 | // Returns true if the space has been successfully set up and not |
| 1957 | // subsequently torn down. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1958 | bool HasBeenSetUp(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1959 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1960 | // Checks whether an object/address is in this space. |
| 1961 | inline bool Contains(Address a); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1962 | inline bool Contains(HeapObject* o); |
 | 1963 | // Unlike the Contains() methods, it is safe to call this one even for addresses
| 1964 | // of unmapped memory. |
| 1965 | bool ContainsSafe(Address addr); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1966 | |
| 1967 | // Given an address occupied by a live object, return that object if it is |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1968 | // in this space, or a Smi if it is not. The implementation iterates over |
 | 1969 | // objects in the page containing the address, so the cost is linear in the
| 1970 | // number of objects in the page. It may be slow. |
| 1971 | Object* FindObject(Address addr); |
| 1972 | |
| 1973 | // During boot the free_space_map is created, and afterwards we may need |
| 1974 | // to write it into the free list nodes that were already created. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1975 | void RepairFreeListsAfterDeserialization(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1976 | |
Ben Murdoch | 85b7179 | 2012-04-11 18:30:58 +0100 | [diff] [blame] | 1977 | // Prepares for a mark-compact GC. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1978 | void PrepareForMarkCompact(); |
Ben Murdoch | 85b7179 | 2012-04-11 18:30:58 +0100 | [diff] [blame] | 1979 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1980 | // Current capacity without growing (Size() + Available()). |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 1981 | intptr_t Capacity() { return accounting_stats_.Capacity(); } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1982 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1983 | // Approximate amount of physical memory committed for this space. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1984 | size_t CommittedPhysicalMemory() override; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1985 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1986 | void ResetFreeListStatistics(); |
| 1987 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1988 | // Sets the capacity, the available space and the wasted space to zero. |
| 1989 | // The stats are rebuilt during sweeping by adding each page to the |
| 1990 | // capacity and the size when it is encountered. As free spaces are |
 | 1991 | // discovered during sweeping, they are subtracted from the size and added
| 1992 | // to the available and wasted totals. |
| 1993 | void ClearStats() { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1994 | accounting_stats_.ClearSize(); |
| 1995 | free_list_.ResetStats(); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1996 | ResetFreeListStatistics(); |
| 1997 | } |
| 1998 | |
 | 1999 | // Increases the number of available bytes in this space.
| 2000 | void AddToAccountingStats(intptr_t bytes) { |
| 2001 | accounting_stats_.DeallocateBytes(bytes); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2002 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2003 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2004 | // Available bytes without growing. These are the bytes on the free list. |
| 2005 | // The bytes in the linear allocation area are not included in this total |
| 2006 | // because updating the stats would slow down allocation. New pages are |
| 2007 | // immediately added to the free list so they show up here. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2008 | intptr_t Available() override { return free_list_.Available(); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2009 | |
| 2010 | // Allocated bytes in this space. Garbage bytes that were not found due to |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2011 | // concurrent sweeping are counted as being allocated! The bytes in the |
| 2012 | // current linear allocation area (between top and limit) are also counted |
| 2013 | // here. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2014 | intptr_t Size() override { return accounting_stats_.Size(); } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2015 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2016 | // As Size(), but the bytes in lazily swept pages are estimated and the bytes
| 2017 | // in the current linear allocation area are not included. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2018 | intptr_t SizeOfObjects() override; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2019 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2020 | // Wasted bytes in this space. These are just the bytes that were thrown away |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2021 | // due to being too small to use for allocation. |
| 2022 | virtual intptr_t Waste() { return free_list_.wasted_bytes(); } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2023 | |
| 2024 | // Returns the allocation pointer in this space. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2025 | Address top() { return allocation_info_.top(); } |
| 2026 | Address limit() { return allocation_info_.limit(); } |
| 2027 | |
| 2028 | // The allocation top address. |
| 2029 | Address* allocation_top_address() { return allocation_info_.top_address(); } |
| 2030 | |
| 2031 | // The allocation limit address. |
| 2032 | Address* allocation_limit_address() { |
| 2033 | return allocation_info_.limit_address(); |
| 2034 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2035 | |
| 2036 | // Allocate the requested number of bytes in the space if possible, return a |
| 2037 | // failure object if not. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2038 | MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned( |
| 2039 | int size_in_bytes); |
| 2040 | |
| 2041 | MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized( |
| 2042 | int size_in_bytes); |
| 2043 | |
 | 2044 | // Allocate the requested number of bytes in the space with the given
 | 2045 | // alignment if possible, return a failure object if not.
| 2046 | MUST_USE_RESULT inline AllocationResult AllocateRawAligned( |
| 2047 | int size_in_bytes, AllocationAlignment alignment); |
| 2048 | |
| 2049 | // Allocate the requested number of bytes in the space and consider allocation |
| 2050 | // alignment if needed. |
| 2051 | MUST_USE_RESULT inline AllocationResult AllocateRaw( |
| 2052 | int size_in_bytes, AllocationAlignment alignment); |
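
// For instance, a caller that needs double-aligned memory would presumably
// pass kDoubleAligned, while plain tagged data uses kWordAligned (a sketch;
// `size_in_bytes` is whatever the object requires):
//
//   AllocationResult r = space->AllocateRaw(size_in_bytes, kDoubleAligned);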
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 2053 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2054 | // Give a block of memory to the space's free list. It might be added to
 | 2055 | // the free list or accounted as waste.
 | 2056 | // Returns the number of bytes actually added to the free list, i.e.
 | 2057 | // size_in_bytes minus the bytes accounted as waste.
| 2058 | int Free(Address start, int size_in_bytes) { |
| 2059 | int wasted = free_list_.Free(start, size_in_bytes); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2060 | accounting_stats_.DeallocateBytes(size_in_bytes); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2061 | return size_in_bytes - wasted; |
| 2062 | } |
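
// As a concrete consequence of the constants above: on a 64-bit build a
// freed 16-byte block is below kMinBlockSize (24 bytes), so it would be
// accounted entirely as waste and Free() would return 0 for it.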
Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 2063 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2064 | void ResetFreeList() { free_list_.Reset(); } |
| 2065 | |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2066 | // Set space allocation info. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2067 | void SetTopAndLimit(Address top, Address limit) { |
| 2068 | DCHECK(top == limit || |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2069 | Page::FromAddress(top) == Page::FromAddress(limit - 1)); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2070 | MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2071 | allocation_info_.Reset(top, limit); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2072 | } |
| 2073 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2074 | // Empty the space allocation info, returning the unused area to the free list.
| 2075 | void EmptyAllocationInfo() { |
| 2076 | // Mark the old linear allocation area with a free space map so it can be |
| 2077 | // skipped when scanning the heap. |
| 2078 | int old_linear_size = static_cast<int>(limit() - top()); |
| 2079 | Free(top(), old_linear_size); |
| 2080 | SetTopAndLimit(NULL, NULL); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2081 | } |
| 2082 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2083 | void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); } |
| 2084 | |
| 2085 | void IncreaseCapacity(int size); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2086 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2087 | // Releases an unused page and shrinks the space. |
| 2088 | void ReleasePage(Page* page); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2089 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2090 | // The dummy page that anchors the linked list of pages. |
| 2091 | Page* anchor() { return &anchor_; } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2092 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2093 | #ifdef VERIFY_HEAP |
| 2094 | // Verify integrity of this space. |
| 2095 | virtual void Verify(ObjectVisitor* visitor); |
| 2096 | |
| 2097 | // Overridden by subclasses to verify space-specific object |
| 2098 | // properties (e.g., only maps or free-list nodes are in map space). |
| 2099 | virtual void VerifyObject(HeapObject* obj) {} |
| 2100 | #endif |
| 2101 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2102 | #ifdef DEBUG |
| 2103 | // Print meta info and objects in this space. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2104 | void Print() override; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2105 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2106 | // Reports statistics for the space |
| 2107 | void ReportStatistics(); |
| 2108 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2109 | // Report code object related statistics |
| 2110 | void CollectCodeStatistics(); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2111 | static void ReportCodeStatistics(Isolate* isolate); |
| 2112 | static void ResetCodeStatistics(Isolate* isolate); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2113 | #endif |
| 2114 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2115 | // Evacuation candidates are swept by the evacuator. This needs to return a
 | 2116 | // valid result before _and_ after evacuation has finished.
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2117 | static bool ShouldBeSweptBySweeperThreads(Page* p) { |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2118 | return !p->IsEvacuationCandidate() && |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2119 | !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept(); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2120 | } |
| 2121 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2122 | // This function tries to steal size_in_bytes of memory from the sweeper
 | 2123 | // threads' free lists. If it does not succeed in stealing enough memory, it
 | 2124 | // will wait for the sweeper threads to finish sweeping.
| 2125 | // It returns true when sweeping is completed and false otherwise. |
| 2126 | bool EnsureSweeperProgress(intptr_t size_in_bytes); |
| 2127 | |
| 2128 | void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; } |
| 2129 | |
| 2130 | Page* end_of_unswept_pages() { return end_of_unswept_pages_; } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2131 | |
| 2132 | Page* FirstPage() { return anchor_.next_page(); } |
| 2133 | Page* LastPage() { return anchor_.prev_page(); } |
| 2134 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2135 | void EvictEvacuationCandidatesFromLinearAllocationArea(); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2136 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2137 | bool CanExpand(size_t size); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2138 | |
| 2139 | // Returns the number of total pages in this space. |
| 2140 | int CountTotalPages(); |
| 2141 | |
| 2142 | // Return size of allocatable area on a page in this space. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2143 | inline int AreaSize() { return area_size_; } |
| 2144 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2145 | virtual bool is_local() { return false; } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2146 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2147 | // Merges {other} into the current space. Note that this modifies {other}, |
| 2148 | // e.g., removes its bump pointer area and resets statistics. |
| 2149 | void MergeCompactionSpace(CompactionSpace* other); |
| 2150 | |
| 2151 | void DivideUponCompactionSpaces(CompactionSpaceCollection** other, int num, |
| 2152 | intptr_t limit = kCompactionMemoryWanted); |
| 2153 | |
| 2154 | // Refills the free list from the corresponding free list filled by the |
| 2155 | // sweeper. |
| 2156 | virtual void RefillFreeList(); |
Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 2157 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2158 | protected: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2159 | void AddMemory(Address start, intptr_t size); |
| 2160 | |
| 2161 | FreeSpace* TryRemoveMemory(intptr_t size_in_bytes); |
| 2162 | |
| 2163 | void MoveOverFreeMemory(PagedSpace* other); |
| 2164 | |
| 2165 | // PagedSpaces that should be included in snapshots have different, i.e., |
| 2166 | // smaller, initial pages. |
| 2167 | virtual bool snapshotable() { return true; } |
| 2168 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2169 | FreeList* free_list() { return &free_list_; } |
| 2170 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2171 | bool HasPages() { return anchor_.next_page() != &anchor_; } |
| 2172 | |
| 2173 | // Cleans up the space, frees all pages in this space except those belonging |
| 2174 | // to the initial chunk, uncommits addresses in the initial chunk. |
| 2175 | void TearDown(); |
| 2176 | |
 | 2177 | // Expands the space by allocating a fixed number of pages. Returns false if
 | 2178 | // it cannot allocate the requested number of pages from the OS, or if the
 | 2179 | // hard heap size limit has been hit.
| 2180 | bool Expand(); |
| 2181 | |
| 2182 | // Generic fast case allocation function that tries linear allocation at the |
| 2183 | // address denoted by top in allocation_info_. |
| 2184 | inline HeapObject* AllocateLinearly(int size_in_bytes); |
| 2185 | |
| 2186 | // Generic fast case allocation function that tries aligned linear allocation |
| 2187 | // at the address denoted by top in allocation_info_. Writes the aligned |
| 2188 | // allocation size, which includes the filler size, to size_in_bytes. |
| 2189 | inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes, |
| 2190 | AllocationAlignment alignment); |
| 2191 | |
 | 2192 | // If sweeping is still in progress, try to sweep unswept pages. If that is
| 2193 | // not successful, wait for the sweeper threads and re-try free-list |
| 2194 | // allocation. |
| 2195 | MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation( |
| 2196 | int size_in_bytes); |
| 2197 | |
| 2198 | // Slow path of AllocateRaw. This function is space-dependent. |
| 2199 | MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); |
| 2200 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2201 | int area_size_; |
| 2202 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2203 | // Accounting information for this space. |
| 2204 | AllocationStats accounting_stats_; |
| 2205 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2206 | // The dummy page that anchors the double linked list of pages. |
| 2207 | Page anchor_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2208 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2209 | // The space's free list. |
| 2210 | FreeList free_list_; |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2211 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2212 | // Normal allocation information. |
| 2213 | AllocationInfo allocation_info_; |
| 2214 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2215 | // The sweeper threads iterate over the list of pointer and data space pages |
| 2216 | // and sweep these pages concurrently. They will stop sweeping after the |
| 2217 | // end_of_unswept_pages_ page. |
| 2218 | Page* end_of_unswept_pages_; |
| 2219 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2220 | // Mutex guarding any concurrent access to the space. |
| 2221 | base::Mutex space_mutex_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2222 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2223 | friend class MarkCompactCollector; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2224 | friend class PageIterator; |
| 2225 | |
| 2226 | // Used in cctest. |
| 2227 | friend class HeapTester; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2228 | }; |
| 2229 | |
| 2230 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2231 | class NumberAndSizeInfo BASE_EMBEDDED { |
| 2232 | public: |
| 2233 | NumberAndSizeInfo() : number_(0), bytes_(0) {} |
| 2234 | |
| 2235 | int number() const { return number_; } |
| 2236 | void increment_number(int num) { number_ += num; } |
| 2237 | |
| 2238 | int bytes() const { return bytes_; } |
| 2239 | void increment_bytes(int size) { bytes_ += size; } |
| 2240 | |
| 2241 | void clear() { |
| 2242 | number_ = 0; |
| 2243 | bytes_ = 0; |
| 2244 | } |
| 2245 | |
| 2246 | private: |
| 2247 | int number_; |
| 2248 | int bytes_; |
| 2249 | }; |
| 2250 | |
| 2251 | |
| 2252 | // HistogramInfo class for recording a single "bar" of a histogram. This |
Ben Murdoch | 3fb3ca8 | 2011-12-02 17:19:32 +0000 | [diff] [blame] | 2253 | // class is used for collecting statistics to print to the log file. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2254 | class HistogramInfo : public NumberAndSizeInfo { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2255 | public: |
| 2256 | HistogramInfo() : NumberAndSizeInfo() {} |
| 2257 | |
| 2258 | const char* name() { return name_; } |
| 2259 | void set_name(const char* name) { name_ = name; } |
| 2260 | |
| 2261 | private: |
| 2262 | const char* name_; |
| 2263 | }; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2264 | |
| 2265 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2266 | enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 }; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2267 | |
| 2268 | |
| 2269 | class SemiSpace; |
| 2270 | |
| 2271 | |
| 2272 | class NewSpacePage : public MemoryChunk { |
| 2273 | public: |
 | 2274 | // GC-related flags copied from from-space to to-space when
| 2275 | // flipping semispaces. |
| 2276 | static const intptr_t kCopyOnFlipFlagsMask = |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2277 | (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | |
| 2278 | (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) | |
| 2279 | (1 << MemoryChunk::SCAN_ON_SCAVENGE); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2280 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2281 | static const int kAreaSize = Page::kAllocatableMemory; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2282 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2283 | inline NewSpacePage* next_page() { |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2284 | return static_cast<NewSpacePage*>(next_chunk()); |
| 2285 | } |
| 2286 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2287 | inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2288 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2289 | inline NewSpacePage* prev_page() { |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2290 | return static_cast<NewSpacePage*>(prev_chunk()); |
| 2291 | } |
| 2292 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2293 | inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2294 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2295 | SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2296 | |
| 2297 | bool is_anchor() { return !this->InNewSpace(); } |
| 2298 | |
| 2299 | static bool IsAtStart(Address addr) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2300 | return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == |
| 2301 | kObjectStartOffset; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2302 | } |
| 2303 | |
| 2304 | static bool IsAtEnd(Address addr) { |
| 2305 | return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0; |
| 2306 | } |
| 2307 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2308 | Address address() { return reinterpret_cast<Address>(this); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2309 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2310 | // Finds the NewSpacePage containing the given address. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2311 | static inline NewSpacePage* FromAddress(Address address_in_page) { |
| 2312 | Address page_start = |
| 2313 | reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) & |
| 2314 | ~Page::kPageAlignmentMask); |
| 2315 | NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start); |
| 2316 | return page; |
| 2317 | } |
| 2318 | |
| 2319 | // Find the page for a limit address. A limit address is either an address |
| 2320 | // inside a page, or the address right after the last byte of a page. |
| 2321 | static inline NewSpacePage* FromLimit(Address address_limit) { |
| 2322 | return NewSpacePage::FromAddress(address_limit - 1); |
| 2323 | } |
| 2324 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2325 | // Checks if address1 and address2 are on the same new space page. |
| 2326 | static inline bool OnSamePage(Address address1, Address address2) { |
| 2327 | return NewSpacePage::FromAddress(address1) == |
| 2328 | NewSpacePage::FromAddress(address2); |
| 2329 | } |
| 2330 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2331 | private: |
 | 2332 | // Create a NewSpacePage object that is only used as an anchor
| 2333 | // for the doubly-linked list of real pages. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2334 | explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2335 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2336 | static NewSpacePage* Initialize(Heap* heap, Address start, |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2337 | SemiSpace* semi_space); |
| 2338 | |
 | 2339 | // Initialize a fake NewSpacePage used as a sentinel at the ends
| 2340 | // of a doubly-linked list of real NewSpacePages. |
| 2341 | // Only uses the prev/next links, and sets flags to not be in new-space. |
| 2342 | void InitializeAsAnchor(SemiSpace* owner); |
| 2343 | |
| 2344 | friend class SemiSpace; |
| 2345 | friend class SemiSpaceIterator; |
| 2346 | }; |
| 2347 | |
| 2348 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2349 | // ----------------------------------------------------------------------------- |
| 2350 | // SemiSpace in young generation |
| 2351 | // |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2352 | // A semispace is a contiguous chunk of memory holding page-like memory |
| 2353 | // chunks. The mark-compact collector uses the memory of the first page in |
| 2354 | // the from space as a marking stack when tracing live objects. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2355 | |
| 2356 | class SemiSpace : public Space { |
| 2357 | public: |
| 2358 | // Constructor. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2359 | SemiSpace(Heap* heap, SemiSpaceId semispace) |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2360 | : Space(heap, NEW_SPACE, NOT_EXECUTABLE), |
| 2361 | start_(NULL), |
| 2362 | age_mark_(NULL), |
| 2363 | id_(semispace), |
| 2364 | anchor_(this), |
| 2365 | current_page_(NULL) {} |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2366 | |
| 2367 | // Sets up the semispace using the given chunk. |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 2368 | void SetUp(Address start, int initial_capacity, int target_capacity, |
| 2369 | int maximum_capacity); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2370 | |
| 2371 | // Tear down the space. Heap memory was not allocated by the space, so it |
| 2372 | // is not deallocated here. |
| 2373 | void TearDown(); |
| 2374 | |
| 2375 | // True if the space has been set up but not torn down. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2376 | bool HasBeenSetUp() { return start_ != NULL; } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2377 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2378 | // Grow the semispace to the new capacity. The new capacity |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2379 | // requested must be larger than the current capacity and less than |
| 2380 | // the maximum capacity. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2381 | bool GrowTo(int new_capacity); |
| 2382 | |
| 2383 | // Shrinks the semispace to the new capacity. The new capacity |
| 2384 | // requested must be more than the amount of used memory in the |
| 2385 | // semispace and less than the current capacity. |
| 2386 | bool ShrinkTo(int new_capacity); |
| 2387 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 2388 | // Sets the total capacity. Only possible when the space is not committed. |
| 2389 | bool SetTotalCapacity(int new_capacity); |
| 2390 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2391 | // Returns the start address of the first page of the space. |
| 2392 | Address space_start() { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2393 | DCHECK(anchor_.next_page() != &anchor_); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2394 | return anchor_.next_page()->area_start(); |
| 2395 | } |
| 2396 | |
| 2397 | // Returns the start address of the current page of the space. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2398 | Address page_low() { return current_page_->area_start(); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2399 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2400 | // Returns one past the end address of the space. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2401 | Address space_end() { return anchor_.prev_page()->area_end(); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2402 | |
| 2403 | // Returns one past the end address of the current page of the space. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2404 | Address page_high() { return current_page_->area_end(); } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2405 | |
| 2406 | bool AdvancePage() { |
| 2407 | NewSpacePage* next_page = current_page_->next_page(); |
| 2408 | if (next_page == anchor()) return false; |
| 2409 | current_page_ = next_page; |
| 2410 | return true; |
| 2411 | } |
| 2412 | |
| 2413 | // Resets the space to using the first page. |
| 2414 | void Reset(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2415 | |
| 2416 | // Age mark accessors. |
| 2417 | Address age_mark() { return age_mark_; } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2418 | void set_age_mark(Address mark); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2419 | |
| 2420 | // True if the address is in the address range of this semispace (not |
| 2421 | // necessarily below the allocation pointer). |
| 2422 | bool Contains(Address a) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2423 | return (reinterpret_cast<uintptr_t>(a) & address_mask_) == |
| 2424 | reinterpret_cast<uintptr_t>(start_); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2425 | } |
| 2426 | |
| 2427 | // True if the object is a heap object in the address range of this |
| 2428 | // semispace (not necessarily below the allocation pointer). |
| 2429 | bool Contains(Object* o) { |
| 2430 | return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_; |
| 2431 | } |
| 2432 | |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 2433 | // If we don't have these here, then SemiSpace will be abstract. However,
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2434 | // they should never be called:
| 2435 | |
| 2436 | intptr_t Size() override { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2437 | UNREACHABLE(); |
| 2438 | return 0; |
| 2439 | } |
| 2440 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2441 | intptr_t SizeOfObjects() override { return Size(); } |
| 2442 | |
| 2443 | intptr_t Available() override { |
| 2444 | UNREACHABLE(); |
| 2445 | return 0; |
| 2446 | } |
| 2447 | |
| 2448 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2449 | bool is_committed() { return committed_; } |
| 2450 | bool Commit(); |
| 2451 | bool Uncommit(); |
| 2452 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2453 | NewSpacePage* first_page() { return anchor_.next_page(); } |
| 2454 | NewSpacePage* current_page() { return current_page_; } |
| 2455 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2456 | #ifdef VERIFY_HEAP |
| 2457 | virtual void Verify(); |
| 2458 | #endif |
| 2459 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2460 | #ifdef DEBUG |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2461 | void Print() override; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2462 | // Validate a range of addresses in a SemiSpace.
| 2463 | // The "from" address must be on a page prior to the "to" address, |
| 2464 | // in the linked page order, or it must be earlier on the same page. |
| 2465 | static void AssertValidRange(Address from, Address to); |
| 2466 | #else |
| 2467 | // Do nothing. |
| 2468 | inline static void AssertValidRange(Address from, Address to) {} |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2469 | #endif |
| 2470 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2471 | // Returns the current total capacity of the semispace. |
| 2472 | int TotalCapacity() { return total_capacity_; } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2473 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 2474 | // Returns the target for total capacity of the semispace. |
| 2475 | int TargetCapacity() { return target_capacity_; } |
| 2476 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2477 | // Returns the maximum total capacity of the semispace. |
| 2478 | int MaximumTotalCapacity() { return maximum_total_capacity_; } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2479 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2480 | // Returns the initial capacity of the semispace. |
| 2481 | int InitialTotalCapacity() { return initial_total_capacity_; } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2482 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2483 | SemiSpaceId id() { return id_; } |
| 2484 | |
| 2485 | static void Swap(SemiSpace* from, SemiSpace* to); |
| 2486 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2487 | // Approximate amount of physical memory committed for this space. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2488 | size_t CommittedPhysicalMemory() override; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2489 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2490 | private: |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2491 | // Flips the semispace between being from-space and to-space. |
| 2492 | // Copies the flags into the masked positions on all pages in the space. |
| 2493 | void FlipPages(intptr_t flags, intptr_t flag_mask); |
| 2494 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2495 | // Updates Capacity and MaximumCommitted based on new capacity. |
| 2496 | void SetCapacity(int new_capacity); |
| 2497 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2498 | NewSpacePage* anchor() { return &anchor_; } |
| 2499 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2500 | // The current, target, maximum, and initial total capacity of the space.
| 2501 | int total_capacity_; |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 2502 | int target_capacity_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2503 | int maximum_total_capacity_; |
| 2504 | int initial_total_capacity_; |
| 2505 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2506 | // The start address of the space. |
| 2507 | Address start_; |
| 2508 | // Used to govern object promotion during mark-compact collection. |
| 2509 | Address age_mark_; |
| 2510 | |
| 2511 | // Masks and comparison values to test for containment in this semispace. |
| 2512 | uintptr_t address_mask_; |
| 2513 | uintptr_t object_mask_; |
| 2514 | uintptr_t object_expected_; |
| 2515 | |
| 2516 | bool committed_; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2517 | SemiSpaceId id_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2518 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2519 | NewSpacePage anchor_; |
| 2520 | NewSpacePage* current_page_; |
| 2521 | |
| 2522 | friend class SemiSpaceIterator; |
| 2523 | friend class NewSpacePageIterator; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2524 | }; |
| 2525 | |
| 2526 | |
| 2527 | // A SemiSpaceIterator is an ObjectIterator that iterates over the active |
 | 2528 | // semispace of the heap's new space. It iterates over the objects in the
 | 2529 | // semispace, from the bottom to the top of the semispace. New objects
 | 2530 | // allocated after the iterator is created are not iterated.
| 2532 | class SemiSpaceIterator : public ObjectIterator { |
| 2533 | public: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2534 | // Create an iterator over the allocated objects in the given to-space. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2535 | explicit SemiSpaceIterator(NewSpace* space); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2536 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2537 | inline HeapObject* Next(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2538 | |
| 2539 | // Implementation of the ObjectIterator functions. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2540 | inline HeapObject* next_object() override; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2541 | |
| 2542 | private: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2543 | void Initialize(Address start, Address end); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2544 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2545 | // The current iteration point. |
| 2546 | Address current_; |
| 2547 | // The end of iteration. |
| 2548 | Address limit_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2549 | }; |
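
// A usage sketch (assuming, as with the heap's other object iterators, that
// Next() returns NULL once the end of the semispace is reached):
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // Visit obj.
//   }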
| 2550 | |
| 2551 | |
| 2552 | // ----------------------------------------------------------------------------- |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2553 | // A NewSpacePageIterator iterates over the pages in a semispace.
| 2554 | class NewSpacePageIterator BASE_EMBEDDED { |
| 2555 | public: |
| 2556 | // Make an iterator that runs over all pages in to-space. |
| 2557 | explicit inline NewSpacePageIterator(NewSpace* space); |
| 2558 | |
| 2559 | // Make an iterator that runs over all pages in the given semispace, |
| 2560 | // even those not used in allocation. |
| 2561 | explicit inline NewSpacePageIterator(SemiSpace* space); |
| 2562 | |
| 2563 | // Make iterator that iterates from the page containing start |
| 2564 | // to the page that contains limit in the same semispace. |
| 2565 | inline NewSpacePageIterator(Address start, Address limit); |
| 2566 | |
| 2567 | inline bool has_next(); |
| 2568 | inline NewSpacePage* next(); |
| 2569 | |
| 2570 | private: |
| 2571 | NewSpacePage* prev_page_; // Previous page returned. |
| 2572 | // Next page that will be returned. Cached here so that we can use this |
| 2573 | // iterator for operations that deallocate pages. |
| 2574 | NewSpacePage* next_page_; |
 | 2575 | // The last page that will be returned (iteration stops after it).
| 2576 | NewSpacePage* last_page_; |
| 2577 | }; |
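
// The intended iteration pattern (a sketch):
//
//   NewSpacePageIterator it(space);
//   while (it.has_next()) {
//     NewSpacePage* page = it.next();
//     // Work on page; since the following page is cached by the iterator,
//     // operations that deallocate page are safe here.
//   }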
| 2578 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2579 | // ----------------------------------------------------------------------------- |
| 2580 | // Allows observation of inline allocation in the new space. |
| 2581 | class InlineAllocationObserver { |
| 2582 | public: |
| 2583 | explicit InlineAllocationObserver(intptr_t step_size) |
| 2584 | : step_size_(step_size), bytes_to_next_step_(step_size) { |
| 2585 | DCHECK(step_size >= kPointerSize); |
| 2586 | } |
| 2587 | virtual ~InlineAllocationObserver() {} |
| 2588 | |
| 2589 | private: |
| 2590 | intptr_t step_size() const { return step_size_; } |
| 2591 | intptr_t bytes_to_next_step() const { return bytes_to_next_step_; } |
| 2592 | |
 | 2593 | // Pure virtual method provided by the subclasses that gets called when at
 | 2594 | // least step_size bytes have been allocated. soon_object is the address just
 | 2595 | // allocated (but not yet initialized). size is the size of the object as
 | 2596 | // requested (i.e., without the alignment fillers). Some complexities to be
 | 2597 | // aware of:
 | 2598 | // 1) soon_object will be nullptr in cases where we end up observing an
 | 2599 | //    allocation that happens to be a filler space (e.g., at page boundaries).
| 2600 | // 2) size is the requested size at the time of allocation. Right-trimming |
| 2601 | // may change the object size dynamically. |
| 2602 | // 3) soon_object may actually be the first object in an allocation-folding |
 | 2603 | //    group. In such a case, size is the size of the group rather than that
 | 2604 | //    of the first object.
| 2605 | virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0; |
| 2606 | |
 | 2607 | // Called each time the new space does an inline allocation step. This may
 | 2608 | // happen more frequently than every step_size bytes (e.g., when there are
 | 2609 | // multiple observers, or when a page or space boundary is encountered).
| 2610 | void InlineAllocationStep(int bytes_allocated, Address soon_object, |
| 2611 | size_t size) { |
| 2612 | bytes_to_next_step_ -= bytes_allocated; |
| 2613 | if (bytes_to_next_step_ <= 0) { |
| 2614 | Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object, |
| 2615 | size); |
| 2616 | bytes_to_next_step_ = step_size_; |
| 2617 | } |
| 2618 | } |
| 2619 | |
| 2620 | intptr_t step_size_; |
| 2621 | intptr_t bytes_to_next_step_; |
| 2622 | |
| 2623 | friend class NewSpace; |
| 2624 | |
| 2625 | DISALLOW_COPY_AND_ASSIGN(InlineAllocationObserver); |
| 2626 | }; |
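
// A minimal observer subclass, as an illustrative sketch (not part of this
// header; the 128 * KB step size is an arbitrary choice):
//
//   class SampleAllocationObserver : public InlineAllocationObserver {
//    public:
//     SampleAllocationObserver() : InlineAllocationObserver(128 * KB) {}
//
//    private:
//     void Step(int bytes_allocated, Address soon_object,
//               size_t size) override {
//       // Runs once at least 128 KB have been allocated inline; note that
//       // soon_object may be nullptr and points at uninitialized memory.
//     }
//   };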
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2627 | |
| 2628 | // ----------------------------------------------------------------------------- |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2629 | // The young generation space. |
| 2630 | // |
| 2631 | // The new space consists of a contiguous pair of semispaces. It simply |
| 2632 | // forwards most functions to the appropriate semispace. |
| 2633 | |
| 2634 | class NewSpace : public Space { |
| 2635 | public: |
| 2636 | // Constructor. |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2637 | explicit NewSpace(Heap* heap) |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2638 | : Space(heap, NEW_SPACE, NOT_EXECUTABLE), |
| 2639 | to_space_(heap, kToSpace), |
| 2640 | from_space_(heap, kFromSpace), |
| 2641 | reservation_(), |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2642 | top_on_previous_step_(0), |
| 2643 | inline_allocation_observers_paused_(false) {} |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2644 | |
| 2645 | // Sets up the new space using the given chunk. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2646 | bool SetUp(int reserved_semispace_size_, int max_semi_space_size); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2647 | |
| 2648 | // Tears down the space. Heap memory was not allocated by the space, so it |
| 2649 | // is not deallocated here. |
| 2650 | void TearDown(); |
| 2651 | |
| 2652 | // True if the space has been set up but not torn down. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2653 | bool HasBeenSetUp() { |
| 2654 | return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2655 | } |
| 2656 | |
| 2657 | // Flip the pair of spaces. |
| 2658 | void Flip(); |
| 2659 | |
| 2660 | // Grow the capacity of the semispaces. Assumes that they are not at |
| 2661 | // their maximum capacity. |
| 2662 | void Grow(); |
| 2663 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 2664 | // Grow the capacity of the semispaces by one page. |
| 2665 | bool GrowOnePage(); |
| 2666 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2667 | // Shrink the capacity of the semispaces. |
| 2668 | void Shrink(); |
| 2669 | |
| 2670 | // True if the address or object lies in the address range of either |
| 2671 | // semispace (not necessarily below the allocation pointer). |
| 2672 | bool Contains(Address a) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2673 | return (reinterpret_cast<uintptr_t>(a) & address_mask_) == |
| 2674 | reinterpret_cast<uintptr_t>(start_); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2675 | } |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2676 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2677 | bool Contains(Object* o) { |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2678 | Address a = reinterpret_cast<Address>(o); |
| 2679 | return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2680 | } |
| 2681 | |
| 2682 | // Return the allocated bytes in the active semispace. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2683 | intptr_t Size() override { |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2684 | return pages_used_ * NewSpacePage::kAreaSize + |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2685 | static_cast<int>(top() - to_space_.page_low()); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2686 | } |
| 2687 | |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 2688 | // The same, but returning an int. We have to have the one that returns |
| 2689 | // intptr_t because it is inherited, but if we know we are dealing with the |
 | 2690 | // new space, which can't get as big as the other spaces, then this is useful:
| 2691 | int SizeAsInt() { return static_cast<int>(Size()); } |
Steve Block | 3ce2e20 | 2009-11-05 08:53:23 +0000 | [diff] [blame] | 2692 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2693 | // Return the allocatable capacity of a semispace. |
| 2694 | intptr_t Capacity() { |
| 2695 | SLOW_DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity()); |
| 2696 | return (to_space_.TotalCapacity() / Page::kPageSize) * |
| 2697 | NewSpacePage::kAreaSize; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2698 | } |
| 2699 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2700 | // Return the current size of a semispace, including both allocatable and
 | 2701 | // non-allocatable memory.
| 2702 | intptr_t TotalCapacity() { |
| 2703 | DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity()); |
| 2704 | return to_space_.TotalCapacity(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2705 | } |
Steve Block | 3ce2e20 | 2009-11-05 08:53:23 +0000 | [diff] [blame] | 2706 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2707 | // Committed memory for NewSpace is the committed memory of both semi-spaces |
| 2708 | // combined. |
| 2709 | intptr_t CommittedMemory() override { |
| 2710 | return from_space_.CommittedMemory() + to_space_.CommittedMemory(); |
Steve Block | 3ce2e20 | 2009-11-05 08:53:23 +0000 | [diff] [blame] | 2711 | } |
| 2712 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2713 | intptr_t MaximumCommittedMemory() override { |
| 2714 | return from_space_.MaximumCommittedMemory() + |
| 2715 | to_space_.MaximumCommittedMemory(); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 2716 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2717 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2718 | // Approximate amount of physical memory committed for this space. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2719 | size_t CommittedPhysicalMemory() override; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2720 | |
| 2721 | // Return the available bytes without growing. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2722 | intptr_t Available() override { return Capacity() - Size(); } |
| 2723 | |
| 2724 | intptr_t PagesFromStart(Address addr) { |
| 2725 | return static_cast<intptr_t>(addr - bottom()) / Page::kPageSize; |
| 2726 | } |
| 2727 | |
| 2728 | size_t AllocatedSinceLastGC() { |
| 2729 | intptr_t allocated = top() - to_space_.age_mark(); |
| 2730 | if (allocated < 0) { |
| 2731 | // Runtime has lowered the top below the age mark. |
| 2732 | return 0; |
| 2733 | } |
| 2734 | // Correctly account for non-allocatable regions at the beginning of |
| 2735 | // each page from the age_mark() to the top(). |
| 2736 | intptr_t pages = |
| 2737 | PagesFromStart(top()) - PagesFromStart(to_space_.age_mark()); |
| 2738 | allocated -= pages * (NewSpacePage::kObjectStartOffset); |
| 2739 | DCHECK(0 <= allocated && allocated <= Size()); |
| 2740 | return static_cast<size_t>(allocated); |
| 2741 | } |

  // Return the maximum capacity of a semispace.
  int MaximumCapacity() {
    DCHECK(to_space_.MaximumTotalCapacity() ==
           from_space_.MaximumTotalCapacity());
    return to_space_.MaximumTotalCapacity();
  }

  bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }

  // Return the initial capacity of a semispace.
  int InitialTotalCapacity() {
    DCHECK(to_space_.InitialTotalCapacity() ==
           from_space_.InitialTotalCapacity());
    return to_space_.InitialTotalCapacity();
  }

  // Return the address of the allocation pointer in the active semispace.
  Address top() {
    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
    return allocation_info_.top();
  }

  // Return the address of the allocation pointer limit in the active
  // semispace.
  Address limit() {
    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
    return allocation_info_.limit();
  }

  // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.space_start(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The start address of the space and a bit mask. AND-ing an address in the
  // new space with the mask yields the start address.
  Address start() { return start_; }
  uintptr_t mask() { return address_mask_; }
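
  // Containment-test sketch built from start() and mask(), as described in
  // the comment above (illustrative only; the canonical checks live in
  // Heap/SemiSpace::Contains):
  //
  //   bool InNewSpace(NewSpace* space, Address a) {
  //     return (reinterpret_cast<uintptr_t>(a) & space->mask()) ==
  //            reinterpret_cast<uintptr_t>(space->start());
  //   }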

  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
    DCHECK(Contains(addr));
    DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) ||
           IsAligned(OffsetFrom(addr) - 1, kPointerSize));
    return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
  }

  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
    return reinterpret_cast<Address>(index << kPointerSizeLog2);
  }
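
  // Example (illustrative arithmetic): on a 64-bit build (kPointerSize == 8,
  // kPointerSizeLog2 == 3), an address 64 bytes past start_ maps to mark-bit
  // index 64 >> 3 == 8, i.e. one index per pointer-sized slot.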

  // The allocation top and limit address.
  Address* allocation_top_address() { return allocation_info_.top_address(); }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment));

  MUST_USE_RESULT INLINE(
      AllocationResult AllocateRawUnaligned(int size_in_bytes));

  MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
      int size_in_bytes, AllocationAlignment alignment));

  MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
      int size_in_bytes, AllocationAlignment alignment);

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();

  void UpdateInlineAllocationLimit(int size_in_bytes);

  // Allows observation of inline allocation. The observer->Step() method gets
  // called after every step_size bytes have been allocated (approximately).
  // This works by lowering the allocation limit and raising it again after
  // each step, so that the allocation path traps periodically. See the usage
  // sketch below.
  void AddInlineAllocationObserver(InlineAllocationObserver* observer);

  // Removes a previously installed observer.
  void RemoveInlineAllocationObserver(InlineAllocationObserver* observer);
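
  // Minimal observer sketch (assumes InlineAllocationObserver, declared
  // elsewhere in this header, is constructed with a byte step size and
  // exposes a virtual Step() hook, per the comment above; the exact Step()
  // signature shown here is illustrative):
  //
  //   class SampleObserver : public InlineAllocationObserver {
  //    public:
  //     SampleObserver() : InlineAllocationObserver(128 * KB) {}
  //     void Step(int bytes_allocated, Address soon_object,
  //               size_t size) override {
  //       // Called roughly every 128 KB of inline allocation.
  //     }
  //   };
  //
  //   // new_space->AddInlineAllocationObserver(&observer);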

  void DisableInlineAllocationSteps() {
    top_on_previous_step_ = 0;
    UpdateInlineAllocationLimit(0);
  }

  // Get the extent of the inactive semispace (for use as a marking stack,
  // or to zap it). Note that the space addresses are not necessarily on the
  // same page, so FromSpaceStart() might be above FromSpaceEnd().
  Address FromSpacePageLow() { return from_space_.page_low(); }
  Address FromSpacePageHigh() { return from_space_.page_high(); }
  Address FromSpaceStart() { return from_space_.space_start(); }
  Address FromSpaceEnd() { return from_space_.space_end(); }

  // Get the extent of the active semispace's pages' memory.
  Address ToSpaceStart() { return to_space_.space_start(); }
  Address ToSpaceEnd() { return to_space_.space_end(); }

  inline bool ToSpaceContains(Address address) {
    return to_space_.Contains(address);
  }
  inline bool FromSpaceContains(Address address) {
    return from_space_.Contains(address);
  }

  // True if the object is a heap object in the address range of the
  // respective semispace (not necessarily below the allocation pointer of the
  // semispace).
  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }

  // Try to switch the active semispace to a new, empty page.
  // Returns false if this isn't possible or reasonable (i.e., there
  // are no pages, or the current page is already empty), or true
  // if successful.
  bool AddFreshPage();
  bool AddFreshPageSynchronized();

#ifdef VERIFY_HEAP
  // Verify the active semispace.
  virtual void Verify();
#endif

#ifdef DEBUG
  // Print the active semispace.
  void Print() override { to_space_.Print(); }
#endif

  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object. Note that we don't
  // record every single allocation, but only those that happen in to-space
  // during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

  bool IsFromSpaceCommitted() { return from_space_.is_committed(); }

  SemiSpace* active_space() { return &to_space_; }

 private:
  // Update allocation info to match the current to-space page.
  void UpdateAllocationInfo();

  base::Mutex mutex_;

  Address chunk_base_;
  uintptr_t chunk_size_;

  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;
  base::VirtualMemory reservation_;
  int pages_used_;

  // Start address and bit mask for containment testing.
  Address start_;
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;

  // When inline allocation stepping is active, either because of incremental
  // marking or because of an idle scavenge, we 'interrupt' inline allocation
  // every once in a while. This is done by setting allocation_info_.limit to
  // be lower than the actual limit and then increasing it in steps to
  // guarantee that the observers are notified periodically.
  List<InlineAllocationObserver*> inline_allocation_observers_;
  Address top_on_previous_step_;
  bool inline_allocation_observers_paused_;

  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;

  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);

  // If we are doing inline allocation in steps, this method performs the
  // 'step' operation. top is the memory address of the bump pointer at the
  // last inline allocation (i.e., it determines the number of bytes actually
  // allocated since the last step). new_top is the address of the bump
  // pointer where the next byte is going to be allocated from. top and
  // new_top may be different when we cross a page boundary or reset the
  // space.
  void InlineAllocationStep(Address top, Address new_top, Address soon_object,
                            size_t size);
  intptr_t GetNextInlineAllocationStepSize();
  void StartNextInlineAllocationStep();
  void PauseInlineAllocationObservers();
  void ResumeInlineAllocationObservers();

  friend class PauseInlineAllocationObserversScope;
  friend class SemiSpaceIterator;
};

class PauseInlineAllocationObserversScope {
 public:
  explicit PauseInlineAllocationObserversScope(NewSpace* new_space)
      : new_space_(new_space) {
    new_space_->PauseInlineAllocationObservers();
  }
  ~PauseInlineAllocationObserversScope() {
    new_space_->ResumeInlineAllocationObservers();
  }

 private:
  NewSpace* new_space_;
  DISALLOW_COPY_AND_ASSIGN(PauseInlineAllocationObserversScope);
};
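
// Typical usage sketch (illustrative): suppress observer notifications while
// the collector itself manipulates the allocation top, e.g.:
//
//   {
//     PauseInlineAllocationObserversScope pause(heap->new_space());
//     // ... move top/limit without triggering inline allocation steps ...
//   }  // Observers resume automatically when the scope is left.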

// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.

class CompactionSpace : public PagedSpace {
 public:
  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
      : PagedSpace(heap, id, executable) {}

  // Adds external memory starting at {start} of {size_in_bytes} to the space.
  void AddExternalMemory(Address start, int size_in_bytes) {
    IncreaseCapacity(size_in_bytes);
    Free(start, size_in_bytes);
  }

  bool is_local() override { return true; }

  void RefillFreeList() override;

 protected:
  // The space is temporary and not included in any snapshots.
  bool snapshotable() override { return false; }

  MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
      int size_in_bytes) override;
};


// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
 public:
  explicit CompactionSpaceCollection(Heap* heap)
      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE),
        duration_(0.0),
        bytes_compacted_(0) {}

  CompactionSpace* Get(AllocationSpace space) {
    switch (space) {
      case OLD_SPACE:
        return &old_space_;
      case CODE_SPACE:
        return &code_space_;
      default:
        UNREACHABLE();
    }
    // Unreachable; the return keeps compilers that cannot see this happy.
    return nullptr;
  }

  void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
    duration_ += duration;
    bytes_compacted_ += bytes_compacted;
  }

  double duration() const { return duration_; }
  intptr_t bytes_compacted() const { return bytes_compacted_; }

 private:
  CompactionSpace old_space_;
  CompactionSpace code_space_;

  // Bookkeeping.
  double duration_;
  intptr_t bytes_compacted_;
};
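
// Sketch of how a compaction task might use a collection (illustrative; the
// real call sites live in the mark-compact collector):
//
//   CompactionSpaceCollection spaces(heap);
//   CompactionSpace* old_space = spaces.Get(OLD_SPACE);
//   // ... evacuate live objects, allocating from old_space ...
//   spaces.ReportCompactionProgress(duration_in_ms, bytes_moved);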


// -----------------------------------------------------------------------------
// Old object space (includes the old space of objects and code space).

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object. The constructor does not allocate pages
  // from the OS.
  OldSpace(Heap* heap, AllocationSpace id, Executability executable)
      : PagedSpace(heap, id, executable) {}
};


// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_DCHECK((space).page_low() <= (info).top() &&   \
              (info).top() <= (space).page_high() &&  \
              (info).limit() <= (space).page_high())


// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public PagedSpace {
 public:
  // Creates a map space object.
  MapSpace(Heap* heap, AllocationSpace id)
      : PagedSpace(heap, id, NOT_EXECUTABLE),
        max_map_space_pages_(kMaxMapPageIndex - 1) {}

  // Given an index, returns the page address.
  // TODO(1600): this limit is artificial, just to keep the code compilable.
  static const int kMaxMapPageIndex = 1 << 16;

  int RoundSizeDownToObjectAlignment(int size) override {
    if (base::bits::IsPowerOfTwo32(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }
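
  // Example (illustrative numbers): if Map::kSize were 88 (not a power of
  // two), a 200-byte request would round down to (200 / 88) * 88 == 176.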

#ifdef VERIFY_HEAP
  void VerifyObject(HeapObject* obj) override;
#endif

 private:
  static const int kMapsPerPage = Page::kAllocatableMemory / Map::kSize;

  // Do map space compaction if there is a page gap.
  int CompactionThreshold() {
    return kMapsPerPage * (max_map_space_pages_ - 1);
  }

  const int max_map_space_pages_;
};
| 3098 | |
| 3099 | |
| 3100 | // ----------------------------------------------------------------------------- |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 3101 | // Large objects ( > Page::kMaxRegularHeapObjectSize ) are allocated and |
| 3102 | // managed by the large object space. A large object is allocated from OS |
| 3103 | // heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3104 | // A large object always starts at Page::kObjectStartOffset to a page. |
| 3105 | // Large objects do not move during garbage collections. |
| 3106 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3107 | class LargeObjectSpace : public Space { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3108 | public: |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 3109 | LargeObjectSpace(Heap* heap, AllocationSpace id); |
| 3110 | virtual ~LargeObjectSpace(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3111 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3112 | // Initializes internal data structures. |
| 3113 | bool SetUp(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3114 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 3115 | // Releases internal resources, frees objects in this space. |
| 3116 | void TearDown(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3117 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3118 | static intptr_t ObjectSizeFor(intptr_t chunk_size) { |
| 3119 | if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; |
| 3120 | return chunk_size - Page::kPageSize - Page::kObjectStartOffset; |
| 3121 | } |
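
  // Example (symbolic, from the formula above): a chunk of
  // 2 * Page::kPageSize bytes can hold an object of at most
  // Page::kPageSize - Page::kObjectStartOffset bytes.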

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  MUST_USE_RESULT AllocationResult
  AllocateRaw(int object_size, Executability executable);

  // Available bytes for objects in this space.
  inline intptr_t Available() override;

  intptr_t Size() override { return size_; }

  intptr_t SizeOfObjects() override { return objects_size_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  int PageCount() { return page_count_; }

  // Finds an object for a given address; returns a Smi if it is not found.
  // The function iterates through all objects in this space and may be slow.
  Object* FindObject(Address a);

  // Finds a large object page containing the given address; returns NULL
  // if no such page exists.
  LargePage* FindPage(Address a);

  // Clears the marking state of live objects.
  void ClearMarkingStateOfLiveObjects();

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);
  bool Contains(Address address);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page_ == NULL; }

  LargePage* first_page() { return first_page_; }

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

#ifdef DEBUG
  void Print() override;
  void ReportStatistics();
  void CollectCodeStatistics();
#endif
  // Checks whether an address is in the object area of this space. It
  // iterates over all objects in the space and may be slow.
  bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }

 private:
  // The head of the linked list of large object chunks.
  LargePage* first_page_;
  intptr_t size_;          // allocated bytes
  int page_count_;         // number of chunks
  intptr_t objects_size_;  // size of objects
  // Maps MemoryChunk::kAlignment-aligned chunks to the large pages covering
  // them.
  HashMap chunk_map_;

  friend class LargeObjectIterator;
};


class LargeObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);

  HeapObject* Next();

  // Implementation of ObjectIterator.
  virtual HeapObject* next_object() { return Next(); }

 private:
  LargePage* current_;
};
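
// Typical iteration pattern (sketch; Next() returns NULL once the space is
// exhausted):
//
//   LargeObjectIterator it(heap->lo_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit obj ...
//   }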


// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space.
class PointerChunkIterator BASE_EMBEDDED {
 public:
  inline explicit PointerChunkIterator(Heap* heap);

  // Return NULL when the iterator is done.
  inline MemoryChunk* next();

 private:
  enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
  State state_;
  PageIterator old_iterator_;
  PageIterator map_iterator_;
  LargeObjectIterator lo_iterator_;
};
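
// Usage sketch (illustrative): visit every chunk that may hold pointers into
// new space:
//
//   PointerChunkIterator it(heap);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != NULL) {
//     // ... scan chunk for pointers to new space ...
//   }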


#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_H_