Upgrade V8 to version 4.9.385.28
https://chromium.googlesource.com/v8/v8/+/4.9.385.28
FPIIM-449
Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index dcd3364..a8102ca 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -6,17 +6,20 @@
#define V8_HEAP_SPACES_H_
#include "src/allocation.h"
+#include "src/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
+#include "src/flags.h"
#include "src/hashmap.h"
#include "src/list.h"
-#include "src/log.h"
+#include "src/objects.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
+class CompactionSpaceCollection;
class Isolate;
// -----------------------------------------------------------------------------
@@ -34,7 +37,7 @@
// area.
//
// There is a separate large object space for objects larger than
-// Page::kMaxHeapObjectSize, so that they do not have to move during
+// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
@@ -43,11 +46,11 @@
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
-// object maps so if the page belongs to old pointer space or large object
-// space it is essential to guarantee that the page does not contain any
+// object maps so if the page belongs to old space or large object space
+// it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
-// new space. Thus objects in old pointer and large object spaces should have a
+// new space. Thus objects in old space and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space which is iterated in a special fashion. However we still
// require pointer fields of dead maps to be cleaned.
@@ -84,29 +87,28 @@
#define DCHECK_OBJECT_SIZE(size) \
DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
+#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
+ DCHECK((0 < size) && (size <= code_space->AreaSize()))
+
#define DCHECK_PAGE_OFFSET(offset) \
DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
#define DCHECK_MAP_PAGE_INDEX(index) \
DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
-
-class PagedSpace;
-class MemoryAllocator;
class AllocationInfo;
-class Space;
+class CompactionSpace;
class FreeList;
+class MemoryAllocator;
class MemoryChunk;
+class PagedSpace;
+class Space;
class MarkBit {
public:
typedef uint32_t CellType;
- inline MarkBit(CellType* cell, CellType mask, bool data_only)
- : cell_(cell), mask_(mask), data_only_(data_only) {}
-
- inline CellType* cell() { return cell_; }
- inline CellType mask() { return mask_; }
+ inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}
#ifdef DEBUG
bool operator==(const MarkBit& other) {
@@ -114,29 +116,27 @@
}
#endif
- inline void Set() { *cell_ |= mask_; }
- inline bool Get() { return (*cell_ & mask_) != 0; }
- inline void Clear() { *cell_ &= ~mask_; }
-
- inline bool data_only() { return data_only_; }
+ private:
+ inline CellType* cell() { return cell_; }
+ inline CellType mask() { return mask_; }
inline MarkBit Next() {
CellType new_mask = mask_ << 1;
if (new_mask == 0) {
- return MarkBit(cell_ + 1, 1, data_only_);
+ return MarkBit(cell_ + 1, 1);
} else {
- return MarkBit(cell_, new_mask, data_only_);
+ return MarkBit(cell_, new_mask);
}
}
- private:
+ inline void Set() { *cell_ |= mask_; }
+ inline bool Get() { return (*cell_ & mask_) != 0; }
+ inline void Clear() { *cell_ &= ~mask_; }
+
CellType* cell_;
CellType mask_;
- // This boolean indicates that the object is in a data-only space with no
- // pointers. This enables some optimizations when marking.
- // It is expected that this field is inlined and turned into control flow
- // at the place where the MarkBit object is created.
- bool data_only_;
+
+ friend class Marking;
};
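
For readers skimming the new MarkBit layout: a mark bit is addressed as a (cell pointer, mask) pair over 32-bit cells, and Next() walks the bitmap by shifting the mask until it overflows into the following cell. A minimal standalone sketch of the same encoding (illustrative names, not the V8 API):

#include <cassert>
#include <cstdint>

// Illustrative stand-in for MarkBit: a bit addressed as (cell, mask).
struct BitRef {
  uint32_t* cell;
  uint32_t mask;

  void Set() { *cell |= mask; }
  bool Get() const { return (*cell & mask) != 0; }
  void Clear() { *cell &= ~mask; }

  // Mirrors MarkBit::Next(): shift the mask; on overflow, move to the
  // next cell and reset the mask to bit 0.
  BitRef Next() const {
    uint32_t new_mask = mask << 1;
    return (new_mask == 0) ? BitRef{cell + 1, 1} : BitRef{cell, new_mask};
  }
};

int main() {
  uint32_t cells[2] = {0, 0};
  BitRef bit{&cells[0], 1u << 31};  // the last bit of the first cell
  bit.Set();
  assert(cells[0] == 0x80000000u);
  BitRef next = bit.Next();  // overflows into cells[1], bit 0
  next.Set();
  assert(next.cell == &cells[1] && cells[1] == 1u);
  return 0;
}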
@@ -169,6 +169,10 @@
return index >> kBitsPerCellLog2;
}
+ V8_INLINE static uint32_t IndexInCell(uint32_t index) {
+ return index & kBitIndexMask;
+ }
+
INLINE(static uint32_t CellToIndex(uint32_t index)) {
return index << kBitsPerCellLog2;
}
@@ -187,10 +191,10 @@
return reinterpret_cast<Bitmap*>(addr);
}
- inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
- MarkBit::CellType mask = 1 << (index & kBitIndexMask);
+ inline MarkBit MarkBitFromIndex(uint32_t index) {
+ MarkBit::CellType mask = 1u << IndexInCell(index);
MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
- return MarkBit(cell, mask, data_only);
+ return MarkBit(cell, mask);
}
static inline void Clear(MemoryChunk* chunk);
@@ -260,6 +264,23 @@
}
return true;
}
+
+ // Clears all bits starting from {cell_base_index} up to and excluding
+ // {index}. Note that {cell_base_index} is required to be cell aligned.
+ void ClearRange(uint32_t cell_base_index, uint32_t index) {
+ DCHECK_EQ(IndexInCell(cell_base_index), 0u);
+ DCHECK_GE(index, cell_base_index);
+ uint32_t start_cell_index = IndexToCell(cell_base_index);
+ uint32_t end_cell_index = IndexToCell(index);
+ DCHECK_GE(end_cell_index, start_cell_index);
+ // Clear all cells till the cell containing the last index.
+ for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
+ cells()[i] = 0;
+ }
+ // Clear all bits in the last cell till the last bit before index.
+ uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
+ cells()[end_cell_index] &= clear_mask;
+ }
};
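
A compact way to verify ClearRange's contract: whole cells strictly below the cell containing {index} are zeroed, and in that final cell only the bits below IndexInCell(index) are cleared. A hedged standalone sketch (a free function instead of the Bitmap member):

#include <cassert>
#include <cstdint>

const uint32_t kBitsPerCell = 32;

// Mirrors Bitmap::ClearRange: clears bits [cell_base_index, index), where
// cell_base_index must be cell aligned.
void ClearRange(uint32_t* cells, uint32_t cell_base_index, uint32_t index) {
  assert(cell_base_index % kBitsPerCell == 0);
  uint32_t start_cell = cell_base_index / kBitsPerCell;
  uint32_t end_cell = index / kBitsPerCell;
  for (uint32_t i = start_cell; i < end_cell; i++) cells[i] = 0;
  // Keep only the bits at positions >= index % kBitsPerCell in the last cell.
  cells[end_cell] &= ~((1u << (index % kBitsPerCell)) - 1);
}

int main() {
  uint32_t cells[2] = {0xffffffffu, 0xffffffffu};
  ClearRange(cells, 0, 36);         // clear bits 0..35
  assert(cells[0] == 0);            // fully cleared cell
  assert(cells[1] == 0xfffffff0u);  // only the low 4 bits cleared
  return 0;
}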
@@ -272,37 +293,197 @@
// any heap object.
class MemoryChunk {
public:
+ enum MemoryChunkFlags {
+ IS_EXECUTABLE,
+ ABOUT_TO_BE_FREED,
+ POINTERS_TO_HERE_ARE_INTERESTING,
+ POINTERS_FROM_HERE_ARE_INTERESTING,
+ SCAN_ON_SCAVENGE,
+ IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
+ IN_TO_SPACE, // All pages in new space have one of these two set.
+ NEW_SPACE_BELOW_AGE_MARK,
+ EVACUATION_CANDIDATE,
+ RESCAN_ON_EVACUATION,
+ NEVER_EVACUATE, // May contain immortal immutables.
+ POPULAR_PAGE, // Slots buffer of this page overflowed on the previous GC.
+
+ // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
+ // otherwise marking bits are still intact.
+ WAS_SWEPT,
+
+ // Large objects can have a progress bar in their page header. These objects
+ // are scanned in increments and will be kept black while being scanned.
+ // Even if the mutator writes to them, they will be kept black and a white
+ // to grey transition is performed on the value.
+ HAS_PROGRESS_BAR,
+
+ // This flag is intended to be used for testing. Works only when both
+ // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
+ // are set. It forces the page to become an evacuation candidate at next
+ // candidates selection cycle.
+ FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
+
+ // This flag is intended to be used for testing.
+ NEVER_ALLOCATE_ON_PAGE,
+
+ // The memory chunk is already logically freed, however the actual freeing
+ // still has to be performed.
+ PRE_FREED,
+
+ // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
+ // has been aborted and needs special handling by the sweeper.
+ COMPACTION_WAS_ABORTED,
+
+ // Last flag, keep at bottom.
+ NUM_MEMORY_CHUNK_FLAGS
+ };
+
+ // |kCompactionDone|: Initial compaction state of a |MemoryChunk|.
+ // |kCompactingInProgress|: Parallel compaction is currently in progress.
+ // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
+ // be finalized.
+ // |kCompactingAborted|: Parallel compaction has been aborted, which should
+ // for now only happen in OOM scenarios.
+ enum ParallelCompactingState {
+ kCompactingDone,
+ kCompactingInProgress,
+ kCompactingFinalize,
+ kCompactingAborted,
+ };
+
+ // |kSweepingDone|: The page state when sweeping is complete or sweeping must
+ // not be performed on that page.
+ // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will
+ // not touch the page memory anymore.
+ // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
+ // |kSweepingPending|: This page is ready for parallel sweeping.
+ enum ParallelSweepingState {
+ kSweepingDone,
+ kSweepingFinalize,
+ kSweepingInProgress,
+ kSweepingPending
+ };
+
+ // Every n write barrier invocations we go to runtime even though
+ // we could have handled it in generated code. This lets us check
+ // whether we have hit the limit and should do some more marking.
+ static const int kWriteBarrierCounterGranularity = 500;
+
+ static const int kPointersToHereAreInterestingMask =
+ 1 << POINTERS_TO_HERE_ARE_INTERESTING;
+
+ static const int kPointersFromHereAreInterestingMask =
+ 1 << POINTERS_FROM_HERE_ARE_INTERESTING;
+
+ static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
+
+ static const int kSkipEvacuationSlotsRecordingMask =
+ (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
+ (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
+
+ static const intptr_t kAlignment =
+ (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+ static const intptr_t kAlignmentMask = kAlignment - 1;
+
+ static const intptr_t kSizeOffset = 0;
+
+ static const intptr_t kLiveBytesOffset =
+ kSizeOffset + kPointerSize // size_t size
+ + kIntptrSize // intptr_t flags_
+ + kPointerSize // Address area_start_
+ + kPointerSize // Address area_end_
+ + 2 * kPointerSize // base::VirtualMemory reservation_
+ + kPointerSize // Address owner_
+ + kPointerSize // Heap* heap_
+ + kIntSize; // int store_buffer_counter_
+
+ static const size_t kSlotsBufferOffset =
+ kLiveBytesOffset + kIntSize; // int live_byte_count_
+
+ static const size_t kWriteBarrierCounterOffset =
+ kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_;
+ + kPointerSize; // SkipList* skip_list_;
+
+ static const size_t kMinHeaderSize =
+ kWriteBarrierCounterOffset +
+ kIntptrSize // intptr_t write_barrier_counter_
+ + kIntSize // int progress_bar_
+ + kPointerSize // AtomicValue high_water_mark_
+ + kPointerSize // base::Mutex* mutex_
+ + kPointerSize // base::AtomicWord parallel_sweeping_
+ + kPointerSize // AtomicValue parallel_compaction_
+ + 5 * kPointerSize // AtomicNumber free-list statistics
+ + kPointerSize // AtomicValue next_chunk_
+ + kPointerSize; // AtomicValue prev_chunk_
+
+ // We add some more space to the computed header size to account for missing
+ // alignment requirements in our computation.
+ // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
+ static const size_t kHeaderSize = kMinHeaderSize + kIntSize;
+
+ static const int kBodyOffset =
+ CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
+
+ // The start offset of the object area in a page. Aligned to both maps and
+ // code alignment to be suitable for both. Also aligned to 32 words because
+ // the marking bitmap is arranged in 32 bit chunks.
+ static const int kObjectStartAlignment = 32 * kPointerSize;
+ static const int kObjectStartOffset =
+ kBodyOffset - 1 +
+ (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
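
The kObjectStartOffset expression above is a branch-free round-up of kBodyOffset to the next multiple of kObjectStartAlignment; already-aligned values are left unchanged. A small sketch of the identity (assumes offset >= 1, alignment > 0):

#include <cassert>

// The same round-up pattern as kObjectStartOffset, for arbitrary inputs.
int RoundUpToAlignment(int offset, int alignment) {
  return offset - 1 + (alignment - (offset - 1) % alignment);
}

int main() {
  assert(RoundUpToAlignment(1, 128) == 128);
  assert(RoundUpToAlignment(128, 128) == 128);  // already aligned: unchanged
  assert(RoundUpToAlignment(129, 128) == 256);  // rounds to the next boundary
  return 0;
}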
+
+ static const int kFlagsOffset = kPointerSize;
+
+ static void IncrementLiveBytesFromMutator(HeapObject* object, int by);
+
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
}
+
static const MemoryChunk* FromAddress(const byte* a) {
return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
~kAlignmentMask);
}
+ static void IncrementLiveBytesFromGC(HeapObject* object, int by) {
+ MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
+ }
+
// Only works for addresses in pointer spaces, not data or code spaces.
static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
+ static inline uint32_t FastAddressToMarkbitIndex(Address addr) {
+ const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
+ return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
+ }
+
+ static inline void UpdateHighWaterMark(Address mark) {
+ if (mark == nullptr) return;
+ // Need to subtract one from the mark because when a chunk is full the
+ // top points to the next address after the chunk, which effectively belongs
+ // to another chunk. See the comment to Page::FromAllocationTop.
+ MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
+ intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
+ intptr_t old_mark = 0;
+ do {
+ old_mark = chunk->high_water_mark_.Value();
+ } while ((new_mark > old_mark) &&
+ !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
+ }
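
UpdateHighWaterMark is the classic lock-free "monotonic maximum" pattern: re-read the current value and only install the new one if it is still larger. A minimal sketch with std::atomic standing in for V8's AtomicValue/TrySetValue (from src/atomic-utils.h); compare_exchange_weak plays the same role here:

#include <atomic>
#include <cassert>
#include <cstdint>

// Raise |mark| to |new_mark| if and only if new_mark is larger, even when
// other threads race on the same variable.
void UpdateMax(std::atomic<intptr_t>& mark, intptr_t new_mark) {
  intptr_t old_mark = mark.load();
  while (new_mark > old_mark &&
         !mark.compare_exchange_weak(old_mark, new_mark)) {
    // compare_exchange_weak reloads old_mark on failure; the loop re-checks.
  }
}

int main() {
  std::atomic<intptr_t> high_water_mark{100};
  UpdateMax(high_water_mark, 200);
  assert(high_water_mark.load() == 200);
  UpdateMax(high_water_mark, 50);  // a smaller value is ignored
  assert(high_water_mark.load() == 200);
  return 0;
}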
+
Address address() { return reinterpret_cast<Address>(this); }
bool is_valid() { return address() != NULL; }
- MemoryChunk* next_chunk() const {
- return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
- }
+ MemoryChunk* next_chunk() { return next_chunk_.Value(); }
- MemoryChunk* prev_chunk() const {
- return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
- }
+ MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
- void set_next_chunk(MemoryChunk* next) {
- base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
- }
+ void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
- void set_prev_chunk(MemoryChunk* prev) {
- base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
- }
+ void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
Space* owner() const {
if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
@@ -323,8 +504,6 @@
base::VirtualMemory* reserved_memory() { return &reservation_; }
- void InitializeReservedMemory() { reservation_.Reset(); }
-
void set_reserved_memory(base::VirtualMemory* reservation) {
DCHECK_NOT_NULL(reservation);
reservation_.TakeControl(reservation);
@@ -356,52 +535,6 @@
return addr >= area_start() && addr <= area_end();
}
- // Every n write barrier invocations we go to runtime even though
- // we could have handled it in generated code. This lets us check
- // whether we have hit the limit and should do some more marking.
- static const int kWriteBarrierCounterGranularity = 500;
-
- enum MemoryChunkFlags {
- IS_EXECUTABLE,
- ABOUT_TO_BE_FREED,
- POINTERS_TO_HERE_ARE_INTERESTING,
- POINTERS_FROM_HERE_ARE_INTERESTING,
- SCAN_ON_SCAVENGE,
- IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
- IN_TO_SPACE, // All pages in new space has one of these two set.
- NEW_SPACE_BELOW_AGE_MARK,
- CONTAINS_ONLY_DATA,
- EVACUATION_CANDIDATE,
- RESCAN_ON_EVACUATION,
-
- // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
- // otherwise marking bits are still intact.
- WAS_SWEPT,
-
- // Large objects can have a progress bar in their page header. These object
- // are scanned in increments and will be kept black while being scanned.
- // Even if the mutator writes to them they will be kept black and a white
- // to grey transition is performed in the value.
- HAS_PROGRESS_BAR,
-
- // Last flag, keep at bottom.
- NUM_MEMORY_CHUNK_FLAGS
- };
-
-
- static const int kPointersToHereAreInterestingMask =
- 1 << POINTERS_TO_HERE_ARE_INTERESTING;
-
- static const int kPointersFromHereAreInterestingMask =
- 1 << POINTERS_FROM_HERE_ARE_INTERESTING;
-
- static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
-
- static const int kSkipEvacuationSlotsRecordingMask =
- (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
- (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
-
-
void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
@@ -428,36 +561,30 @@
// Return all current flags.
intptr_t GetFlags() { return flags_; }
-
- // SWEEPING_DONE - The page state when sweeping is complete or sweeping must
- // not be performed on that page.
- // SWEEPING_FINALIZE - A sweeper thread is done sweeping this page and will
- // not touch the page memory anymore.
- // SWEEPING_IN_PROGRESS - This page is currently swept by a sweeper thread.
- // SWEEPING_PENDING - This page is ready for parallel sweeping.
- enum ParallelSweepingState {
- SWEEPING_DONE,
- SWEEPING_FINALIZE,
- SWEEPING_IN_PROGRESS,
- SWEEPING_PENDING
- };
-
- ParallelSweepingState parallel_sweeping() {
- return static_cast<ParallelSweepingState>(
- base::Acquire_Load(&parallel_sweeping_));
+ AtomicValue<ParallelSweepingState>& parallel_sweeping_state() {
+ return parallel_sweeping_;
}
- void set_parallel_sweeping(ParallelSweepingState state) {
- base::Release_Store(&parallel_sweeping_, state);
+ AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
+ return parallel_compaction_;
}
- bool TryParallelSweeping() {
- return base::Acquire_CompareAndSwap(&parallel_sweeping_, SWEEPING_PENDING,
- SWEEPING_IN_PROGRESS) ==
- SWEEPING_PENDING;
+ bool TryLock() { return mutex_->TryLock(); }
+
+ base::Mutex* mutex() { return mutex_; }
+
+ // WaitUntilSweepingCompleted only works when concurrent sweeping is in
+ // progress. In particular, when we know that right before this call a
+ // sweeper thread was sweeping this page.
+ void WaitUntilSweepingCompleted() {
+ mutex_->Lock();
+ mutex_->Unlock();
+ DCHECK(SweepingCompleted());
}
- bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }
+ bool SweepingCompleted() {
+ return parallel_sweeping_state().Value() <= kSweepingFinalize;
+ }
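
The Lock()/Unlock() pair in WaitUntilSweepingCompleted above works because the sweeper holds the page mutex for the whole duration of sweeping, so merely acquiring the mutex cannot succeed before sweeping ends. A sketch of the idiom with std::mutex (the thread choreography is illustrative):

#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>

std::mutex page_mutex;
std::atomic<bool> sweeping_started{false};
bool swept = false;  // guarded by page_mutex

void WaitUntilSweepingCompleted() {
  // Acquiring the mutex blocks until the sweeper releases it.
  page_mutex.lock();
  page_mutex.unlock();
}

int main() {
  std::thread sweeper([] {
    std::lock_guard<std::mutex> guard(page_mutex);  // held while sweeping
    sweeping_started = true;
    swept = true;  // stands in for the actual sweeping work
  });
  // V8 guarantees by construction that sweeping is already in progress; here
  // we wait for the sweeper to take the mutex before waiting on it.
  while (!sweeping_started) std::this_thread::yield();
  WaitUntilSweepingCompleted();
  assert(swept);
  sweeper.join();
  return 0;
}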
// Manage live byte count (count of bytes known to be live,
// because they are marked black).
@@ -468,6 +595,7 @@
}
live_byte_count_ = 0;
}
+
void IncrementLiveBytes(int by) {
if (FLAG_gc_verbose) {
printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
@@ -475,13 +603,21 @@
live_byte_count_ + by);
}
live_byte_count_ += by;
+ DCHECK_GE(live_byte_count_, 0);
DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
}
+
int LiveBytes() {
- DCHECK(static_cast<unsigned>(live_byte_count_) <= size_);
+ DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
return live_byte_count_;
}
+ void SetLiveBytes(int live_bytes) {
+ DCHECK_GE(live_bytes, 0);
+ DCHECK_LE(static_cast<unsigned>(live_bytes), size_);
+ live_byte_count_ = live_bytes;
+ }
+
int write_barrier_counter() {
return static_cast<int>(write_barrier_counter_);
}
@@ -507,50 +643,6 @@
}
}
- bool IsLeftOfProgressBar(Object** slot) {
- Address slot_address = reinterpret_cast<Address>(slot);
- DCHECK(slot_address > this->address());
- return (slot_address - (this->address() + kObjectStartOffset)) <
- progress_bar();
- }
-
- static void IncrementLiveBytesFromGC(Address address, int by) {
- MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
- }
-
- static void IncrementLiveBytesFromMutator(Address address, int by);
-
- static const intptr_t kAlignment =
- (static_cast<uintptr_t>(1) << kPageSizeBits);
-
- static const intptr_t kAlignmentMask = kAlignment - 1;
-
- static const intptr_t kSizeOffset = 0;
-
- static const intptr_t kLiveBytesOffset =
- kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize +
- kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize;
-
- static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
-
- static const size_t kWriteBarrierCounterOffset =
- kSlotsBufferOffset + kPointerSize + kPointerSize;
-
- static const size_t kHeaderSize =
- kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize +
- kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize;
-
- static const int kBodyOffset =
- CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
-
- // The start offset of the object area in a page. Aligned to both maps and
- // code alignment to be suitable for both. Also aligned to 32 words because
- // the marking bitmap is arranged in 32 bit chunks.
- static const int kObjectStartAlignment = 32 * kPointerSize;
- static const int kObjectStartOffset =
- kBodyOffset - 1 +
- (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
-
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
@@ -564,8 +656,6 @@
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
- bool ContainsOnlyData() { return IsFlagSet(CONTAINS_ONLY_DATA); }
-
bool InNewSpace() {
return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
}
@@ -574,7 +664,6 @@
bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
- // ---------------------------------------------------------------------
// Markbits support
inline Bitmap* markbits() {
@@ -587,12 +676,6 @@
return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
}
- inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
- const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
-
- return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
- }
-
inline Address MarkbitIndexToAddress(uint32_t index) {
return this->address() + (index << kPointerSizeLog2);
}
@@ -602,9 +685,18 @@
inline Heap* heap() const { return heap_; }
- static const int kFlagsOffset = kPointerSize;
+ bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
- bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
+ void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
+
+ bool IsEvacuationCandidate() {
+ DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
+ return IsFlagSet(EVACUATION_CANDIDATE);
+ }
+
+ bool CanAllocate() {
+ return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
+ }
bool ShouldSkipEvacuationSlotRecording() {
return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
@@ -619,6 +711,7 @@
inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
void MarkEvacuationCandidate() {
+ DCHECK(!IsFlagSet(NEVER_EVACUATE));
DCHECK(slots_buffer_ == NULL);
SetFlag(EVACUATION_CANDIDATE);
}
@@ -634,11 +727,16 @@
bool CommitArea(size_t requested);
// Approximate amount of physical memory committed for this chunk.
- size_t CommittedPhysicalMemory() { return high_water_mark_; }
+ size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
- static inline void UpdateHighWaterMark(Address mark);
+ // Should be called when memory chunk is about to be freed.
+ void ReleaseAllocatedMemory();
protected:
+ static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
+ Address area_start, Address area_end,
+ Executability executable, Space* owner);
+
size_t size_;
intptr_t flags_;
@@ -666,32 +764,33 @@
int progress_bar_;
// Assuming the initial allocation on a page is sequential,
// count highest number of bytes ever allocated on the page.
- int high_water_mark_;
+ AtomicValue<intptr_t> high_water_mark_;
- base::AtomicWord parallel_sweeping_;
+ base::Mutex* mutex_;
+ AtomicValue<ParallelSweepingState> parallel_sweeping_;
+ AtomicValue<ParallelCompactingState> parallel_compaction_;
// PagedSpace free-list statistics.
- intptr_t available_in_small_free_list_;
- intptr_t available_in_medium_free_list_;
- intptr_t available_in_large_free_list_;
- intptr_t available_in_huge_free_list_;
- intptr_t non_available_small_blocks_;
+ AtomicNumber<intptr_t> available_in_small_free_list_;
+ AtomicNumber<intptr_t> available_in_medium_free_list_;
+ AtomicNumber<intptr_t> available_in_large_free_list_;
+ AtomicNumber<intptr_t> available_in_huge_free_list_;
+ AtomicNumber<intptr_t> non_available_small_blocks_;
- static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
- Address area_start, Address area_end,
- Executability executable, Space* owner);
+ // next_chunk_ holds a pointer of type MemoryChunk
+ AtomicValue<MemoryChunk*> next_chunk_;
+ // prev_chunk_ holds a pointer of type MemoryChunk
+ AtomicValue<MemoryChunk*> prev_chunk_;
private:
- // next_chunk_ holds a pointer of type MemoryChunk
- base::AtomicWord next_chunk_;
- // prev_chunk_ holds a pointer of type MemoryChunk
- base::AtomicWord prev_chunk_;
+ void InitializeReservedMemory() { reservation_.Reset(); }
friend class MemoryAllocator;
+ friend class MemoryChunkValidator;
};
-STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
+enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };
// -----------------------------------------------------------------------------
@@ -720,8 +819,14 @@
}
// Returns the next page in the chain of pages owned by a space.
- inline Page* next_page();
- inline Page* prev_page();
+ inline Page* next_page() {
+ DCHECK(next_chunk()->owner() == owner());
+ return static_cast<Page*>(next_chunk());
+ }
+ inline Page* prev_page() {
+ DCHECK(prev_chunk()->owner() == owner());
+ return static_cast<Page*>(prev_chunk());
+ }
inline void set_next_page(Page* page);
inline void set_prev_page(Page* page);
@@ -747,11 +852,16 @@
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
- // Maximum object size that fits in a page. Objects larger than that size
- // are allocated in large object space and are never moved in memory. This
- // also applies to new space allocation, since objects are never migrated
- // from new space to large object space. Takes double alignment into account.
- static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
+ // Maximum object size that gets allocated into regular pages. Objects larger
+ // than that size are allocated in large object space and are never moved in
+ // memory. This also applies to new space allocation, since objects are never
+ // migrated from new space to large object space. Takes double alignment into
+ // account.
+ // TODO(hpayer): This limit should be way smaller but we currently have
+ // short-lived objects >256K.
+ static const int kMaxRegularHeapObjectSize = 600 * KB;
+
+ static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
@@ -769,10 +879,17 @@
void ResetFreeListStatistics();
-#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
- type name() { return name##_; } \
- void set_##name(type name) { name##_ = name; } \
- void add_##name(type name) { name##_ += name; }
+ int LiveBytesFromFreeList() {
+ return static_cast<int>(
+ area_size() - non_available_small_blocks() -
+ available_in_small_free_list() - available_in_medium_free_list() -
+ available_in_large_free_list() - available_in_huge_free_list());
+ }
+
+#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
+ type name() { return name##_.Value(); } \
+ void set_##name(type name) { name##_.SetValue(name); } \
+ void add_##name(type name) { name##_.Increment(name); }
FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
@@ -782,6 +899,42 @@
#undef FRAGMENTATION_STATS_ACCESSORS
+ void add_available_in_free_list(FreeListCategoryType type, intptr_t bytes) {
+ switch (type) {
+ case kSmall:
+ add_available_in_small_free_list(bytes);
+ break;
+ case kMedium:
+ add_available_in_medium_free_list(bytes);
+ break;
+ case kLarge:
+ add_available_in_large_free_list(bytes);
+ break;
+ case kHuge:
+ add_available_in_huge_free_list(bytes);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ intptr_t available_in_free_list(FreeListCategoryType type) {
+ switch (type) {
+ case kSmall:
+ return available_in_small_free_list();
+ case kMedium:
+ return available_in_medium_free_list();
+ case kLarge:
+ return available_in_large_free_list();
+ case kHuge:
+ return available_in_huge_free_list();
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return 0;
+ }
+
#ifdef DEBUG
void Print();
#endif // DEBUG
@@ -790,14 +943,11 @@
};
-STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
-
-
class LargePage : public MemoryChunk {
public:
HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
- inline LargePage* next_page() const {
+ inline LargePage* next_page() {
return static_cast<LargePage*>(next_chunk());
}
@@ -809,14 +959,17 @@
friend class MemoryAllocator;
};
-STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
public:
Space(Heap* heap, AllocationSpace id, Executability executable)
- : heap_(heap), id_(id), executable_(executable) {}
+ : heap_(heap),
+ id_(id),
+ executable_(executable),
+ committed_(0),
+ max_committed_(0) {}
virtual ~Space() {}
@@ -828,6 +981,12 @@
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
+ // Return the total amount committed memory for this space, i.e., allocatable
+ // memory and page headers.
+ virtual intptr_t CommittedMemory() { return committed_; }
+
+ virtual intptr_t MaximumCommittedMemory() { return max_committed_; }
+
// Returns allocated size.
virtual intptr_t Size() = 0;
@@ -835,6 +994,12 @@
// (e.g. see LargeObjectSpace).
virtual intptr_t SizeOfObjects() { return Size(); }
+ // Approximate amount of physical memory committed for this space.
+ virtual size_t CommittedPhysicalMemory() = 0;
+
+ // Return the available bytes without growing.
+ virtual intptr_t Available() = 0;
+
virtual int RoundSizeDownToObjectAlignment(int size) {
if (id_ == CODE_SPACE) {
return RoundDown(size, kCodeAlignment);
@@ -847,10 +1012,46 @@
virtual void Print() = 0;
#endif
+ protected:
+ void AccountCommitted(intptr_t bytes) {
+ DCHECK_GE(bytes, 0);
+ committed_ += bytes;
+ if (committed_ > max_committed_) {
+ max_committed_ = committed_;
+ }
+ }
+
+ void AccountUncommitted(intptr_t bytes) {
+ DCHECK_GE(bytes, 0);
+ committed_ -= bytes;
+ DCHECK_GE(committed_, 0);
+ }
+
private:
Heap* heap_;
AllocationSpace id_;
Executability executable_;
+
+ // Keeps track of committed memory in a space.
+ intptr_t committed_;
+ intptr_t max_committed_;
+};
+
+
+class MemoryChunkValidator {
+ // Computed offsets should match the compiler generated ones.
+ STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
+ STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
+ offsetof(MemoryChunk, live_byte_count_));
+ STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset ==
+ offsetof(MemoryChunk, slots_buffer_));
+ STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
+ offsetof(MemoryChunk, write_barrier_counter_));
+
+ // Validate our estimates on the header size.
+ STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
+ STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
+ STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
};
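
MemoryChunkValidator pins down the technique used throughout this header: keep hand-computed field offsets (consumed by generated code) honest with offsetof-based static asserts, so a layout change breaks the build rather than the runtime. A reduced sketch of the same pattern (the struct and offsets are illustrative):

#include <cstddef>
#include <cstdint>

// A header whose field offsets are spelled out as named constants.
struct ChunkHeader {
  size_t size;
  intptr_t flags;
  int live_byte_count;
};

// Hand-computed offsets, mirroring kSizeOffset / kLiveBytesOffset.
const size_t kSizeOffset = 0;
const size_t kFlagsOffset = sizeof(size_t);
const size_t kLiveBytesOffset = kFlagsOffset + sizeof(intptr_t);

// Compile-time layout checks.
static_assert(kSizeOffset == offsetof(ChunkHeader, size), "size moved");
static_assert(kFlagsOffset == offsetof(ChunkHeader, flags), "flags moved");
static_assert(kLiveBytesOffset == offsetof(ChunkHeader, live_byte_count),
              "live_byte_count moved");

int main() { return 0; }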
@@ -871,10 +1072,6 @@
// Returns false on failure.
bool SetUp(size_t requested_size);
- // Frees the range of virtual memory, and frees the data structures used to
- // manage it.
- void TearDown();
-
bool valid() { return code_range_ != NULL; }
Address start() {
DCHECK(valid());
@@ -900,10 +1097,11 @@
bool UncommitRawMemory(Address start, size_t length);
void FreeRawMemory(Address buf, size_t length);
- void ReserveEmergencyBlock();
- void ReleaseEmergencyBlock();
-
private:
+ // Frees the range of virtual memory, and frees the data structures used to
+ // manage it.
+ void TearDown();
+
Isolate* isolate_;
// The reserved range of virtual memory that all code objects are put in.
@@ -927,21 +1125,20 @@
size_t size;
};
+ // The global mutex guards free_list_ and allocation_list_ as GC threads may
+ // access both lists concurrently with the main thread.
+ base::Mutex code_range_mutex_;
+
// Freed blocks of memory are added to the free list. When the allocation
// list is exhausted, the free list is sorted and merged to make the new
// allocation list.
List<FreeBlock> free_list_;
+
// Memory is allocated from the free blocks on the allocation list.
// The block at current_allocation_block_index_ is the current block.
List<FreeBlock> allocation_list_;
int current_allocation_block_index_;
- // Emergency block guarantees that we can always allocate a page for
- // evacuation candidates when code space is compacted. Emergency block is
- // reserved immediately after GC and is released immedietely before
- // allocating a page for evacuation.
- FreeBlock emergency_block_;
-
// Finds a block on the allocation list that contains at least the
// requested amount of memory. If none is found, sorts and merges
// the existing free memory blocks, and searches again.
@@ -1026,33 +1223,46 @@
LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
Executability executable);
+ // PreFree logically frees the object, i.e., it takes care of the size
+ // bookkeeping and calls the allocation callback.
+ void PreFreeMemory(MemoryChunk* chunk);
+
+ // FreeMemory can be called concurrently when PreFree was executed before.
+ void PerformFreeMemory(MemoryChunk* chunk);
+
+ // Free is a wrapper method which calls PreFreeMemory and PerformFreeMemory
+ // together.
void Free(MemoryChunk* chunk);
- // Returns the maximum available bytes of heaps.
- intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
-
// Returns allocated spaces in bytes.
- intptr_t Size() { return size_; }
+ intptr_t Size() { return size_.Value(); }
+
+ // Returns allocated executable spaces in bytes.
+ intptr_t SizeExecutable() { return size_executable_.Value(); }
+
+ // Returns the maximum available bytes of heaps.
+ intptr_t Available() {
+ intptr_t size = Size();
+ return capacity_ < size ? 0 : capacity_ - size;
+ }
// Returns the maximum available executable bytes of heaps.
intptr_t AvailableExecutable() {
- if (capacity_executable_ < size_executable_) return 0;
- return capacity_executable_ - size_executable_;
+ intptr_t executable_size = SizeExecutable();
+ if (capacity_executable_ < executable_size) return 0;
+ return capacity_executable_ - executable_size;
}
- // Returns allocated executable spaces in bytes.
- intptr_t SizeExecutable() { return size_executable_; }
-
// Returns maximum available bytes that the old space can have.
intptr_t MaxAvailable() {
- return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
+ return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
}
// Returns an indication of whether a pointer is in a space that has
// been allocated by this MemoryAllocator.
- V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
- return address < lowest_ever_allocated_ ||
- address >= highest_ever_allocated_;
+ V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
+ return address < lowest_ever_allocated_.Value() ||
+ address >= highest_ever_allocated_.Value();
}
#ifdef DEBUG
@@ -1075,6 +1285,8 @@
bool CommitMemory(Address addr, size_t size, Executability executable);
+ void FreeNewSpaceMemory(Address addr, base::VirtualMemory* reservation,
+ Executability executable);
void FreeMemory(base::VirtualMemory* reservation, Executability executable);
void FreeMemory(Address addr, size_t size, Executability executable);
@@ -1119,7 +1331,7 @@
static int PageAreaSize(AllocationSpace space) {
DCHECK_NE(LO_SPACE, space);
return (space == CODE_SPACE) ? CodePageAreaSize()
- : Page::kMaxRegularHeapObjectSize;
+ : Page::kAllocatableMemory;
}
MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
@@ -1130,22 +1342,22 @@
Isolate* isolate_;
// Maximum space size in bytes.
- size_t capacity_;
+ intptr_t capacity_;
// Maximum subset of capacity_ that can be executable
- size_t capacity_executable_;
+ intptr_t capacity_executable_;
// Allocated space size in bytes.
- size_t size_;
+ AtomicNumber<intptr_t> size_;
// Allocated executable space size in bytes.
- size_t size_executable_;
+ AtomicNumber<intptr_t> size_executable_;
// We keep the lowest and highest addresses allocated as a quick way
// of determining that pointers are outside the heap. The estimate is
// conservative, i.e. not all addresses in 'allocated' space are allocated
// to our heap. The range is [lowest, highest[, inclusive on the low end
// and exclusive on the high end.
- void* lowest_ever_allocated_;
- void* highest_ever_allocated_;
+ AtomicValue<void*> lowest_ever_allocated_;
+ AtomicValue<void*> highest_ever_allocated_;
struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -1168,8 +1380,16 @@
PagedSpace* owner);
void UpdateAllocatedSpaceLimits(void* low, void* high) {
- lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
- highest_ever_allocated_ = Max(highest_ever_allocated_, high);
+ // The use of atomic primitives does not guarantee correctness (wrt.
+ // desired semantics) by default. The loop here ensures that we update the
+ // values only if they did not change in between.
+ void* ptr = nullptr;
+ do {
+ ptr = lowest_ever_allocated_.Value();
+ } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
+ do {
+ ptr = highest_ever_allocated_.Value();
+ } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
}
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
@@ -1204,31 +1424,20 @@
class HeapObjectIterator : public ObjectIterator {
public:
// Creates a new object iterator in a given space.
- // If the size function is not given, the iterator calls the default
- // Object::Size().
explicit HeapObjectIterator(PagedSpace* space);
- HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
- HeapObjectIterator(Page* page, HeapObjectCallback size_func);
+ explicit HeapObjectIterator(Page* page);
// Advance to the next object, skipping free spaces and other fillers and
// skipping the special garbage section of which there is one per space.
// Returns NULL when the iteration has ended.
- inline HeapObject* Next() {
- do {
- HeapObject* next_obj = FromCurrentPage();
- if (next_obj != NULL) return next_obj;
- } while (AdvanceToNextPage());
- return NULL;
- }
-
- virtual HeapObject* next_object() { return Next(); }
+ inline HeapObject* Next();
+ inline HeapObject* next_object() override;
private:
enum PageMode { kOnePageOnly, kAllPagesInSpace };
Address cur_addr_; // Current iteration point.
Address cur_end_; // End iteration point.
- HeapObjectCallback size_func_; // Size function or NULL.
PagedSpace* space_;
PageMode page_mode_;
@@ -1241,7 +1450,7 @@
// Initializes fields.
inline void Initialize(PagedSpace* owner, Address start, Address end,
- PageMode mode, HeapObjectCallback size_func);
+ PageMode mode);
};
@@ -1272,32 +1481,33 @@
// space.
class AllocationInfo {
public:
- AllocationInfo() : top_(NULL), limit_(NULL) {}
+ AllocationInfo() : top_(nullptr), limit_(nullptr) {}
+ AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
+
+ void Reset(Address top, Address limit) {
+ set_top(top);
+ set_limit(limit);
+ }
INLINE(void set_top(Address top)) {
SLOW_DCHECK(top == NULL ||
- (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
+ (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
top_ = top;
}
INLINE(Address top()) const {
SLOW_DCHECK(top_ == NULL ||
- (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
+ (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
return top_;
}
Address* top_address() { return &top_; }
INLINE(void set_limit(Address limit)) {
- SLOW_DCHECK(limit == NULL ||
- (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
limit_ = limit;
}
INLINE(Address limit()) const {
- SLOW_DCHECK(limit_ == NULL ||
- (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) ==
- 0);
return limit_;
}
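
AllocationInfo is the top/limit pair behind bump-pointer allocation: the fast path advances top, and fails over to a slow path when the request would cross limit. A hedged sketch of that mechanic (plain pointers instead of V8's Address/INLINE machinery):

#include <cassert>
#include <cstdint>

typedef uint8_t* Address;

struct LinearArea {
  Address top;
  Address limit;
};

// Bump-pointer fast path: returns the old top on success, nullptr when the
// linear area is exhausted and a slow path must refill it.
Address AllocateRaw(LinearArea* info, int size_in_bytes) {
  if (info->limit - info->top < size_in_bytes) return nullptr;
  Address result = info->top;
  info->top += size_in_bytes;
  return result;
}

int main() {
  uint8_t area[64];
  LinearArea info = {area, area + sizeof(area)};
  assert(AllocateRaw(&info, 48) == area);     // bump succeeds
  assert(AllocateRaw(&info, 32) == nullptr);  // only 16 bytes remain
  return 0;
}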
@@ -1319,19 +1529,11 @@
// An abstraction of the accounting statistics of a page-structured space.
-// The 'capacity' of a space is the number of object-area bytes (i.e., not
-// including page bookkeeping structures) currently in the space. The 'size'
-// of a space is the number of allocated bytes, the 'waste' in the space is
-// the number of bytes that are not allocated and not available to
-// allocation without reorganizing the space via a GC (e.g. small blocks due
-// to internal fragmentation, top of page areas in map space), and the bytes
-// 'available' is the number of unallocated bytes that are not waste. The
-// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
-// functions increase or decrease one of the non-capacity stats in
-// conjunction with capacity, or else they always balance increases and
-// decreases to the non-capacity stats.
+// functions increase or decrease one of the non-capacity stats in conjunction
+// with capacity, or else they always balance increases and decreases to the
+// non-capacity stats.
class AllocationStats BASE_EMBEDDED {
public:
AllocationStats() { Clear(); }
@@ -1341,26 +1543,23 @@
capacity_ = 0;
max_capacity_ = 0;
size_ = 0;
- waste_ = 0;
}
- void ClearSizeWaste() {
- size_ = capacity_;
- waste_ = 0;
- }
+ void ClearSize() { size_ = capacity_; }
- // Reset the allocation statistics (i.e., available = capacity with no
- // wasted or allocated bytes).
+ // Reset the allocation statistics (i.e., available = capacity with no wasted
+ // or allocated bytes).
void Reset() {
size_ = 0;
- waste_ = 0;
}
// Accessors for the allocation statistics.
intptr_t Capacity() { return capacity_; }
intptr_t MaxCapacity() { return max_capacity_; }
- intptr_t Size() { return size_; }
- intptr_t Waste() { return waste_; }
+ intptr_t Size() {
+ CHECK_GE(size_, 0);
+ return size_;
+ }
// Grow the space by adding available bytes. They are initially marked as
// being in use (part of the size), but will normally be immediately freed,
@@ -1371,7 +1570,7 @@
if (capacity_ > max_capacity_) {
max_capacity_ = capacity_;
}
- DCHECK(size_ >= 0);
+ CHECK(size_ >= 0);
}
// Shrink the space by removing available bytes. Since shrinking is done
@@ -1380,183 +1579,151 @@
void ShrinkSpace(int size_in_bytes) {
capacity_ -= size_in_bytes;
size_ -= size_in_bytes;
- DCHECK(size_ >= 0);
+ CHECK(size_ >= 0);
}
// Allocate from available bytes (available -> size).
void AllocateBytes(intptr_t size_in_bytes) {
size_ += size_in_bytes;
- DCHECK(size_ >= 0);
+ CHECK(size_ >= 0);
}
// Free allocated bytes, making them available (size -> available).
void DeallocateBytes(intptr_t size_in_bytes) {
size_ -= size_in_bytes;
- DCHECK(size_ >= 0);
+ CHECK_GE(size_, 0);
}
- // Waste free bytes (available -> waste).
- void WasteBytes(int size_in_bytes) {
- DCHECK(size_in_bytes >= 0);
- waste_ += size_in_bytes;
+ // Merge {other} into {this}.
+ void Merge(const AllocationStats& other) {
+ capacity_ += other.capacity_;
+ size_ += other.size_;
+ if (other.max_capacity_ > max_capacity_) {
+ max_capacity_ = other.max_capacity_;
+ }
+ CHECK_GE(size_, 0);
}
+ void DecreaseCapacity(intptr_t size_in_bytes) {
+ capacity_ -= size_in_bytes;
+ CHECK_GE(capacity_, 0);
+ CHECK_GE(capacity_, size_);
+ }
+
+ void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
+
private:
+ // |capacity_|: The number of object-area bytes (i.e., not including page
+ // bookkeeping structures) currently in the space.
intptr_t capacity_;
+
+ // |max_capacity_|: The maximum capacity ever observed.
intptr_t max_capacity_;
+
+ // |size_|: The number of allocated bytes.
intptr_t size_;
- intptr_t waste_;
};
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces
-//
-// Free-list nodes are free blocks in the heap. They look like heap objects
-// (free-list node pointers have the heap object tag, and they have a map like
-// a heap object). They have a size and a next pointer. The next pointer is
-// the raw address of the next free list node (or NULL).
-class FreeListNode : public HeapObject {
- public:
- // Obtain a free-list node from a raw address. This is not a cast because
- // it does not check nor require that the first word at the address is a map
- // pointer.
- static FreeListNode* FromAddress(Address address) {
- return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
- }
-
- static inline bool IsFreeListNode(HeapObject* object);
-
- // Set the size in bytes, which can be read with HeapObject::Size(). This
- // function also writes a map to the first word of the block so that it
- // looks like a heap object to the garbage collector and heap iteration
- // functions.
- void set_size(Heap* heap, int size_in_bytes);
-
- // Accessors for the next field.
- inline FreeListNode* next();
- inline FreeListNode** next_address();
- inline void set_next(FreeListNode* next);
-
- inline void Zap();
-
- static inline FreeListNode* cast(Object* object) {
- return reinterpret_cast<FreeListNode*>(object);
- }
-
- private:
- static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
-};
-
-
-// The free list category holds a pointer to the top element and a pointer to
-// the end element of the linked list of free memory blocks.
+// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
public:
- FreeListCategory() : top_(0), end_(NULL), available_(0) {}
+ explicit FreeListCategory(FreeList* owner, FreeListCategoryType type)
+ : type_(type),
+ top_(nullptr),
+ end_(nullptr),
+ available_(0),
+ owner_(owner) {}
+ // Concatenates {category} into {this}.
+ //
+ // Note: Thread-safe.
intptr_t Concatenate(FreeListCategory* category);
void Reset();
- void Free(FreeListNode* node, int size_in_bytes);
+ void Free(FreeSpace* node, int size_in_bytes);
- FreeListNode* PickNodeFromList(int* node_size);
- FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);
+ // Pick a node from the list.
+ FreeSpace* PickNodeFromList(int* node_size);
+
+ // Pick a node from the list and compare it against {size_in_bytes}. If the
+ // node's size is greater than or equal, return the node; otherwise return
+ // null.
+ FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
+
+ // Search for a node of size {size_in_bytes}.
+ FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size);
intptr_t EvictFreeListItemsInList(Page* p);
bool ContainsPageFreeListItemsInList(Page* p);
void RepairFreeList(Heap* heap);
- FreeListNode* top() const {
- return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
- }
+ bool IsEmpty() { return top() == nullptr; }
- void set_top(FreeListNode* top) {
- base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
- }
-
- FreeListNode** GetEndAddress() { return &end_; }
- FreeListNode* end() const { return end_; }
- void set_end(FreeListNode* end) { end_ = end; }
-
- int* GetAvailableAddress() { return &available_; }
+ FreeList* owner() { return owner_; }
int available() const { return available_; }
- void set_available(int available) { available_ = available; }
-
- base::Mutex* mutex() { return &mutex_; }
-
- bool IsEmpty() { return top() == 0; }
#ifdef DEBUG
intptr_t SumFreeList();
int FreeListLength();
+ bool IsVeryLong();
#endif
private:
- // top_ points to the top FreeListNode* in the free list category.
- base::AtomicWord top_;
- FreeListNode* end_;
- base::Mutex mutex_;
+ // For debug builds we accurately compute free list lengths up until
+ // {kVeryLongFreeList} by manually walking the list.
+ static const int kVeryLongFreeList = 500;
- // Total available bytes in all blocks of this free list category.
+ FreeSpace* top() { return top_.Value(); }
+ void set_top(FreeSpace* top) { top_.SetValue(top); }
+
+ FreeSpace* end() const { return end_; }
+ void set_end(FreeSpace* end) { end_ = end; }
+
+ // |type_|: The type of this free list category.
+ FreeListCategoryType type_;
+
+ // |top_|: Points to the top FreeSpace* in the free list category.
+ AtomicValue<FreeSpace*> top_;
+
+ // |end_|: Points to the end FreeSpace* in the free list category.
+ FreeSpace* end_;
+
+ // |available_|: Total available bytes in all blocks of this free list
+ // category.
int available_;
+
+ // |owner_|: The owning free list of this category.
+ FreeList* owner_;
};
-
-// The free list for the old space. The free list is organized in such a way
-// as to encourage objects allocated around the same time to be near each
-// other. The normal way to allocate is intended to be by bumping a 'top'
+// A free list maintaining free blocks of memory. The free list is organized in
+// a way to encourage objects allocated around the same time to be near each
+// other. The normal way to allocate is intended to be by bumping a 'top'
// pointer until it hits a 'limit' pointer. When the limit is hit we need to
-// find a new space to allocate from. This is done with the free list, which
-// is divided up into rough categories to cut down on waste. Having finer
+// find a new space to allocate from. This is done with the free list, which is
+// divided up into rough categories to cut down on waste. Having finer
// categories would scatter allocation more.
-// The old space free list is organized in categories.
-// 1-31 words: Such small free areas are discarded for efficiency reasons.
-// They can be reclaimed by the compactor. However the distance between top
-// and limit may be this small.
-// 32-255 words: There is a list of spaces this large. It is used for top and
-// limit when the object we need to allocate is 1-31 words in size. These
-// spaces are called small.
-// 256-2047 words: There is a list of spaces this large. It is used for top and
-// limit when the object we need to allocate is 32-255 words in size. These
-// spaces are called medium.
-// 1048-16383 words: There is a list of spaces this large. It is used for top
-// and limit when the object we need to allocate is 256-2047 words in size.
-// These spaces are call large.
-// At least 16384 words. This list is for objects of 2048 words or larger.
-// Empty pages are added to this list. These spaces are called huge.
+// The free list is organized in categories as follows:
+// 1-31 words (too small): Such small free areas are discarded for efficiency
+// reasons. They can be reclaimed by the compactor. However the distance
+// between top and limit may be this small.
+// 32-255 words (small): Used for allocating free space between 1-31 words in
+// size.
+// 256-2047 words (medium): Used for allocating free space between 32-255 words
+// in size.
+// 2048-16383 words (large): Used for allocating free space between 256-2047
+// words in size.
+// At least 16384 words (huge): This list is for objects of 2048 words or
+// larger. Empty pages are also added to this list.
class FreeList {
public:
- explicit FreeList(PagedSpace* owner);
-
- intptr_t Concatenate(FreeList* free_list);
-
- // Clear the free list.
- void Reset();
-
- // Return the number of bytes available on the free list.
- intptr_t available() {
- return small_list_.available() + medium_list_.available() +
- large_list_.available() + huge_list_.available();
- }
-
- // Place a node on the free list. The block of size 'size_in_bytes'
- // starting at 'start' is placed on the free list. The return value is the
- // number of bytes that have been lost due to internal fragmentation by
- // freeing the block. Bookkeeping information will be written to the block,
- // i.e., its contents will be destroyed. The start address should be word
- // aligned, and the size should be a non-zero multiple of the word size.
- int Free(Address start, int size_in_bytes);
-
// This method returns how much memory can be allocated after freeing
// maximum_freed memory.
static inline int GuaranteedAllocatable(int maximum_freed) {
- if (maximum_freed < kSmallListMin) {
+ if (maximum_freed <= kSmallListMin) {
return 0;
} else if (maximum_freed <= kSmallListMax) {
return kSmallAllocationMax;
@@ -1568,51 +1735,103 @@
return maximum_freed;
}
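
The thresholds above make category selection a cascade of comparisons. A hedged sketch of how a freed block maps to a FreeListCategoryType, using the constants from this header (kPointerSize fixed to 8 for illustration; SelectFreeListCategoryType is a made-up helper name):

#include <cassert>

const int kPointerSize = 8;  // assumption: 64-bit build
enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };

// Thresholds as defined in this header.
const int kSmallListMax = 0xff * kPointerSize;
const int kMediumListMax = 0x7ff * kPointerSize;
const int kLargeListMax = 0x3fff * kPointerSize;

// Illustrative mapping from a free block size to its category; blocks of at
// most kSmallListMin bytes are too small to track at all.
FreeListCategoryType SelectFreeListCategoryType(int size_in_bytes) {
  if (size_in_bytes <= kSmallListMax) return kSmall;
  if (size_in_bytes <= kMediumListMax) return kMedium;
  if (size_in_bytes <= kLargeListMax) return kLarge;
  return kHuge;
}

int main() {
  assert(SelectFreeListCategoryType(100 * kPointerSize) == kSmall);
  assert(SelectFreeListCategoryType(0x1000 * kPointerSize) == kLarge);
  assert(SelectFreeListCategoryType(0x8000 * kPointerSize) == kHuge);
  return 0;
}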
- // Allocate a block of size 'size_in_bytes' from the free list. The block
- // is unitialized. A failure is returned if no block is available. The
- // number of bytes lost to fragmentation is returned in the output parameter
- // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
+ explicit FreeList(PagedSpace* owner);
+
+ // The method concatenates {other} into {this} and returns the added bytes,
+ // including waste.
+ //
+ // Note: Thread-safe.
+ intptr_t Concatenate(FreeList* other);
+
+ // Adds a node to the free list. The block of size {size_in_bytes} starting
+ // at {start} is placed on the free list. The return value is the number of
+ // bytes that were not added to the free list because the freed memory block
+ // was too small. Bookkeeping information will be written to the block, i.e.,
+ // its contents will be destroyed. The start address should be word aligned,
+ // and the size should be a non-zero multiple of the word size.
+ int Free(Address start, int size_in_bytes);
+
+ // Allocate a block of size {size_in_bytes} from the free list. The block is
+ // uninitialized. A failure is returned if no block is available. The size
+ // should be a non-zero multiple of the word size.
MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
+ // Clear the free list.
+ void Reset();
+
+ void ResetStats() { wasted_bytes_ = 0; }
+
+ // Return the number of bytes available on the free list.
+ intptr_t Available() {
+ return small_list_.available() + medium_list_.available() +
+ large_list_.available() + huge_list_.available();
+ }
+
+ // The method tries to find a {FreeSpace} node of at least {size_in_bytes}
+ // size in the free list category exactly matching the size. If no suitable
+ // node could be found, the method falls back to retrieving a {FreeSpace}
+ // from the large or huge free list category.
+ //
+ // Can be used concurrently.
+ MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
+
bool IsEmpty() {
return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
large_list_.IsEmpty() && huge_list_.IsEmpty();
}
-#ifdef DEBUG
- void Zap();
- intptr_t SumFreeLists();
- bool IsVeryLong();
-#endif
-
// Used after booting the VM.
void RepairLists(Heap* heap);
intptr_t EvictFreeListItems(Page* p);
bool ContainsPageFreeListItems(Page* p);
- FreeListCategory* small_list() { return &small_list_; }
- FreeListCategory* medium_list() { return &medium_list_; }
- FreeListCategory* large_list() { return &large_list_; }
- FreeListCategory* huge_list() { return &huge_list_; }
+ PagedSpace* owner() { return owner_; }
+ intptr_t wasted_bytes() { return wasted_bytes_; }
+ base::Mutex* mutex() { return &mutex_; }
+
+#ifdef DEBUG
+ void Zap();
+ intptr_t SumFreeLists();
+ bool IsVeryLong();
+#endif
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
- static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
+ static const int kMaxBlockSize = Page::kAllocatableMemory;
- FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
-
- PagedSpace* owner_;
- Heap* heap_;
-
- static const int kSmallListMin = 0x20 * kPointerSize;
+ static const int kSmallListMin = 0x1f * kPointerSize;
static const int kSmallListMax = 0xff * kPointerSize;
static const int kMediumListMax = 0x7ff * kPointerSize;
static const int kLargeListMax = 0x3fff * kPointerSize;
- static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
+ static const int kSmallAllocationMax = kSmallListMin;
static const int kMediumAllocationMax = kSmallListMax;
static const int kLargeAllocationMax = kMediumListMax;
+
+ FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
+ FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
+
+ FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
+ switch (category) {
+ case kSmall:
+ return &small_list_;
+ case kMedium:
+ return &medium_list_;
+ case kLarge:
+ return &large_list_;
+ case kHuge:
+ return &huge_list_;
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return nullptr;
+ }
+
+ PagedSpace* owner_;
+ base::Mutex mutex_;
+ intptr_t wasted_bytes_;
FreeListCategory small_list_;
FreeListCategory medium_list_;
FreeListCategory large_list_;
@@ -1652,10 +1871,7 @@
return object_;
}
- AllocationSpace RetrySpace() {
- DCHECK(IsRetry());
- return static_cast<AllocationSpace>(Smi::cast(object_)->value());
- }
+ inline AllocationSpace RetrySpace();
private:
explicit AllocationResult(AllocationSpace space)
@@ -1668,13 +1884,68 @@
STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
+// LocalAllocationBuffer represents a linear allocation area that is created
+// from a given {AllocationResult} and can be used to allocate memory without
+// synchronization.
+//
+// The buffer is properly closed upon destruction and reassignment.
+// Example:
+// {
+// AllocationResult result = ...;
+// LocalAllocationBuffer a(heap, result, size);
+// LocalAllocationBuffer b = a;
+// CHECK(!a.IsValid());
+// CHECK(b.IsValid());
+// // {a} is invalid now and cannot be used for further allocations.
+// }
+// // Since {b} went out of scope, the LAB is closed, resulting in creating a
+// // filler object for the remaining area.
+class LocalAllocationBuffer {
+ public:
+ // Returns an invalid buffer, i.e., a buffer that cannot be used for
+ // allocations. A buffer turns invalid when it is reassigned or when it is
+ // constructed from an invalid {AllocationResult}.
+ static inline LocalAllocationBuffer InvalidBuffer();
+
+ // Creates a new LAB from a given {AllocationResult}. Results in
+ // InvalidBuffer if the result indicates a retry.
+ static inline LocalAllocationBuffer FromResult(Heap* heap,
+ AllocationResult result,
+ intptr_t size);
+
+ ~LocalAllocationBuffer() { Close(); }
+
+ // Convert to C++11 move-semantics once allowed by the style guide.
+ LocalAllocationBuffer(const LocalAllocationBuffer& other);
+ LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
+
+ MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment);
+
+ inline bool IsValid() { return allocation_info_.top() != nullptr; }
+
+ // Try to merge LABs, which is only possible when they are adjacent in memory.
+ // Returns true if the merge was successful, false otherwise.
+ inline bool TryMerge(LocalAllocationBuffer* other);
+
+ private:
+ LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
+
+ void Close();
+
+ Heap* heap_;
+ AllocationInfo allocation_info_;
+};
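+
+// A possible end-to-end use of a LAB (sketch; kLabSize and the exact
+// allocation call are illustrative, not part of this patch):
+//
+//   AllocationResult result =
+//       heap->new_space()->AllocateRawSynchronized(kLabSize, kWordAligned);
+//   LocalAllocationBuffer lab =
+//       LocalAllocationBuffer::FromResult(heap, result, kLabSize);
+//   if (lab.IsValid()) {
+//     AllocationResult obj = lab.AllocateRawAligned(size, kWordAligned);
+//   }
+//
+// Two adjacent LABs can be combined with {TryMerge}; the surviving buffer
+// takes over the other buffer's area if both are contiguous in memory.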
+
+
class PagedSpace : public Space {
public:
- // Creates a space with a maximum capacity, and an id.
- PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
- Executability executable);
+ static const intptr_t kCompactionMemoryWanted = 500 * KB;
- virtual ~PagedSpace() {}
+ // Creates a space with an id.
+ PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
+
+ ~PagedSpace() override { TearDown(); }
// Set up the space using the given address range of virtual memory (from
// the memory allocator's initial chunk) if possible. If the block of
@@ -1686,13 +1957,12 @@
// subsequently torn down.
bool HasBeenSetUp();
- // Cleans up the space, frees all pages in this space except those belonging
- // to the initial chunk, uncommits addresses in the initial chunk.
- void TearDown();
-
// Checks whether an object/address is in this space.
inline bool Contains(Address a);
- bool Contains(HeapObject* o) { return Contains(o->address()); }
+ inline bool Contains(HeapObject* o);
+ // Unlike the Contains() methods, it is safe to call this one even for
+ // addresses of unmapped memory.
+ bool ContainsSafe(Address addr);
// Given an address occupied by a live object, return that object if it is
// in this space, or a Smi if it is not. The implementation iterates over
@@ -1702,7 +1972,7 @@
// During boot the free_space_map is created, and afterwards we may need
// to write it into the free list nodes that were already created.
- void RepairFreeListsAfterBoot();
+ void RepairFreeListsAfterDeserialization();
// Prepares for a mark-compact GC.
void PrepareForMarkCompact();
@@ -1710,28 +1980,9 @@
// Current capacity without growing (Size() + Available()).
intptr_t Capacity() { return accounting_stats_.Capacity(); }
- // Total amount of memory committed for this space. For paged
- // spaces this equals the capacity.
- intptr_t CommittedMemory() { return Capacity(); }
-
- // The maximum amount of memory ever committed for this space.
- intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
-
// Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory();
+ size_t CommittedPhysicalMemory() override;
- struct SizeStats {
- intptr_t Total() {
- return small_size_ + medium_size_ + large_size_ + huge_size_;
- }
-
- intptr_t small_size_;
- intptr_t medium_size_;
- intptr_t large_size_;
- intptr_t huge_size_;
- };
-
- void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
void ResetFreeListStatistics();
// Sets the capacity, the available space and the wasted space to zero.
@@ -1740,7 +1991,8 @@
// discovered during the sweeping they are subtracted from the size and added
// to the available and wasted totals.
void ClearStats() {
- accounting_stats_.ClearSizeWaste();
+ accounting_stats_.ClearSize();
+ free_list_.ResetStats();
ResetFreeListStatistics();
}
@@ -1753,22 +2005,21 @@
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
// immediately added to the free list so they show up here.
- intptr_t Available() { return free_list_.available(); }
+ intptr_t Available() override { return free_list_.Available(); }
// Allocated bytes in this space. Garbage bytes that were not found due to
// concurrent sweeping are counted as being allocated! The bytes in the
// current linear allocation area (between top and limit) are also counted
// here.
- virtual intptr_t Size() { return accounting_stats_.Size(); }
+ intptr_t Size() override { return accounting_stats_.Size(); }
// As size, but the bytes in lazily swept pages are estimated and the bytes
// in the current linear allocation area are not included.
- virtual intptr_t SizeOfObjects();
+ intptr_t SizeOfObjects() override;
// Wasted bytes in this space. These are just the bytes that were thrown away
- // due to being too small to use for allocation. They do not include the
- // free bytes that were not found at all due to lazy sweeping.
- virtual intptr_t Waste() { return accounting_stats_.Waste(); }
+ // due to being too small to use for allocation.
+ virtual intptr_t Waste() { return free_list_.wasted_bytes(); }
// Returns the allocation pointer in this space.
Address top() { return allocation_info_.top(); }
@@ -1784,7 +2035,21 @@
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
- MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
+ int size_in_bytes);
+
+ MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
+ int size_in_bytes);
+
+ // Allocate the requested number of bytes in the space with the given
+ // alignment if possible, return a failure object if not.
+ MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment);
+
+ // Allocate the requested number of bytes in the space, taking the requested
+ // allocation alignment into account if needed.
+ MUST_USE_RESULT inline AllocationResult AllocateRaw(
+ int size_in_bytes, AllocationAlignment alignment);
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
@@ -1793,7 +2058,6 @@
int Free(Address start, int size_in_bytes) {
int wasted = free_list_.Free(start, size_in_bytes);
accounting_stats_.DeallocateBytes(size_in_bytes);
- accounting_stats_.WasteBytes(wasted);
return size_in_bytes - wasted;
}
@@ -1804,8 +2068,7 @@
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.set_top(top);
- allocation_info_.set_limit(limit);
+ allocation_info_.Reset(top, limit);
}
// Empty space allocation info, returning unused area to free list.
@@ -1838,7 +2101,7 @@
#ifdef DEBUG
// Print meta info and objects in this space.
- virtual void Print();
+ void Print() override;
// Reports statistics for the space
void ReportStatistics();
@@ -1856,22 +2119,6 @@
!p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
}
- void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
-
- void IncreaseUnsweptFreeBytes(Page* p) {
- DCHECK(ShouldBeSweptBySweeperThreads(p));
- unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
- }
-
- void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; }
-
- void DecreaseUnsweptFreeBytes(Page* p) {
- DCHECK(ShouldBeSweptBySweeperThreads(p));
- unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
- }
-
- void ResetUnsweptFreeBytes() { unswept_free_bytes_ = 0; }
-
// This function tries to steal size_in_bytes of memory from the sweeper
// threads' free lists. If it does not succeed in stealing enough memory, it
// will wait for the sweeper threads to finish sweeping.
@@ -1885,9 +2132,9 @@
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
- void EvictEvacuationCandidatesFromFreeLists();
+ void EvictEvacuationCandidatesFromLinearAllocationArea();
- bool CanExpand();
+ bool CanExpand(size_t size);
// Returns the number of total pages in this space.
int CountTotalPages();
@@ -1895,22 +2142,64 @@
// Return size of allocatable area on a page in this space.
inline int AreaSize() { return area_size_; }
- void CreateEmergencyMemory();
- void FreeEmergencyMemory();
- void UseEmergencyMemory();
+ virtual bool is_local() { return false; }
- bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
+ // Merges {other} into the current space. Note that this modifies {other},
+ // e.g., removes its bump pointer area and resets statistics.
+ void MergeCompactionSpace(CompactionSpace* other);
+
+ void DivideUponCompactionSpaces(CompactionSpaceCollection** other, int num,
+ intptr_t limit = kCompactionMemoryWanted);
+
+ // Refills the free list from the corresponding free list filled by the
+ // sweeper.
+ virtual void RefillFreeList();
protected:
+ void AddMemory(Address start, intptr_t size);
+
+ FreeSpace* TryRemoveMemory(intptr_t size_in_bytes);
+
+ void MoveOverFreeMemory(PagedSpace* other);
+
+ // PagedSpaces that should be included in snapshots have different, i.e.,
+ // smaller, initial pages.
+ virtual bool snapshotable() { return true; }
+
FreeList* free_list() { return &free_list_; }
+ bool HasPages() { return anchor_.next_page() != &anchor_; }
+
+ // Cleans up the space, frees all pages in this space except those belonging
+ // to the initial chunk, uncommits addresses in the initial chunk.
+ void TearDown();
+
+ // Expands the space by allocating a fixed number of pages. Returns false if
+ // it cannot allocate the requested number of pages from the OS, or if the
+ // hard heap size limit has been hit.
+ bool Expand();
+
+ // Generic fast case allocation function that tries linear allocation at the
+ // address denoted by top in allocation_info_.
+ inline HeapObject* AllocateLinearly(int size_in_bytes);
+
+ // Generic fast case allocation function that tries aligned linear allocation
+ // at the address denoted by top in allocation_info_. Writes the aligned
+ // allocation size, which includes the filler size, to size_in_bytes.
+ inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
+ AllocationAlignment alignment);
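+
+ // In essence this is bump-pointer allocation (sketch of the fast path,
+ // ignoring alignment fillers; the actual implementation lives in the
+ // corresponding -inl.h file):
+ //
+ //   Address top = allocation_info_.top();
+ //   if (allocation_info_.limit() - top >= size_in_bytes) {
+ //     allocation_info_.set_top(top + size_in_bytes);
+ //     return HeapObject::FromAddress(top);
+ //   }
+ //   return nullptr;  // Caller falls back to SlowAllocateRaw().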
+
+ // If sweeping is still in progress try to sweep unswept pages. If that is
+ // not successful, wait for the sweeper threads and re-try free-list
+ // allocation.
+ MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
+ int size_in_bytes);
+
+ // Slow path of AllocateRaw. This function is space-dependent.
+ MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
+
int area_size_;
- // Maximum capacity of this space.
- intptr_t max_capacity_;
-
- intptr_t SizeOfFirstPage();
-
// Accounting information for this space.
AllocationStats accounting_stats_;
@@ -1923,41 +2212,19 @@
// Normal allocation information.
AllocationInfo allocation_info_;
- // The number of free bytes which could be reclaimed by advancing the
- // concurrent sweeper threads.
- intptr_t unswept_free_bytes_;
-
// The sweeper threads iterate over the list of pointer and data space pages
// and sweep these pages concurrently. They will stop sweeping after the
// end_of_unswept_pages_ page.
Page* end_of_unswept_pages_;
- // Emergency memory is the memory of a full page for a given space, allocated
- // conservatively before evacuating a page. If compaction fails due to out
- // of memory error the emergency memory can be used to complete compaction.
- // If not used, the emergency memory is released after compaction.
- MemoryChunk* emergency_memory_;
+ // Mutex guarding any concurrent access to the space.
+ base::Mutex space_mutex_;
- // Expands the space by allocating a fixed number of pages. Returns false if
- // it cannot allocate requested number of pages from OS, or if the hard heap
- // size limit has been hit.
- bool Expand();
-
- // Generic fast case allocation function that tries linear allocation at the
- // address denoted by top in allocation_info_.
- inline HeapObject* AllocateLinearly(int size_in_bytes);
-
- // If sweeping is still in progress try to sweep unswept pages. If that is
- // not successful, wait for the sweeper threads and re-try free-list
- // allocation.
- MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
- int size_in_bytes);
-
- // Slow path of AllocateRaw. This function is space-dependent.
- MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
-
- friend class PageIterator;
friend class MarkCompactCollector;
+ friend class PageIterator;
+
+ // Used in cctest.
+ friend class HeapTester;
};
@@ -2011,15 +2278,15 @@
(1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
(1 << MemoryChunk::SCAN_ON_SCAVENGE);
- static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
+ static const int kAreaSize = Page::kAllocatableMemory;
- inline NewSpacePage* next_page() const {
+ inline NewSpacePage* next_page() {
return static_cast<NewSpacePage*>(next_chunk());
}
inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
- inline NewSpacePage* prev_page() const {
+ inline NewSpacePage* prev_page() {
return static_cast<NewSpacePage*>(prev_chunk());
}
@@ -2164,12 +2431,21 @@
}
// If we don't have these here then SemiSpace will be abstract. However
- // they should never be called.
- virtual intptr_t Size() {
+ // they should never be called:
+
+ intptr_t Size() override {
UNREACHABLE();
return 0;
}
+ intptr_t SizeOfObjects() override { return Size(); }
+
+ intptr_t Available() override {
+ UNREACHABLE();
+ return 0;
+ }
+
bool is_committed() { return committed_; }
bool Commit();
bool Uncommit();
@@ -2182,7 +2458,7 @@
#endif
#ifdef DEBUG
- virtual void Print();
+ void Print() override;
// Validate a range of addresses in a SemiSpace.
// The "from" address must be on a page prior to the "to" address,
// in the linked page order, or it must be earlier on the same page.
@@ -2208,11 +2484,8 @@
static void Swap(SemiSpace* from, SemiSpace* to);
- // Returns the maximum amount of memory ever committed by the semi space.
- size_t MaximumCommittedMemory() { return maximum_committed_; }
-
// Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory();
+ size_t CommittedPhysicalMemory() override;
private:
// Flips the semispace between being from-space and to-space.
@@ -2230,8 +2503,6 @@
int maximum_total_capacity_;
int initial_total_capacity_;
- intptr_t maximum_committed_;
-
// The start address of the space.
Address start_;
// Used to govern object promotion during mark-compact collection.
@@ -2250,9 +2521,6 @@
friend class SemiSpaceIterator;
friend class NewSpacePageIterator;
-
- public:
- TRACK_MEMORY("SemiSpace")
};
@@ -2263,49 +2531,21 @@
// iterator is created are not iterated.
class SemiSpaceIterator : public ObjectIterator {
public:
- // Create an iterator over the objects in the given space. If no start
- // address is given, the iterator starts from the bottom of the space. If
- // no size function is given, the iterator calls Object::Size().
-
- // Iterate over all of allocated to-space.
+ // Create an iterator over the allocated objects in the given to-space.
explicit SemiSpaceIterator(NewSpace* space);
- // Iterate over all of allocated to-space, with a custome size function.
- SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
- // Iterate over part of allocated to-space, from start to the end
- // of allocation.
- SemiSpaceIterator(NewSpace* space, Address start);
- // Iterate from one address to another in the same semi-space.
- SemiSpaceIterator(Address from, Address to);
- HeapObject* Next() {
- if (current_ == limit_) return NULL;
- if (NewSpacePage::IsAtEnd(current_)) {
- NewSpacePage* page = NewSpacePage::FromLimit(current_);
- page = page->next_page();
- DCHECK(!page->is_anchor());
- current_ = page->area_start();
- if (current_ == limit_) return NULL;
- }
-
- HeapObject* object = HeapObject::FromAddress(current_);
- int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
-
- current_ += size;
- return object;
- }
+ inline HeapObject* Next();
// Implementation of the ObjectIterator functions.
- virtual HeapObject* next_object() { return Next(); }
+ inline HeapObject* next_object() override;
private:
- void Initialize(Address start, Address end, HeapObjectCallback size_func);
+ void Initialize(Address start, Address end);
// The current iteration point.
Address current_;
// The end of iteration.
Address limit_;
- // The callback function.
- HeapObjectCallback size_func_;
};
@@ -2336,6 +2576,54 @@
NewSpacePage* last_page_;
};
+// -----------------------------------------------------------------------------
+// Allows observation of inline allocation in the new space.
+class InlineAllocationObserver {
+ public:
+ explicit InlineAllocationObserver(intptr_t step_size)
+ : step_size_(step_size), bytes_to_next_step_(step_size) {
+ DCHECK(step_size >= kPointerSize);
+ }
+ virtual ~InlineAllocationObserver() {}
+
+ private:
+ intptr_t step_size() const { return step_size_; }
+ intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
+
+ // Pure virtual method provided by the subclasses that gets called when at
+ // least step_size bytes have been allocated. soon_object is the address just
+ // allocated (but not yet initialized); size is the size of the object as
+ // requested (i.e., without the alignment fillers). Some complexities to be
+ // aware of:
+ // 1) soon_object will be nullptr in cases where we end up observing an
+ // allocation that happens to be a filler space (e.g., page boundaries).
+ // 2) size is the requested size at the time of allocation. Right-trimming
+ // may change the object size dynamically.
+ // 3) soon_object may actually be the first object in an allocation-folding
+ // group. In such a case, size is the size of the group rather than the
+ // size of the first object.
+ virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
+
+ // Called each time the new space does an inline allocation step. This may
+ // happen more frequently than the step_size we are monitoring (e.g., when
+ // there are multiple observers, or when a page or space boundary is
+ // encountered).
+ void InlineAllocationStep(int bytes_allocated, Address soon_object,
+ size_t size) {
+ bytes_to_next_step_ -= bytes_allocated;
+ if (bytes_to_next_step_ <= 0) {
+ Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
+ size);
+ bytes_to_next_step_ = step_size_;
+ }
+ }
+
+ intptr_t step_size_;
+ intptr_t bytes_to_next_step_;
+
+ friend class NewSpace;
+
+ DISALLOW_COPY_AND_ASSIGN(InlineAllocationObserver);
+};
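+
+// Example observer (sketch; the class name and step size are illustrative):
+//
+//   class SampleStepObserver : public InlineAllocationObserver {
+//    public:
+//     SampleStepObserver() : InlineAllocationObserver(64 * KB) {}
+//
+//    private:
+//     // Called after roughly every 64 KB of inline allocation.
+//     void Step(int bytes_allocated, Address soon_object,
+//               size_t size) override {
+//       // React to allocation progress, e.g., advance incremental marking.
+//     }
+//   };
+//
+//   SampleStepObserver observer;
+//   heap->new_space()->AddInlineAllocationObserver(&observer);
+//   ...
+//   heap->new_space()->RemoveInlineAllocationObserver(&observer);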
// -----------------------------------------------------------------------------
// The young generation space.
@@ -2351,7 +2639,8 @@
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
reservation_(),
- inline_allocation_limit_step_(0) {}
+ top_on_previous_step_(0),
+ inline_allocation_observers_paused_(false) {}
// Sets up the new space using the given chunk.
bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
@@ -2391,7 +2680,7 @@
}
// Return the allocated bytes in the active semispace.
- virtual intptr_t Size() {
+ intptr_t Size() override {
return pages_used_ * NewSpacePage::kAreaSize +
static_cast<int>(top() - to_space_.page_low());
}
@@ -2415,23 +2704,41 @@
return to_space_.TotalCapacity();
}
- // Return the total amount of memory committed for new space.
- intptr_t CommittedMemory() {
- if (from_space_.is_committed()) return 2 * Capacity();
- return TotalCapacity();
+ // Committed memory for NewSpace is the committed memory of both semi-spaces
+ // combined.
+ intptr_t CommittedMemory() override {
+ return from_space_.CommittedMemory() + to_space_.CommittedMemory();
}
- // Return the total amount of memory committed for new space.
- intptr_t MaximumCommittedMemory() {
- return to_space_.MaximumCommittedMemory() +
- from_space_.MaximumCommittedMemory();
+ intptr_t MaximumCommittedMemory() override {
+ return from_space_.MaximumCommittedMemory() +
+ to_space_.MaximumCommittedMemory();
}
// Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory();
+ size_t CommittedPhysicalMemory() override;
// Return the available bytes without growing.
- intptr_t Available() { return Capacity() - Size(); }
+ intptr_t Available() override { return Capacity() - Size(); }
+
+ intptr_t PagesFromStart(Address addr) {
+ return static_cast<intptr_t>(addr - bottom()) / Page::kPageSize;
+ }
+
+ size_t AllocatedSinceLastGC() {
+ intptr_t allocated = top() - to_space_.age_mark();
+ if (allocated < 0) {
+ // Runtime has lowered the top below the age mark.
+ return 0;
+ }
+ // Correctly account for non-allocatable regions at the beginning of
+ // each page from the age_mark() to the top().
+ intptr_t pages =
+ PagesFromStart(top()) - PagesFromStart(to_space_.age_mark());
+ allocated -= pages * (NewSpacePage::kObjectStartOffset);
+ DCHECK(0 <= allocated && allocated <= Size());
+ return static_cast<size_t>(allocated);
+ }
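+
+ // For instance (illustrative numbers): if the age mark sits on the first
+ // page of to-space and top() is two pages further, the raw difference
+ // includes two unusable page headers, so 2 * NewSpacePage::kObjectStartOffset
+ // gets subtracted from the result.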
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
@@ -2455,11 +2762,6 @@
return allocation_info_.top();
}
- void set_top(Address top) {
- DCHECK(to_space_.current_page()->ContainsLimit(top));
- allocation_info_.set_top(top);
- }
-
// Return the address of the allocation pointer limit in the active semispace.
Address limit() {
DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
@@ -2498,16 +2800,35 @@
return allocation_info_.limit_address();
}
- MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
+ MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment));
+
+ MUST_USE_RESULT INLINE(
+ AllocationResult AllocateRawUnaligned(int size_in_bytes));
+
+ MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
+ int size_in_bytes, AllocationAlignment alignment));
+
+ MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
+ int size_in_bytes, AllocationAlignment alignment);
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
void UpdateInlineAllocationLimit(int size_in_bytes);
- void LowerInlineAllocationLimit(intptr_t step) {
- inline_allocation_limit_step_ = step;
+
+ // Allows observation of inline allocation. The observer->Step() method gets
+ // called after approximately every step_size bytes have been allocated. This
+ // works by lowering the allocation limit and raising it again after each
+ // step.
+ void AddInlineAllocationObserver(InlineAllocationObserver* observer);
+
+ // Removes a previously installed observer.
+ void RemoveInlineAllocationObserver(InlineAllocationObserver* observer);
+
+ void DisableInlineAllocationSteps() {
+ top_on_previous_step_ = 0;
UpdateInlineAllocationLimit(0);
- top_on_previous_step_ = allocation_info_.top();
}
// Get the extent of the inactive semispace (for use as a marking stack,
@@ -2540,6 +2861,7 @@
// are no pages, or the current page is already empty), or true
// if successful.
bool AddFreshPage();
+ bool AddFreshPageSynchronized();
#ifdef VERIFY_HEAP
// Verify the active semispace.
@@ -2548,7 +2870,7 @@
#ifdef DEBUG
// Print the active semispace.
- virtual void Print() { to_space_.Print(); }
+ void Print() override { to_space_.Print(); }
#endif
// Iterates the active semispace to collect statistics.
@@ -2575,9 +2897,7 @@
return from_space_.Uncommit();
}
- inline intptr_t inline_allocation_limit_step() {
- return inline_allocation_limit_step_;
- }
+ bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
SemiSpace* active_space() { return &to_space_; }
@@ -2585,6 +2905,8 @@
// Update allocation info to match the current to-space page.
void UpdateAllocationInfo();
+ base::Mutex mutex_;
+
Address chunk_base_;
uintptr_t chunk_size_;
@@ -2604,39 +2926,128 @@
// mark-compact collection.
AllocationInfo allocation_info_;
- // When incremental marking is active we will set allocation_info_.limit
- // to be lower than actual limit and then will gradually increase it
- // in steps to guarantee that we do incremental marking steps even
- // when all allocation is performed from inlined generated code.
- intptr_t inline_allocation_limit_step_;
-
+ // When inline allocation stepping is active, either because of incremental
+ // marking or because of an idle scavenge, we 'interrupt' inline allocation
+ // every once in a while. This is done by setting allocation_info_.limit to be
+ // lower than the actual limit and increasing it in steps to guarantee that
+ // the observers are notified periodically.
+ List<InlineAllocationObserver*> inline_allocation_observers_;
Address top_on_previous_step_;
+ bool inline_allocation_observers_paused_;
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
- MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);
+ bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
+ // If we are doing inline allocation in steps, this method performs the
+ // 'step' operation. top is the memory address of the bump pointer at the
+ // last inline allocation (i.e., it determines the number of bytes actually
+ // allocated since the last step). new_top is the address of the bump pointer
+ // where the next byte is going to be allocated from. top and new_top may be
+ // different when we cross a page boundary or reset the space.
+ void InlineAllocationStep(Address top, Address new_top, Address soon_object,
+ size_t size);
+ intptr_t GetNextInlineAllocationStepSize();
+ void StartNextInlineAllocationStep();
+ void PauseInlineAllocationObservers();
+ void ResumeInlineAllocationObservers();
+
+ friend class PauseInlineAllocationObserversScope;
friend class SemiSpaceIterator;
+};
+class PauseInlineAllocationObserversScope {
public:
- TRACK_MEMORY("NewSpace")
+ explicit PauseInlineAllocationObserversScope(NewSpace* new_space)
+ : new_space_(new_space) {
+ new_space_->PauseInlineAllocationObservers();
+ }
+ ~PauseInlineAllocationObserversScope() {
+ new_space_->ResumeInlineAllocationObservers();
+ }
+
+ private:
+ NewSpace* new_space_;
+ DISALLOW_COPY_AND_ASSIGN(PauseInlineAllocationObserversScope);
+};
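+
+// Usage sketch (hypothetical call site): suppress observer steps around code
+// that rearranges top/limit without performing real allocation:
+//
+//   {
+//     PauseInlineAllocationObserversScope pause(heap->new_space());
+//     // Adjust the allocation area; observers see no intermediate steps.
+//   }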
+
+// -----------------------------------------------------------------------------
+// Compaction space that is used temporarily during compaction.
+
+class CompactionSpace : public PagedSpace {
+ public:
+ CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
+ : PagedSpace(heap, id, executable) {}
+
+ // Adds {size_in_bytes} of external memory, starting at {start}, to the
+ // space.
+ void AddExternalMemory(Address start, int size_in_bytes) {
+ IncreaseCapacity(size_in_bytes);
+ Free(start, size_in_bytes);
+ }
+
+ bool is_local() override { return true; }
+
+ void RefillFreeList() override;
+
+ protected:
+ // The space is temporary and not included in any snapshots.
+ bool snapshotable() override { return false; }
+
+ MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
+ int size_in_bytes) override;
+};
+
+
+// A collection of |CompactionSpace|s used by a single compaction task.
+class CompactionSpaceCollection : public Malloced {
+ public:
+ explicit CompactionSpaceCollection(Heap* heap)
+ : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
+ code_space_(heap, CODE_SPACE, Executability::EXECUTABLE),
+ duration_(0.0),
+ bytes_compacted_(0) {}
+
+ CompactionSpace* Get(AllocationSpace space) {
+ switch (space) {
+ case OLD_SPACE:
+ return &old_space_;
+ case CODE_SPACE:
+ return &code_space_;
+ default:
+ UNREACHABLE();
+ }
+ return nullptr;
+ }
+
+ void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
+ duration_ += duration;
+ bytes_compacted_ += bytes_compacted;
+ }
+
+ double duration() const { return duration_; }
+ intptr_t bytes_compacted() const { return bytes_compacted_; }
+
+ private:
+ CompactionSpace old_space_;
+ CompactionSpace code_space_;
+
+ // Bookkeeping.
+ double duration_;
+ intptr_t bytes_compacted_;
};
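+
+// How a compaction task might use a collection (sketch; the surrounding task
+// logic is assumed and not part of this header):
+//
+//   CompactionSpaceCollection* spaces = new CompactionSpaceCollection(heap);
+//   CompactionSpace* target = spaces->Get(OLD_SPACE);
+//   ... evacuate live objects into {target} ...
+//   spaces->ReportCompactionProgress(duration_ms, bytes_moved);
+//   heap->old_space()->MergeCompactionSpace(target);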
// -----------------------------------------------------------------------------
-// Old object space (excluding map objects)
+// Old object space (used for both the old space of ordinary objects and the
+// code space)
class OldSpace : public PagedSpace {
public:
- // Creates an old space object with a given maximum capacity.
- // The constructor does not allocate pages from OS.
- OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
- Executability executable)
- : PagedSpace(heap, max_capacity, id, executable) {}
-
- public:
- TRACK_MEMORY("OldSpace")
+ // Creates an old space object. The constructor does not allocate pages
+ // from OS.
+ OldSpace(Heap* heap, AllocationSpace id, Executability executable)
+ : PagedSpace(heap, id, executable) {}
};
@@ -2653,16 +3064,16 @@
class MapSpace : public PagedSpace {
public:
- // Creates a map space object with a maximum capacity.
- MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
+ // Creates a map space object.
+ MapSpace(Heap* heap, AllocationSpace id)
+ : PagedSpace(heap, id, NOT_EXECUTABLE),
max_map_space_pages_(kMaxMapPageIndex - 1) {}
// Given an index, returns the page address.
// TODO(1600): this limit is artificial just to keep code compilable
static const int kMaxMapPageIndex = 1 << 16;
- virtual int RoundSizeDownToObjectAlignment(int size) {
+ int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo32(Map::kSize)) {
return RoundDown(size, Map::kSize);
} else {
@@ -2670,11 +3081,12 @@
}
}
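+
+ // Worked example (hypothetical value): if Map::kSize were 88 bytes, which is
+ // not a power of two, a size of 1000 would round down to
+ // (1000 / 88) * 88 == 968.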
- protected:
- virtual void VerifyObject(HeapObject* obj);
+#ifdef VERIFY_HEAP
+ void VerifyObject(HeapObject* obj) override;
+#endif
private:
- static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
+ static const int kMapsPerPage = Page::kAllocatableMemory / Map::kSize;
// Do map space compaction if there is a page gap.
int CompactionThreshold() {
@@ -2682,73 +3094,20 @@
}
const int max_map_space_pages_;
-
- public:
- TRACK_MEMORY("MapSpace")
};
// -----------------------------------------------------------------------------
-// Old space for simple property cell objects
-
-class CellSpace : public PagedSpace {
- public:
- // Creates a property cell space object with a maximum capacity.
- CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
-
- virtual int RoundSizeDownToObjectAlignment(int size) {
- if (base::bits::IsPowerOfTwo32(Cell::kSize)) {
- return RoundDown(size, Cell::kSize);
- } else {
- return (size / Cell::kSize) * Cell::kSize;
- }
- }
-
- protected:
- virtual void VerifyObject(HeapObject* obj);
-
- public:
- TRACK_MEMORY("CellSpace")
-};
-
-
-// -----------------------------------------------------------------------------
-// Old space for all global object property cell objects
-
-class PropertyCellSpace : public PagedSpace {
- public:
- // Creates a property cell space object with a maximum capacity.
- PropertyCellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
-
- virtual int RoundSizeDownToObjectAlignment(int size) {
- if (base::bits::IsPowerOfTwo32(PropertyCell::kSize)) {
- return RoundDown(size, PropertyCell::kSize);
- } else {
- return (size / PropertyCell::kSize) * PropertyCell::kSize;
- }
- }
-
- protected:
- virtual void VerifyObject(HeapObject* obj);
-
- public:
- TRACK_MEMORY("PropertyCellSpace")
-};
-
-
-// -----------------------------------------------------------------------------
-// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
-// the large object space. A large object is allocated from OS heap with
-// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
+// Large objects ( > Page::kMaxRegularHeapObjectSize ) are allocated and
+// managed by the large object space. A large object is allocated from the OS
+// heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at offset Page::kObjectStartOffset into a page.
// Large objects do not move during garbage collections.
class LargeObjectSpace : public Space {
public:
- LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
- virtual ~LargeObjectSpace() {}
+ LargeObjectSpace(Heap* heap, AllocationSpace id);
+ virtual ~LargeObjectSpace();
// Initializes internal data structures.
bool SetUp();
@@ -2766,21 +3125,15 @@
MUST_USE_RESULT AllocationResult
AllocateRaw(int object_size, Executability executable);
- bool CanAllocateSize(int size) { return Size() + size <= max_capacity_; }
-
// Available bytes for objects in this space.
- inline intptr_t Available();
+ inline intptr_t Available() override;
- virtual intptr_t Size() { return size_; }
+ intptr_t Size() override { return size_; }
- virtual intptr_t SizeOfObjects() { return objects_size_; }
-
- intptr_t MaximumCommittedMemory() { return maximum_committed_; }
-
- intptr_t CommittedMemory() { return Size(); }
+ intptr_t SizeOfObjects() override { return objects_size_; }
// Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory();
+ size_t CommittedPhysicalMemory() override;
int PageCount() { return page_count_; }
@@ -2792,11 +3145,15 @@
// if such a page doesn't exist.
LargePage* FindPage(Address a);
+ // Clears the marking state of live objects.
+ void ClearMarkingStateOfLiveObjects();
+
// Frees unmarked objects.
void FreeUnmarkedObjects();
// Checks whether a heap object is in this space; O(1).
bool Contains(HeapObject* obj);
+ bool Contains(Address address);
// Checks whether the space is empty.
bool IsEmpty() { return first_page_ == NULL; }
@@ -2808,7 +3165,7 @@
#endif
#ifdef DEBUG
- virtual void Print();
+ void Print() override;
void ReportStatistics();
void CollectCodeStatistics();
#endif
@@ -2817,8 +3174,6 @@
bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
private:
- intptr_t max_capacity_;
- intptr_t maximum_committed_;
// The head of the linked list of large object chunks.
LargePage* first_page_;
intptr_t size_; // allocated bytes
@@ -2828,16 +3183,12 @@
HashMap chunk_map_;
friend class LargeObjectIterator;
-
- public:
- TRACK_MEMORY("LargeObjectSpace")
};
class LargeObjectIterator : public ObjectIterator {
public:
explicit LargeObjectIterator(LargeObjectSpace* space);
- LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
HeapObject* Next();
@@ -2846,7 +3197,6 @@
private:
LargePage* current_;
- HeapObjectCallback size_func_;
};
@@ -2857,50 +3207,12 @@
inline explicit PointerChunkIterator(Heap* heap);
// Return NULL when the iterator is done.
- MemoryChunk* next() {
- switch (state_) {
- case kOldPointerState: {
- if (old_pointer_iterator_.has_next()) {
- return old_pointer_iterator_.next();
- }
- state_ = kMapState;
- // Fall through.
- }
- case kMapState: {
- if (map_iterator_.has_next()) {
- return map_iterator_.next();
- }
- state_ = kLargeObjectState;
- // Fall through.
- }
- case kLargeObjectState: {
- HeapObject* heap_object;
- do {
- heap_object = lo_iterator_.Next();
- if (heap_object == NULL) {
- state_ = kFinishedState;
- return NULL;
- }
- // Fixed arrays are the only pointer-containing objects in large
- // object space.
- } while (!heap_object->IsFixedArray());
- MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
- return answer;
- }
- case kFinishedState:
- return NULL;
- default:
- break;
- }
- UNREACHABLE();
- return NULL;
- }
-
+ inline MemoryChunk* next();
private:
- enum State { kOldPointerState, kMapState, kLargeObjectState, kFinishedState };
+ enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
State state_;
- PageIterator old_pointer_iterator_;
+ PageIterator old_iterator_;
PageIterator map_iterator_;
LargeObjectIterator lo_iterator_;
};
@@ -2920,7 +3232,7 @@
static const int kMaxComments = 64;
};
#endif
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_SPACES_H_