Revert "Revert "Upgrade to 5.0.71.48"" DO NOT MERGE
This reverts commit f2e3994fa5148cc3d9946666f0b0596290192b0e,
and updates the x64 makefile so that the revert does not break
the x64 build.
FPIIM-449
Change-Id: Ib83e35bfbae6af627451c926a9650ec57c045605
(cherry picked from commit 109988c7ccb6f3fd1a58574fa3dfb88beaef6632)
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index a8102ca..c0d399f 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -19,8 +19,20 @@
namespace v8 {
namespace internal {
+class AllocationInfo;
+class AllocationObserver;
+class CompactionSpace;
class CompactionSpaceCollection;
+class FreeList;
class Isolate;
+class MemoryAllocator;
+class MemoryChunk;
+class PagedSpace;
+class SemiSpace;
+class SkipList;
+class SlotsBuffer;
+class SlotSet;
+class Space;
// -----------------------------------------------------------------------------
// Heap structures:
@@ -96,13 +108,6 @@
#define DCHECK_MAP_PAGE_INDEX(index) \
DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
-class AllocationInfo;
-class CompactionSpace;
-class FreeList;
-class MemoryAllocator;
-class MemoryChunk;
-class PagedSpace;
-class Space;
class MarkBit {
public:
@@ -284,9 +289,6 @@
};
-class SkipList;
-class SlotsBuffer;
-
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
@@ -295,10 +297,8 @@
public:
enum MemoryChunkFlags {
IS_EXECUTABLE,
- ABOUT_TO_BE_FREED,
POINTERS_TO_HERE_ARE_INTERESTING,
POINTERS_FROM_HERE_ARE_INTERESTING,
- SCAN_ON_SCAVENGE,
IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
IN_TO_SPACE,    // All pages in new space have one of these two set.
NEW_SPACE_BELOW_AGE_MARK,
@@ -307,10 +307,6 @@
NEVER_EVACUATE, // May contain immortal immutables.
POPULAR_PAGE, // Slots buffer of this page overflowed on the previous GC.
- // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
- // otherwise marking bits are still intact.
- WAS_SWEPT,
-
// Large objects can have a progress bar in their page header. These objects
// are scanned in increments and will be kept black while being scanned.
// Even if the mutator writes to them they will be kept black and a white
@@ -323,7 +319,7 @@
// candidates selection cycle.
FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
- // This flag is inteded to be used for testing.
+ // This flag is intended to be used for testing.
NEVER_ALLOCATE_ON_PAGE,
// The memory chunk is already logically freed, however the actual freeing
@@ -352,16 +348,14 @@
};
// |kSweepingDone|: The page state when sweeping is complete or sweeping must
- // not be performed on that page.
- // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will
- // not touch the page memory anymore.
- // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
+ // not be performed on that page. Sweeper threads that are done with their
+ // work will set this value and not touch the page anymore.
// |kSweepingPending|: This page is ready for parallel sweeping.
- enum ParallelSweepingState {
+ // |kSweepingInProgress|: This page is currently being swept by a sweeper thread.
+ enum ConcurrentSweepingState {
kSweepingDone,
- kSweepingFinalize,
+ kSweepingPending,
kSweepingInProgress,
- kSweepingPending
};
// Every n write barrier invocations we go to runtime even though
@@ -396,31 +390,32 @@
+ 2 * kPointerSize // base::VirtualMemory reservation_
+ kPointerSize // Address owner_
+ kPointerSize // Heap* heap_
- + kIntSize; // int store_buffer_counter_
+ + kIntSize; // int progress_bar_
static const size_t kSlotsBufferOffset =
kLiveBytesOffset + kIntSize; // int live_byte_count_
static const size_t kWriteBarrierCounterOffset =
kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_;
+ + kPointerSize // SlotSet* old_to_new_slots_;
+ + kPointerSize // SlotSet* old_to_old_slots_;
+ kPointerSize; // SkipList* skip_list_;
static const size_t kMinHeaderSize =
kWriteBarrierCounterOffset +
kIntptrSize // intptr_t write_barrier_counter_
- + kIntSize // int progress_bar_
+ kPointerSize // AtomicValue high_water_mark_
+ kPointerSize // base::Mutex* mutex_
+ kPointerSize // base::AtomicWord parallel_sweeping_
+ kPointerSize // AtomicValue parallel_compaction_
- + 5 * kPointerSize // AtomicNumber free-list statistics
+ + 2 * kPointerSize // AtomicNumber free-list statistics
+ kPointerSize // AtomicValue next_chunk_
+ kPointerSize; // AtomicValue prev_chunk_
// We add some more space to the computed header size to account for missing
// alignment requirements in our computation.
// Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
- static const size_t kHeaderSize = kMinHeaderSize + kIntSize;
+ static const size_t kHeaderSize = kMinHeaderSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
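The offset chain above is maintained by hand, which is why MemoryChunk befriends a
MemoryChunkValidator further down. A standalone sketch of the same bookkeeping
technique, with illustrative fields rather than V8's actual layout, shows how such
hand-computed offsets can be pinned with static_asserts:

    #include <cstddef>
    #include <cstdint>

    struct Header {
      size_t size;
      intptr_t flags;
      int live_byte_count;
    };

    // Offsets accumulated by hand, mirroring the kMinHeaderSize style above.
    constexpr size_t kSizeOffset = 0;
    constexpr size_t kFlagsOffset = kSizeOffset + sizeof(size_t);
    constexpr size_t kLiveBytesOffset = kFlagsOffset + sizeof(intptr_t);

    // A validator can cross-check the bookkeeping against the real layout.
    static_assert(offsetof(Header, flags) == kFlagsOffset, "layout drift");
    static_assert(offsetof(Header, live_byte_count) == kLiveBytesOffset,
                  "layout drift");

    int main() {}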
@@ -435,30 +430,16 @@
static const int kFlagsOffset = kPointerSize;
- static void IncrementLiveBytesFromMutator(HeapObject* object, int by);
+ static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
+ static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
}
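FromAddress is pure address arithmetic: since chunks are 1MB aligned, clearing the
low bits of any interior pointer yields the owning chunk's base. A self-contained
sketch of the trick; the constants here are assumptions mirroring the alignment
stated above, not V8's definitions:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kAlignment = uintptr_t{1} << 20;  // assumed 1MB alignment
    constexpr uintptr_t kAlignmentMask = kAlignment - 1;

    uintptr_t ChunkBaseFromAddress(uintptr_t addr) {
      return addr & ~kAlignmentMask;  // clear the low 20 bits
    }

    int main() {
      uintptr_t base = 5 * kAlignment;     // a 1MB-aligned chunk start
      uintptr_t interior = base + 0x1234;  // an address inside that chunk
      assert(ChunkBaseFromAddress(interior) == base);
    }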
- static const MemoryChunk* FromAddress(const byte* a) {
- return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
- ~kAlignmentMask);
- }
-
- static void IncrementLiveBytesFromGC(HeapObject* object, int by) {
- MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
- }
-
- // Only works for addresses in pointer spaces, not data or code spaces.
static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
- static inline uint32_t FastAddressToMarkbitIndex(Address addr) {
- const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
- return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
- }
-
static inline void UpdateHighWaterMark(Address mark) {
if (mark == nullptr) return;
// Need to subtract one from the mark because when a chunk is full the
@@ -477,144 +458,38 @@
bool is_valid() { return address() != NULL; }
- MemoryChunk* next_chunk() { return next_chunk_.Value(); }
-
- MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
-
- void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
-
- void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
-
- Space* owner() const {
- if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
- kPageHeaderTag) {
- return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
- kPageHeaderTag);
- } else {
- return NULL;
- }
- }
-
- void set_owner(Space* space) {
- DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
- owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
- DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
- kPageHeaderTag);
- }
-
- base::VirtualMemory* reserved_memory() { return &reservation_; }
-
- void set_reserved_memory(base::VirtualMemory* reservation) {
- DCHECK_NOT_NULL(reservation);
- reservation_.TakeControl(reservation);
- }
-
- bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
- void initialize_scan_on_scavenge(bool scan) {
- if (scan) {
- SetFlag(SCAN_ON_SCAVENGE);
- } else {
- ClearFlag(SCAN_ON_SCAVENGE);
- }
- }
- inline void set_scan_on_scavenge(bool scan);
-
- int store_buffer_counter() { return store_buffer_counter_; }
- void set_store_buffer_counter(int counter) {
- store_buffer_counter_ = counter;
- }
+ base::Mutex* mutex() { return mutex_; }
bool Contains(Address addr) {
return addr >= area_start() && addr < area_end();
}
- // Checks whether addr can be a limit of addresses in this page.
- // It's a limit if it's in the page, or if it's just after the
- // last byte of the page.
+ // Checks whether |addr| can be a limit of addresses in this page. It's a
+ // limit if it's in the page, or if it's just after the last byte of the page.
bool ContainsLimit(Address addr) {
return addr >= area_start() && addr <= area_end();
}
- void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
-
- void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
-
- void SetFlagTo(int flag, bool value) {
- if (value) {
- SetFlag(flag);
- } else {
- ClearFlag(flag);
- }
- }
-
- bool IsFlagSet(int flag) {
- return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
- }
-
- // Set or clear multiple flags at a time. The flags in the mask
- // are set to the value in "flags", the rest retain the current value
- // in flags_.
- void SetFlags(intptr_t flags, intptr_t mask) {
- flags_ = (flags_ & ~mask) | (flags & mask);
- }
-
- // Return all current flags.
- intptr_t GetFlags() { return flags_; }
-
- AtomicValue<ParallelSweepingState>& parallel_sweeping_state() {
- return parallel_sweeping_;
+ AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
+ return concurrent_sweeping_;
}
AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
return parallel_compaction_;
}
- bool TryLock() { return mutex_->TryLock(); }
-
- base::Mutex* mutex() { return mutex_; }
-
- // WaitUntilSweepingCompleted only works when concurrent sweeping is in
- // progress. In particular, when we know that right before this call a
- // sweeper thread was sweeping this page.
- void WaitUntilSweepingCompleted() {
- mutex_->Lock();
- mutex_->Unlock();
- DCHECK(SweepingCompleted());
- }
-
- bool SweepingCompleted() {
- return parallel_sweeping_state().Value() <= kSweepingFinalize;
- }
-
- // Manage live byte count (count of bytes known to be live,
- // because they are marked black).
- void ResetLiveBytes() {
- if (FLAG_gc_verbose) {
- PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
- live_byte_count_);
- }
- live_byte_count_ = 0;
- }
-
- void IncrementLiveBytes(int by) {
- if (FLAG_gc_verbose) {
- printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
- live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
- live_byte_count_ + by);
- }
- live_byte_count_ += by;
- DCHECK_GE(live_byte_count_, 0);
- DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
- }
+ // Manage live byte count, i.e., count of bytes in black objects.
+ inline void ResetLiveBytes();
+ inline void IncrementLiveBytes(int by);
int LiveBytes() {
- DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
+ DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
return live_byte_count_;
}
void SetLiveBytes(int live_bytes) {
DCHECK_GE(live_bytes, 0);
- DCHECK_LE(static_cast<unsigned>(live_bytes), size_);
+ DCHECK_LE(static_cast<size_t>(live_bytes), size_);
live_byte_count_ = live_bytes;
}
@@ -626,6 +501,35 @@
write_barrier_counter_ = counter;
}
+ size_t size() const { return size_; }
+
+ inline Heap* heap() const { return heap_; }
+
+ inline SkipList* skip_list() { return skip_list_; }
+
+ inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
+
+ inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
+
+ inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
+
+ inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
+ inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
+
+ void AllocateOldToNewSlots();
+ void ReleaseOldToNewSlots();
+ void AllocateOldToOldSlots();
+ void ReleaseOldToOldSlots();
+
+ Address area_start() { return area_start_; }
+ Address area_end() { return area_end_; }
+ int area_size() { return static_cast<int>(area_end() - area_start()); }
+
+ bool CommitArea(size_t requested);
+
+ // Approximate amount of physical memory committed for this chunk.
+ size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
+
int progress_bar() {
DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
return progress_bar_;
@@ -643,35 +547,10 @@
}
}
- size_t size() const { return size_; }
-
- void set_size(size_t size) { size_ = size; }
-
- void SetArea(Address area_start, Address area_end) {
- area_start_ = area_start;
- area_end_ = area_end;
- }
-
- Executability executable() {
- return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
- }
-
- bool InNewSpace() {
- return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
- }
-
- bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
-
- bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
-
- // Markbits support
-
inline Bitmap* markbits() {
return Bitmap::FromAddress(address() + kHeaderSize);
}
- void PrintMarkbits() { markbits()->Print(); }
-
inline uint32_t AddressToMarkbitIndex(Address addr) {
return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
}
@@ -680,10 +559,24 @@
return this->address() + (index << kPointerSizeLog2);
}
- void InsertAfter(MemoryChunk* other);
- void Unlink();
+ void PrintMarkbits() { markbits()->Print(); }
- inline Heap* heap() const { return heap_; }
+ void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
+
+ void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
+
+ bool IsFlagSet(int flag) {
+ return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
+ }
+
+ // Set or clear multiple flags at a time. The flags in the mask are set to
+ // the value in "flags", the rest retain the current value in |flags_|.
+ void SetFlags(intptr_t flags, intptr_t mask) {
+ flags_ = (flags_ & ~mask) | (flags & mask);
+ }
+
+ // Return all current flags.
+ intptr_t GetFlags() { return flags_; }
bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
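The mask semantics of SetFlags (bits in the mask take the new value, all other bits
are retained) can be checked with a tiny worked example. This standalone snippet,
not V8 code, exercises the exact expression used above:

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t flags = 0b1010;  // current flag word
      uintptr_t mask  = 0b0110;  // only these bit positions may change
      uintptr_t value = 0b0100;  // desired values for the masked bits
      flags = (flags & ~mask) | (value & mask);
      assert(flags == 0b1100);   // bit 3 kept, bits 2..1 overwritten, bit 0 kept
    }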
@@ -698,21 +591,9 @@
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
}
- bool ShouldSkipEvacuationSlotRecording() {
- return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
- }
-
- inline SkipList* skip_list() { return skip_list_; }
-
- inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
-
- inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
-
- inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
-
void MarkEvacuationCandidate() {
DCHECK(!IsFlagSet(NEVER_EVACUATE));
- DCHECK(slots_buffer_ == NULL);
+ DCHECK_NULL(slots_buffer_);
SetFlag(EVACUATION_CANDIDATE);
}
@@ -721,21 +602,62 @@
ClearFlag(EVACUATION_CANDIDATE);
}
- Address area_start() { return area_start_; }
- Address area_end() { return area_end_; }
- int area_size() { return static_cast<int>(area_end() - area_start()); }
- bool CommitArea(size_t requested);
+ bool ShouldSkipEvacuationSlotRecording() {
+ return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
+ }
- // Approximate amount of physical memory committed for this chunk.
- size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
+ Executability executable() {
+ return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+ }
- // Should be called when memory chunk is about to be freed.
- void ReleaseAllocatedMemory();
+ bool InNewSpace() {
+ return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
+ }
+
+ bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
+
+ bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
+
+ MemoryChunk* next_chunk() { return next_chunk_.Value(); }
+
+ MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
+
+ void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
+
+ void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
+
+ Space* owner() const {
+ if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
+ kPageHeaderTag) {
+ return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
+ kPageHeaderTag);
+ } else {
+ return nullptr;
+ }
+ }
+
+ void set_owner(Space* space) {
+ DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
+ owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
+ DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
+ kPageHeaderTag);
+ }
+
+ bool HasPageHeader() { return owner() != nullptr; }
+
+ void InsertAfter(MemoryChunk* other);
+ void Unlink();
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
- Executability executable, Space* owner);
+ Executability executable, Space* owner,
+ base::VirtualMemory* reservation);
+
+ // Should be called when memory chunk is about to be freed.
+ void ReleaseAllocatedMemory();
+
+ base::VirtualMemory* reserved_memory() { return &reservation_; }
size_t size_;
intptr_t flags_;
@@ -746,36 +668,45 @@
// If the chunk needs to remember its memory reservation, it is stored here.
base::VirtualMemory reservation_;
+
// The identity of the owning space. This is tagged as a failure pointer, but
// no failure can be in an object, so this can be distinguished from any entry
// in a fixed array.
Address owner_;
+
Heap* heap_;
- // Used by the store buffer to keep track of which pages to mark scan-on-
- // scavenge.
- int store_buffer_counter_;
- // Count of bytes marked black on page.
- int live_byte_count_;
- SlotsBuffer* slots_buffer_;
- SkipList* skip_list_;
- intptr_t write_barrier_counter_;
+
// Used by the incremental marker to keep track of the scanning progress in
// large objects that have a progress bar and are scanned in increments.
int progress_bar_;
+
+ // Count of bytes marked black on page.
+ int live_byte_count_;
+
+ SlotsBuffer* slots_buffer_;
+
+ // A single slot set for small pages (of size kPageSize) or an array of
+ // slot sets for large pages. In the latter case the number of entries in
+ // the array is ceil(size() / kPageSize).
+ SlotSet* old_to_new_slots_;
+ SlotSet* old_to_old_slots_;
+
+ SkipList* skip_list_;
+
+ intptr_t write_barrier_counter_;
+
// Assuming the initial allocation on a page is sequential, this counts the
// highest number of bytes ever allocated on the page.
AtomicValue<intptr_t> high_water_mark_;
base::Mutex* mutex_;
- AtomicValue<ParallelSweepingState> parallel_sweeping_;
+
+ AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
AtomicValue<ParallelCompactingState> parallel_compaction_;
// PagedSpace free-list statistics.
- AtomicNumber<intptr_t> available_in_small_free_list_;
- AtomicNumber<intptr_t> available_in_medium_free_list_;
- AtomicNumber<intptr_t> available_in_large_free_list_;
- AtomicNumber<intptr_t> available_in_huge_free_list_;
- AtomicNumber<intptr_t> non_available_small_blocks_;
+ AtomicNumber<intptr_t> available_in_free_list_;
+ AtomicNumber<intptr_t> wasted_memory_;
// next_chunk_ holds a pointer of type MemoryChunk
AtomicValue<MemoryChunk*> next_chunk_;
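As a sanity check of the sizing rule stated for old_to_new_slots_ above
(ceil(size() / kPageSize) entries for large pages), here is a standalone
ceil-division sketch; the 1MB kPageSize is an assumption for illustration:

    #include <cassert>
    #include <cstddef>

    constexpr size_t kPageSize = size_t{1} << 20;  // assumed 1MB page size

    constexpr size_t SlotSetEntries(size_t chunk_size) {
      return (chunk_size + kPageSize - 1) / kPageSize;  // ceil division
    }

    int main() {
      assert(SlotSetEntries(kPageSize) == 1);          // regular page
      assert(SlotSetEntries(3 * kPageSize) == 3);      // exact multiple
      assert(SlotSetEntries(3 * kPageSize + 1) == 4);  // partial page rounds up
    }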
@@ -789,9 +720,16 @@
friend class MemoryChunkValidator;
};
-enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };
-
+enum FreeListCategoryType {
+  kSmall,
+  kMedium,
+  kLarge,
+  kHuge,
+  kFirstCategory = kSmall,
+  kLastCategory = kHuge,
+  kNumberOfCategories = kLastCategory + 1
+};
// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
@@ -809,6 +747,9 @@
return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
}
+ // Only works for addresses in pointer spaces, not code space.
+ inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
+
// Returns the page containing an allocation top. Because an allocation
// top address can be the upper bound of the page, we need to subtract
// kPointerSize from it first. The address ranges from
@@ -873,17 +814,24 @@
void InitializeAsAnchor(PagedSpace* owner);
- bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
- void SetWasSwept() { SetFlag(WAS_SWEPT); }
- void ClearWasSwept() { ClearFlag(WAS_SWEPT); }
+ // WaitUntilSweepingCompleted only works while concurrent sweeping is in
+ // progress; in particular, it assumes that a sweeper thread was sweeping
+ // this page right before this call.
+ void WaitUntilSweepingCompleted() {
+ mutex_->Lock();
+ mutex_->Unlock();
+ DCHECK(SweepingDone());
+ }
+
+ bool SweepingDone() {
+ return concurrent_sweeping_state().Value() == kSweepingDone;
+ }
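The Lock()/Unlock() pair in WaitUntilSweepingCompleted acts as a completion barrier
rather than mutual exclusion: a sweeper holds the page mutex for the whole sweep, so
being able to acquire the mutex implies the sweep has finished. A minimal standalone
sketch of the idiom, with std::mutex standing in for base::Mutex:

    #include <atomic>
    #include <mutex>

    enum ConcurrentSweepingState { kSweepingDone, kSweepingPending, kSweepingInProgress };

    struct Page {
      std::mutex mutex;
      std::atomic<ConcurrentSweepingState> state{kSweepingPending};

      // Runs on a sweeper thread; the mutex is held for the entire sweep.
      void Sweep() {
        std::lock_guard<std::mutex> guard(mutex);
        state = kSweepingInProgress;
        // ... sweep the page ...
        state = kSweepingDone;
      }

      // Runs on the main thread. As the comment above notes, this is only a
      // valid wait if a sweeper already holds the mutex when we get here.
      void WaitUntilSweepingCompleted() {
        mutex.lock();    // blocks until the sweeper releases the mutex
        mutex.unlock();
      }
    };

    int main() {}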
void ResetFreeListStatistics();
int LiveBytesFromFreeList() {
- return static_cast<int>(
- area_size() - non_available_small_blocks() -
- available_in_small_free_list() - available_in_medium_free_list() -
- available_in_large_free_list() - available_in_huge_free_list());
+ return static_cast<int>(area_size() - wasted_memory() -
+ available_in_free_list());
}
#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
@@ -891,50 +839,11 @@
void set_##name(type name) { name##_.SetValue(name); } \
void add_##name(type name) { name##_.Increment(name); }
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list)
#undef FRAGMENTATION_STATS_ACCESSORS
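For readers unfamiliar with the token-pasting pattern, FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory)
roughly expands to the following accessors over the wasted_memory_ atomic (only the
setter and adder lines of the macro are visible in this hunk; the getter is analogous):

    void set_wasted_memory(intptr_t wasted_memory) { wasted_memory_.SetValue(wasted_memory); }
    void add_wasted_memory(intptr_t wasted_memory) { wasted_memory_.Increment(wasted_memory); }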
- void add_available_in_free_list(FreeListCategoryType type, intptr_t bytes) {
- switch (type) {
- case kSmall:
- add_available_in_small_free_list(bytes);
- break;
- case kMedium:
- add_available_in_medium_free_list(bytes);
- break;
- case kLarge:
- add_available_in_large_free_list(bytes);
- break;
- case kHuge:
- add_available_in_huge_free_list(bytes);
- break;
- default:
- UNREACHABLE();
- }
- }
-
- intptr_t available_in_free_list(FreeListCategoryType type) {
- switch (type) {
- case kSmall:
- return available_in_small_free_list();
- case kMedium:
- return available_in_medium_free_list();
- case kLarge:
- return available_in_large_free_list();
- case kHuge:
- return available_in_huge_free_list();
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return 0;
- }
-
#ifdef DEBUG
void Print();
#endif // DEBUG
@@ -965,7 +874,9 @@
class Space : public Malloced {
public:
Space(Heap* heap, AllocationSpace id, Executability executable)
- : heap_(heap),
+ : allocation_observers_(new List<AllocationObserver*>()),
+ allocation_observers_paused_(false),
+ heap_(heap),
id_(id),
executable_(executable),
committed_(0),
@@ -981,6 +892,26 @@
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
+ virtual void AddAllocationObserver(AllocationObserver* observer) {
+ allocation_observers_->Add(observer);
+ }
+
+ virtual void RemoveAllocationObserver(AllocationObserver* observer) {
+ bool removed = allocation_observers_->RemoveElement(observer);
+ USE(removed);
+ DCHECK(removed);
+ }
+
+ virtual void PauseAllocationObservers() {
+ allocation_observers_paused_ = true;
+ }
+
+ virtual void ResumeAllocationObservers() {
+ allocation_observers_paused_ = false;
+ }
+
+ void AllocationStep(Address soon_object, int size);
+
// Return the total amount of committed memory for this space, i.e.,
// allocatable memory and page headers.
virtual intptr_t CommittedMemory() { return committed_; }
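The new hooks make Space a small observer registry. A standalone sketch of the same
shape, with std::vector standing in for V8's List, assert for the DCHECK, and
AllocationObserver reduced to an empty stand-in:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct AllocationObserver {};  // stand-in for the V8 class of the same name

    class Space {
     public:
      void AddAllocationObserver(AllocationObserver* observer) {
        allocation_observers_.push_back(observer);
      }

      void RemoveAllocationObserver(AllocationObserver* observer) {
        auto it = std::find(allocation_observers_.begin(),
                            allocation_observers_.end(), observer);
        assert(it != allocation_observers_.end());  // mirrors the DCHECK above
        allocation_observers_.erase(it);
      }

      void PauseAllocationObservers() { allocation_observers_paused_ = true; }
      void ResumeAllocationObservers() { allocation_observers_paused_ = false; }

     private:
      std::vector<AllocationObserver*> allocation_observers_;
      bool allocation_observers_paused_ = false;
    };

    int main() {
      Space space;
      AllocationObserver observer;
      space.AddAllocationObserver(&observer);
      space.RemoveAllocationObserver(&observer);
    }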
@@ -1027,6 +958,9 @@
DCHECK_GE(committed_, 0);
}
+ v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_;
+ bool allocation_observers_paused_;
+
private:
Heap* heap_;
AllocationSpace id_;
@@ -1628,12 +1562,12 @@
// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
public:
- explicit FreeListCategory(FreeList* owner, FreeListCategoryType type)
- : type_(type),
- top_(nullptr),
- end_(nullptr),
- available_(0),
- owner_(owner) {}
+ FreeListCategory() : top_(nullptr), end_(nullptr), available_(0) {}
+
+ void Initialize(FreeList* owner, FreeListCategoryType type) {
+ owner_ = owner;
+ type_ = type;
+ }
// Concatenates {category} into {this}.
//
@@ -1763,8 +1697,11 @@
// Return the number of bytes available on the free list.
intptr_t Available() {
- return small_list_.available() + medium_list_.available() +
- large_list_.available() + huge_list_.available();
+ intptr_t available = 0;
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ available += category_[i].available();
+ }
+ return available;
}
// The method tries to find a {FreeSpace} node of at least {size_in_bytes}
@@ -1776,8 +1713,10 @@
MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
bool IsEmpty() {
- return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
- large_list_.IsEmpty() && huge_list_.IsEmpty();
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ if (!category_[i].IsEmpty()) return false;
+ }
+ return true;
}
// Used after booting the VM.
@@ -1813,29 +1752,36 @@
FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
- switch (category) {
- case kSmall:
- return &small_list_;
- case kMedium:
- return &medium_list_;
- case kLarge:
- return &large_list_;
- case kHuge:
- return &huge_list_;
- default:
- UNREACHABLE();
+ return &category_[category];
+ }
+
+ FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
+ if (size_in_bytes <= kSmallListMax) {
+ return kSmall;
+ } else if (size_in_bytes <= kMediumListMax) {
+ return kMedium;
+ } else if (size_in_bytes <= kLargeListMax) {
+ return kLarge;
}
- UNREACHABLE();
- return nullptr;
+ return kHuge;
+ }
+
+ FreeListCategoryType SelectFastAllocationFreeListCategoryType(
+ size_t size_in_bytes) {
+ if (size_in_bytes <= kSmallAllocationMax) {
+ return kSmall;
+ } else if (size_in_bytes <= kMediumAllocationMax) {
+ return kMedium;
+ } else if (size_in_bytes <= kLargeAllocationMax) {
+ return kLarge;
+ }
+ return kHuge;
}
PagedSpace* owner_;
base::Mutex mutex_;
intptr_t wasted_bytes_;
- FreeListCategory small_list_;
- FreeListCategory medium_list_;
- FreeListCategory large_list_;
- FreeListCategory huge_list_;
+ FreeListCategory category_[kNumberOfCategories];
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
@@ -1959,10 +1905,8 @@
// Checks whether an object/address is in this space.
inline bool Contains(Address a);
- inline bool Contains(HeapObject* o);
- // Unlike Contains() methods it is safe to call this one even for addresses
- // of unmapped memory.
- bool ContainsSafe(Address addr);
+ inline bool Contains(Object* o);
+ bool ContainsSlow(Address addr);
// Given an address occupied by a live object, return that object if it is
// in this space, or a Smi if it is not. The implementation iterates over
@@ -2085,7 +2029,7 @@
void IncreaseCapacity(int size);
// Releases an unused page and shrinks the space.
- void ReleasePage(Page* page);
+ void ReleasePage(Page* page, bool evict_free_list_items);
// The dummy page that anchors the linked list of pages.
Page* anchor() { return &anchor_; }
@@ -2112,23 +2056,12 @@
static void ResetCodeStatistics(Isolate* isolate);
#endif
- // Evacuation candidates are swept by evacuator. Needs to return a valid
- // result before _and_ after evacuation has finished.
- static bool ShouldBeSweptBySweeperThreads(Page* p) {
- return !p->IsEvacuationCandidate() &&
- !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
- }
-
// This function tries to steal size_in_bytes memory from the sweeper threads'
// free-lists. If it does not succeed in stealing enough memory, it will wait
// for the sweeper threads to finish sweeping.
// It returns true when sweeping is completed and false otherwise.
bool EnsureSweeperProgress(intptr_t size_in_bytes);
- void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; }
-
- Page* end_of_unswept_pages() { return end_of_unswept_pages_; }
-
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
@@ -2148,9 +2081,6 @@
// e.g., removes its bump pointer area and resets statistics.
void MergeCompactionSpace(CompactionSpace* other);
- void DivideUponCompactionSpaces(CompactionSpaceCollection** other, int num,
- intptr_t limit = kCompactionMemoryWanted);
-
// Refills the free list from the corresponding free list filled by the
// sweeper.
virtual void RefillFreeList();
@@ -2158,8 +2088,6 @@
protected:
void AddMemory(Address start, intptr_t size);
- FreeSpace* TryRemoveMemory(intptr_t size_in_bytes);
-
void MoveOverFreeMemory(PagedSpace* other);
// PagedSpaces that should be included in snapshots have different, i.e.,
@@ -2212,11 +2140,6 @@
// Normal allocation information.
AllocationInfo allocation_info_;
- // The sweeper threads iterate over the list of pointer and data space pages
- // and sweep these pages concurrently. They will stop sweeping after the
- // end_of_unswept_pages_ page.
- Page* end_of_unswept_pages_;
-
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
@@ -2266,17 +2189,13 @@
enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
-class SemiSpace;
-
-
class NewSpacePage : public MemoryChunk {
public:
// GC related flags copied from from-space to to-space when
// flipping semispaces.
static const intptr_t kCopyOnFlipFlagsMask =
(1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
- (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
- (1 << MemoryChunk::SCAN_ON_SCAVENGE);
+ (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
static const int kAreaSize = Page::kAllocatableMemory;
@@ -2349,31 +2268,39 @@
// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
-// A semispace is a contiguous chunk of memory holding page-like memory
-// chunks. The mark-compact collector uses the memory of the first page in
-// the from space as a marking stack when tracing live objects.
-
+// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
+// The mark-compact collector uses the memory of the first page in the from
+// space as a marking stack when tracing live objects.
class SemiSpace : public Space {
public:
- // Constructor.
+ static void Swap(SemiSpace* from, SemiSpace* to);
+
SemiSpace(Heap* heap, SemiSpaceId semispace)
: Space(heap, NEW_SPACE, NOT_EXECUTABLE),
- start_(NULL),
- age_mark_(NULL),
+ current_capacity_(0),
+ maximum_capacity_(0),
+ minimum_capacity_(0),
+ start_(nullptr),
+ age_mark_(nullptr),
+ committed_(false),
id_(semispace),
anchor_(this),
- current_page_(NULL) {}
+ current_page_(nullptr) {}
- // Sets up the semispace using the given chunk.
- void SetUp(Address start, int initial_capacity, int target_capacity,
- int maximum_capacity);
+ inline bool Contains(HeapObject* o);
+ inline bool Contains(Object* o);
+ inline bool ContainsSlow(Address a);
+
+ // Creates a space in the young generation. The constructor does not
+ // allocate memory from the OS.
+ void SetUp(Address start, int initial_capacity, int maximum_capacity);
// Tear down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
void TearDown();
// True if the space has been set up but not torn down.
- bool HasBeenSetUp() { return start_ != NULL; }
+ bool HasBeenSetUp() { return start_ != nullptr; }
// Grow the semispace to the new capacity. The new capacity
// requested must be larger than the current capacity and less than
@@ -2385,12 +2312,9 @@
// semispace and less than the current capacity.
bool ShrinkTo(int new_capacity);
- // Sets the total capacity. Only possible when the space is not committed.
- bool SetTotalCapacity(int new_capacity);
-
// Returns the start address of the first page of the space.
Address space_start() {
- DCHECK(anchor_.next_page() != &anchor_);
+ DCHECK_NE(anchor_.next_page(), &anchor_);
return anchor_.next_page()->area_start();
}
@@ -2417,18 +2341,26 @@
Address age_mark() { return age_mark_; }
void set_age_mark(Address mark);
- // True if the address is in the address range of this semispace (not
- // necessarily below the allocation pointer).
- bool Contains(Address a) {
- return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
- reinterpret_cast<uintptr_t>(start_);
- }
+ bool is_committed() { return committed_; }
+ bool Commit();
+ bool Uncommit();
- // True if the object is a heap object in the address range of this
- // semispace (not necessarily below the allocation pointer).
- bool Contains(Object* o) {
- return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
- }
+ NewSpacePage* first_page() { return anchor_.next_page(); }
+ NewSpacePage* current_page() { return current_page_; }
+
+ // Returns the current total capacity of the semispace.
+ int current_capacity() { return current_capacity_; }
+
+ // Returns the maximum total capacity of the semispace.
+ int maximum_capacity() { return maximum_capacity_; }
+
+ // Returns the initial capacity of the semispace.
+ int minimum_capacity() { return minimum_capacity_; }
+
+ SemiSpaceId id() { return id_; }
+
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory() override;
// If we don't have these here then SemiSpace will be abstract. However
// they should never be called:
@@ -2445,18 +2377,6 @@
return 0;
}
-
- bool is_committed() { return committed_; }
- bool Commit();
- bool Uncommit();
-
- NewSpacePage* first_page() { return anchor_.next_page(); }
- NewSpacePage* current_page() { return current_page_; }
-
-#ifdef VERIFY_HEAP
- virtual void Verify();
-#endif
-
#ifdef DEBUG
void Print() override;
// Validate a range of addresses in a SemiSpace.
@@ -2468,51 +2388,34 @@
inline static void AssertValidRange(Address from, Address to) {}
#endif
- // Returns the current total capacity of the semispace.
- int TotalCapacity() { return total_capacity_; }
-
- // Returns the target for total capacity of the semispace.
- int TargetCapacity() { return target_capacity_; }
-
- // Returns the maximum total capacity of the semispace.
- int MaximumTotalCapacity() { return maximum_total_capacity_; }
-
- // Returns the initial capacity of the semispace.
- int InitialTotalCapacity() { return initial_total_capacity_; }
-
- SemiSpaceId id() { return id_; }
-
- static void Swap(SemiSpace* from, SemiSpace* to);
-
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory() override;
+#ifdef VERIFY_HEAP
+ virtual void Verify();
+#endif
private:
- // Flips the semispace between being from-space and to-space.
- // Copies the flags into the masked positions on all pages in the space.
- void FlipPages(intptr_t flags, intptr_t flag_mask);
-
- // Updates Capacity and MaximumCommitted based on new capacity.
- void SetCapacity(int new_capacity);
-
NewSpacePage* anchor() { return &anchor_; }
- // The current and maximum total capacity of the space.
- int total_capacity_;
- int target_capacity_;
- int maximum_total_capacity_;
- int initial_total_capacity_;
+ void set_current_capacity(int new_capacity) {
+ current_capacity_ = new_capacity;
+ }
+
+ // Copies the flags into the masked positions on all pages in the space.
+ void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
+
+ // The currently committed space capacity.
+ int current_capacity_;
+
+ // The maximum capacity that can be used by this space.
+ int maximum_capacity_;
+
+ // The minimum capacity for the space. A space cannot shrink below this size.
+ int minimum_capacity_;
// The start address of the space.
Address start_;
// Used to govern object promotion during mark-compact collection.
Address age_mark_;
- // Masks and comparison values to test for containment in this semispace.
- uintptr_t address_mask_;
- uintptr_t object_mask_;
- uintptr_t object_expected_;
-
bool committed_;
SemiSpaceId id_;
@@ -2576,54 +2479,6 @@
NewSpacePage* last_page_;
};
-// -----------------------------------------------------------------------------
-// Allows observation of inline allocation in the new space.
-class InlineAllocationObserver {
- public:
- explicit InlineAllocationObserver(intptr_t step_size)
- : step_size_(step_size), bytes_to_next_step_(step_size) {
- DCHECK(step_size >= kPointerSize);
- }
- virtual ~InlineAllocationObserver() {}
-
- private:
- intptr_t step_size() const { return step_size_; }
- intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
-
- // Pure virtual method provided by the subclasses that gets called when at
- // least step_size bytes have been allocated. soon_object is the address just
- // allocated (but not yet initialized.) size is the size of the object as
- // requested (i.e. w/o the alignment fillers). Some complexities to be aware
- // of:
- // 1) soon_object will be nullptr in cases where we end up observing an
- // allocation that happens to be a filler space (e.g. page boundaries.)
- // 2) size is the requested size at the time of allocation. Right-trimming
- // may change the object size dynamically.
- // 3) soon_object may actually be the first object in an allocation-folding
- // group. In such a case size is the size of the group rather than the
- // first object.
- virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
-
- // Called each time the new space does an inline allocation step. This may be
- // more frequently than the step_size we are monitoring (e.g. when there are
- // multiple observers, or when page or space boundary is encountered.)
- void InlineAllocationStep(int bytes_allocated, Address soon_object,
- size_t size) {
- bytes_to_next_step_ -= bytes_allocated;
- if (bytes_to_next_step_ <= 0) {
- Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
- size);
- bytes_to_next_step_ = step_size_;
- }
- }
-
- intptr_t step_size_;
- intptr_t bytes_to_next_step_;
-
- friend class NewSpace;
-
- DISALLOW_COPY_AND_ASSIGN(InlineAllocationObserver);
-};
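The class is removed here, but its stepping semantics carry over to the new
AllocationObserver (defined outside this patch): Step() still fires after roughly
every step_size allocated bytes. A self-contained rendering of the bookkeeping from
the InlineAllocationStep() body deleted above:

    #include <cstdio>

    // Mirrors the InlineAllocationStep() arithmetic of the removed class.
    class StepCounter {
     public:
      explicit StepCounter(long step_size)
          : step_size_(step_size), bytes_to_next_step_(step_size) {}

      void InlineAllocationStep(int bytes_allocated) {
        bytes_to_next_step_ -= bytes_allocated;
        if (bytes_to_next_step_ <= 0) {
          // Reports all bytes allocated since the last step, which can
          // overshoot step_size_ when an allocation straddles the boundary.
          Step(static_cast<int>(step_size_ - bytes_to_next_step_));
          bytes_to_next_step_ = step_size_;
        }
      }

     private:
      void Step(int bytes_allocated) {
        std::printf("step fired after %d bytes\n", bytes_allocated);
      }

      long step_size_;
      long bytes_to_next_step_;
    };

    int main() {
      StepCounter counter(/*step_size=*/1000);
      // Step() fires on the 4th and 8th call, reporting 1200 bytes each time.
      for (int i = 0; i < 10; i++) counter.InlineAllocationStep(300);
    }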
// -----------------------------------------------------------------------------
// The young generation space.
@@ -2639,8 +2494,11 @@
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
reservation_(),
- top_on_previous_step_(0),
- inline_allocation_observers_paused_(false) {}
+ top_on_previous_step_(0) {}
+
+ inline bool Contains(HeapObject* o);
+ inline bool ContainsSlow(Address a);
+ inline bool Contains(Object* o);
// Sets up the new space using the given chunk.
bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
@@ -2661,24 +2519,9 @@
// their maximum capacity.
void Grow();
- // Grow the capacity of the semispaces by one page.
- bool GrowOnePage();
-
// Shrink the capacity of the semispaces.
void Shrink();
- // True if the address or object lies in the address range of either
- // semispace (not necessarily below the allocation pointer).
- bool Contains(Address a) {
- return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
- reinterpret_cast<uintptr_t>(start_);
- }
-
- bool Contains(Object* o) {
- Address a = reinterpret_cast<Address>(o);
- return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
- }
-
// Return the allocated bytes in the active semispace.
intptr_t Size() override {
return pages_used_ * NewSpacePage::kAreaSize +
@@ -2692,16 +2535,16 @@
// Return the allocatable capacity of a semispace.
intptr_t Capacity() {
- SLOW_DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
- return (to_space_.TotalCapacity() / Page::kPageSize) *
+ SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
+ return (to_space_.current_capacity() / Page::kPageSize) *
NewSpacePage::kAreaSize;
}
// Return the current size of a semispace, allocatable and non-allocatable
// memory.
intptr_t TotalCapacity() {
- DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
- return to_space_.TotalCapacity();
+ DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
+ return to_space_.current_capacity();
}
// Committed memory for NewSpace is the committed memory of both semi-spaces
@@ -2742,18 +2585,16 @@
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
- DCHECK(to_space_.MaximumTotalCapacity() ==
- from_space_.MaximumTotalCapacity());
- return to_space_.MaximumTotalCapacity();
+ DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
+ return to_space_.maximum_capacity();
}
bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
// Returns the initial capacity of a semispace.
int InitialTotalCapacity() {
- DCHECK(to_space_.InitialTotalCapacity() ==
- from_space_.InitialTotalCapacity());
- return to_space_.InitialTotalCapacity();
+ DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
+ return to_space_.minimum_capacity();
}
// Return the address of the allocation pointer in the active semispace.
@@ -2779,18 +2620,6 @@
// The start address of the space and a bit mask. Anding an address in the
// new space with the mask will result in the start address.
Address start() { return start_; }
- uintptr_t mask() { return address_mask_; }
-
- INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
- DCHECK(Contains(addr));
- DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) ||
- IsAligned(OffsetFrom(addr) - 1, kPointerSize));
- return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
- }
-
- INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
- return reinterpret_cast<Address>(index << kPointerSizeLog2);
- }
// The allocation top and limit address.
Address* allocation_top_address() { return allocation_info_.top_address(); }
@@ -2815,22 +2644,26 @@
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
+ // When inline allocation stepping is active, either because of incremental
+ // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
+ // inline allocation every once in a while. This is done by setting
+ // allocation_info_.limit to be lower than the actual limit and increasing
+ // it in steps to guarantee that the observers are notified periodically.
void UpdateInlineAllocationLimit(int size_in_bytes);
- // Allows observation of inline allocation. The observer->Step() method gets
- // called after every step_size bytes have been allocated (approximately).
- // This works by adjusting the allocation limit to a lower value and adjusting
- // it after each step.
- void AddInlineAllocationObserver(InlineAllocationObserver* observer);
-
- // Removes a previously installed observer.
- void RemoveInlineAllocationObserver(InlineAllocationObserver* observer);
-
void DisableInlineAllocationSteps() {
top_on_previous_step_ = 0;
UpdateInlineAllocationLimit(0);
}
+ // Allows observation of inline allocation. The observer->Step() method gets
+ // called after every step_size bytes have been allocated (approximately).
+ // This works by adjusting the allocation limit to a lower value and adjusting
+ // it after each step.
+ void AddAllocationObserver(AllocationObserver* observer) override;
+
+ void RemoveAllocationObserver(AllocationObserver* observer) override;
+
// Get the extent of the inactive semispace (for use as a marking stack,
// or to zap it). Notice: space-addresses are not necessarily on the
// same page, so FromSpaceStart() might be above FromSpaceEnd().
@@ -2843,18 +2676,10 @@
Address ToSpaceStart() { return to_space_.space_start(); }
Address ToSpaceEnd() { return to_space_.space_end(); }
- inline bool ToSpaceContains(Address address) {
- return to_space_.Contains(address);
- }
- inline bool FromSpaceContains(Address address) {
- return from_space_.Contains(address);
- }
-
- // True if the object is a heap object in the address range of the
- // respective semispace (not necessarily below the allocation pointer of the
- // semispace).
- inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
- inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
+ inline bool ToSpaceContainsSlow(Address a);
+ inline bool FromSpaceContainsSlow(Address a);
+ inline bool ToSpaceContains(Object* o);
+ inline bool FromSpaceContains(Object* o);
// Try to switch the active semispace to a new, empty page.
// Returns false if this isn't possible or reasonable (i.e., there
@@ -2901,6 +2726,9 @@
SemiSpace* active_space() { return &to_space_; }
+ void PauseAllocationObservers() override;
+ void ResumeAllocationObservers() override;
+
private:
// Update allocation info to match the current to-space page.
void UpdateAllocationInfo();
@@ -2918,22 +2746,12 @@
// Start address and bit mask for containment testing.
Address start_;
- uintptr_t address_mask_;
- uintptr_t object_mask_;
- uintptr_t object_expected_;
// Allocation pointer and limit for normal allocation and allocation during
// mark-compact collection.
AllocationInfo allocation_info_;
- // When inline allocation stepping is active, either because of incremental
- // marking or because of idle scavenge, we 'interrupt' inline allocation every
- // once in a while. This is done by setting allocation_info_.limit to be lower
- // than the actual limit and and increasing it in steps to guarantee that the
- // observers are notified periodically.
- List<InlineAllocationObserver*> inline_allocation_observers_;
Address top_on_previous_step_;
- bool inline_allocation_observers_paused_;
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
@@ -2950,26 +2768,18 @@
size_t size);
intptr_t GetNextInlineAllocationStepSize();
void StartNextInlineAllocationStep();
- void PauseInlineAllocationObservers();
- void ResumeInlineAllocationObservers();
- friend class PauseInlineAllocationObserversScope;
friend class SemiSpaceIterator;
};
-class PauseInlineAllocationObserversScope {
+class PauseAllocationObserversScope {
public:
- explicit PauseInlineAllocationObserversScope(NewSpace* new_space)
- : new_space_(new_space) {
- new_space_->PauseInlineAllocationObservers();
- }
- ~PauseInlineAllocationObserversScope() {
- new_space_->ResumeInlineAllocationObservers();
- }
+ explicit PauseAllocationObserversScope(Heap* heap);
+ ~PauseAllocationObserversScope();
private:
- NewSpace* new_space_;
- DISALLOW_COPY_AND_ASSIGN(PauseInlineAllocationObserversScope);
+ Heap* heap_;
+ DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
};
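A standalone sketch of the RAII pattern this scope implements; the one-field Heap
below is a stand-in, and the real constructor presumably forwards to the
Pause/ResumeAllocationObservers() hooks on the heap's spaces:

    #include <cstdio>

    struct Heap {
      bool observers_paused = false;
      void PauseAllocationObservers() { observers_paused = true; }
      void ResumeAllocationObservers() { observers_paused = false; }
    };

    class PauseAllocationObserversScope {
     public:
      explicit PauseAllocationObserversScope(Heap* heap) : heap_(heap) {
        heap_->PauseAllocationObservers();
      }
      ~PauseAllocationObserversScope() { heap_->ResumeAllocationObservers(); }

      // Equivalent to DISALLOW_COPY_AND_ASSIGN.
      PauseAllocationObserversScope(const PauseAllocationObserversScope&) = delete;
      PauseAllocationObserversScope& operator=(
          const PauseAllocationObserversScope&) = delete;

     private:
      Heap* heap_;
    };

    int main() {
      Heap heap;
      {
        PauseAllocationObserversScope pause(&heap);
        std::printf("paused: %d\n", heap.observers_paused);  // prints 1
      }
      std::printf("paused: %d\n", heap.observers_paused);    // prints 0
    }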
// -----------------------------------------------------------------------------
@@ -2980,12 +2790,6 @@
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
: PagedSpace(heap, id, executable) {}
- // Adds external memory starting at {start} of {size_in_bytes} to the space.
- void AddExternalMemory(Address start, int size_in_bytes) {
- IncreaseCapacity(size_in_bytes);
- Free(start, size_in_bytes);
- }
-
bool is_local() override { return true; }
void RefillFreeList() override;
@@ -3004,9 +2808,7 @@
public:
explicit CompactionSpaceCollection(Heap* heap)
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
- code_space_(heap, CODE_SPACE, Executability::EXECUTABLE),
- duration_(0.0),
- bytes_compacted_(0) {}
+ code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
CompactionSpace* Get(AllocationSpace space) {
switch (space) {
@@ -3021,21 +2823,9 @@
return nullptr;
}
- void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
- duration_ += duration;
- bytes_compacted_ += bytes_compacted;
- }
-
- double duration() const { return duration_; }
- intptr_t bytes_compacted() const { return bytes_compacted_; }
-
private:
CompactionSpace old_space_;
CompactionSpace code_space_;
-
- // Book keeping.
- double duration_;
- intptr_t bytes_compacted_;
};
@@ -3153,7 +2943,9 @@
// Checks whether a heap object is in this space; O(1).
bool Contains(HeapObject* obj);
- bool Contains(Address address);
+ // Checks whether an address is in the object area in this space. Iterates
+ // all objects in the space. May be slow.
+ bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
// Checks whether the space is empty.
bool IsEmpty() { return first_page_ == NULL; }
@@ -3169,9 +2961,6 @@
void ReportStatistics();
void CollectCodeStatistics();
#endif
- // Checks whether an address is in the object area in this space. It
- // iterates all objects in the space. May be slow.
- bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
private:
// The head of the linked list of large object chunks.