Upgrade V8 to version 4.9.385.28
https://chromium.googlesource.com/v8/v8/+/4.9.385.28
FPIIM-449
Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index e26e06c..cfb2d9d 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -16,18 +16,24 @@
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
+// Callback function to mark an object in a given heap.
+typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);
+
// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;
+class SlotsBuffer;
+class SlotsBufferAllocator;
-class Marking {
+class Marking : public AllStatic {
public:
- explicit Marking(Heap* heap) : heap_(heap) {}
-
- INLINE(static MarkBit MarkBitFrom(Address addr));
+ INLINE(static MarkBit MarkBitFrom(Address addr)) {
+ MemoryChunk* p = MemoryChunk::FromAddress(addr);
+ return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
+ }
INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
return MarkBitFrom(reinterpret_cast<Address>(obj));
@@ -39,35 +45,71 @@
return !mark_bit.Get() && mark_bit.Next().Get();
}
- // Black markbits: 10 - this is required by the sweeper.
+ // Black markbits: 11
static const char* kBlackBitPattern;
INLINE(static bool IsBlack(MarkBit mark_bit)) {
- return mark_bit.Get() && !mark_bit.Next().Get();
+ return mark_bit.Get() && mark_bit.Next().Get();
}
// White markbits: 00 - this is required by the mark bit clearer.
static const char* kWhiteBitPattern;
- INLINE(static bool IsWhite(MarkBit mark_bit)) { return !mark_bit.Get(); }
+ INLINE(static bool IsWhite(MarkBit mark_bit)) {
+ DCHECK(!IsImpossible(mark_bit));
+ return !mark_bit.Get();
+ }
- // Grey markbits: 11
+ // Grey markbits: 10
static const char* kGreyBitPattern;
INLINE(static bool IsGrey(MarkBit mark_bit)) {
- return mark_bit.Get() && mark_bit.Next().Get();
+ return mark_bit.Get() && !mark_bit.Next().Get();
}
+ // IsBlackOrGrey assumes that the first bit is set for black or grey
+ // objects.
+ INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) { return mark_bit.Get(); }
+
INLINE(static void MarkBlack(MarkBit mark_bit)) {
mark_bit.Set();
+ mark_bit.Next().Set();
+ }
+
+ INLINE(static void MarkWhite(MarkBit mark_bit)) {
+ mark_bit.Clear();
mark_bit.Next().Clear();
}
- INLINE(static void BlackToGrey(MarkBit markbit)) { markbit.Next().Set(); }
+ INLINE(static void BlackToWhite(MarkBit markbit)) {
+ DCHECK(IsBlack(markbit));
+ markbit.Clear();
+ markbit.Next().Clear();
+ }
+
+ INLINE(static void GreyToWhite(MarkBit markbit)) {
+ DCHECK(IsGrey(markbit));
+ markbit.Clear();
+ markbit.Next().Clear();
+ }
+
+ INLINE(static void BlackToGrey(MarkBit markbit)) {
+ DCHECK(IsBlack(markbit));
+ markbit.Next().Clear();
+ }
INLINE(static void WhiteToGrey(MarkBit markbit)) {
+ DCHECK(IsWhite(markbit));
+ markbit.Set();
+ }
+
+ INLINE(static void WhiteToBlack(MarkBit markbit)) {
+ DCHECK(IsWhite(markbit));
markbit.Set();
markbit.Next().Set();
}
- INLINE(static void GreyToBlack(MarkBit markbit)) { markbit.Next().Clear(); }
+ INLINE(static void GreyToBlack(MarkBit markbit)) {
+ DCHECK(IsGrey(markbit));
+ markbit.Next().Set();
+ }
INLINE(static void BlackToGrey(HeapObject* obj)) {
BlackToGrey(MarkBitFrom(obj));
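The hunk above swaps the black and grey bit patterns: black becomes 11, grey becomes 10, white stays 00, and 01 remains the impossible state. With the first bit now meaning "reachable at all", IsBlackOrGrey reduces to a single bit test. A minimal sketch of the new encoding, with two bools standing in for a MarkBit and its Next() bit (hypothetical helper, not V8 API):

    // Decodes the new two-bit mark encoding: first bit = MarkBit,
    // second bit = MarkBit::Next().
    enum class Color { kWhite, kGrey, kBlack, kImpossible };

    Color Decode(bool first, bool second) {
      if (first) return second ? Color::kBlack : Color::kGrey;  // 11 / 10
      return second ? Color::kImpossible : Color::kWhite;       // 01 / 00
    }

Note that the forward progression white -> grey -> black now only ever sets bits (WhiteToGrey sets the first bit, GreyToBlack the second), which is what makes the single-bit IsBlackOrGrey test sound during marking.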
@@ -75,10 +117,10 @@
INLINE(static void AnyToGrey(MarkBit markbit)) {
markbit.Set();
- markbit.Next().Set();
+ markbit.Next().Clear();
}
- void TransferMark(Address old_start, Address new_start);
+ static void TransferMark(Heap* heap, Address old_start, Address new_start);
#ifdef DEBUG
enum ObjectColor {
@@ -119,20 +161,19 @@
INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
MarkBit from_mark_bit = MarkBitFrom(from);
MarkBit to_mark_bit = MarkBitFrom(to);
- bool is_black = false;
+ DCHECK(Marking::IsWhite(to_mark_bit));
if (from_mark_bit.Get()) {
to_mark_bit.Set();
- is_black = true; // Looks black so far.
+ if (from_mark_bit.Next().Get()) {
+ to_mark_bit.Next().Set();
+ return true;
+ }
}
- if (from_mark_bit.Next().Get()) {
- to_mark_bit.Next().Set();
- is_black = false; // Was actually gray.
- }
- return is_black;
+ return false;
}
private:
- Heap* heap_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
};
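With that encoding, the rewritten TransferColor copies at most two bits and reports blackness directly. Its behaviour, spelled out as a table (a sketch of the logic above, not additional V8 code):

    // Behaviour of the new TransferColor (the DCHECK requires the
    // destination to start white); "returns" is the function result:
    //   from white (00): to stays white    -> returns false
    //   from grey  (10): to becomes grey   -> returns false
    //   from black (11): to becomes black  -> returns true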
// ----------------------------------------------------------------------------
@@ -140,18 +181,15 @@
class MarkingDeque {
public:
MarkingDeque()
- : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}
+ : array_(NULL),
+ top_(0),
+ bottom_(0),
+ mask_(0),
+ overflowed_(false),
+ in_use_(false) {}
- void Initialize(Address low, Address high) {
- HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
- HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
- array_ = obj_low;
- mask_ = base::bits::RoundDownToPowerOfTwo32(
- static_cast<uint32_t>(obj_high - obj_low)) -
- 1;
- top_ = bottom_ = 0;
- overflowed_ = false;
- }
+ void Initialize(Address low, Address high);
+ void Uninitialize(bool aborting = false);
inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
@@ -159,32 +197,23 @@
bool overflowed() const { return overflowed_; }
+ bool in_use() const { return in_use_; }
+
void ClearOverflowed() { overflowed_ = false; }
void SetOverflowed() { overflowed_ = true; }
- // Push the (marked) object on the marking stack if there is room,
- // otherwise mark the object as overflowed and wait for a rescan of the
- // heap.
- INLINE(void PushBlack(HeapObject* object)) {
- DCHECK(object->IsHeapObject());
- if (IsFull()) {
- Marking::BlackToGrey(object);
- MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
- SetOverflowed();
- } else {
- array_[top_] = object;
- top_ = ((top_ + 1) & mask_);
- }
- }
-
- INLINE(void PushGrey(HeapObject* object)) {
+ // Push the object on the marking stack if there is room, otherwise mark the
+ // deque as overflowed and wait for a rescan of the heap.
+ INLINE(bool Push(HeapObject* object)) {
DCHECK(object->IsHeapObject());
if (IsFull()) {
SetOverflowed();
+ return false;
} else {
array_[top_] = object;
top_ = ((top_ + 1) & mask_);
+ return true;
}
}
@@ -196,13 +225,17 @@
return object;
}
- INLINE(void UnshiftGrey(HeapObject* object)) {
+ // Unshift the object into the marking stack if there is room, otherwise mark
+ // the deque as overflowed and wait for a rescan of the heap.
+ INLINE(bool Unshift(HeapObject* object)) {
DCHECK(object->IsHeapObject());
if (IsFull()) {
SetOverflowed();
+ return false;
} else {
bottom_ = ((bottom_ - 1) & mask_);
array_[bottom_] = object;
+ return true;
}
}
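Push and Unshift now report overflow to the caller by returning false, instead of silently fixing up mark bits the way the removed PushBlack did. The deque itself is a power-of-two ring buffer addressed through top_, bottom_ and mask_. A self-contained sketch of the same indexing scheme, assuming a compile-time capacity rather than Initialize over a committed memory range:

    #include <cstdint>

    // Sketch of the top_/bottom_/mask_ ring-buffer scheme. Capacity must
    // be a power of two; one slot is sacrificed so that "full"
    // (((top_ + 1) & mask_) == bottom_) differs from "empty"
    // (top_ == bottom_), exactly as in MarkingDeque.
    template <typename T, uint32_t kCapacity>
    class RingDeque {
     public:
      bool Push(T value) {  // add at top_; the LIFO end
        if (IsFull()) return false;
        array_[top_] = value;
        top_ = (top_ + 1) & mask_;
        return true;
      }
      bool Unshift(T value) {  // add at bottom_; drained last
        if (IsFull()) return false;
        bottom_ = (bottom_ - 1) & mask_;
        array_[bottom_] = value;
        return true;
      }
      T Pop() {  // remove from top_
        top_ = (top_ - 1) & mask_;
        return array_[top_];
      }
      bool IsEmpty() const { return top_ == bottom_; }
      bool IsFull() const { return ((top_ + 1) & mask_) == bottom_; }

     private:
      static const uint32_t mask_ = kCapacity - 1;
      T array_[kCapacity];
      uint32_t top_ = 0;
      uint32_t bottom_ = 0;
    };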
@@ -221,155 +254,18 @@
int bottom_;
int mask_;
bool overflowed_;
+ bool in_use_;
DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
-class SlotsBufferAllocator {
- public:
- SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
- void DeallocateBuffer(SlotsBuffer* buffer);
-
- void DeallocateChain(SlotsBuffer** buffer_address);
-};
-
-
-// SlotsBuffer records a sequence of slots that has to be updated
-// after live objects were relocated from evacuation candidates.
-// All slots are either untyped or typed:
-// - Untyped slots are expected to contain a tagged object pointer.
-// They are recorded by an address.
-// - Typed slots are expected to contain an encoded pointer to a heap
-// object where the way of encoding depends on the type of the slot.
-// They are recorded as a pair (SlotType, slot address).
-// We assume that zero-page is never mapped this allows us to distinguish
-// untyped slots from typed slots during iteration by a simple comparison:
-// if element of slots buffer is less than NUMBER_OF_SLOT_TYPES then it
-// is the first element of typed slot's pair.
-class SlotsBuffer {
- public:
- typedef Object** ObjectSlot;
-
- explicit SlotsBuffer(SlotsBuffer* next_buffer)
- : idx_(0), chain_length_(1), next_(next_buffer) {
- if (next_ != NULL) {
- chain_length_ = next_->chain_length_ + 1;
- }
- }
-
- ~SlotsBuffer() {}
-
- void Add(ObjectSlot slot) {
- DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
- slots_[idx_++] = slot;
- }
-
- enum SlotType {
- EMBEDDED_OBJECT_SLOT,
- RELOCATED_CODE_OBJECT,
- CODE_TARGET_SLOT,
- CODE_ENTRY_SLOT,
- DEBUG_TARGET_SLOT,
- JS_RETURN_SLOT,
- NUMBER_OF_SLOT_TYPES
- };
-
- static const char* SlotTypeToString(SlotType type) {
- switch (type) {
- case EMBEDDED_OBJECT_SLOT:
- return "EMBEDDED_OBJECT_SLOT";
- case RELOCATED_CODE_OBJECT:
- return "RELOCATED_CODE_OBJECT";
- case CODE_TARGET_SLOT:
- return "CODE_TARGET_SLOT";
- case CODE_ENTRY_SLOT:
- return "CODE_ENTRY_SLOT";
- case DEBUG_TARGET_SLOT:
- return "DEBUG_TARGET_SLOT";
- case JS_RETURN_SLOT:
- return "JS_RETURN_SLOT";
- case NUMBER_OF_SLOT_TYPES:
- return "NUMBER_OF_SLOT_TYPES";
- }
- return "UNKNOWN SlotType";
- }
-
- void UpdateSlots(Heap* heap);
-
- void UpdateSlotsWithFilter(Heap* heap);
-
- SlotsBuffer* next() { return next_; }
-
- static int SizeOfChain(SlotsBuffer* buffer) {
- if (buffer == NULL) return 0;
- return static_cast<int>(buffer->idx_ +
- (buffer->chain_length_ - 1) * kNumberOfElements);
- }
-
- inline bool IsFull() { return idx_ == kNumberOfElements; }
-
- inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }
-
- static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
- bool code_slots_filtering_required) {
- while (buffer != NULL) {
- if (code_slots_filtering_required) {
- buffer->UpdateSlotsWithFilter(heap);
- } else {
- buffer->UpdateSlots(heap);
- }
- buffer = buffer->next();
- }
- }
-
- enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };
-
- static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
- return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
- }
-
- INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address, ObjectSlot slot,
- AdditionMode mode)) {
- SlotsBuffer* buffer = *buffer_address;
- if (buffer == NULL || buffer->IsFull()) {
- if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
- allocator->DeallocateChain(buffer_address);
- return false;
- }
- buffer = allocator->AllocateBuffer(buffer);
- *buffer_address = buffer;
- }
- buffer->Add(slot);
- return true;
- }
-
- static bool IsTypedSlot(ObjectSlot slot);
-
- static bool AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address, SlotType type, Address addr,
- AdditionMode mode);
-
- static const int kNumberOfElements = 1021;
-
- private:
- static const int kChainLengthThreshold = 15;
-
- intptr_t idx_;
- intptr_t chain_length_;
- SlotsBuffer* next_;
- ObjectSlot slots_[kNumberOfElements];
-};
-
-
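SlotsBuffer and SlotsBufferAllocator are not deleted; the forward declarations added at the top of this file show that the definitions move out of this header, and the collector below still records into SlotsBuffer chains. The removed comment documents the encoding worth keeping in mind: untyped slots are bare addresses, typed slots are (SlotType, address) pairs, and because the zero page is never mapped, any buffer element numerically below NUMBER_OF_SLOT_TYPES must be a type tag. A sketch of iteration under that convention, with hypothetical visitor callbacks and stand-in types:

    #include <cstdint>

    enum SlotType { kEmbeddedObjectSlot, kCodeTargetSlot, kNumberOfSlotTypes };

    // Hypothetical visitors standing in for the real slot-update logic.
    void VisitTypedSlot(SlotType type, void* addr);
    void VisitUntypedSlot(void** slot);

    // Walks a buffer under the removed encoding: an element below
    // kNumberOfSlotTypes must be a type tag (no real slot address can be
    // that small, since the zero page is never mapped) and is followed by
    // the slot address; everything else is an untyped slot address.
    void IterateSlots(intptr_t* slots, int count) {
      for (int i = 0; i < count; i++) {
        if (slots[i] < kNumberOfSlotTypes) {
          SlotType type = static_cast<SlotType>(slots[i]);
          VisitTypedSlot(type, reinterpret_cast<void*>(slots[++i]));
        } else {
          VisitUntypedSlot(reinterpret_cast<void**>(slots[i]));
        }
      }
    }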
// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
-// be unreachable. Code objects can be referenced in three ways:
+// be unreachable. Code objects can be referenced in two ways:
// - SharedFunctionInfo references unoptimized code.
// - JSFunction references either unoptimized or optimized code.
-// - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bailout
// into the unoptimized code again during deoptimization.
@@ -377,113 +273,42 @@
public:
explicit CodeFlusher(Isolate* isolate)
: isolate_(isolate),
- jsfunction_candidates_head_(NULL),
- shared_function_info_candidates_head_(NULL),
- optimized_code_map_holder_head_(NULL) {}
+ jsfunction_candidates_head_(nullptr),
+ shared_function_info_candidates_head_(nullptr) {}
- void AddCandidate(SharedFunctionInfo* shared_info) {
- if (GetNextCandidate(shared_info) == NULL) {
- SetNextCandidate(shared_info, shared_function_info_candidates_head_);
- shared_function_info_candidates_head_ = shared_info;
- }
- }
+ inline void AddCandidate(SharedFunctionInfo* shared_info);
+ inline void AddCandidate(JSFunction* function);
- void AddCandidate(JSFunction* function) {
- DCHECK(function->code() == function->shared()->code());
- if (GetNextCandidate(function)->IsUndefined()) {
- SetNextCandidate(function, jsfunction_candidates_head_);
- jsfunction_candidates_head_ = function;
- }
- }
-
- void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
- if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
- SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
- optimized_code_map_holder_head_ = code_map_holder;
- }
- }
-
- void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
void EvictCandidate(SharedFunctionInfo* shared_info);
void EvictCandidate(JSFunction* function);
void ProcessCandidates() {
- ProcessOptimizedCodeMaps();
ProcessSharedFunctionInfoCandidates();
ProcessJSFunctionCandidates();
}
- void EvictAllCandidates() {
- EvictOptimizedCodeMaps();
- EvictJSFunctionCandidates();
- EvictSharedFunctionInfoCandidates();
- }
-
void IteratePointersToFromSpace(ObjectVisitor* v);
private:
- void ProcessOptimizedCodeMaps();
void ProcessJSFunctionCandidates();
void ProcessSharedFunctionInfoCandidates();
- void EvictOptimizedCodeMaps();
- void EvictJSFunctionCandidates();
- void EvictSharedFunctionInfoCandidates();
- static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
- return reinterpret_cast<JSFunction**>(
- HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
- }
+ static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
+ static inline JSFunction* GetNextCandidate(JSFunction* candidate);
+ static inline void SetNextCandidate(JSFunction* candidate,
+ JSFunction* next_candidate);
+ static inline void ClearNextCandidate(JSFunction* candidate,
+ Object* undefined);
- static JSFunction* GetNextCandidate(JSFunction* candidate) {
- Object* next_candidate = candidate->next_function_link();
- return reinterpret_cast<JSFunction*>(next_candidate);
- }
-
- static void SetNextCandidate(JSFunction* candidate,
- JSFunction* next_candidate) {
- candidate->set_next_function_link(next_candidate);
- }
-
- static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
- DCHECK(undefined->IsUndefined());
- candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
- }
-
- static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
- Object* next_candidate = candidate->code()->gc_metadata();
- return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
- }
-
- static void SetNextCandidate(SharedFunctionInfo* candidate,
- SharedFunctionInfo* next_candidate) {
- candidate->code()->set_gc_metadata(next_candidate);
- }
-
- static void ClearNextCandidate(SharedFunctionInfo* candidate) {
- candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
- }
-
- static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
- return reinterpret_cast<SharedFunctionInfo*>(next_map);
- }
-
- static void SetNextCodeMap(SharedFunctionInfo* holder,
- SharedFunctionInfo* next_holder) {
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
- }
-
- static void ClearNextCodeMap(SharedFunctionInfo* holder) {
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
- }
+ static inline SharedFunctionInfo* GetNextCandidate(
+ SharedFunctionInfo* candidate);
+ static inline void SetNextCandidate(SharedFunctionInfo* candidate,
+ SharedFunctionInfo* next_candidate);
+ static inline void ClearNextCandidate(SharedFunctionInfo* candidate);
Isolate* isolate_;
JSFunction* jsfunction_candidates_head_;
SharedFunctionInfo* shared_function_info_candidates_head_;
- SharedFunctionInfo* optimized_code_map_holder_head_;
DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
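The AddCandidate/GetNextCandidate/SetNextCandidate bodies move behind the inline declarations above, but the underlying structure is unchanged: candidates form intrusive singly-linked lists threaded through fields of the objects themselves (JSFunction::next_function_link, and the gc_metadata slot of a SharedFunctionInfo's code), so the flusher needs no side allocation. A generic sketch of that pattern, with a hypothetical Node type:

    // Intrusive singly-linked candidate list: the link lives inside the
    // object, as in CodeFlusher. Node is hypothetical, not a V8 type.
    struct Node {
      Node* next_link = nullptr;  // plays the role of next_function_link
    };

    class CandidateList {
     public:
      void Add(Node* candidate) {
        // Only add if not already linked; mirrors the NULL/undefined
        // checks in the removed AddCandidate bodies (with an extra head
        // check to keep the sketch safe against double-adding the head).
        if (candidate->next_link == nullptr && candidate != head_) {
          candidate->next_link = head_;
          head_ = candidate;
        }
      }
      template <typename F>
      void ProcessAndClear(F visit) {
        for (Node* n = head_; n != nullptr;) {
          Node* next = n->next_link;
          n->next_link = nullptr;  // unlink before visiting
          visit(n);
          n = next;
        }
        head_ = nullptr;
      }
     private:
      Node* head_ = nullptr;
    };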
@@ -497,8 +322,10 @@
// Mark-Compact collector
class MarkCompactCollector {
public:
- // Set the global flags, it must be called before Prepare to take effect.
- inline void SetFlags(int flags);
+ enum IterationMode {
+ kKeepMarking,
+ kClearMarkbits,
+ };
static void Initialize();
@@ -538,22 +365,18 @@
static const uint32_t kMultiFreeEncoding = 1;
static inline bool IsMarked(Object* obj);
+ static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
inline Heap* heap() const { return heap_; }
inline Isolate* isolate() const;
CodeFlusher* code_flusher() { return code_flusher_; }
inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
- void EnableCodeFlushing(bool enable);
-
- enum SweeperType {
- CONCURRENT_SWEEPING,
- SEQUENTIAL_SWEEPING
- };
enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
#ifdef VERIFY_HEAP
+ void VerifyValidStoreAndSlotsBufferEntries();
void VerifyMarkbitsAreClean();
static void VerifyMarkbitsAreClean(PagedSpace* space);
static void VerifyMarkbitsAreClean(NewSpace* space);
@@ -561,11 +384,6 @@
void VerifyOmittedMapChecks();
#endif
- INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
- return Page::FromAddress(reinterpret_cast<Address>(anchor))
- ->ShouldSkipEvacuationSlotRecording();
- }
-
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
return Page::FromAddress(reinterpret_cast<Address>(host))
->ShouldSkipEvacuationSlotRecording();
@@ -576,46 +394,24 @@
->IsEvacuationCandidate();
}
- INLINE(void EvictEvacuationCandidate(Page* page)) {
- if (FLAG_trace_fragmentation) {
- PrintF("Page %p is too popular. Disabling evacuation.\n",
- reinterpret_cast<void*>(page));
- }
-
- // TODO(gc) If all evacuation candidates are too popular we
- // should stop slots recording entirely.
- page->ClearEvacuationCandidate();
-
- // We were not collecting slots on this page that point
- // to other evacuation candidates thus we have to
- // rescan the page after evacuation to discover and update all
- // pointers to evacuated objects.
- if (page->owner()->identity() == OLD_DATA_SPACE) {
- evacuation_candidates_.RemoveElement(page);
- } else {
- page->SetFlag(Page::RESCAN_ON_EVACUATION);
- }
- }
-
void RecordRelocSlot(RelocInfo* rinfo, Object* target);
- void RecordCodeEntrySlot(Address slot, Code* target);
+ void RecordCodeEntrySlot(HeapObject* object, Address slot, Code* target);
void RecordCodeTargetPatch(Address pc, Code* target);
+ INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
+ INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
+ Object* target));
- INLINE(void RecordSlot(
- Object** anchor_slot, Object** slot, Object* object,
- SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));
+ void UpdateSlots(SlotsBuffer* buffer);
+ void UpdateSlotsRecordedIn(SlotsBuffer* buffer);
void MigrateObject(HeapObject* dst, HeapObject* src, int size,
- AllocationSpace to_old_space);
-
- bool TryPromoteObject(HeapObject* object, int object_size);
+ AllocationSpace to_old_space,
+ SlotsBuffer** evacuation_slots_buffer);
void InvalidateCode(Code* code);
void ClearMarkbits();
- bool abort_incremental_marking() const { return abort_incremental_marking_; }
-
bool is_compacting() const { return compacting_; }
MarkingParity marking_parity() { return marking_parity_; }
@@ -631,15 +427,24 @@
// size of the maximum continuous freed memory chunk.
int SweepInParallel(Page* page, PagedSpace* space);
+ // Ensures that sweeping is finished.
+ //
+ // Note: Can only be called safely from main thread.
void EnsureSweepingCompleted();
+ void SweepOrWaitUntilSweepingCompleted(Page* page);
+
+ // Help out in sweeping the corresponding space and refill its free list
+ // with memory that has been regained.
+ //
+ // Note: Thread-safe.
+ void SweepAndRefill(CompactionSpace* space);
+
// If sweeper threads are not active this method will return true. If
// this is a latency issue we should be smarter here. Otherwise, it will
// return true if the sweeper threads are done processing the pages.
bool IsSweepingCompleted();
- void RefillFreeList(PagedSpace* space);
-
// Checks if sweeping is in progress right now on any space.
bool sweeping_in_progress() { return sweeping_in_progress_; }
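The sweeping entry points above describe a help-then-wait protocol: the main thread sweeps pages itself (SweepInParallel, SweepOrWaitUntilSweepingCompleted, SweepAndRefill) and EnsureSweepingCompleted only blocks once no work is left. A minimal sketch of that shape, assuming one semaphore signal per background task (the real signalling uses pending_sweeper_tasks_semaphore_, declared further down; all types here are stand-ins):

    // Hypothetical semaphore with the usual Wait/Signal interface.
    struct Semaphore {
      void Wait();
      void Signal();
    };

    struct Sweeper {
      Semaphore pending_tasks_semaphore;  // one Signal() per finished task
      int num_background_tasks = 0;
      bool sweeping_in_progress = false;

      // Background task body: sweep until the queue is empty, then signal.
      void BackgroundSweep() {
        while (SweepNextPage()) {
        }
        pending_tasks_semaphore.Signal();
      }

      // Main thread: contribute first, then wait for every task to exit.
      void EnsureSweepingCompleted() {
        while (SweepNextPage()) {  // help out instead of just blocking
        }
        for (int i = 0; i < num_background_tasks; i++) {
          pending_tasks_semaphore.Wait();
        }
        sweeping_in_progress = false;
      }

      bool SweepNextPage();  // hypothetical: pops and sweeps one page
    };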
@@ -647,35 +452,79 @@
bool evacuation() const { return evacuation_; }
- // Mark the global table which maps weak objects to dependent code without
- // marking its contents.
- void MarkWeakObjectToCodeTable();
-
// Special case for processing weak references in a full collection. We need
// to artificially keep AllocationSites alive for a time.
void MarkAllocationSite(AllocationSite* site);
+ // Mark objects in implicit reference groups if their parent object
+ // is marked.
+ void MarkImplicitRefGroups(MarkObjectFunction mark_object);
+
MarkingDeque* marking_deque() { return &marking_deque_; }
- void EnsureMarkingDequeIsCommittedAndInitialize();
+ static const size_t kMaxMarkingDequeSize = 4 * MB;
+ static const size_t kMinMarkingDequeSize = 256 * KB;
+
+ void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
+ if (!marking_deque_.in_use()) {
+ EnsureMarkingDequeIsCommitted(max_size);
+ InitializeMarkingDeque();
+ }
+ }
+
+ void EnsureMarkingDequeIsCommitted(size_t max_size);
+ void EnsureMarkingDequeIsReserved();
void InitializeMarkingDeque();
- void UncommitMarkingDeque();
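kMinMarkingDequeSize and kMaxMarkingDequeSize bound how much of the reserved backing store gets committed, and EnsureMarkingDequeIsCommittedAndInitialize is a no-op while the deque is in use. A plausible call pattern, inferred from the API rather than from the actual call sites:

    // Hypothetical call sequence (the real call sites are elsewhere):
    // reserve address space once, then commit lazily per collection.
    void PrepareForFullGC(MarkCompactCollector* collector) {
      collector->EnsureMarkingDequeIsReserved();
      collector->EnsureMarkingDequeIsCommittedAndInitialize(
          MarkCompactCollector::kMaxMarkingDequeSize);
    }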
+ // The following four methods may only be called after marking, when the
+ // whole transitive closure is known. They must be called before sweeping
+ // when mark bits are still intact.
+ bool IsSlotInBlackObject(Page* p, Address slot, HeapObject** out_object);
+ bool IsSlotInBlackObjectSlow(Page* p, Address slot);
+ bool IsSlotInLiveObject(Address slot);
+ void VerifyIsSlotInLiveObject(Address slot, HeapObject* object);
+
+ // Removes all the slots in the slot buffers that are within the given
+ // address range.
+ void RemoveObjectSlots(Address start_slot, Address end_slot);
+
+ //
+ // Free lists filled by sweeper and consumed by corresponding spaces
+ // (including compaction spaces).
+ //
+ base::SmartPointer<FreeList>& free_list_old_space() {
+ return free_list_old_space_;
+ }
+ base::SmartPointer<FreeList>& free_list_code_space() {
+ return free_list_code_space_;
+ }
+ base::SmartPointer<FreeList>& free_list_map_space() {
+ return free_list_map_space_;
+ }
private:
+ class CompactionTask;
+ class EvacuateNewSpaceVisitor;
+ class EvacuateOldSpaceVisitor;
+ class EvacuateVisitorBase;
+ class HeapObjectVisitor;
class SweeperTask;
- explicit MarkCompactCollector(Heap* heap);
- ~MarkCompactCollector();
+ static const int kInitialLocalPretenuringFeedbackCapacity = 256;
- bool MarkInvalidatedCode();
+ explicit MarkCompactCollector(Heap* heap);
+
bool WillBeDeoptimized(Code* code);
- void RemoveDeadInvalidatedCode();
- void ProcessInvalidatedCode(ObjectVisitor* visitor);
+ void EvictPopularEvacuationCandidate(Page* page);
+ void ClearInvalidStoreAndSlotsBufferEntries();
void StartSweeperThreads();
+ void ComputeEvacuationHeuristics(int area_size,
+ int* target_fragmentation_percent,
+ int* max_evacuated_bytes);
+
#ifdef DEBUG
enum CollectorState {
IDLE,
@@ -691,26 +540,13 @@
CollectorState state_;
#endif
- bool reduce_memory_footprint_;
-
- bool abort_incremental_marking_;
-
MarkingParity marking_parity_;
- // True if we are collecting slots to perform evacuation from evacuation
- // candidates.
- bool compacting_;
-
bool was_marked_incrementally_;
- // True if concurrent or parallel sweeping is currently in progress.
- bool sweeping_in_progress_;
-
- base::Semaphore pending_sweeper_jobs_semaphore_;
-
bool evacuation_;
- SlotsBufferAllocator slots_buffer_allocator_;
+ SlotsBufferAllocator* slots_buffer_allocator_;
SlotsBuffer* migration_slots_buffer_;
@@ -726,10 +562,12 @@
//
// After: Live objects are marked and non-live objects are unmarked.
- friend class RootMarkingVisitor;
- friend class MarkingVisitor;
- friend class MarkCompactMarkingVisitor;
friend class CodeMarkingVisitor;
+ friend class IncrementalMarkingMarkingVisitor;
+ friend class MarkCompactMarkingVisitor;
+ friend class MarkingVisitor;
+ friend class RecordMigratedSlotVisitor;
+ friend class RootMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
// Mark code objects that are active on the stack to prevent them
@@ -741,7 +579,13 @@
// Marking operations for objects reachable from roots.
void MarkLiveObjects();
- void AfterMarking();
+ // Pushes a black object onto the marking stack and accounts for live bytes.
+ // Note that this assumes live bytes have not yet been counted.
+ INLINE(void PushBlack(HeapObject* obj));
+
+ // Unshifts a black object into the marking stack and accounts for live bytes.
+ // Note that this assumes live bytes have already been counted.
+ INLINE(void UnshiftBlack(HeapObject* obj));
// Marks the object black and pushes it on the marking stack.
// This is for non-incremental marking only.
@@ -758,10 +602,6 @@
// the string table are weak.
void MarkStringTable(RootMarkingVisitor* visitor);
- // Mark objects in implicit references groups if their parent object
- // is marked.
- void MarkImplicitRefGroups();
-
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
void ProcessMarkingDeque();
@@ -780,6 +620,9 @@
// otherwise a map can die and deoptimize the code.
void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
+ // Collects a list of dependent code from maps embedded in optimized code.
+ DependentCode* DependentCodeListFromNonLiveMaps();
+
// Mark objects reachable (transitively) from objects in the marking
// stack. This function empties the marking stack, but may leave
// overflowed objects in the heap, in which case the marking stack's
@@ -791,27 +634,34 @@
// flag on the marking stack.
void RefillMarkingDeque();
+ // Helper methods for refilling the marking stack by discovering grey objects
+ // on various pages of the heap. Used by {RefillMarkingDeque} only.
+ template <class T>
+ void DiscoverGreyObjectsWithIterator(T* it);
+ void DiscoverGreyObjectsOnPage(MemoryChunk* p);
+ void DiscoverGreyObjectsInSpace(PagedSpace* space);
+ void DiscoverGreyObjectsInNewSpace();
+
// Callback function for telling whether the object *p is an unmarked
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
- static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
- // Map transitions from a live map to a dead map must be killed.
- // We replace them with a null descriptor, with the same key.
+ // Clear non-live references in weak cells, transition and descriptor arrays,
+ // and deoptimize dependent code of non-live maps.
void ClearNonLiveReferences();
- void ClearNonLivePrototypeTransitions(Map* map);
- void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
- void ClearMapTransitions(Map* map);
- bool ClearMapBackPointer(Map* map);
- void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
- int number_of_own_descriptors);
+ void MarkDependentCodeForDeoptimization(DependentCode* list);
+ // Find non-live targets of simple transitions in the given list. Clear
+ // transitions to non-live targets and if needed trim descriptor arrays.
+ void ClearSimpleMapTransitions(Object* non_live_map_list);
+ void ClearSimpleMapTransition(Map* map, Map* dead_transition);
+ // Compact every array in the global list of transition arrays and
+ // trim the corresponding descriptor array if a transition target is non-live.
+ void ClearFullMapTransitions();
+ bool CompactTransitionArray(Map* map, TransitionArray* transitions,
+ DescriptorArray* descriptors);
+ void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
void TrimEnumCache(Map* map, DescriptorArray* descriptors);
- void ClearDependentCode(DependentCode* dependent_code);
- void ClearNonLiveDependentCode(DependentCode* dependent_code);
- int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group,
- int start, int end, int new_start);
-
// Mark all values associated with reachable keys in weak collections
// encountered so far. This might push new object or even new weak maps onto
// the marking stack.
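Weak-collection processing is the classic ephemeron fixpoint: a value is kept alive only if its key is, and promoting a value can discover new keys, so the collector iterates until nothing changes, draining the marking deque between rounds. A self-contained sketch of the fixpoint with stand-in types (the real logic lives in mark-compact.cc):

    #include <unordered_set>
    #include <utility>
    #include <vector>

    struct Obj;  // stand-in for HeapObject

    // One weak map as a list of (key, value) entries.
    using WeakMap = std::vector<std::pair<Obj*, Obj*>>;

    // Ephemeron fixpoint: a value is marked only once its key is marked.
    // The real collector drains the marking deque between rounds; here
    // newly marked values are just pushed for the caller to process.
    void ProcessWeakCollections(const std::vector<WeakMap*>& weak_maps,
                                std::unordered_set<Obj*>* marked,
                                std::vector<Obj*>* marking_stack) {
      bool progress = true;
      while (progress) {
        progress = false;
        for (WeakMap* map : weak_maps) {
          for (auto& entry : *map) {
            if (marked->count(entry.first) && !marked->count(entry.second)) {
              marked->insert(entry.second);            // value survives
              marking_stack->push_back(entry.second);  // may reach new keys
              progress = true;
            }
          }
        }
      }
    }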
@@ -826,10 +676,12 @@
// collections when incremental marking is aborted.
void AbortWeakCollections();
-
- void ProcessAndClearWeakCells();
+ void ClearWeakCells(Object** non_live_map_list,
+ DependentCode** dependent_code_list);
void AbortWeakCells();
+ void AbortTransitionArrays();
+
// -----------------------------------------------------------------------
// Phase 2: Sweeping to clear mark bits and free non-live objects for
// a non-compacting collection.
@@ -846,24 +698,51 @@
// regions to each space's free list.
void SweepSpaces();
- int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
- NewSpacePage* p);
+ void EvacuateNewSpacePrologue();
- void EvacuateNewSpace();
+ // Returns local pretenuring feedback.
+ HashMap* EvacuateNewSpaceInParallel();
- void EvacuateLiveObjectsFromPage(Page* p);
+ void AddEvacuationSlotsBufferSynchronized(
+ SlotsBuffer* evacuation_slots_buffer);
- void EvacuatePages();
+ void EvacuatePages(CompactionSpaceCollection* compaction_spaces,
+ SlotsBuffer** evacuation_slots_buffer);
+
+ void EvacuatePagesInParallel();
+
+ // The number of parallel compaction tasks, including the main thread.
+ int NumberOfParallelCompactionTasks();
+
+
+ void StartParallelCompaction(CompactionSpaceCollection** compaction_spaces,
+ uint32_t* task_ids, int len);
+ void WaitUntilCompactionCompleted(uint32_t* task_ids, int len);
void EvacuateNewSpaceAndCandidates();
+ void UpdatePointersAfterEvacuation();
+
+ // Iterates through all live objects on a page using marking information.
+ // Returns whether all objects have successfully been visited.
+ bool VisitLiveObjects(MemoryChunk* page, HeapObjectVisitor* visitor,
+ IterationMode mode);
+
+ void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);
+
+ void RecomputeLiveBytes(MemoryChunk* page);
+
+ void SweepAbortedPages();
+
void ReleaseEvacuationCandidates();
// Moves the pages of the evacuation_candidates_ list to the end of their
// corresponding space pages list.
void MoveEvacuationCandidatesToEndOfPagesList();
- void SweepSpace(PagedSpace* space, SweeperType sweeper);
+ // Starts sweeping of a space by contributing on the main thread and setting
+ // up other pages for sweeping.
+ void StartSweepSpace(PagedSpace* space);
// Finalizes the parallel sweeping phase. Marks all the pages that were
// swept in parallel.
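EvacuatePagesInParallel hands each compaction task its own evacuation_slots_buffer, so slot recording needs no locking on the hot path; only the final handoff is synchronized, as the member comments further down describe. A sketch of that handoff (SlotsBuffer left opaque):

    #include <mutex>
    #include <vector>

    struct SlotsBuffer;  // opaque here; defined outside this header

    class SlotsBufferCollector {
     public:
      // Called by each compaction task exactly once, from its own thread.
      void AddEvacuationSlotsBufferSynchronized(SlotsBuffer* buffer) {
        std::lock_guard<std::mutex> guard(mutex_);
        buffers_.push_back(buffer);
      }
      // Read on the main thread after all tasks have been joined.
      std::vector<SlotsBuffer*>& buffers() { return buffers_; }

     private:
      std::mutex mutex_;
      std::vector<SlotsBuffer*> buffers_;
    };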
@@ -872,7 +751,16 @@
void ParallelSweepSpaceComplete(PagedSpace* space);
// Updates store buffer and slot buffer for a pointer in a migrating object.
- void RecordMigratedSlot(Object* value, Address slot);
+ void RecordMigratedSlot(Object* value, Address slot,
+ SlotsBuffer** evacuation_slots_buffer);
+
+ // Adds the code entry slot to the slots buffer.
+ void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot,
+ SlotsBuffer** evacuation_slots_buffer);
+
+ // Adds the slot of a moved code object.
+ void RecordMigratedCodeObjectSlot(Address code_object,
+ SlotsBuffer** evacuation_slots_buffer);
#ifdef DEBUG
friend class MarkObjectVisitor;
@@ -884,18 +772,45 @@
Heap* heap_;
base::VirtualMemory* marking_deque_memory_;
- bool marking_deque_memory_committed_;
+ size_t marking_deque_memory_committed_;
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
bool have_code_to_deoptimize_;
List<Page*> evacuation_candidates_;
- List<Code*> invalidated_code_;
- SmartPointer<FreeList> free_list_old_data_space_;
- SmartPointer<FreeList> free_list_old_pointer_space_;
+ List<MemoryChunk*> newspace_evacuation_candidates_;
+
+ // The evacuation_slots_buffers_ are used by the compaction threads.
+ // When a compaction task finishes, it uses
+ // AddEvacuationSlotsBufferSynchronized to add its slots buffer to the
+ // evacuation_slots_buffers_ list using the evacuation_slots_buffers_mutex_
+ // lock.
+ base::Mutex evacuation_slots_buffers_mutex_;
+ List<SlotsBuffer*> evacuation_slots_buffers_;
+
+ base::SmartPointer<FreeList> free_list_old_space_;
+ base::SmartPointer<FreeList> free_list_code_space_;
+ base::SmartPointer<FreeList> free_list_map_space_;
+
+ // True if we are collecting slots to perform evacuation from evacuation
+ // candidates.
+ bool compacting_;
+
+ // True if concurrent or parallel sweeping is currently in progress.
+ bool sweeping_in_progress_;
+
+ // True if parallel compaction is currently in progress.
+ bool compaction_in_progress_;
+
+ // Semaphore used to synchronize sweeper tasks.
+ base::Semaphore pending_sweeper_tasks_semaphore_;
+
+ // Semaphore used to synchronize compaction tasks.
+ base::Semaphore pending_compaction_tasks_semaphore_;
friend class Heap;
+ friend class StoreBuffer;
};
@@ -931,6 +846,14 @@
cell_base_ += 32 * kPointerSize;
}
+ // Returns the next mark bit cell, or 0 if there is no next cell.
+ inline MarkBit::CellType PeekNext() {
+ if (HasNext()) {
+ return cells_[cell_index_ + 1];
+ }
+ return 0;
+ }
+
private:
MemoryChunk* chunk_;
MarkBit::CellType* cells_;
@@ -939,6 +862,26 @@
Address cell_base_;
};
+enum LiveObjectIterationMode { kBlackObjects, kGreyObjects, kAllLiveObjects };
+
+template <LiveObjectIterationMode T>
+class LiveObjectIterator BASE_EMBEDDED {
+ public:
+ explicit LiveObjectIterator(MemoryChunk* chunk)
+ : chunk_(chunk),
+ it_(chunk_),
+ cell_base_(it_.CurrentCellBase()),
+ current_cell_(*it_.CurrentCell()) {}
+
+ HeapObject* Next();
+
+ private:
+ MemoryChunk* chunk_;
+ MarkBitCellIterator it_;
+ Address cell_base_;
+ MarkBit::CellType current_cell_;
+};
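A typical use of the iterator, mirroring VisitLiveObjects above: walk every black (marked) object on a page straight out of the mark bitmap. HeapObjectVisitor's interface is not shown in this header, so the Visit call is an assumption:

    // Sketch only: assumes Next() returns NULL once the bitmap is
    // exhausted and that HeapObjectVisitor exposes Visit(HeapObject*).
    void VisitBlackObjects(MemoryChunk* chunk, HeapObjectVisitor* visitor) {
      LiveObjectIterator<kBlackObjects> it(chunk);
      while (HeapObject* object = it.Next()) {
        visitor->Visit(object);
      }
    }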
+
class EvacuationScope BASE_EMBEDDED {
public:
@@ -955,7 +898,7 @@
const char* AllocationSpaceName(AllocationSpace space);
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_MARK_COMPACT_H_