| // Copyright 2012 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #ifndef V8_HEAP_HEAP_H_ |
| #define V8_HEAP_HEAP_H_ |
| |
| #include <cmath> |
| #include <map> |
| |
| // Clients of this interface shouldn't depend on lots of heap internals. |
| // Do not include anything from src/heap here! |
| #include "include/v8.h" |
| #include "src/allocation.h" |
| #include "src/assert-scope.h" |
| #include "src/base/atomic-utils.h" |
| #include "src/globals.h" |
| #include "src/heap-symbols.h" |
| // TODO(mstarzinger): Two more includes to kill! |
| #include "src/heap/spaces.h" |
| #include "src/heap/store-buffer.h" |
| #include "src/list.h" |
| |
| namespace v8 { |
| namespace internal { |
| |
| using v8::MemoryPressureLevel; |
| |
| // Defines all the roots in Heap. |
| #define STRONG_ROOT_LIST(V) \ |
| /* Cluster the most popular ones in a few cache lines here at the top. */ \ |
| /* The first 32 entries are most often used in the startup snapshot and */ \ |
| /* can use a shorter representation in the serialization format. */ \ |
| V(Map, free_space_map, FreeSpaceMap) \ |
| V(Map, one_pointer_filler_map, OnePointerFillerMap) \ |
| V(Map, two_pointer_filler_map, TwoPointerFillerMap) \ |
| V(Oddball, uninitialized_value, UninitializedValue) \ |
| V(Oddball, undefined_value, UndefinedValue) \ |
| V(Oddball, the_hole_value, TheHoleValue) \ |
| V(Oddball, null_value, NullValue) \ |
| V(Oddball, true_value, TrueValue) \ |
| V(Oddball, false_value, FalseValue) \ |
| V(String, empty_string, empty_string) \ |
| V(Map, meta_map, MetaMap) \ |
| V(Map, byte_array_map, ByteArrayMap) \ |
| V(Map, fixed_array_map, FixedArrayMap) \ |
| V(Map, fixed_cow_array_map, FixedCOWArrayMap) \ |
| V(Map, hash_table_map, HashTableMap) \ |
| V(Map, symbol_map, SymbolMap) \ |
| V(Map, one_byte_string_map, OneByteStringMap) \ |
| V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \ |
| V(Map, scope_info_map, ScopeInfoMap) \ |
| V(Map, shared_function_info_map, SharedFunctionInfoMap) \ |
| V(Map, code_map, CodeMap) \ |
| V(Map, function_context_map, FunctionContextMap) \ |
| V(Map, cell_map, CellMap) \ |
| V(Map, weak_cell_map, WeakCellMap) \ |
| V(Map, global_property_cell_map, GlobalPropertyCellMap) \ |
| V(Map, foreign_map, ForeignMap) \ |
| V(Map, heap_number_map, HeapNumberMap) \ |
| V(Map, transition_array_map, TransitionArrayMap) \ |
| V(FixedArray, empty_literals_array, EmptyLiteralsArray) \ |
| V(FixedArray, empty_fixed_array, EmptyFixedArray) \ |
| V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap) \ |
| V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \ |
| /* Entries beyond the first 32 */ \ |
| /* The roots above this line should be boring from a GC point of view. */ \ |
| /* This means they are never in new space and never on a page that is */ \ |
| /* being compacted. */ \ |
| /* Oddballs */ \ |
| V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \ |
| V(Oddball, arguments_marker, ArgumentsMarker) \ |
| V(Oddball, exception, Exception) \ |
| V(Oddball, termination_exception, TerminationException) \ |
| V(Oddball, optimized_out, OptimizedOut) \ |
| V(Oddball, stale_register, StaleRegister) \ |
| /* Context maps */ \ |
| V(Map, native_context_map, NativeContextMap) \ |
| V(Map, module_context_map, ModuleContextMap) \ |
| V(Map, script_context_map, ScriptContextMap) \ |
| V(Map, block_context_map, BlockContextMap) \ |
| V(Map, catch_context_map, CatchContextMap) \ |
| V(Map, with_context_map, WithContextMap) \ |
| V(Map, debug_evaluate_context_map, DebugEvaluateContextMap) \ |
| V(Map, script_context_table_map, ScriptContextTableMap) \ |
| /* Maps */ \ |
| V(Map, fixed_double_array_map, FixedDoubleArrayMap) \ |
| V(Map, mutable_heap_number_map, MutableHeapNumberMap) \ |
| V(Map, ordered_hash_table_map, OrderedHashTableMap) \ |
| V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \ |
| V(Map, message_object_map, JSMessageObjectMap) \ |
| V(Map, neander_map, NeanderMap) \ |
| V(Map, external_map, ExternalMap) \ |
| V(Map, bytecode_array_map, BytecodeArrayMap) \ |
| /* String maps */ \ |
| V(Map, native_source_string_map, NativeSourceStringMap) \ |
| V(Map, string_map, StringMap) \ |
| V(Map, cons_one_byte_string_map, ConsOneByteStringMap) \ |
| V(Map, cons_string_map, ConsStringMap) \ |
| V(Map, sliced_string_map, SlicedStringMap) \ |
| V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap) \ |
| V(Map, external_string_map, ExternalStringMap) \ |
| V(Map, external_string_with_one_byte_data_map, \ |
| ExternalStringWithOneByteDataMap) \ |
| V(Map, external_one_byte_string_map, ExternalOneByteStringMap) \ |
| V(Map, short_external_string_map, ShortExternalStringMap) \ |
| V(Map, short_external_string_with_one_byte_data_map, \ |
| ShortExternalStringWithOneByteDataMap) \ |
| V(Map, internalized_string_map, InternalizedStringMap) \ |
| V(Map, external_internalized_string_map, ExternalInternalizedStringMap) \ |
| V(Map, external_internalized_string_with_one_byte_data_map, \ |
| ExternalInternalizedStringWithOneByteDataMap) \ |
| V(Map, external_one_byte_internalized_string_map, \ |
| ExternalOneByteInternalizedStringMap) \ |
| V(Map, short_external_internalized_string_map, \ |
| ShortExternalInternalizedStringMap) \ |
| V(Map, short_external_internalized_string_with_one_byte_data_map, \ |
| ShortExternalInternalizedStringWithOneByteDataMap) \ |
| V(Map, short_external_one_byte_internalized_string_map, \ |
| ShortExternalOneByteInternalizedStringMap) \ |
| V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap) \ |
| /* Array element maps */ \ |
| V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \ |
| V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \ |
| V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \ |
| V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \ |
| V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \ |
| V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \ |
| V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \ |
| V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \ |
| V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \ |
| V(Map, float32x4_map, Float32x4Map) \ |
| V(Map, int32x4_map, Int32x4Map) \ |
| V(Map, uint32x4_map, Uint32x4Map) \ |
| V(Map, bool32x4_map, Bool32x4Map) \ |
| V(Map, int16x8_map, Int16x8Map) \ |
| V(Map, uint16x8_map, Uint16x8Map) \ |
| V(Map, bool16x8_map, Bool16x8Map) \ |
| V(Map, int8x16_map, Int8x16Map) \ |
| V(Map, uint8x16_map, Uint8x16Map) \ |
| V(Map, bool8x16_map, Bool8x16Map) \ |
| /* Canonical empty values */ \ |
| V(ByteArray, empty_byte_array, EmptyByteArray) \ |
| V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \ |
| V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \ |
| V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \ |
| V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array) \ |
| V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \ |
| V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \ |
| V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \ |
| V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \ |
| V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \ |
| EmptyFixedUint8ClampedArray) \ |
| V(Script, empty_script, EmptyScript) \ |
| V(Cell, undefined_cell, UndefinedCell) \ |
| V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \ |
| V(SeededNumberDictionary, empty_slow_element_dictionary, \ |
| EmptySlowElementDictionary) \ |
| V(TypeFeedbackVector, dummy_vector, DummyVector) \ |
| V(PropertyCell, empty_property_cell, EmptyPropertyCell) \ |
| V(WeakCell, empty_weak_cell, EmptyWeakCell) \ |
| /* Protectors */ \ |
| V(PropertyCell, array_protector, ArrayProtector) \ |
| V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \ |
| V(PropertyCell, has_instance_protector, HasInstanceProtector) \ |
| V(Cell, species_protector, SpeciesProtector) \ |
| /* Special numbers */ \ |
| V(HeapNumber, nan_value, NanValue) \ |
| V(HeapNumber, infinity_value, InfinityValue) \ |
| V(HeapNumber, minus_zero_value, MinusZeroValue) \ |
| V(HeapNumber, minus_infinity_value, MinusInfinityValue) \ |
| /* Caches */ \ |
| V(FixedArray, number_string_cache, NumberStringCache) \ |
| V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \ |
| V(FixedArray, string_split_cache, StringSplitCache) \ |
| V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \ |
| V(Object, instanceof_cache_function, InstanceofCacheFunction) \ |
| V(Object, instanceof_cache_map, InstanceofCacheMap) \ |
| V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \ |
| V(FixedArray, natives_source_cache, NativesSourceCache) \ |
| V(FixedArray, experimental_natives_source_cache, \ |
| ExperimentalNativesSourceCache) \ |
| V(FixedArray, extra_natives_source_cache, ExtraNativesSourceCache) \ |
| V(FixedArray, experimental_extra_natives_source_cache, \ |
| ExperimentalExtraNativesSourceCache) \ |
| /* Lists and dictionaries */ \ |
| V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \ |
| V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary) \ |
| V(Object, symbol_registry, SymbolRegistry) \ |
| V(Object, script_list, ScriptList) \ |
| V(UnseededNumberDictionary, code_stubs, CodeStubs) \ |
| V(FixedArray, materialized_objects, MaterializedObjects) \ |
| V(FixedArray, microtask_queue, MicrotaskQueue) \ |
| V(FixedArray, detached_contexts, DetachedContexts) \ |
| V(ArrayList, retained_maps, RetainedMaps) \ |
| V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable) \ |
| V(Object, weak_stack_trace_list, WeakStackTraceList) \ |
| V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \ |
| V(FixedArray, serialized_templates, SerializedTemplates) \ |
| /* Configured values */ \ |
| V(JSObject, message_listeners, MessageListeners) \ |
| V(Code, js_entry_code, JsEntryCode) \ |
| V(Code, js_construct_entry_code, JsConstructEntryCode) \ |
| /* Oddball maps */ \ |
| V(Map, undefined_map, UndefinedMap) \ |
| V(Map, the_hole_map, TheHoleMap) \ |
| V(Map, null_map, NullMap) \ |
| V(Map, boolean_map, BooleanMap) \ |
| V(Map, uninitialized_map, UninitializedMap) \ |
| V(Map, arguments_marker_map, ArgumentsMarkerMap) \ |
| V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap) \ |
| V(Map, exception_map, ExceptionMap) \ |
| V(Map, termination_exception_map, TerminationExceptionMap) \ |
| V(Map, optimized_out_map, OptimizedOutMap) \ |
| V(Map, stale_register_map, StaleRegisterMap) |
| |
| // Entries in this list are limited to Smis and are not visited during GC. |
| #define SMI_ROOT_LIST(V) \ |
| V(Smi, stack_limit, StackLimit) \ |
| V(Smi, real_stack_limit, RealStackLimit) \ |
| V(Smi, last_script_id, LastScriptId) \ |
| V(Smi, hash_seed, HashSeed) \ |
| /* To distinguish the function templates, so that we can find them in the */ \ |
| /* function cache of the native context. */ \ |
| V(Smi, next_template_serial_number, NextTemplateSerialNumber) \ |
| V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \ |
| V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \ |
| V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \ |
| V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \ |
| V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset) |
| |
| #define ROOT_LIST(V) \ |
| STRONG_ROOT_LIST(V) \ |
| SMI_ROOT_LIST(V) \ |
| V(StringTable, string_table, StringTable) |
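| // Illustrative expansion (for orientation only): a root list entry such as |
| //   V(Map, free_space_map, FreeSpaceMap) |
| // becomes the enum constant kFreeSpaceMapRootIndex via ROOT_INDEX_DECLARATION |
| // in Heap::RootListIndex below, and the accessor |
| //   inline Map* free_space_map(); |
| // via ROOT_ACCESSOR in the Heap class. |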
| |
| |
| // Heap roots that are known to be immortal immovable, for which we can safely |
| // skip write barriers. This list is not exhaustive. |
| #define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \ |
| V(ByteArrayMap) \ |
| V(BytecodeArrayMap) \ |
| V(FreeSpaceMap) \ |
| V(OnePointerFillerMap) \ |
| V(TwoPointerFillerMap) \ |
| V(UndefinedValue) \ |
| V(TheHoleValue) \ |
| V(NullValue) \ |
| V(TrueValue) \ |
| V(FalseValue) \ |
| V(UninitializedValue) \ |
| V(CellMap) \ |
| V(GlobalPropertyCellMap) \ |
| V(SharedFunctionInfoMap) \ |
| V(MetaMap) \ |
| V(HeapNumberMap) \ |
| V(MutableHeapNumberMap) \ |
| V(Float32x4Map) \ |
| V(Int32x4Map) \ |
| V(Uint32x4Map) \ |
| V(Bool32x4Map) \ |
| V(Int16x8Map) \ |
| V(Uint16x8Map) \ |
| V(Bool16x8Map) \ |
| V(Int8x16Map) \ |
| V(Uint8x16Map) \ |
| V(Bool8x16Map) \ |
| V(NativeContextMap) \ |
| V(FixedArrayMap) \ |
| V(CodeMap) \ |
| V(ScopeInfoMap) \ |
| V(FixedCOWArrayMap) \ |
| V(FixedDoubleArrayMap) \ |
| V(WeakCellMap) \ |
| V(TransitionArrayMap) \ |
| V(NoInterceptorResultSentinel) \ |
| V(HashTableMap) \ |
| V(OrderedHashTableMap) \ |
| V(EmptyFixedArray) \ |
| V(EmptyByteArray) \ |
| V(EmptyDescriptorArray) \ |
| V(ArgumentsMarker) \ |
| V(SymbolMap) \ |
| V(SloppyArgumentsElementsMap) \ |
| V(FunctionContextMap) \ |
| V(CatchContextMap) \ |
| V(WithContextMap) \ |
| V(BlockContextMap) \ |
| V(ModuleContextMap) \ |
| V(ScriptContextMap) \ |
| V(UndefinedMap) \ |
| V(TheHoleMap) \ |
| V(NullMap) \ |
| V(BooleanMap) \ |
| V(UninitializedMap) \ |
| V(ArgumentsMarkerMap) \ |
| V(JSMessageObjectMap) \ |
| V(ForeignMap) \ |
| V(NeanderMap) \ |
| V(NanValue) \ |
| V(InfinityValue) \ |
| V(MinusZeroValue) \ |
| V(MinusInfinityValue) \ |
| V(EmptyWeakCell) \ |
| V(empty_string) \ |
| PRIVATE_SYMBOL_LIST(V) |
| |
| // Forward declarations. |
| class AllocationObserver; |
| class ArrayBufferTracker; |
| class GCIdleTimeAction; |
| class GCIdleTimeHandler; |
| class GCIdleTimeHeapState; |
| class GCTracer; |
| class HeapObjectsFilter; |
| class HeapStats; |
| class HistogramTimer; |
| class Isolate; |
| class MemoryReducer; |
| class ObjectStats; |
| class Scavenger; |
| class ScavengeJob; |
| class WeakObjectRetainer; |
| |
| enum PromotionMode { PROMOTE_MARKED, DEFAULT_PROMOTION }; |
| |
| typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to); |
| |
| // A queue of objects promoted during scavenge. Each object is accompanied |
| // by its size to avoid dereferencing a map pointer for scanning. |
| // The last page in to-space is used for the promotion queue. On conflict |
| // during scavenge, the promotion queue is allocated externally and all |
| // entries are copied to the external queue. |
| class PromotionQueue { |
| public: |
| explicit PromotionQueue(Heap* heap) |
| : front_(NULL), |
| rear_(NULL), |
| limit_(NULL), |
| emergency_stack_(0), |
| heap_(heap) {} |
| |
| void Initialize(); |
| |
| void Destroy() { |
| DCHECK(is_empty()); |
| delete emergency_stack_; |
| emergency_stack_ = NULL; |
| } |
| |
| Page* GetHeadPage() { |
| return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_)); |
| } |
| |
| void SetNewLimit(Address limit) { |
| // If we are already using an emergency stack, we can ignore it. |
| if (emergency_stack_) return; |
| |
| // If the limit is not on the same page, we can ignore it. |
| if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return; |
| |
| limit_ = reinterpret_cast<struct Entry*>(limit); |
| |
| if (limit_ <= rear_) { |
| return; |
| } |
| |
| RelocateQueueHead(); |
| } |
| |
| bool IsBelowPromotionQueue(Address to_space_top) { |
| // If an emergency stack is used, the to-space address cannot interfere |
| // with the promotion queue. |
| if (emergency_stack_) return true; |
| |
| // If the given to-space top pointer and the head of the promotion queue |
| // are not on the same page, then the to-space objects are below the |
| // promotion queue. |
| if (GetHeadPage() != Page::FromAddress(to_space_top)) { |
| return true; |
| } |
| // If the to-space top pointer is less than or equal to the promotion |
| // queue head, then the to-space objects are below the promotion queue. |
| return reinterpret_cast<struct Entry*>(to_space_top) <= rear_; |
| } |
| |
| bool is_empty() { |
| return (front_ == rear_) && |
| (emergency_stack_ == NULL || emergency_stack_->length() == 0); |
| } |
| |
| inline void insert(HeapObject* target, int32_t size, bool was_marked_black); |
| |
| void remove(HeapObject** target, int32_t* size, bool* was_marked_black) { |
| DCHECK(!is_empty()); |
| if (front_ == rear_) { |
| Entry e = emergency_stack_->RemoveLast(); |
| *target = e.obj_; |
| *size = e.size_; |
| *was_marked_black = e.was_marked_black_; |
| return; |
| } |
| |
| struct Entry* entry = reinterpret_cast<struct Entry*>(--front_); |
| *target = entry->obj_; |
| *size = entry->size_; |
| *was_marked_black = entry->was_marked_black_; |
| |
| // Assert no underflow. |
| SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_), |
| reinterpret_cast<Address>(front_)); |
| } |
| |
| private: |
| struct Entry { |
| Entry(HeapObject* obj, int32_t size, bool was_marked_black) |
| : obj_(obj), size_(size), was_marked_black_(was_marked_black) {} |
| |
| HeapObject* obj_; |
| int32_t size_ : 31; |
| bool was_marked_black_ : 1; |
| }; |
| |
| void RelocateQueueHead(); |
| |
| // The front of the queue is higher in the memory page chain than the rear. |
| struct Entry* front_; |
| struct Entry* rear_; |
| struct Entry* limit_; |
| |
| List<Entry>* emergency_stack_; |
| |
| Heap* heap_; |
| |
| DISALLOW_COPY_AND_ASSIGN(PromotionQueue); |
| }; |
| |
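| // Illustrative drain loop (simplified sketch; the real loop lives in the |
| // scavenger code, not here): |
| //   PromotionQueue* queue = heap->promotion_queue(); |
| //   while (!queue->is_empty()) { |
| //     HeapObject* target; |
| //     int32_t size; |
| //     bool was_marked_black; |
| //     queue->remove(&target, &size, &was_marked_black); |
| //     // ... visit the promoted object's pointers ... |
| //   } |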
| |
| enum ArrayStorageAllocationMode { |
| DONT_INITIALIZE_ARRAY_ELEMENTS, |
| INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE |
| }; |
| |
| enum class ClearRecordedSlots { kYes, kNo }; |
| |
| class Heap { |
| public: |
| // Declare all the root indices. This defines the root list order. |
| enum RootListIndex { |
| #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, |
| STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION) |
| #undef ROOT_INDEX_DECLARATION |
| |
| #define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex, |
| INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION) |
| #undef STRING_INDEX_DECLARATION |
| |
| #define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex, |
| PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) |
| #undef SYMBOL_INDEX_DECLARATION |
| |
| #define SYMBOL_INDEX_DECLARATION(name, description) k##name##RootIndex, |
| PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) |
| WELL_KNOWN_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) |
| #undef SYMBOL_INDEX_DECLARATION |
| |
| // Utility type maps |
| #define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex, |
| STRUCT_LIST(DECLARE_STRUCT_MAP) |
| #undef DECLARE_STRUCT_MAP |
| kStringTableRootIndex, |
| |
| #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, |
| SMI_ROOT_LIST(ROOT_INDEX_DECLARATION) |
| #undef ROOT_INDEX_DECLARATION |
| kRootListLength, |
| kStrongRootListLength = kStringTableRootIndex, |
| kSmiRootsStart = kStringTableRootIndex + 1 |
| }; |
| |
| enum FindMementoMode { kForRuntime, kForGC }; |
| |
| enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; |
| |
| // Indicates whether live bytes adjustment is triggered |
| // - from within GC code before sweeping has started (SEQUENTIAL_TO_SWEEPER), |
| // - or from GC or mutator code while the sweeper is running concurrently |
| //   (CONCURRENT_TO_SWEEPER). |
| enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER }; |
| |
| enum UpdateAllocationSiteMode { kGlobal, kCached }; |
| |
| // Taking this lock prevents the GC from entering a phase that relocates |
| // object references. |
| class RelocationLock { |
| public: |
| explicit RelocationLock(Heap* heap) : heap_(heap) { |
| heap_->relocation_mutex_.Lock(); |
| } |
| |
| ~RelocationLock() { heap_->relocation_mutex_.Unlock(); } |
| |
| private: |
| Heap* heap_; |
| }; |
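| // Illustrative usage (hypothetical, not an actual call site): |
| //   { Heap::RelocationLock lock(heap); /* read raw object addresses */ } |
| // While the lock is held, the GC cannot enter a relocating phase. |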
| |
| // Support for partial snapshots. After calling this we have a linear |
| // space to write objects in each space. |
| struct Chunk { |
| uint32_t size; |
| Address start; |
| Address end; |
| }; |
| typedef List<Chunk> Reservation; |
| |
| static const intptr_t kMinimumOldGenerationAllocationLimit = |
| 8 * (Page::kPageSize > MB ? Page::kPageSize : MB); |
| |
| static const int kInitalOldGenerationLimitFactor = 2; |
| |
| #if V8_OS_ANDROID |
| // Don't apply pointer multiplier on Android since it has no swap space and |
| // should instead adapt its heap size based on available physical memory. |
| static const int kPointerMultiplier = 1; |
| #else |
| static const int kPointerMultiplier = i::kPointerSize / 4; |
| #endif |
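| // On 64-bit targets (other than Android) kPointerSize is 8, so |
| // kPointerMultiplier is 2 and the size constants below are doubled relative |
| // to 32-bit targets. |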
| |
| // The new space size has to be a power of 2. Sizes are in MB. |
| static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier; |
| static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier; |
| static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier; |
| static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier; |
| |
| // The old space size has to be a multiple of Page::kPageSize. |
| // Sizes are in MB. |
| static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier; |
| static const int kMaxOldSpaceSizeMediumMemoryDevice = |
| 256 * kPointerMultiplier; |
| static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier; |
| static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier; |
| |
| // The executable size has to be a multiple of Page::kPageSize. |
| // Sizes are in MB. |
| static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier; |
| static const int kMaxExecutableSizeMediumMemoryDevice = |
| 192 * kPointerMultiplier; |
| static const int kMaxExecutableSizeHighMemoryDevice = |
| 256 * kPointerMultiplier; |
| static const int kMaxExecutableSizeHugeMemoryDevice = |
| 256 * kPointerMultiplier; |
| |
| static const int kTraceRingBufferSize = 512; |
| static const int kStacktraceBufferSize = 512; |
| |
| static const double kMinHeapGrowingFactor; |
| static const double kMaxHeapGrowingFactor; |
| static const double kMaxHeapGrowingFactorMemoryConstrained; |
| static const double kMaxHeapGrowingFactorIdle; |
| static const double kTargetMutatorUtilization; |
| |
| static const int kNoGCFlags = 0; |
| static const int kReduceMemoryFootprintMask = 1; |
| static const int kAbortIncrementalMarkingMask = 2; |
| static const int kFinalizeIncrementalMarkingMask = 4; |
| |
| // Making the heap iterable requires us to abort incremental marking. |
| static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask; |
| |
| // The roots that have an index less than this are always in old space. |
| static const int kOldSpaceRoots = 0x20; |
| |
| // The minimum size of a HeapObject on the heap. |
| static const int kMinObjectSizeInWords = 2; |
| |
| STATIC_ASSERT(kUndefinedValueRootIndex == |
| Internals::kUndefinedValueRootIndex); |
| STATIC_ASSERT(kTheHoleValueRootIndex == Internals::kTheHoleValueRootIndex); |
| STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex); |
| STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex); |
| STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex); |
| STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex); |
| |
| // Calculates the maximum amount of filler that could be required by the |
| // given alignment. |
| static int GetMaximumFillToAlign(AllocationAlignment alignment); |
| // Calculates the actual amount of filler required for a given address at the |
| // given alignment. |
| static int GetFillToAlign(Address address, AllocationAlignment alignment); |
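| // Example: for kDoubleAligned allocations on a 32-bit heap the worst case is |
| // kPointerSize bytes of filler; GetMaximumFillToAlign() returns this worst |
| // case, GetFillToAlign() the exact amount needed for a concrete address. |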
| |
| template <typename T> |
| static inline bool IsOneByte(T t, int chars); |
| |
| static void FatalProcessOutOfMemory(const char* location, |
| bool is_heap_oom = false); |
| |
| static bool RootIsImmortalImmovable(int root_index); |
| |
| // Checks whether the space is valid. |
| static bool IsValidAllocationSpace(AllocationSpace space); |
| |
| // Generated code can embed direct references to non-writable roots if |
| // they are in new space. |
| static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index); |
| |
| // Zapping is needed for heap verification and is always done in debug builds. |
| static inline bool ShouldZapGarbage() { |
| #ifdef DEBUG |
| return true; |
| #else |
| #ifdef VERIFY_HEAP |
| return FLAG_verify_heap; |
| #else |
| return false; |
| #endif |
| #endif |
| } |
| |
| static double HeapGrowingFactor(double gc_speed, double mutator_speed); |
| |
| // Copy block of memory from src to dst. Size of block should be aligned |
| // by pointer size. |
| static inline void CopyBlock(Address dst, Address src, int byte_size); |
| |
| // Determines a static visitor id based on the given {map} that can then be |
| // stored on the map to facilitate fast dispatch for {StaticVisitorBase}. |
| static int GetStaticVisitorIdForMap(Map* map); |
| |
| // We cannot avoid stale handles to left-trimmed objects, but can only make |
| // sure all handles still needed are updated. Filter out a stale pointer |
| // and clear the slot to allow post processing of handles (needed because |
| // the sweeper might actually free the underlying page). |
| inline bool PurgeLeftTrimmedObject(Object** object); |
| |
| // Notifies the heap that it is ok to start marking or other activities that |
| // should not happen during deserialization. |
| void NotifyDeserializationComplete(); |
| |
| intptr_t old_generation_allocation_limit() const { |
| return old_generation_allocation_limit_; |
| } |
| |
| bool always_allocate() { return always_allocate_scope_count_.Value() != 0; } |
| |
| Address* NewSpaceAllocationTopAddress() { |
| return new_space_.allocation_top_address(); |
| } |
| Address* NewSpaceAllocationLimitAddress() { |
| return new_space_.allocation_limit_address(); |
| } |
| |
| Address* OldSpaceAllocationTopAddress() { |
| return old_space_->allocation_top_address(); |
| } |
| Address* OldSpaceAllocationLimitAddress() { |
| return old_space_->allocation_limit_address(); |
| } |
| |
| bool CanExpandOldGeneration(int size) { |
| if (force_oom_) return false; |
| return (OldGenerationCapacity() + size) < MaxOldGenerationSize(); |
| } |
| |
| // Clear the Instanceof cache (used when a prototype changes). |
| inline void ClearInstanceofCache(); |
| |
| // FreeSpace objects have a null map after deserialization. Update the map. |
| void RepairFreeListsAfterDeserialization(); |
| |
| // Move len elements within a given array from src_index to dst_index. |
| void MoveElements(FixedArray* array, int dst_index, int src_index, int len); |
| |
| // Initialize a filler object to keep the ability to iterate over the heap |
| // when introducing gaps within pages. If slots could have been recorded in |
| // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise, |
| // pass ClearRecordedSlots::kNo. |
| void CreateFillerObjectAt(Address addr, int size, ClearRecordedSlots mode); |
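| // Illustrative use (hypothetical call, simplified): when an array is |
| // right-trimmed, the freed tail is turned into a filler so the heap stays |
| // iterable, conceptually |
| //   CreateFillerObjectAt(new_end, bytes_removed, ClearRecordedSlots::kYes); |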
| |
| bool CanMoveObjectStart(HeapObject* object); |
| |
| // Maintain consistency of live bytes during incremental marking. |
| void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode); |
| |
| // Trim the given array from the left. Note that this relocates the object |
| // start and hence is only valid if there is only a single reference to it. |
| FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim); |
| |
| // Trim the given array from the right. |
| template<Heap::InvocationMode mode> |
| void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim); |
| |
| // Converts the given boolean condition to a JavaScript boolean value. |
| inline Oddball* ToBoolean(bool condition); |
| |
| // Check whether the heap is currently iterable. |
| bool IsHeapIterable(); |
| |
| // Notify the heap that a context has been disposed. |
| int NotifyContextDisposed(bool dependant_context); |
| |
| void set_native_contexts_list(Object* object) { |
| native_contexts_list_ = object; |
| } |
| Object* native_contexts_list() const { return native_contexts_list_; } |
| |
| void set_allocation_sites_list(Object* object) { |
| allocation_sites_list_ = object; |
| } |
| Object* allocation_sites_list() { return allocation_sites_list_; } |
| |
| // Used in CreateAllocationSiteStub and the (de)serializer. |
| Object** allocation_sites_list_address() { return &allocation_sites_list_; } |
| |
| void set_encountered_weak_collections(Object* weak_collection) { |
| encountered_weak_collections_ = weak_collection; |
| } |
| Object* encountered_weak_collections() const { |
| return encountered_weak_collections_; |
| } |
| |
| void set_encountered_weak_cells(Object* weak_cell) { |
| encountered_weak_cells_ = weak_cell; |
| } |
| Object* encountered_weak_cells() const { return encountered_weak_cells_; } |
| |
| void set_encountered_transition_arrays(Object* transition_array) { |
| encountered_transition_arrays_ = transition_array; |
| } |
| Object* encountered_transition_arrays() const { |
| return encountered_transition_arrays_; |
| } |
| |
| // Number of mark-sweeps. |
| int ms_count() const { return ms_count_; } |
| |
| // Checks whether the given object is allowed to be migrated from its |
| // current space into the given destination space. Used for debugging. |
| inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest); |
| |
| void CheckHandleCount(); |
| |
| // Number of "runtime allocations" done so far. |
| uint32_t allocations_count() { return allocations_count_; } |
| |
| // Print short heap statistics. |
| void PrintShortHeapStatistics(); |
| |
| inline HeapState gc_state() { return gc_state_; } |
| |
| inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; } |
| |
| // If an object has an AllocationMemento trailing it, return it; otherwise |
| // return NULL. |
| template <FindMementoMode mode> |
| inline AllocationMemento* FindAllocationMemento(HeapObject* object); |
| |
| // Returns false if not able to reserve. |
| bool ReserveSpace(Reservation* reservations); |
| |
| void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer); |
| |
| bool UsingEmbedderHeapTracer(); |
| |
| void TracePossibleWrapper(JSObject* js_object); |
| |
| void RegisterExternallyReferencedObject(Object** object); |
| |
| // |
| // Support for the API. |
| // |
| |
| void CreateApiObjects(); |
| |
| // Implements the corresponding V8 API function. |
| bool IdleNotification(double deadline_in_seconds); |
| bool IdleNotification(int idle_time_in_ms); |
| |
| void MemoryPressureNotification(MemoryPressureLevel level, |
| bool is_isolate_locked); |
| void CheckMemoryPressure(); |
| |
| double MonotonicallyIncreasingTimeInMs(); |
| |
| void RecordStats(HeapStats* stats, bool take_snapshot = false); |
| |
| // Check the new space expansion criteria and expand semispaces if they are met. |
| void CheckNewSpaceExpansionCriteria(); |
| |
| inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) { |
| if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true; |
| |
| intptr_t adjusted_allocation_limit = limit - new_space_.Capacity(); |
| |
| if (PromotedTotalSize() >= adjusted_allocation_limit) return true; |
| |
| if (HighMemoryPressure()) return true; |
| |
| return false; |
| } |
| |
| void VisitExternalResources(v8::ExternalResourceVisitor* visitor); |
| |
| // An object should be promoted if the object has survived a |
| // scavenge operation. |
| template <PromotionMode promotion_mode> |
| inline bool ShouldBePromoted(Address old_address, int object_size); |
| |
| inline PromotionMode CurrentPromotionMode(); |
| |
| void ClearNormalizedMapCaches(); |
| |
| void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature); |
| |
| inline bool OldGenerationAllocationLimitReached(); |
| |
| // Completely clear the Instanceof cache (to stop it keeping objects alive |
| // around a GC). |
| inline void CompletelyClearInstanceofCache(); |
| |
| inline uint32_t HashSeed(); |
| |
| inline int NextScriptId(); |
| |
| inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset); |
| inline void SetConstructStubDeoptPCOffset(int pc_offset); |
| inline void SetGetterStubDeoptPCOffset(int pc_offset); |
| inline void SetSetterStubDeoptPCOffset(int pc_offset); |
| inline void SetInterpreterEntryReturnPCOffset(int pc_offset); |
| inline int GetNextTemplateSerialNumber(); |
| |
| inline void SetSerializedTemplates(FixedArray* templates); |
| |
| // For post mortem debugging. |
| void RememberUnmappedPage(Address page, bool compacted); |
| |
| // Global inline caching age: it is incremented on some GCs after context |
| // disposal. We use it to flush inline caches. |
| int global_ic_age() { return global_ic_age_; } |
| |
| void AgeInlineCaches() { |
| global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax; |
| } |
| |
| int64_t external_memory() { return external_memory_; } |
| void update_external_memory(int64_t delta) { external_memory_ += delta; } |
| |
| void update_external_memory_concurrently_freed(intptr_t freed) { |
| external_memory_concurrently_freed_.Increment(freed); |
| } |
| |
| void account_external_memory_concurrently_freed() { |
| external_memory_ -= external_memory_concurrently_freed_.Value(); |
| external_memory_concurrently_freed_.SetValue(0); |
| } |
| |
| void DeoptMarkedAllocationSites(); |
| |
| bool DeoptMaybeTenuredAllocationSites() { |
| return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0; |
| } |
| |
| void AddWeakObjectToCodeDependency(Handle<HeapObject> obj, |
| Handle<DependentCode> dep); |
| |
| DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj); |
| |
| void CompactWeakFixedArrays(); |
| |
| void AddRetainedMap(Handle<Map> map); |
| |
| // This event is triggered after successful allocation of a new object made |
| // by runtime. Allocations of target space for object evacuation do not |
| // trigger the event. In order to track ALL allocations one must turn off |
| // FLAG_inline_new and FLAG_use_allocation_folding. |
| inline void OnAllocationEvent(HeapObject* object, int size_in_bytes); |
| |
| // This event is triggered after an object is moved to a new place. |
| inline void OnMoveEvent(HeapObject* target, HeapObject* source, |
| int size_in_bytes); |
| |
| bool deserialization_complete() const { return deserialization_complete_; } |
| |
| bool HasLowAllocationRate(); |
| bool HasHighFragmentation(); |
| bool HasHighFragmentation(intptr_t used, intptr_t committed); |
| |
| void SetOptimizeForLatency() { optimize_for_memory_usage_ = false; } |
| void SetOptimizeForMemoryUsage(); |
| bool ShouldOptimizeForMemoryUsage() { |
| return optimize_for_memory_usage_ || HighMemoryPressure(); |
| } |
| bool HighMemoryPressure() { |
| return memory_pressure_level_.Value() != MemoryPressureLevel::kNone; |
| } |
| |
| // =========================================================================== |
| // Initialization. =========================================================== |
| // =========================================================================== |
| |
| // Configure heap size in MB before setup. Return false if the heap has been |
| // set up already. |
| bool ConfigureHeap(int max_semi_space_size, int max_old_space_size, |
| int max_executable_size, size_t code_range_size); |
| bool ConfigureHeapDefault(); |
| |
| // Prepares the heap, setting up memory areas that are needed in the isolate |
| // without actually creating any objects. |
| bool SetUp(); |
| |
| // Bootstraps the object heap with the core set of objects required to run. |
| // Returns whether it succeeded. |
| bool CreateHeapObjects(); |
| |
| // Destroys all memory allocated by the heap. |
| void TearDown(); |
| |
| // Returns whether SetUp has been called. |
| bool HasBeenSetUp(); |
| |
| // =========================================================================== |
| // Getters for spaces. ======================================================= |
| // =========================================================================== |
| |
| Address NewSpaceTop() { return new_space_.top(); } |
| |
| NewSpace* new_space() { return &new_space_; } |
| OldSpace* old_space() { return old_space_; } |
| OldSpace* code_space() { return code_space_; } |
| MapSpace* map_space() { return map_space_; } |
| LargeObjectSpace* lo_space() { return lo_space_; } |
| |
| PagedSpace* paged_space(int idx) { |
| switch (idx) { |
| case OLD_SPACE: |
| return old_space(); |
| case MAP_SPACE: |
| return map_space(); |
| case CODE_SPACE: |
| return code_space(); |
| case NEW_SPACE: |
| case LO_SPACE: |
| UNREACHABLE(); |
| } |
| return NULL; |
| } |
| |
| Space* space(int idx) { |
| switch (idx) { |
| case NEW_SPACE: |
| return new_space(); |
| case LO_SPACE: |
| return lo_space(); |
| default: |
| return paged_space(idx); |
| } |
| } |
| |
| // Returns name of the space. |
| const char* GetSpaceName(int idx); |
| |
| // =========================================================================== |
| // Getters to other components. ============================================== |
| // =========================================================================== |
| |
| GCTracer* tracer() { return tracer_; } |
| |
| MemoryAllocator* memory_allocator() { return memory_allocator_; } |
| |
| PromotionQueue* promotion_queue() { return &promotion_queue_; } |
| |
| inline Isolate* isolate(); |
| |
| MarkCompactCollector* mark_compact_collector() { |
| return mark_compact_collector_; |
| } |
| |
| // =========================================================================== |
| // Root set access. ========================================================== |
| // =========================================================================== |
| |
| // Heap root getters. |
| #define ROOT_ACCESSOR(type, name, camel_name) inline type* name(); |
| ROOT_LIST(ROOT_ACCESSOR) |
| #undef ROOT_ACCESSOR |
| |
| // Utility type maps. |
| #define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map(); |
| STRUCT_LIST(STRUCT_MAP_ACCESSOR) |
| #undef STRUCT_MAP_ACCESSOR |
| |
| #define STRING_ACCESSOR(name, str) inline String* name(); |
| INTERNALIZED_STRING_LIST(STRING_ACCESSOR) |
| #undef STRING_ACCESSOR |
| |
| #define SYMBOL_ACCESSOR(name) inline Symbol* name(); |
| PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR) |
| #undef SYMBOL_ACCESSOR |
| |
| #define SYMBOL_ACCESSOR(name, description) inline Symbol* name(); |
| PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR) |
| WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR) |
| #undef SYMBOL_ACCESSOR |
| |
| Object* root(RootListIndex index) { return roots_[index]; } |
| Handle<Object> root_handle(RootListIndex index) { |
| return Handle<Object>(&roots_[index]); |
| } |
| |
| // Generated code can embed this address to get access to the roots. |
| Object** roots_array_start() { return roots_; } |
| |
| // Sets the code_stubs root (only used when expanding the dictionary). |
| void SetRootCodeStubs(UnseededNumberDictionary* value) { |
| roots_[kCodeStubsRootIndex] = value; |
| } |
| |
| void SetRootMaterializedObjects(FixedArray* objects) { |
| roots_[kMaterializedObjectsRootIndex] = objects; |
| } |
| |
| void SetRootScriptList(Object* value) { |
| roots_[kScriptListRootIndex] = value; |
| } |
| |
| void SetRootStringTable(StringTable* value) { |
| roots_[kStringTableRootIndex] = value; |
| } |
| |
| void SetRootNoScriptSharedFunctionInfos(Object* value) { |
| roots_[kNoScriptSharedFunctionInfosRootIndex] = value; |
| } |
| |
| // Set the stack limit in the roots_ array. Some architectures generate |
| // code that looks here, because it is faster than loading from the static |
| // jslimit_/real_jslimit_ variable in the StackGuard. |
| void SetStackLimits(); |
| |
| // The stack limit is thread-dependent. To be able to reproduce the same |
| // snapshot blob, we need to reset it before serializing. |
| void ClearStackLimits(); |
| |
| // Generated code can treat direct references to this root as constant. |
| bool RootCanBeTreatedAsConstant(RootListIndex root_index); |
| |
| Map* MapForFixedTypedArray(ExternalArrayType array_type); |
| RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type); |
| |
| RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind); |
| FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map); |
| |
| void RegisterStrongRoots(Object** start, Object** end); |
| void UnregisterStrongRoots(Object** start); |
| |
| // =========================================================================== |
| // Inline allocation. ======================================================== |
| // =========================================================================== |
| |
| // Indicates whether inline bump-pointer allocation has been disabled. |
| bool inline_allocation_disabled() { return inline_allocation_disabled_; } |
| |
| // Switch whether inline bump-pointer allocation should be used. |
| void EnableInlineAllocation(); |
| void DisableInlineAllocation(); |
| |
| // =========================================================================== |
| // Methods triggering GCs. =================================================== |
| // =========================================================================== |
| |
| // Performs garbage collection operation. |
| // Returns whether there is a chance that another major GC could |
| // collect more garbage. |
| inline bool CollectGarbage( |
| AllocationSpace space, const char* gc_reason = NULL, |
| const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
| |
| // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is |
| // non-zero, then the slower precise sweeper is used, which leaves the heap |
| // in a state where we can iterate over the heap visiting all objects. |
| void CollectAllGarbage( |
| int flags = kFinalizeIncrementalMarkingMask, const char* gc_reason = NULL, |
| const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
| |
| // Last-resort GC; should try to collect as much garbage as possible. |
| void CollectAllAvailableGarbage(const char* gc_reason = NULL); |
| |
| // Reports an external memory pressure event; either performs a major GC or |
| // completes incremental marking in order to free external resources. |
| void ReportExternalMemoryPressure(const char* gc_reason = NULL); |
| |
| // Invoked when GC was requested via the stack guard. |
| void HandleGCRequest(); |
| |
| // =========================================================================== |
| // Iterators. ================================================================ |
| // =========================================================================== |
| |
| // Iterates over all roots in the heap. |
| void IterateRoots(ObjectVisitor* v, VisitMode mode); |
| // Iterates over all strong roots in the heap. |
| void IterateStrongRoots(ObjectVisitor* v, VisitMode mode); |
| // Iterates over entries in the smi roots list. Only interesting to the |
| // serializer/deserializer, since GC does not care about smis. |
| void IterateSmiRoots(ObjectVisitor* v); |
| // Iterates over all the other roots in the heap. |
| void IterateWeakRoots(ObjectVisitor* v, VisitMode mode); |
| |
| // Iterate pointers of promoted objects. |
| void IteratePromotedObject(HeapObject* target, int size, |
| bool was_marked_black, |
| ObjectSlotCallback callback); |
| |
| void IteratePromotedObjectPointers(HeapObject* object, Address start, |
| Address end, bool record_slots, |
| ObjectSlotCallback callback); |
| |
| // =========================================================================== |
| // Store buffer API. ========================================================= |
| // =========================================================================== |
| |
| // Write barrier support for object[offset] = o; |
| inline void RecordWrite(Object* object, int offset, Object* o); |
| inline void RecordFixedArrayElements(FixedArray* array, int offset, |
| int length); |
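| // Illustrative (conceptual, not an actual call site): after a store like |
| //   fixed_array->set(i, new_space_object); |
| // the write barrier records the slot, roughly |
| //   heap->RecordWrite(fixed_array, offset_of_element_i, new_space_object); |
| // so that old-to-new pointers are found by the next scavenge. |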
| |
| Address* store_buffer_top_address() { return store_buffer()->top_address(); } |
| |
| void ClearRecordedSlot(HeapObject* object, Object** slot); |
| void ClearRecordedSlotRange(Address start, Address end); |
| |
| // =========================================================================== |
| // Incremental marking API. ================================================== |
| // =========================================================================== |
| |
| // Start incremental marking and ensure that idle time handler can perform |
| // incremental steps. |
| void StartIdleIncrementalMarking(); |
| |
| // Starts incremental marking assuming incremental marking is currently |
| // stopped. |
| void StartIncrementalMarking(int gc_flags = kNoGCFlags, |
| const GCCallbackFlags gc_callback_flags = |
| GCCallbackFlags::kNoGCCallbackFlags, |
| const char* reason = nullptr); |
| |
| void FinalizeIncrementalMarkingIfComplete(const char* comment); |
| |
| bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms); |
| |
| void RegisterReservationsForBlackAllocation(Reservation* reservations); |
| |
| IncrementalMarking* incremental_marking() { return incremental_marking_; } |
| |
| // =========================================================================== |
| // External string table API. ================================================ |
| // =========================================================================== |
| |
| // Registers an external string. |
| inline void RegisterExternalString(String* string); |
| |
| // Finalizes an external string by deleting the associated external |
| // data and clearing the resource pointer. |
| inline void FinalizeExternalString(String* string); |
| |
| // =========================================================================== |
| // Methods checking/returning the space of a given object/address. =========== |
| // =========================================================================== |
| |
| // Returns whether the object resides in new space. |
| inline bool InNewSpace(Object* object); |
| inline bool InFromSpace(Object* object); |
| inline bool InToSpace(Object* object); |
| |
| // Returns whether the object resides in old space. |
| inline bool InOldSpace(Object* object); |
| |
| // Checks whether an address/object is in the heap (including the auxiliary |
| // area and unused area). |
| bool Contains(HeapObject* value); |
| |
| // Checks whether an address/object is in a space. |
| // Currently used by tests, serialization and heap verification only. |
| bool InSpace(HeapObject* value, AllocationSpace space); |
| |
| // Slow methods that can be used for verification as they can also be used |
| // with off-heap Addresses. |
| bool ContainsSlow(Address addr); |
| bool InSpaceSlow(Address addr, AllocationSpace space); |
| inline bool InNewSpaceSlow(Address address); |
| inline bool InOldSpaceSlow(Address address); |
| |
| // =========================================================================== |
| // Object statistics tracking. =============================================== |
| // =========================================================================== |
| |
| // Returns the number of buckets used by object statistics tracking during a |
| // major GC. Note, however, that the following methods fail gracefully when |
| // the bounds are exceeded. |
| size_t NumberOfTrackedHeapObjectTypes(); |
| |
| // Returns object statistics about count and size at the last major GC. |
| // Objects are being grouped into buckets that roughly resemble existing |
| // instance types. |
| size_t ObjectCountAtLastGC(size_t index); |
| size_t ObjectSizeAtLastGC(size_t index); |
| |
| // Retrieves names of buckets used by object statistics tracking. |
| bool GetObjectTypeName(size_t index, const char** object_type, |
| const char** object_sub_type); |
| |
| // =========================================================================== |
| // Code statistics. ========================================================== |
| // =========================================================================== |
| |
| // Collect code (Code and BytecodeArray objects) statistics. |
| void CollectCodeStatistics(); |
| |
| // =========================================================================== |
| // GC statistics. ============================================================ |
| // =========================================================================== |
| |
| // Returns the maximum amount of memory reserved for the heap. |
| intptr_t MaxReserved() { |
| return 2 * max_semi_space_size_ + max_old_generation_size_; |
| } |
| int MaxSemiSpaceSize() { return max_semi_space_size_; } |
| int InitialSemiSpaceSize() { return initial_semispace_size_; } |
| intptr_t MaxOldGenerationSize() { return max_old_generation_size_; } |
| intptr_t MaxExecutableSize() { return max_executable_size_; } |
| |
| // Returns the capacity of the heap in bytes w/o growing. Heap grows when |
| // more spaces are needed until it reaches the limit. |
| intptr_t Capacity(); |
| |
| // Returns the capacity of the old generation. |
| intptr_t OldGenerationCapacity(); |
| |
| // Returns the amount of memory currently committed for the heap. |
| intptr_t CommittedMemory(); |
| |
| // Returns the amount of memory currently committed for the old space. |
| intptr_t CommittedOldGenerationMemory(); |
| |
| // Returns the amount of executable memory currently committed for the heap. |
| intptr_t CommittedMemoryExecutable(); |
| |
| // Returns the amount of physical memory currently committed for the heap. |
| size_t CommittedPhysicalMemory(); |
| |
| // Returns the maximum amount of memory ever committed for the heap. |
| intptr_t MaximumCommittedMemory() { return maximum_committed_; } |
| |
| // Updates the maximum committed memory for the heap. Should be called |
| // whenever a space grows. |
| void UpdateMaximumCommitted(); |
| |
| // Returns the available bytes in space w/o growing. |
| // Heap doesn't guarantee that it can allocate an object that requires |
| // all available bytes. Check MaxHeapObjectSize() instead. |
| intptr_t Available(); |
| |
| // Returns the size of all objects residing in the heap. |
| intptr_t SizeOfObjects(); |
| |
| void UpdateSurvivalStatistics(int start_new_space_size); |
| |
| inline void IncrementPromotedObjectsSize(intptr_t object_size) { |
| DCHECK_GE(object_size, 0); |
| promoted_objects_size_ += object_size; |
| } |
| inline intptr_t promoted_objects_size() { return promoted_objects_size_; } |
| |
| inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) { |
| DCHECK_GE(object_size, 0); |
| semi_space_copied_object_size_ += object_size; |
| } |
| inline intptr_t semi_space_copied_object_size() { |
| return semi_space_copied_object_size_; |
| } |
| |
| inline intptr_t SurvivedNewSpaceObjectSize() { |
| return promoted_objects_size_ + semi_space_copied_object_size_; |
| } |
| |
| inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; } |
| |
| inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; } |
| |
| inline void IncrementNodesPromoted() { nodes_promoted_++; } |
| |
| inline void IncrementYoungSurvivorsCounter(intptr_t survived) { |
| DCHECK_GE(survived, 0); |
| survived_last_scavenge_ = survived; |
| survived_since_last_expansion_ += survived; |
| } |
| |
| inline intptr_t PromotedTotalSize() { |
| int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize(); |
| if (total > std::numeric_limits<intptr_t>::max()) { |
| // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations. |
| return std::numeric_limits<intptr_t>::max(); |
| } |
| if (total < 0) return 0; |
| return static_cast<intptr_t>(total); |
| } |
| |
| void UpdateNewSpaceAllocationCounter() { |
| new_space_allocation_counter_ = NewSpaceAllocationCounter(); |
| } |
| |
| size_t NewSpaceAllocationCounter() { |
| return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC(); |
| } |
| |
| // This should be used only for testing. |
| void set_new_space_allocation_counter(size_t new_value) { |
| new_space_allocation_counter_ = new_value; |
| } |
| |
| void UpdateOldGenerationAllocationCounter() { |
| old_generation_allocation_counter_ = OldGenerationAllocationCounter(); |
| } |
| |
| size_t OldGenerationAllocationCounter() { |
| return old_generation_allocation_counter_ + PromotedSinceLastGC(); |
| } |
| |
| // This should be used only for testing. |
| void set_old_generation_allocation_counter(size_t new_value) { |
| old_generation_allocation_counter_ = new_value; |
| } |
| |
| size_t PromotedSinceLastGC() { |
| return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_; |
| } |
| |
| int gc_count() const { return gc_count_; } |
| |
| // Returns the size of objects residing in non new spaces. |
| intptr_t PromotedSpaceSizeOfObjects(); |
| |
| double total_regexp_code_generated() { return total_regexp_code_generated_; } |
| void IncreaseTotalRegexpCodeGenerated(int size) { |
| total_regexp_code_generated_ += size; |
| } |
| |
| void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) { |
| if (is_crankshafted) { |
| crankshaft_codegen_bytes_generated_ += size; |
| } else { |
| full_codegen_bytes_generated_ += size; |
| } |
| } |
| |
| // =========================================================================== |
| // Prologue/epilogue callback methods.======================================== |
| // =========================================================================== |
| |
| void AddGCPrologueCallback(v8::Isolate::GCCallback callback, |
| GCType gc_type_filter, bool pass_isolate = true); |
| void RemoveGCPrologueCallback(v8::Isolate::GCCallback callback); |
| |
| void AddGCEpilogueCallback(v8::Isolate::GCCallback callback, |
| GCType gc_type_filter, bool pass_isolate = true); |
| void RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback); |
| |
| void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags); |
| void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags); |
| |
| // =========================================================================== |
| // Allocation methods. ======================================================= |
| // =========================================================================== |
| |
| // Creates a filler object and returns a heap object immediately after it. |
| MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object, |
| int filler_size); |
| |
| // Creates a filler object if needed for alignment and returns a heap object |
| // immediately after it. If any space is left after the returned object, |
| // another filler object is created so the over allocated memory is iterable. |
| MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object, |
| int object_size, |
| int allocation_size, |
| AllocationAlignment alignment); |
| |
| // =========================================================================== |
| // ArrayBuffer tracking. ===================================================== |
| // =========================================================================== |
| |
| void RegisterNewArrayBuffer(JSArrayBuffer* buffer); |
| void UnregisterArrayBuffer(JSArrayBuffer* buffer); |
| |
| // =========================================================================== |
| // Allocation site tracking. ================================================= |
| // =========================================================================== |
| |
| // Updates the AllocationSite of a given {object}. If the global pretenuring |
| // storage is passed as {pretenuring_feedback}, the memento found count on |
| // the corresponding allocation site is immediately updated and an entry |
| // in the hash map is created. Otherwise the entry (including the count |
| // value) is cached on the local pretenuring feedback. |
| template <UpdateAllocationSiteMode mode> |
| inline void UpdateAllocationSite(HeapObject* object, |
| base::HashMap* pretenuring_feedback); |
| |
| // Removes an entry from the global pretenuring storage. |
| inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site); |
| |
| // Merges local pretenuring feedback into the global one. Note that this |
| // method needs to be called after evacuation, as allocation sites may be |
| // evacuated and this method resolves forward pointers accordingly. |
| void MergeAllocationSitePretenuringFeedback( |
| const base::HashMap& local_pretenuring_feedback); |
| |
| // ============================================================================= |
| |
| #ifdef VERIFY_HEAP |
| // Verify the heap is in its normal state before or after a GC. |
| void Verify(); |
| #endif |
| |
| #ifdef DEBUG |
| void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; } |
| |
| void TracePathToObjectFrom(Object* target, Object* root); |
| void TracePathToObject(Object* target); |
| void TracePathToGlobal(); |
| |
| void Print(); |
| void PrintHandles(); |
| |
| // Report heap statistics. |
| void ReportHeapStatistics(const char* title); |
| void ReportCodeStatistics(const char* title); |
| #endif |
| |
| private: |
| class PretenuringScope; |
| |
| // External strings table is a place where all external strings are |
| // registered. We need to keep track of such strings to properly |
| // finalize them. |
| class ExternalStringTable { |
| public: |
| // Registers an external string. |
| inline void AddString(String* string); |
| |
| inline void Iterate(ObjectVisitor* v); |
| |
| // Restores internal invariant and gets rid of collected strings. |
| // Must be called after each Iterate() that modified the strings. |
| void CleanUp(); |
| |
| // Destroys all allocated memory. |
| void TearDown(); |
| |
| private: |
| explicit ExternalStringTable(Heap* heap) : heap_(heap) {} |
| |
| inline void Verify(); |
| |
| inline void AddOldString(String* string); |
| |
| // Notifies the table that only a prefix of the new list is valid. |
| inline void ShrinkNewStrings(int position); |
| |
// To speed up scavenge collections, new space strings are kept
// separate from old space strings.
| List<Object*> new_space_strings_; |
| List<Object*> old_space_strings_; |
| |
| Heap* heap_; |
| |
| friend class Heap; |
| |
| DISALLOW_COPY_AND_ASSIGN(ExternalStringTable); |
| }; |
| |
| struct StrongRootsList; |
| |
| struct StringTypeTable { |
| InstanceType type; |
| int size; |
| RootListIndex index; |
| }; |
| |
| struct ConstantStringTable { |
| const char* contents; |
| RootListIndex index; |
| }; |
| |
| struct StructTable { |
| InstanceType type; |
| int size; |
| RootListIndex index; |
| }; |
| |
| struct GCCallbackPair { |
| GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type, |
| bool pass_isolate) |
| : callback(callback), gc_type(gc_type), pass_isolate(pass_isolate) {} |
| |
| bool operator==(const GCCallbackPair& other) const { |
| return other.callback == callback; |
| } |
| |
| v8::Isolate::GCCallback callback; |
| GCType gc_type; |
| bool pass_isolate; |
| }; |
| |
| typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap, |
| Object** pointer); |
| |
| static const int kInitialStringTableSize = 2048; |
| static const int kInitialEvalCacheSize = 64; |
| static const int kInitialNumberStringCacheSize = 256; |
| |
| static const int kRememberedUnmappedPages = 128; |
| |
| static const StringTypeTable string_type_table[]; |
| static const ConstantStringTable constant_string_table[]; |
| static const StructTable struct_table[]; |
| |
| static const int kYoungSurvivalRateHighThreshold = 90; |
| static const int kYoungSurvivalRateAllowedDeviation = 15; |
| static const int kOldSurvivalRateLowThreshold = 10; |
| |
| static const int kMaxMarkCompactsInIdleRound = 7; |
| static const int kIdleScavengeThreshold = 5; |
| |
| static const int kInitialFeedbackCapacity = 256; |
| |
| Heap(); |
| |
| static String* UpdateNewSpaceReferenceInExternalStringTableEntry( |
| Heap* heap, Object** pointer); |
| |
| // Selects the proper allocation space based on the pretenuring decision. |
| static AllocationSpace SelectSpace(PretenureFlag pretenure) { |
| return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE; |
| } |
| |
| #define ROOT_ACCESSOR(type, name, camel_name) \ |
| inline void set_##name(type* value); |
| ROOT_LIST(ROOT_ACCESSOR) |
| #undef ROOT_ACCESSOR |
| |
| StoreBuffer* store_buffer() { return &store_buffer_; } |
| |
| void set_current_gc_flags(int flags) { |
| current_gc_flags_ = flags; |
| DCHECK(!ShouldFinalizeIncrementalMarking() || |
| !ShouldAbortIncrementalMarking()); |
| } |
| |
| inline bool ShouldReduceMemory() const { |
| return current_gc_flags_ & kReduceMemoryFootprintMask; |
| } |
| |
| inline bool ShouldAbortIncrementalMarking() const { |
| return current_gc_flags_ & kAbortIncrementalMarkingMask; |
| } |
| |
| inline bool ShouldFinalizeIncrementalMarking() const { |
| return current_gc_flags_ & kFinalizeIncrementalMarkingMask; |
| } |
| |
| void PreprocessStackTraces(); |
| |
| // Checks whether a global GC is necessary |
| GarbageCollector SelectGarbageCollector(AllocationSpace space, |
| const char** reason); |
| |
// Make sure there is a filler value behind the top of the new space
// so that the GC does not confuse some uninitialized/stale memory
// with the allocation memento of the object at the top.
| void EnsureFillerObjectAtTop(); |
| |
| // Ensure that we have swept all spaces in such a way that we can iterate |
| // over all objects. May cause a GC. |
| void MakeHeapIterable(); |
| |
// Performs a garbage collection operation.
| // Returns whether there is a chance that another major GC could |
| // collect more garbage. |
| bool CollectGarbage( |
| GarbageCollector collector, const char* gc_reason, |
| const char* collector_reason, |
| const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
| |
| // Performs garbage collection |
| // Returns whether there is a chance another major GC could |
| // collect more garbage. |
| bool PerformGarbageCollection( |
| GarbageCollector collector, |
| const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
| |
| inline void UpdateOldSpaceLimits(); |
| |
| // Initializes a JSObject based on its map. |
| void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties, |
| Map* map); |
| |
| // Initializes JSObject body starting at given offset. |
| void InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset); |
| |
| void InitializeAllocationMemento(AllocationMemento* memento, |
| AllocationSite* allocation_site); |
| |
| bool CreateInitialMaps(); |
| void CreateInitialObjects(); |
| |
// These two Create*EntryStub functions are here and forced to not be inlined
// because of a gcc-4.4 bug that assigns wrong vtable entries.
| NO_INLINE(void CreateJSEntryStub()); |
| NO_INLINE(void CreateJSConstructEntryStub()); |
| |
| void CreateFixedStubs(); |
| |
| HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size); |
| |
| // Commits from space if it is uncommitted. |
| void EnsureFromSpaceIsCommitted(); |
| |
| // Uncommit unused semi space. |
| bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); } |
| |
| // Fill in bogus values in from space |
| void ZapFromSpace(); |
| |
// Deopts all code that contains allocation instructions which are tenured or
// not tenured. Moreover, it clears the pretenuring allocation site statistics.
| void ResetAllAllocationSitesDependentCode(PretenureFlag flag); |
| |
| // Evaluates local pretenuring for the old space and calls |
| // ResetAllTenuredAllocationSitesDependentCode if too many objects died in |
| // the old space. |
| void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc); |
| |
| // Record statistics before and after garbage collection. |
| void ReportStatisticsBeforeGC(); |
| void ReportStatisticsAfterGC(); |
| |
// Computes the length of the full-sized number string cache.
| int FullSizeNumberStringCacheLength(); |
| // Flush the number to string cache. |
| void FlushNumberStringCache(); |
| |
| // TODO(hpayer): Allocation site pretenuring may make this method obsolete. |
| // Re-visit incremental marking heuristics. |
| bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; } |
| |
| void ConfigureInitialOldGenerationSize(); |
| |
| bool HasLowYoungGenerationAllocationRate(); |
| bool HasLowOldGenerationAllocationRate(); |
| double YoungGenerationMutatorUtilization(); |
| double OldGenerationMutatorUtilization(); |
| |
| void ReduceNewSpaceSize(); |
| |
| bool TryFinalizeIdleIncrementalMarking( |
| double idle_time_in_ms, size_t size_of_objects, |
| size_t mark_compact_speed_in_bytes_per_ms); |
| |
| GCIdleTimeHeapState ComputeHeapState(); |
| |
| bool PerformIdleTimeAction(GCIdleTimeAction action, |
| GCIdleTimeHeapState heap_state, |
| double deadline_in_ms); |
| |
| void IdleNotificationEpilogue(GCIdleTimeAction action, |
| GCIdleTimeHeapState heap_state, double start_ms, |
| double deadline_in_ms); |
| |
| inline void UpdateAllocationsHash(HeapObject* object); |
| inline void UpdateAllocationsHash(uint32_t value); |
| void PrintAlloctionsHash(); |
| |
| void AddToRingBuffer(const char* string); |
| void GetFromRingBuffer(char* buffer); |
| |
| void CompactRetainedMaps(ArrayList* retained_maps); |
| |
| void CollectGarbageOnMemoryPressure(const char* source); |
| |
| // Attempt to over-approximate the weak closure by marking object groups and |
| // implicit references from global handles, but don't atomically complete |
| // marking. If we continue to mark incrementally, we might have marked |
| // objects that die later. |
| void FinalizeIncrementalMarking(const char* gc_reason); |
| |
| // Returns the timer used for a given GC type. |
| // - GCScavenger: young generation GC |
| // - GCCompactor: full GC |
// - GCFinalizeMC: finalization of incremental full GC
| // - GCFinalizeMCReduceMemory: finalization of incremental full GC with |
| // memory reduction |
| HistogramTimer* GCTypeTimer(GarbageCollector collector); |
| |
| // =========================================================================== |
| // Pretenuring. ============================================================== |
| // =========================================================================== |
| |
// Pretenuring decisions are made based on feedback collected during new space
// evacuation. Note that between feedback collection and calling this method
// objects in old space must not move.
| void ProcessPretenuringFeedback(); |
| |
| // =========================================================================== |
| // Actual GC. ================================================================ |
| // =========================================================================== |
| |
| // Code that should be run before and after each GC. Includes some |
| // reporting/verification activities when compiled with DEBUG set. |
| void GarbageCollectionPrologue(); |
| void GarbageCollectionEpilogue(); |
| |
| // Performs a major collection in the whole heap. |
| void MarkCompact(); |
| |
| // Code to be run before and after mark-compact. |
| void MarkCompactPrologue(); |
| void MarkCompactEpilogue(); |
| |
| // Performs a minor collection in new generation. |
| void Scavenge(); |
| |
| Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front, |
| PromotionMode promotion_mode); |
| |
| void UpdateNewSpaceReferencesInExternalStringTable( |
| ExternalStringTableUpdaterCallback updater_func); |
| |
| void UpdateReferencesInExternalStringTable( |
| ExternalStringTableUpdaterCallback updater_func); |
| |
| void ProcessAllWeakReferences(WeakObjectRetainer* retainer); |
| void ProcessYoungWeakReferences(WeakObjectRetainer* retainer); |
| void ProcessNativeContexts(WeakObjectRetainer* retainer); |
| void ProcessAllocationSites(WeakObjectRetainer* retainer); |
| void ProcessWeakListRoots(WeakObjectRetainer* retainer); |
| |
| // =========================================================================== |
| // GC statistics. ============================================================ |
| // =========================================================================== |
| |
| inline intptr_t OldGenerationSpaceAvailable() { |
| return old_generation_allocation_limit_ - PromotedTotalSize(); |
| } |
| |
| // Returns maximum GC pause. |
| double get_max_gc_pause() { return max_gc_pause_; } |
| |
| // Returns maximum size of objects alive after GC. |
| intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; } |
| |
| // Returns minimal interval between two subsequent collections. |
| double get_min_in_mutator() { return min_in_mutator_; } |
| |
| // Update GC statistics that are tracked on the Heap. |
| void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator, |
| double marking_time); |
| |
| bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; } |
| |
| // =========================================================================== |
| // Growing strategy. ========================================================= |
| // =========================================================================== |
| |
| // Decrease the allocation limit if the new limit based on the given |
| // parameters is lower than the current limit. |
| void DampenOldGenerationAllocationLimit(intptr_t old_gen_size, |
| double gc_speed, |
| double mutator_speed); |
| |
| |
| // Calculates the allocation limit based on a given growing factor and a |
| // given old generation size. |
| intptr_t CalculateOldGenerationAllocationLimit(double factor, |
| intptr_t old_gen_size); |
| |
| // Sets the allocation limit to trigger the next full garbage collection. |
| void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed, |
| double mutator_speed); |
| |
| // =========================================================================== |
| // Idle notification. ======================================================== |
| // =========================================================================== |
| |
| bool RecentIdleNotificationHappened(); |
| void ScheduleIdleScavengeIfNeeded(int bytes_allocated); |
| |
| // =========================================================================== |
| // HeapIterator helpers. ===================================================== |
| // =========================================================================== |
| |
| void heap_iterator_start() { heap_iterator_depth_++; } |
| |
| void heap_iterator_end() { heap_iterator_depth_--; } |
| |
| bool in_heap_iterator() { return heap_iterator_depth_ > 0; } |
| |
| // =========================================================================== |
| // Allocation methods. ======================================================= |
| // =========================================================================== |
| |
| // Returns a deep copy of the JavaScript object. |
| // Properties and elements are copied too. |
| // Optionally takes an AllocationSite to be appended in an AllocationMemento. |
| MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source, |
| AllocationSite* site = NULL); |
| |
| // Allocates a JS Map in the heap. |
| MUST_USE_RESULT AllocationResult |
| AllocateMap(InstanceType instance_type, int instance_size, |
| ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); |
| |
| // Allocates and initializes a new JavaScript object based on a |
| // constructor. |
| // If allocation_site is non-null, then a memento is emitted after the object |
| // that points to the site. |
| MUST_USE_RESULT AllocationResult AllocateJSObject( |
| JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED, |
| AllocationSite* allocation_site = NULL); |
| |
| // Allocates and initializes a new JavaScript object based on a map. |
| // Passing an allocation site means that a memento will be created that |
| // points to the site. |
| MUST_USE_RESULT AllocationResult |
| AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED, |
| AllocationSite* allocation_site = NULL); |
| |
| // Allocates a HeapNumber from value. |
| MUST_USE_RESULT AllocationResult |
| AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE, |
| PretenureFlag pretenure = NOT_TENURED); |
| |
| // Allocates SIMD values from the given lane values. |
| #define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \ |
| AllocationResult Allocate##Type(lane_type lanes[lane_count], \ |
| PretenureFlag pretenure = NOT_TENURED); |
| SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION) |
| #undef SIMD_ALLOCATE_DECLARATION |
| |
| // Allocates a byte array of the specified length |
| MUST_USE_RESULT AllocationResult |
| AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED); |
| |
| // Allocates a bytecode array with given contents. |
| MUST_USE_RESULT AllocationResult |
| AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size, |
| int parameter_count, FixedArray* constant_pool); |
| |
| MUST_USE_RESULT AllocationResult CopyCode(Code* code); |
| |
| MUST_USE_RESULT AllocationResult |
| CopyBytecodeArray(BytecodeArray* bytecode_array); |
| |
| // Allocates a fixed array initialized with undefined values |
| MUST_USE_RESULT AllocationResult |
| AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED); |
| |
| // Allocate an uninitialized object. The memory is non-executable if the |
| // hardware and OS allow. This is the single choke-point for allocations |
| // performed by the runtime and should not be bypassed (to extend this to |
| // inlined allocations, use the Heap::DisableInlineAllocation() support). |
| MUST_USE_RESULT inline AllocationResult AllocateRaw( |
| int size_in_bytes, AllocationSpace space, |
AllocationAlignment alignment = kWordAligned);
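
// Callers typically handle the result with AllocationResult::To(), retrying
// or bailing out on failure. A condensed sketch of the usual pattern:
//
//   HeapObject* result = nullptr;
//   AllocationResult allocation = AllocateRaw(size_in_bytes, OLD_SPACE);
//   if (!allocation.To(&result)) return allocation;  // Retry is up to caller.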
| |
| // Allocates a heap object based on the map. |
| MUST_USE_RESULT AllocationResult |
| Allocate(Map* map, AllocationSpace space, |
| AllocationSite* allocation_site = NULL); |
| |
| // Allocates a partial map for bootstrapping. |
| MUST_USE_RESULT AllocationResult |
| AllocatePartialMap(InstanceType instance_type, int instance_size); |
| |
| // Allocate a block of memory in the given space (filled with a filler). |
| // Used as a fall-back for generated code when the space is full. |
| MUST_USE_RESULT AllocationResult |
| AllocateFillerObject(int size, bool double_align, AllocationSpace space); |
| |
| // Allocate an uninitialized fixed array. |
| MUST_USE_RESULT AllocationResult |
| AllocateRawFixedArray(int length, PretenureFlag pretenure); |
| |
| // Allocate an uninitialized fixed double array. |
| MUST_USE_RESULT AllocationResult |
| AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure); |
| |
| // Allocate an initialized fixed array with the given filler value. |
| MUST_USE_RESULT AllocationResult |
| AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure, |
| Object* filler); |
| |
// Allocates and partially initializes a String. There are two String
| // encodings: one-byte and two-byte. These functions allocate a string of |
| // the given length and set its map and length fields. The characters of |
| // the string are uninitialized. |
| MUST_USE_RESULT AllocationResult |
| AllocateRawOneByteString(int length, PretenureFlag pretenure); |
| MUST_USE_RESULT AllocationResult |
| AllocateRawTwoByteString(int length, PretenureFlag pretenure); |
| |
| // Allocates an internalized string in old space based on the character |
| // stream. |
| MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8( |
| Vector<const char> str, int chars, uint32_t hash_field); |
| |
| MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString( |
| Vector<const uint8_t> str, uint32_t hash_field); |
| |
| MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString( |
| Vector<const uc16> str, uint32_t hash_field); |
| |
| template <bool is_one_byte, typename T> |
| MUST_USE_RESULT AllocationResult |
| AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field); |
| |
| template <typename T> |
| MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl( |
| T t, int chars, uint32_t hash_field); |
| |
| // Allocates an uninitialized fixed array. It must be filled by the caller. |
| MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length); |
| |
| // Make a copy of src and return it. |
| MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src); |
| |
| // Make a copy of src, also grow the copy, and return the copy. |
| MUST_USE_RESULT AllocationResult |
| CopyFixedArrayAndGrow(FixedArray* src, int grow_by, PretenureFlag pretenure); |
| |
// Make a copy of src up to new_len elements and return the copy.
| MUST_USE_RESULT AllocationResult CopyFixedArrayUpTo(FixedArray* src, |
| int new_len, |
| PretenureFlag pretenure); |
| |
| // Make a copy of src, set the map, and return the copy. |
| MUST_USE_RESULT AllocationResult |
| CopyFixedArrayWithMap(FixedArray* src, Map* map); |
| |
| // Make a copy of src and return it. |
| MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray( |
| FixedDoubleArray* src); |
| |
// Computes a single character string where the character has the given code.
| // A cache is used for one-byte (Latin1) codes. |
| MUST_USE_RESULT AllocationResult |
| LookupSingleCharacterStringFromCode(uint16_t code); |
| |
| // Allocate a symbol in old space. |
| MUST_USE_RESULT AllocationResult AllocateSymbol(); |
| |
| // Allocates an external array of the specified length and type. |
| MUST_USE_RESULT AllocationResult AllocateFixedTypedArrayWithExternalPointer( |
| int length, ExternalArrayType array_type, void* external_pointer, |
| PretenureFlag pretenure); |
| |
| // Allocates a fixed typed array of the specified length and type. |
| MUST_USE_RESULT AllocationResult |
| AllocateFixedTypedArray(int length, ExternalArrayType array_type, |
| bool initialize, PretenureFlag pretenure); |
| |
| // Make a copy of src and return it. |
| MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src); |
| |
| // Make a copy of src, set the map, and return the copy. |
| MUST_USE_RESULT AllocationResult |
| CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map); |
| |
// Allocates a fixed double array with uninitialized values.
| MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray( |
| int length, PretenureFlag pretenure = NOT_TENURED); |
| |
| // Allocate empty fixed array. |
| MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray(); |
| |
| // Allocate empty fixed typed array of given type. |
| MUST_USE_RESULT AllocationResult |
| AllocateEmptyFixedTypedArray(ExternalArrayType array_type); |
| |
| // Allocate a tenured simple cell. |
| MUST_USE_RESULT AllocationResult AllocateCell(Object* value); |
| |
| // Allocate a tenured JS global property cell initialized with the hole. |
| MUST_USE_RESULT AllocationResult AllocatePropertyCell(); |
| |
| MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value); |
| |
| MUST_USE_RESULT AllocationResult AllocateTransitionArray(int capacity); |
| |
| // Allocates a new utility object in the old generation. |
| MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type); |
| |
| // Allocates a new foreign object. |
| MUST_USE_RESULT AllocationResult |
| AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED); |
| |
| MUST_USE_RESULT AllocationResult |
| AllocateCode(int object_size, bool immovable); |
| |
| MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key); |
| |
| MUST_USE_RESULT AllocationResult InternalizeString(String* str); |
| |
| // =========================================================================== |
| |
| void set_force_oom(bool value) { force_oom_ = value; } |
| |
| // The amount of external memory registered through the API. |
| int64_t external_memory_; |
| |
| // The limit when to trigger memory pressure from the API. |
| int64_t external_memory_limit_; |
| |
| // Caches the amount of external memory registered at the last MC. |
| int64_t external_memory_at_last_mark_compact_; |
| |
| // The amount of memory that has been freed concurrently. |
| base::AtomicNumber<intptr_t> external_memory_concurrently_freed_; |
| |
| // This can be calculated directly from a pointer to the heap; however, it is |
| // more expedient to get at the isolate directly from within Heap methods. |
| Isolate* isolate_; |
| |
| Object* roots_[kRootListLength]; |
| |
| size_t code_range_size_; |
| int max_semi_space_size_; |
| int initial_semispace_size_; |
| intptr_t max_old_generation_size_; |
| intptr_t initial_old_generation_size_; |
| bool old_generation_size_configured_; |
| intptr_t max_executable_size_; |
| intptr_t maximum_committed_; |
| |
| // For keeping track of how much data has survived |
| // scavenge since last new space expansion. |
| intptr_t survived_since_last_expansion_; |
| |
| // ... and since the last scavenge. |
| intptr_t survived_last_scavenge_; |
| |
// This is not the depth of nested AlwaysAllocateScopes but rather a single
// count, as scopes can be acquired from multiple tasks (read: threads).
| base::AtomicNumber<size_t> always_allocate_scope_count_; |
| |
// Stores the memory pressure level that is set by MemoryPressureNotification
// and reset by a mark-compact garbage collection.
| base::AtomicValue<MemoryPressureLevel> memory_pressure_level_; |
| |
| // For keeping track of context disposals. |
| int contexts_disposed_; |
| |
| // The length of the retained_maps array at the time of context disposal. |
| // This separates maps in the retained_maps array that were created before |
| // and after context disposal. |
| int number_of_disposed_maps_; |
| |
| int global_ic_age_; |
| |
| NewSpace new_space_; |
| OldSpace* old_space_; |
| OldSpace* code_space_; |
| MapSpace* map_space_; |
| LargeObjectSpace* lo_space_; |
| HeapState gc_state_; |
| int gc_post_processing_depth_; |
| Address new_space_top_after_last_gc_; |
| |
| // Returns the amount of external memory registered since last global gc. |
| int64_t PromotedExternalMemorySize(); |
| |
| // How many "runtime allocations" happened. |
| uint32_t allocations_count_; |
| |
| // Running hash over allocations performed. |
| uint32_t raw_allocations_hash_; |
| |
| // How many mark-sweep collections happened. |
| unsigned int ms_count_; |
| |
// How many GCs happened.
| unsigned int gc_count_; |
| |
| // For post mortem debugging. |
| int remembered_unmapped_pages_index_; |
| Address remembered_unmapped_pages_[kRememberedUnmappedPages]; |
| |
| #ifdef DEBUG |
// If the --gc-interval flag is set to a positive value, this
// variable holds the value indicating the number of allocations
// that remain until the next failure and garbage collection.
| int allocation_timeout_; |
| #endif // DEBUG |
| |
| // Limit that triggers a global GC on the next (normally caused) GC. This |
| // is checked when we have already decided to do a GC to help determine |
| // which collector to invoke, before expanding a paged space in the old |
| // generation and on every allocation in large object space. |
| intptr_t old_generation_allocation_limit_; |
| |
| // Indicates that an allocation has failed in the old generation since the |
| // last GC. |
| bool old_gen_exhausted_; |
| |
| // Indicates that memory usage is more important than latency. |
| // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed. |
| bool optimize_for_memory_usage_; |
| |
| // Indicates that inline bump-pointer allocation has been globally disabled |
| // for all spaces. This is used to disable allocations in generated code. |
| bool inline_allocation_disabled_; |
| |
| // Weak list heads, threaded through the objects. |
| // List heads are initialized lazily and contain the undefined_value at start. |
| Object* native_contexts_list_; |
| Object* allocation_sites_list_; |
| |
| // List of encountered weak collections (JSWeakMap and JSWeakSet) during |
| // marking. It is initialized during marking, destroyed after marking and |
| // contains Smi(0) while marking is not active. |
| Object* encountered_weak_collections_; |
| |
| Object* encountered_weak_cells_; |
| |
| Object* encountered_transition_arrays_; |
| |
| List<GCCallbackPair> gc_epilogue_callbacks_; |
| List<GCCallbackPair> gc_prologue_callbacks_; |
| |
| // Total RegExp code ever generated |
| double total_regexp_code_generated_; |
| |
| int deferred_counters_[v8::Isolate::kUseCounterFeatureCount]; |
| |
| GCTracer* tracer_; |
| |
| int high_survival_rate_period_length_; |
| intptr_t promoted_objects_size_; |
| double promotion_ratio_; |
| double promotion_rate_; |
| intptr_t semi_space_copied_object_size_; |
| intptr_t previous_semi_space_copied_object_size_; |
| double semi_space_copied_rate_; |
| int nodes_died_in_new_space_; |
| int nodes_copied_in_new_space_; |
| int nodes_promoted_; |
| |
// This is the pretenuring trigger for allocation sites that are in maybe
// tenure state. When we switch to the maximum new space size, we deoptimize
// the code that belongs to the allocation site and derive the lifetime
// of the allocation site.
| unsigned int maximum_size_scavenges_; |
| |
| // Maximum GC pause. |
| double max_gc_pause_; |
| |
| // Total time spent in GC. |
| double total_gc_time_ms_; |
| |
| // Maximum size of objects alive after GC. |
| intptr_t max_alive_after_gc_; |
| |
| // Minimal interval between two subsequent collections. |
| double min_in_mutator_; |
| |
| // Cumulative GC time spent in marking. |
| double marking_time_; |
| |
| // Cumulative GC time spent in sweeping. |
| double sweeping_time_; |
| |
| // Last time an idle notification happened. |
| double last_idle_notification_time_; |
| |
| // Last time a garbage collection happened. |
| double last_gc_time_; |
| |
| Scavenger* scavenge_collector_; |
| |
| MarkCompactCollector* mark_compact_collector_; |
| |
| MemoryAllocator* memory_allocator_; |
| |
| StoreBuffer store_buffer_; |
| |
| IncrementalMarking* incremental_marking_; |
| |
| GCIdleTimeHandler* gc_idle_time_handler_; |
| |
| MemoryReducer* memory_reducer_; |
| |
| ObjectStats* object_stats_; |
| |
| ScavengeJob* scavenge_job_; |
| |
| AllocationObserver* idle_scavenge_observer_; |
| |
// These two counters are monotonically increasing and never reset.
| size_t full_codegen_bytes_generated_; |
| size_t crankshaft_codegen_bytes_generated_; |
| |
| // This counter is increased before each GC and never reset. |
| // To account for the bytes allocated since the last GC, use the |
| // NewSpaceAllocationCounter() function. |
| size_t new_space_allocation_counter_; |
| |
| // This counter is increased before each GC and never reset. To |
| // account for the bytes allocated since the last GC, use the |
| // OldGenerationAllocationCounter() function. |
| size_t old_generation_allocation_counter_; |
| |
| // The size of objects in old generation after the last MarkCompact GC. |
| size_t old_generation_size_at_last_gc_; |
| |
| // If the --deopt_every_n_garbage_collections flag is set to a positive value, |
| // this variable holds the number of garbage collections since the last |
| // deoptimization triggered by garbage collection. |
| int gcs_since_last_deopt_; |
| |
| // The feedback storage is used to store allocation sites (keys) and how often |
| // they have been visited (values) by finding a memento behind an object. The |
// storage is only alive temporarily during a GC. The invariant is that all
| // pointers in this map are already fixed, i.e., they do not point to |
| // forwarding pointers. |
| base::HashMap* global_pretenuring_feedback_; |
| |
| char trace_ring_buffer_[kTraceRingBufferSize]; |
| // If it's not full then the data is from 0 to ring_buffer_end_. If it's |
| // full then the data is from ring_buffer_end_ to the end of the buffer and |
| // from 0 to ring_buffer_end_. |
| bool ring_buffer_full_; |
| size_t ring_buffer_end_; |
| |
| // Shared state read by the scavenge collector and set by ScavengeObject. |
| PromotionQueue promotion_queue_; |
| |
| // Flag is set when the heap has been configured. The heap can be repeatedly |
| // configured through the API until it is set up. |
| bool configured_; |
| |
| // Currently set GC flags that are respected by all GC components. |
| int current_gc_flags_; |
| |
| // Currently set GC callback flags that are used to pass information between |
| // the embedder and V8's GC. |
| GCCallbackFlags current_gc_callback_flags_; |
| |
| ExternalStringTable external_string_table_; |
| |
| base::Mutex relocation_mutex_; |
| |
| int gc_callbacks_depth_; |
| |
| bool deserialization_complete_; |
| |
| StrongRootsList* strong_roots_list_; |
| |
| // The depth of HeapIterator nestings. |
| int heap_iterator_depth_; |
| |
| // Used for testing purposes. |
| bool force_oom_; |
| |
| // Classes in "heap" can be friends. |
| friend class AlwaysAllocateScope; |
| friend class GCCallbacksScope; |
| friend class GCTracer; |
| friend class HeapIterator; |
| friend class IdleScavengeObserver; |
| friend class IncrementalMarking; |
| friend class IteratePromotedObjectsVisitor; |
| friend class MarkCompactCollector; |
| friend class MarkCompactMarkingVisitor; |
| friend class NewSpace; |
| friend class ObjectStatsCollector; |
| friend class Page; |
| friend class Scavenger; |
| friend class StoreBuffer; |
| friend class TestMemoryAllocatorScope; |
| |
| // The allocator interface. |
| friend class Factory; |
| |
| // The Isolate constructs us. |
| friend class Isolate; |
| |
| // Used in cctest. |
| friend class HeapTester; |
| |
| DISALLOW_COPY_AND_ASSIGN(Heap); |
| }; |
| |
| |
| class HeapStats { |
| public: |
| static const int kStartMarker = 0xDECADE00; |
| static const int kEndMarker = 0xDECADE01; |
| |
| int* start_marker; // 0 |
| int* new_space_size; // 1 |
| int* new_space_capacity; // 2 |
| intptr_t* old_space_size; // 3 |
| intptr_t* old_space_capacity; // 4 |
| intptr_t* code_space_size; // 5 |
| intptr_t* code_space_capacity; // 6 |
| intptr_t* map_space_size; // 7 |
| intptr_t* map_space_capacity; // 8 |
| intptr_t* lo_space_size; // 9 |
| int* global_handle_count; // 10 |
| int* weak_global_handle_count; // 11 |
| int* pending_global_handle_count; // 12 |
| int* near_death_global_handle_count; // 13 |
| int* free_global_handle_count; // 14 |
| intptr_t* memory_allocator_size; // 15 |
| intptr_t* memory_allocator_capacity; // 16 |
| int* objects_per_type; // 17 |
| int* size_per_type; // 18 |
| int* os_error; // 19 |
| char* last_few_messages; // 20 |
| char* js_stacktrace; // 21 |
| int* end_marker; // 22 |
| }; |
| |
| |
| class AlwaysAllocateScope { |
| public: |
| explicit inline AlwaysAllocateScope(Isolate* isolate); |
| inline ~AlwaysAllocateScope(); |
| |
| private: |
| Heap* heap_; |
| }; |
| |
| |
| // Visitor class to verify interior pointers in spaces that do not contain |
| // or care about intergenerational references. All heap object pointers have to |
| // point into the heap to a location that has a map pointer at its first word. |
| // Caveat: Heap::Contains is an approximation because it can return true for |
| // objects in a heap space but above the allocation pointer. |
| class VerifyPointersVisitor : public ObjectVisitor { |
| public: |
| inline void VisitPointers(Object** start, Object** end) override; |
| }; |
| |
| |
| // Verify that all objects are Smis. |
| class VerifySmisVisitor : public ObjectVisitor { |
| public: |
| inline void VisitPointers(Object** start, Object** end) override; |
| }; |
| |
| |
| // Space iterator for iterating over all spaces of the heap. Returns each space |
| // in turn, and null when it is done. |
| class AllSpaces BASE_EMBEDDED { |
| public: |
| explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {} |
| Space* next(); |
| |
| private: |
| Heap* heap_; |
| int counter_; |
| }; |
| |
| |
| // Space iterator for iterating over all old spaces of the heap: Old space |
| // and code space. Returns each space in turn, and null when it is done. |
| class OldSpaces BASE_EMBEDDED { |
| public: |
| explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {} |
| OldSpace* next(); |
| |
| private: |
| Heap* heap_; |
| int counter_; |
| }; |
| |
| |
// Space iterator for iterating over all the paged spaces of the heap: old
// space, code space and map space. Returns each space in turn, and null when
// it is done.
| class PagedSpaces BASE_EMBEDDED { |
| public: |
| explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {} |
| PagedSpace* next(); |
| |
| private: |
| Heap* heap_; |
| int counter_; |
| }; |
| |
| |
| // Space iterator for iterating over all spaces of the heap. |
| // For each space an object iterator is provided. The deallocation of the |
| // returned object iterators is handled by the space iterator. |
| class SpaceIterator : public Malloced { |
| public: |
| explicit SpaceIterator(Heap* heap); |
| virtual ~SpaceIterator(); |
| |
| bool has_next(); |
| ObjectIterator* next(); |
| |
| private: |
| ObjectIterator* CreateIterator(); |
| |
| Heap* heap_; |
| int current_space_; // from enum AllocationSpace. |
| ObjectIterator* iterator_; // object iterator for the current space. |
| }; |
| |
| |
// A HeapIterator provides iteration over the whole heap. It
// aggregates the specific iterators for the different spaces as
// each of these can iterate over only one space.
| // |
| // HeapIterator ensures there is no allocation during its lifetime |
| // (using an embedded DisallowHeapAllocation instance). |
| // |
// HeapIterator can skip free list nodes (that is, de-allocated heap
// objects that still remain in the heap). As the implementation of free
// node filtering uses GC marks, it can't be used during MS/MC GC
// phases. Also, it is forbidden to interrupt iteration in this mode,
// as this will leave heap objects marked (and thus, unusable).
| class HeapIterator BASE_EMBEDDED { |
| public: |
| enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable }; |
| |
| explicit HeapIterator(Heap* heap, |
| HeapObjectsFiltering filtering = kNoFiltering); |
| ~HeapIterator(); |
| |
| HeapObject* next(); |
| |
| private: |
| struct MakeHeapIterableHelper { |
| explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); } |
| }; |
| |
| HeapObject* NextObject(); |
| |
| // The following two fields need to be declared in this order. Initialization |
| // order guarantees that we first make the heap iterable (which may involve |
| // allocations) and only then lock it down by not allowing further |
| // allocations. |
| MakeHeapIterableHelper make_heap_iterable_helper_; |
| DisallowHeapAllocation no_heap_allocation_; |
| |
| Heap* heap_; |
| HeapObjectsFiltering filtering_; |
| HeapObjectsFilter* filter_; |
| // Space iterator for iterating all the spaces. |
| SpaceIterator* space_iterator_; |
| // Object iterator for the space currently being iterated. |
| ObjectIterator* object_iterator_; |
| }; |
| |
| |
| // Cache for mapping (map, property name) into field offset. |
| // Cleared at startup and prior to mark sweep collection. |
| class KeyedLookupCache { |
| public: |
| // Lookup field offset for (map, name). If absent, -1 is returned. |
| int Lookup(Handle<Map> map, Handle<Name> name); |
| |
| // Update an element in the cache. |
| void Update(Handle<Map> map, Handle<Name> name, int field_offset); |
| |
| // Clear the cache. |
| void Clear(); |
| |
| static const int kLength = 256; |
| static const int kCapacityMask = kLength - 1; |
| static const int kMapHashShift = 5; |
| static const int kHashMask = -4; // Zero the last two bits. |
| static const int kEntriesPerBucket = 4; |
| static const int kEntryLength = 2; |
| static const int kMapIndex = 0; |
| static const int kKeyIndex = 1; |
| static const int kNotFound = -1; |
| |
| // kEntriesPerBucket should be a power of 2. |
| STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0); |
| STATIC_ASSERT(kEntriesPerBucket == -kHashMask); |
| |
| private: |
| KeyedLookupCache() { |
| for (int i = 0; i < kLength; ++i) { |
| keys_[i].map = NULL; |
| keys_[i].name = NULL; |
| field_offsets_[i] = kNotFound; |
| } |
| } |
| |
| static inline int Hash(Handle<Map> map, Handle<Name> name); |
| |
| // Get the address of the keys and field_offsets arrays. Used in |
| // generated code to perform cache lookups. |
| Address keys_address() { return reinterpret_cast<Address>(&keys_); } |
| |
| Address field_offsets_address() { |
| return reinterpret_cast<Address>(&field_offsets_); |
| } |
| |
| struct Key { |
| Map* map; |
| Name* name; |
| }; |
| |
| Key keys_[kLength]; |
| int field_offsets_[kLength]; |
| |
| friend class ExternalReference; |
| friend class Isolate; |
| DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache); |
| }; |
| |
| |
| // Cache for mapping (map, property name) into descriptor index. |
| // The cache contains both positive and negative results. |
| // Descriptor index equals kNotFound means the property is absent. |
| // Cleared at startup and prior to any gc. |
| class DescriptorLookupCache { |
| public: |
| // Lookup descriptor index for (map, name). |
| // If absent, kAbsent is returned. |
| inline int Lookup(Map* source, Name* name); |
| |
| // Update an element in the cache. |
| inline void Update(Map* source, Name* name, int result); |
| |
| // Clear the cache. |
| void Clear(); |
| |
| static const int kAbsent = -2; |
| |
| private: |
| DescriptorLookupCache() { |
| for (int i = 0; i < kLength; ++i) { |
| keys_[i].source = NULL; |
| keys_[i].name = NULL; |
| results_[i] = kAbsent; |
| } |
| } |
| |
| static inline int Hash(Object* source, Name* name); |
| |
| static const int kLength = 64; |
| struct Key { |
| Map* source; |
| Name* name; |
| }; |
| |
| Key keys_[kLength]; |
| int results_[kLength]; |
| |
| friend class Isolate; |
| DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache); |
| }; |
| |
| |
| // Abstract base class for checking whether a weak object should be retained. |
| class WeakObjectRetainer { |
| public: |
| virtual ~WeakObjectRetainer() {} |
| |
| // Return whether this object should be retained. If NULL is returned the |
| // object has no references. Otherwise the address of the retained object |
| // should be returned as in some GC situations the object has been moved. |
| virtual Object* RetainAs(Object* object) = 0; |
| }; |
| |
| |
| #ifdef DEBUG |
| // Helper class for tracing paths to a search target Object from all roots. |
| // The TracePathFrom() method can be used to trace paths from a specific |
| // object to the search target object. |
| class PathTracer : public ObjectVisitor { |
| public: |
| enum WhatToFind { |
| FIND_ALL, // Will find all matches. |
| FIND_FIRST // Will stop the search after first match. |
| }; |
| |
| // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject. |
| static const int kMarkTag = 2; |
| |
| // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop |
| // after the first match. If FIND_ALL is specified, then tracing will be |
| // done for all matches. |
| PathTracer(Object* search_target, WhatToFind what_to_find, |
| VisitMode visit_mode) |
| : search_target_(search_target), |
| found_target_(false), |
| found_target_in_trace_(false), |
| what_to_find_(what_to_find), |
| visit_mode_(visit_mode), |
| object_stack_(20), |
| no_allocation() {} |
| |
| void VisitPointers(Object** start, Object** end) override; |
| |
| void Reset(); |
| void TracePathFrom(Object** root); |
| |
| bool found() const { return found_target_; } |
| |
| static Object* const kAnyGlobalObject; |
| |
| protected: |
| class MarkVisitor; |
| class UnmarkVisitor; |
| |
| void MarkRecursively(Object** p, MarkVisitor* mark_visitor); |
| void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor); |
| virtual void ProcessResults(); |
| |
| Object* search_target_; |
| bool found_target_; |
| bool found_target_in_trace_; |
| WhatToFind what_to_find_; |
| VisitMode visit_mode_; |
| List<Object*> object_stack_; |
| |
| DisallowHeapAllocation no_allocation; // i.e. no gc allowed. |
| |
| private: |
| DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); |
| }; |
| #endif // DEBUG |
| |
| // ----------------------------------------------------------------------------- |
| // Allows observation of allocations. |
| class AllocationObserver { |
| public: |
| explicit AllocationObserver(intptr_t step_size) |
| : step_size_(step_size), bytes_to_next_step_(step_size) { |
| DCHECK(step_size >= kPointerSize); |
| } |
| virtual ~AllocationObserver() {} |
| |
// Called each time the observed space does an allocation step. This may
// happen more frequently than the step_size we are monitoring (e.g. when
// there are multiple observers, or when a page or space boundary is
// encountered).
| void AllocationStep(int bytes_allocated, Address soon_object, size_t size) { |
| bytes_to_next_step_ -= bytes_allocated; |
| if (bytes_to_next_step_ <= 0) { |
| Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object, |
| size); |
| step_size_ = GetNextStepSize(); |
| bytes_to_next_step_ = step_size_; |
| } |
| } |
| |
| protected: |
| intptr_t step_size() const { return step_size_; } |
| intptr_t bytes_to_next_step() const { return bytes_to_next_step_; } |
| |
| // Pure virtual method provided by the subclasses that gets called when at |
| // least step_size bytes have been allocated. soon_object is the address just |
| // allocated (but not yet initialized.) size is the size of the object as |
| // requested (i.e. w/o the alignment fillers). Some complexities to be aware |
| // of: |
| // 1) soon_object will be nullptr in cases where we end up observing an |
| // allocation that happens to be a filler space (e.g. page boundaries.) |
| // 2) size is the requested size at the time of allocation. Right-trimming |
| // may change the object size dynamically. |
| // 3) soon_object may actually be the first object in an allocation-folding |
| // group. In such a case size is the size of the group rather than the |
| // first object. |
| virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0; |
| |
| // Subclasses can override this method to make step size dynamic. |
| virtual intptr_t GetNextStepSize() { return step_size_; } |
| |
| intptr_t step_size_; |
| intptr_t bytes_to_next_step_; |
| |
| private: |
| friend class LargeObjectSpace; |
| friend class NewSpace; |
| friend class PagedSpace; |
| DISALLOW_COPY_AND_ASSIGN(AllocationObserver); |
| }; |
| |
| } // namespace internal |
| } // namespace v8 |
| |
| #endif // V8_HEAP_HEAP_H_ |