// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_H_
#define V8_HEAP_HEAP_H_

#include <cmath>
#include <map>

// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/atomic-utils.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
// TODO(mstarzinger): Two more includes to kill!
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
#include "src/list.h"

namespace v8 {
namespace internal {

// Defines all the roots in Heap. (A consumption sketch follows ROOT_LIST
// below.)
#define STRONG_ROOT_LIST(V) \
  V(Map, byte_array_map, ByteArrayMap) \
  V(Map, free_space_map, FreeSpaceMap) \
  V(Map, one_pointer_filler_map, OnePointerFillerMap) \
  V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
  /* Cluster the most popular ones in a few cache lines here at the top. */ \
  V(Smi, store_buffer_top, StoreBufferTop) \
  V(Oddball, undefined_value, UndefinedValue) \
  V(Oddball, the_hole_value, TheHoleValue) \
  V(Oddball, null_value, NullValue) \
  V(Oddball, true_value, TrueValue) \
  V(Oddball, false_value, FalseValue) \
  V(String, empty_string, empty_string) \
  V(Oddball, uninitialized_value, UninitializedValue) \
  V(Map, cell_map, CellMap) \
  V(Map, global_property_cell_map, GlobalPropertyCellMap) \
  V(Map, shared_function_info_map, SharedFunctionInfoMap) \
  V(Map, meta_map, MetaMap) \
  V(Map, heap_number_map, HeapNumberMap) \
  V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
  V(Map, float32x4_map, Float32x4Map) \
  V(Map, int32x4_map, Int32x4Map) \
  V(Map, uint32x4_map, Uint32x4Map) \
  V(Map, bool32x4_map, Bool32x4Map) \
  V(Map, int16x8_map, Int16x8Map) \
  V(Map, uint16x8_map, Uint16x8Map) \
  V(Map, bool16x8_map, Bool16x8Map) \
  V(Map, int8x16_map, Int8x16Map) \
  V(Map, uint8x16_map, Uint8x16Map) \
  V(Map, bool8x16_map, Bool8x16Map) \
  V(Map, native_context_map, NativeContextMap) \
  V(Map, fixed_array_map, FixedArrayMap) \
  V(Map, code_map, CodeMap) \
  V(Map, scope_info_map, ScopeInfoMap) \
  V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
  V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
  V(Map, weak_cell_map, WeakCellMap) \
  V(Map, transition_array_map, TransitionArrayMap) \
  V(Map, one_byte_string_map, OneByteStringMap) \
  V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
  V(Map, function_context_map, FunctionContextMap) \
  V(FixedArray, empty_fixed_array, EmptyFixedArray) \
  V(ByteArray, empty_byte_array, EmptyByteArray) \
  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
  /* The roots above this line should be boring from a GC point of view. */ \
  /* This means they are never in new space and never on a page that is */ \
  /* being compacted. */ \
  V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
  V(Oddball, arguments_marker, ArgumentsMarker) \
  V(Oddball, exception, Exception) \
  V(Oddball, termination_exception, TerminationException) \
  V(FixedArray, number_string_cache, NumberStringCache) \
  V(Object, instanceof_cache_function, InstanceofCacheFunction) \
  V(Object, instanceof_cache_map, InstanceofCacheMap) \
  V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
  V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
  V(FixedArray, string_split_cache, StringSplitCache) \
  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
  V(Smi, hash_seed, HashSeed) \
  V(Map, hash_table_map, HashTableMap) \
  V(Map, ordered_hash_table_map, OrderedHashTableMap) \
  V(Map, symbol_map, SymbolMap) \
  V(Map, string_map, StringMap) \
  V(Map, cons_one_byte_string_map, ConsOneByteStringMap) \
  V(Map, cons_string_map, ConsStringMap) \
  V(Map, sliced_string_map, SlicedStringMap) \
  V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap) \
  V(Map, external_string_map, ExternalStringMap) \
  V(Map, external_string_with_one_byte_data_map, \
    ExternalStringWithOneByteDataMap) \
  V(Map, external_one_byte_string_map, ExternalOneByteStringMap) \
  V(Map, native_source_string_map, NativeSourceStringMap) \
  V(Map, short_external_string_map, ShortExternalStringMap) \
  V(Map, short_external_string_with_one_byte_data_map, \
    ShortExternalStringWithOneByteDataMap) \
  V(Map, internalized_string_map, InternalizedStringMap) \
  V(Map, external_internalized_string_map, ExternalInternalizedStringMap) \
  V(Map, external_internalized_string_with_one_byte_data_map, \
    ExternalInternalizedStringWithOneByteDataMap) \
  V(Map, external_one_byte_internalized_string_map, \
    ExternalOneByteInternalizedStringMap) \
  V(Map, short_external_internalized_string_map, \
    ShortExternalInternalizedStringMap) \
  V(Map, short_external_internalized_string_with_one_byte_data_map, \
    ShortExternalInternalizedStringWithOneByteDataMap) \
  V(Map, short_external_one_byte_internalized_string_map, \
    ShortExternalOneByteInternalizedStringMap) \
  V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap) \
  V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \
  V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \
  V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \
  V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \
  V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \
  V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \
  V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
  V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
  V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
  V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
  V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
  V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
  V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array) \
  V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \
  V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \
  V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \
  V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
  V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
    EmptyFixedUint8ClampedArray) \
  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
  V(Map, catch_context_map, CatchContextMap) \
  V(Map, with_context_map, WithContextMap) \
  V(Map, block_context_map, BlockContextMap) \
  V(Map, module_context_map, ModuleContextMap) \
  V(Map, script_context_map, ScriptContextMap) \
  V(Map, script_context_table_map, ScriptContextTableMap) \
  V(Map, undefined_map, UndefinedMap) \
  V(Map, the_hole_map, TheHoleMap) \
  V(Map, null_map, NullMap) \
  V(Map, boolean_map, BooleanMap) \
  V(Map, uninitialized_map, UninitializedMap) \
  V(Map, arguments_marker_map, ArgumentsMarkerMap) \
  V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap) \
  V(Map, exception_map, ExceptionMap) \
  V(Map, termination_exception_map, TerminationExceptionMap) \
  V(Map, message_object_map, JSMessageObjectMap) \
  V(Map, foreign_map, ForeignMap) \
  V(Map, neander_map, NeanderMap) \
  V(Map, external_map, ExternalMap) \
  V(HeapNumber, nan_value, NanValue) \
  V(HeapNumber, infinity_value, InfinityValue) \
  V(HeapNumber, minus_zero_value, MinusZeroValue) \
  V(HeapNumber, minus_infinity_value, MinusInfinityValue) \
  V(JSObject, message_listeners, MessageListeners) \
  V(UnseededNumberDictionary, code_stubs, CodeStubs) \
  V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
  V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \
  V(Code, js_entry_code, JsEntryCode) \
  V(Code, js_construct_entry_code, JsConstructEntryCode) \
  V(FixedArray, natives_source_cache, NativesSourceCache) \
  V(FixedArray, experimental_natives_source_cache, \
    ExperimentalNativesSourceCache) \
  V(FixedArray, extra_natives_source_cache, ExtraNativesSourceCache) \
  V(FixedArray, experimental_extra_natives_source_cache, \
    ExperimentalExtraNativesSourceCache) \
  V(Script, empty_script, EmptyScript) \
  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
  V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary) \
  V(Cell, undefined_cell, UndefinedCell) \
  V(JSObject, observation_state, ObservationState) \
  V(Object, symbol_registry, SymbolRegistry) \
  V(Object, script_list, ScriptList) \
  V(SeededNumberDictionary, empty_slow_element_dictionary, \
    EmptySlowElementDictionary) \
  V(FixedArray, materialized_objects, MaterializedObjects) \
  V(FixedArray, microtask_queue, MicrotaskQueue) \
  V(TypeFeedbackVector, dummy_vector, DummyVector) \
  V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap) \
  V(FixedArray, detached_contexts, DetachedContexts) \
  V(ArrayList, retained_maps, RetainedMaps) \
  V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable) \
  V(PropertyCell, array_protector, ArrayProtector) \
  V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
  V(Object, weak_stack_trace_list, WeakStackTraceList) \
  V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
  V(Map, bytecode_array_map, BytecodeArrayMap) \
  V(WeakCell, empty_weak_cell, EmptyWeakCell) \
  V(PropertyCell, species_protector, SpeciesProtector)

// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
  V(Smi, stack_limit, StackLimit) \
  V(Smi, real_stack_limit, RealStackLimit) \
  V(Smi, last_script_id, LastScriptId) \
  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)


#define ROOT_LIST(V) \
  STRONG_ROOT_LIST(V) \
  SMI_ROOT_LIST(V) \
  V(StringTable, string_table, StringTable)
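
// The lists above are X-macros: a client supplies its own V and each list
// expands once per entry. A minimal consumption sketch (illustrative only,
// not part of this header):
//
//   #define COUNT_ROOT(type, name, camel_name) +1
//   static const int kNumberOfRoots = 0 ROOT_LIST(COUNT_ROOT);
//   #undef COUNT_ROOT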


// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is deliberately incomplete. (A predicate
// sketch follows the list.)
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
  V(ByteArrayMap) \
  V(BytecodeArrayMap) \
  V(FreeSpaceMap) \
  V(OnePointerFillerMap) \
  V(TwoPointerFillerMap) \
  V(UndefinedValue) \
  V(TheHoleValue) \
  V(NullValue) \
  V(TrueValue) \
  V(FalseValue) \
  V(UninitializedValue) \
  V(CellMap) \
  V(GlobalPropertyCellMap) \
  V(SharedFunctionInfoMap) \
  V(MetaMap) \
  V(HeapNumberMap) \
  V(MutableHeapNumberMap) \
  V(Float32x4Map) \
  V(Int32x4Map) \
  V(Uint32x4Map) \
  V(Bool32x4Map) \
  V(Int16x8Map) \
  V(Uint16x8Map) \
  V(Bool16x8Map) \
  V(Int8x16Map) \
  V(Uint8x16Map) \
  V(Bool8x16Map) \
  V(NativeContextMap) \
  V(FixedArrayMap) \
  V(CodeMap) \
  V(ScopeInfoMap) \
  V(FixedCOWArrayMap) \
  V(FixedDoubleArrayMap) \
  V(WeakCellMap) \
  V(TransitionArrayMap) \
  V(NoInterceptorResultSentinel) \
  V(HashTableMap) \
  V(OrderedHashTableMap) \
  V(EmptyFixedArray) \
  V(EmptyByteArray) \
  V(EmptyDescriptorArray) \
  V(ArgumentsMarker) \
  V(SymbolMap) \
  V(SloppyArgumentsElementsMap) \
  V(FunctionContextMap) \
  V(CatchContextMap) \
  V(WithContextMap) \
  V(BlockContextMap) \
  V(ModuleContextMap) \
  V(ScriptContextMap) \
  V(UndefinedMap) \
  V(TheHoleMap) \
  V(NullMap) \
  V(BooleanMap) \
  V(UninitializedMap) \
  V(ArgumentsMarkerMap) \
  V(JSMessageObjectMap) \
  V(ForeignMap) \
  V(NeanderMap) \
  V(EmptyWeakCell) \
  V(empty_string) \
  PRIVATE_SYMBOL_LIST(V)
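
// A sketch of how this list is typically consumed (illustrative only): each
// entry expands to a case label, answering the write-barrier question above.
//
//   bool Heap::RootIsImmortalImmovable(int root_index) {
//     switch (root_index) {
//   #define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
//       IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
//   #undef IMMORTAL_IMMOVABLE_ROOT
//         return true;
//       default:
//         return false;
//     }
//   }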

// Forward declarations.
class AllocationObserver;
class ArrayBufferTracker;
class GCIdleTimeAction;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class Isolate;
class MemoryReducer;
class ObjectStats;
class Scavenger;
class ScavengeJob;
class WeakObjectRetainer;

typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
// The last page in to-space is used for the promotion queue. On conflict
// during scavenge, the promotion queue is allocated externally and all
// entries are copied to the external queue.
class PromotionQueue {
 public:
  explicit PromotionQueue(Heap* heap)
      : front_(NULL),
        rear_(NULL),
        limit_(NULL),
        emergency_stack_(0),
        heap_(heap) {}

  void Initialize();

  void Destroy() {
    DCHECK(is_empty());
    delete emergency_stack_;
    emergency_stack_ = NULL;
  }

  Page* GetHeadPage() {
    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
  }

  void SetNewLimit(Address limit) {
    // If we are already using an emergency stack, we can ignore it.
    if (emergency_stack_) return;

    // If the limit is not on the same page, we can ignore it.
    if (Page::FromAllocationTop(limit) != GetHeadPage()) return;

    limit_ = reinterpret_cast<intptr_t*>(limit);

    if (limit_ <= rear_) {
      return;
    }

    RelocateQueueHead();
  }

  bool IsBelowPromotionQueue(Address to_space_top) {
    // If an emergency stack is used, the to-space address cannot interfere
    // with the promotion queue.
    if (emergency_stack_) return true;

    // If the given to-space top pointer and the head of the promotion queue
    // are not on the same page, then the to-space objects are below the
    // promotion queue.
    if (GetHeadPage() != Page::FromAddress(to_space_top)) {
      return true;
    }
    // If the to-space top pointer is smaller than or equal to the promotion
    // queue head, then the to-space objects are below the promotion queue.
    return reinterpret_cast<intptr_t*>(to_space_top) <= rear_;
  }

  bool is_empty() {
    return (front_ == rear_) &&
           (emergency_stack_ == NULL || emergency_stack_->length() == 0);
  }

  inline void insert(HeapObject* target, int size);

  void remove(HeapObject** target, int* size) {
    DCHECK(!is_empty());
    if (front_ == rear_) {
      Entry e = emergency_stack_->RemoveLast();
      *target = e.obj_;
      *size = e.size_;
      return;
    }

    *target = reinterpret_cast<HeapObject*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    // Assert no underflow.
    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
                                reinterpret_cast<Address>(front_));
  }

 private:
  // The front of the queue is higher in the memory page chain than the rear.
  intptr_t* front_;
  intptr_t* rear_;
  intptr_t* limit_;

  static const int kEntrySizeInWords = 2;

  struct Entry {
    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) {}

    HeapObject* obj_;
    int size_;
  };
  List<Entry>* emergency_stack_;

  Heap* heap_;

  void RelocateQueueHead();

  DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};
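
// Usage sketch (illustrative only): during a scavenge the collector pushes
// each promoted object together with its size, and later drains the queue so
// scanning never has to re-derive the size from the map:
//
//   PromotionQueue* queue = heap->promotion_queue();
//   queue->insert(promoted, promoted_size);
//   while (!queue->is_empty()) {
//     HeapObject* obj;
//     int size;
//     queue->remove(&obj, &size);
//     // ... scan the promoted object for pointers ...
//   }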


enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};


class Heap {
 public:
  // Declare all the root indices. This defines the root list order.
  enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
    INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
#undef STRING_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
    PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name, description) k##name##RootIndex,
    PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
    WELL_KNOWN_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

// Utility type maps.
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
    STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
    kStringTableRootIndex,

#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
    kRootListLength,
    kStrongRootListLength = kStringTableRootIndex,
    kSmiRootsStart = kStringTableRootIndex + 1
  };
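
  // For example, the first STRONG_ROOT_LIST entry above expands to
  // kByteArrayMapRootIndex, INTERNALIZED_STRING_LIST contributes indices such
  // as kempty_stringRootIndex (see the STATIC_ASSERTs below), and the Smi
  // roots form the tail starting at kSmiRootsStart.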

  enum FindMementoMode { kForRuntime, kForGC };

  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };

  // Indicates from where a live bytes adjustment is triggered:
  // - from within GC code before the sweeper has started
  //   (SEQUENTIAL_TO_SWEEPER),
  // - or from GC or mutator code while the sweeper runs concurrently
  //   (CONCURRENT_TO_SWEEPER).
  enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };

  enum UpdateAllocationSiteMode { kGlobal, kCached };

  // Taking this lock prevents the GC from entering a phase that relocates
  // object references.
  class RelocationLock {
   public:
    explicit RelocationLock(Heap* heap) : heap_(heap) {
      heap_->relocation_mutex_.Lock();
    }

    ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }

   private:
    Heap* heap_;
  };
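
  // Usage sketch (illustrative only): hold the lock for the duration of any
  // raw access that must not race with a relocating GC phase:
  //
  //   {
  //     Heap::RelocationLock relocation_lock(heap);
  //     // ... read object references without them being moved ...
  //   }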

  // Support for partial snapshots. After calling ReserveSpace, we have a
  // linear range to write objects into in each space.
  struct Chunk {
    uint32_t size;
    Address start;
    Address end;
  };
  typedef List<Chunk> Reservation;

  static const intptr_t kMinimumOldGenerationAllocationLimit =
      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);

  static const int kInitalOldGenerationLimitFactor = 2;

#if V8_OS_ANDROID
  // Don't apply the pointer multiplier on Android, since it has no swap space
  // and should instead adapt its heap size based on available physical memory.
  static const int kPointerMultiplier = 1;
#else
  static const int kPointerMultiplier = i::kPointerSize / 4;
#endif
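
  // For example, on a 64-bit non-Android build kPointerSize is 8, so
  // kPointerMultiplier is 2 and kMaxSemiSpaceSizeMediumMemoryDevice below
  // works out to 8 MB.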

  // The new space size has to be a power of 2. Sizes are in MB.
  static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;

  // The old space size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeMediumMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;

  // The executable size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
  static const int kMaxExecutableSizeMediumMemoryDevice =
      192 * kPointerMultiplier;
  static const int kMaxExecutableSizeHighMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxExecutableSizeHugeMemoryDevice =
      256 * kPointerMultiplier;

  static const int kTraceRingBufferSize = 512;
  static const int kStacktraceBufferSize = 512;

  static const double kMinHeapGrowingFactor;
  static const double kMaxHeapGrowingFactor;
  static const double kMaxHeapGrowingFactorMemoryConstrained;
  static const double kMaxHeapGrowingFactorIdle;
  static const double kTargetMutatorUtilization;

  static const int kNoGCFlags = 0;
  static const int kReduceMemoryFootprintMask = 1;
  static const int kAbortIncrementalMarkingMask = 2;
  static const int kFinalizeIncrementalMarkingMask = 4;

  // Making the heap iterable requires us to abort incremental marking.
  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

  // The roots that have an index less than this are always in old space.
  static const int kOldSpaceRoots = 0x20;

  // The minimum size of a HeapObject on the heap.
  static const int kMinObjectSizeInWords = 2;

  STATIC_ASSERT(kUndefinedValueRootIndex ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
  STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
  STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
  STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);

  // Calculates the maximum amount of filler that could be required by the
  // given alignment.
  static int GetMaximumFillToAlign(AllocationAlignment alignment);
  // Calculates the actual amount of filler required for a given address at the
  // given alignment.
  static int GetFillToAlign(Address address, AllocationAlignment alignment);
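
  // Worked example (illustrative): on a 32-bit build a kDoubleAligned request
  // needs at most kDoubleSize - kPointerSize == 4 bytes of filler, and
  // GetFillToAlign returns 0 for an address that is already double-aligned.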

  template <typename T>
  static inline bool IsOneByte(T t, int chars);

  static void FatalProcessOutOfMemory(const char* location,
                                      bool take_snapshot = false);

  static bool RootIsImmortalImmovable(int root_index);

  // Checks whether the space is valid.
  static bool IsValidAllocationSpace(AllocationSpace space);

  // Generated code can embed direct references to non-writable roots if
  // they are in new space.
  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);

  // Zapping is needed for heap verification and is always done in debug
  // builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  static double HeapGrowingFactor(double gc_speed, double mutator_speed);

  // Copy a block of memory from src to dst. The block size must be aligned
  // to the pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  // Optimized version of memmove for blocks with pointer size aligned sizes
  // and pointer size aligned addresses.
  static inline void MoveBlock(Address dst, Address src, int byte_size);

  // Determines a static visitor id based on the given {map} that can then be
  // stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
  static int GetStaticVisitorIdForMap(Map* map);

  // Notifies the heap that it is ok to start marking or other activities that
  // should not happen during deserialization.
  void NotifyDeserializationComplete();

  intptr_t old_generation_allocation_limit() const {
    return old_generation_allocation_limit_;
  }

  bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }

  Address* NewSpaceAllocationTopAddress() {
    return new_space_.allocation_top_address();
  }
  Address* NewSpaceAllocationLimitAddress() {
    return new_space_.allocation_limit_address();
  }

  Address* OldSpaceAllocationTopAddress() {
    return old_space_->allocation_top_address();
  }
  Address* OldSpaceAllocationLimitAddress() {
    return old_space_->allocation_limit_address();
  }

  // TODO(hpayer): There is still a mismatch between capacity and actual
  // committed memory size.
  bool CanExpandOldGeneration(int size = 0) {
    if (force_oom_) return false;
    return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize();
  }

  // Clear the Instanceof cache (used when a prototype changes).
  inline void ClearInstanceofCache();

  // FreeSpace objects have a null map after deserialization. Update the map.
  void RepairFreeListsAfterDeserialization();

  // Move len elements within a given array from src_index to dst_index.
  void MoveElements(FixedArray* array, int dst_index, int src_index, int len);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when introducing gaps within pages.
  void CreateFillerObjectAt(Address addr, int size);

  bool CanMoveObjectStart(HeapObject* object);

  // Maintain consistency of live bytes during incremental marking.
  void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode);

  // Trim the given array from the left. Note that this relocates the object
  // start and hence is only valid if there is only a single reference to it.
  FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Trim the given array from the right.
  template <Heap::InvocationMode mode>
  void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
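
  // Usage sketch (illustrative only): drop the last two elements of |array|
  // in place, choosing the live-byte accounting mode via the template
  // parameter:
  //
  //   heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(array, 2);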

  // Converts the given boolean condition to JavaScript boolean value.
  inline Object* ToBoolean(bool condition);

  // Check whether the heap is currently iterable.
  bool IsHeapIterable();

  // Notify the heap that a context has been disposed.
  int NotifyContextDisposed(bool dependant_context);

  void set_native_contexts_list(Object* object) {
    native_contexts_list_ = object;
  }
  Object* native_contexts_list() const { return native_contexts_list_; }

  void set_allocation_sites_list(Object* object) {
    allocation_sites_list_ = object;
  }
  Object* allocation_sites_list() { return allocation_sites_list_; }

  // Used in CreateAllocationSiteStub and the (de)serializer.
  Object** allocation_sites_list_address() { return &allocation_sites_list_; }

  void set_encountered_weak_collections(Object* weak_collection) {
    encountered_weak_collections_ = weak_collection;
  }
  Object* encountered_weak_collections() const {
    return encountered_weak_collections_;
  }

  void set_encountered_weak_cells(Object* weak_cell) {
    encountered_weak_cells_ = weak_cell;
  }
  Object* encountered_weak_cells() const { return encountered_weak_cells_; }

  void set_encountered_transition_arrays(Object* transition_array) {
    encountered_transition_arrays_ = transition_array;
  }
  Object* encountered_transition_arrays() const {
    return encountered_transition_arrays_;
  }

  // Number of mark-sweeps.
  int ms_count() const { return ms_count_; }

  // Checks whether the given object is allowed to be migrated from its
  // current space into the given destination space. Used for debugging.
  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);

  void CheckHandleCount();

  // Number of "runtime allocations" done so far.
  uint32_t allocations_count() { return allocations_count_; }

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  inline HeapState gc_state() { return gc_state_; }

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

  // If an object has an AllocationMemento trailing it, return it, otherwise
  // return NULL.
  template <FindMementoMode mode>
  inline AllocationMemento* FindAllocationMemento(HeapObject* object);

  // Returns false if not able to reserve.
  bool ReserveSpace(Reservation* reservations);

  //
  // Support for the API.
  //

  void CreateApiObjects();

  // Implements the corresponding V8 API function.
  bool IdleNotification(double deadline_in_seconds);
  bool IdleNotification(int idle_time_in_ms);

  double MonotonicallyIncreasingTimeInMs();

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  // Check new space expansion criteria and expand semispaces if they were hit.
  void CheckNewSpaceExpansionCriteria();

  inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) {
    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;

    intptr_t adjusted_allocation_limit = limit - new_space_.Capacity();

    if (PromotedTotalSize() >= adjusted_allocation_limit) return true;

    return false;
  }
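
  // Worked example: with limit == 40 MB and an 8 MB new space capacity,
  // incremental marking is signalled once PromotedTotalSize() reaches 32 MB
  // (and, under --stress-compaction, on every other GC regardless).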

  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);

  // An object should be promoted if it has survived a
  // scavenge operation.
  inline bool ShouldBePromoted(Address old_address, int object_size);

  void ClearNormalizedMapCaches();

  void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);

  inline bool OldGenerationAllocationLimitReached();

  void QueueMemoryChunkForFree(MemoryChunk* chunk);
  void FreeQueuedChunks(MemoryChunk* list_head);
  void FreeQueuedChunks();
  void WaitUntilUnmappingOfFreeChunksCompleted();

  // Completely clear the Instanceof cache (to stop it keeping objects alive
  // around a GC).
  inline void CompletelyClearInstanceofCache();

  inline uint32_t HashSeed();

  inline int NextScriptId();

  inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
  inline void SetConstructStubDeoptPCOffset(int pc_offset);
  inline void SetGetterStubDeoptPCOffset(int pc_offset);
  inline void SetSetterStubDeoptPCOffset(int pc_offset);

  // For post mortem debugging.
  void RememberUnmappedPage(Address page, bool compacted);

  // Global inline caching age: it is incremented on some GCs after context
  // disposal. We use it to flush inline caches.
  int global_ic_age() { return global_ic_age_; }

  void AgeInlineCaches() {
    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
  }

  int64_t amount_of_external_allocated_memory() {
    return amount_of_external_allocated_memory_;
  }

  void update_amount_of_external_allocated_memory(int64_t delta) {
    amount_of_external_allocated_memory_ += delta;
  }

  void DeoptMarkedAllocationSites();

  bool DeoptMaybeTenuredAllocationSites() {
    return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
  }

  void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
                                     Handle<DependentCode> dep);

  DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);

  void AddRetainedMap(Handle<Map> map);

  // This event is triggered after successful allocation of a new object made
  // by runtime. Allocations of target space for object evacuation do not
  // trigger the event. In order to track ALL allocations one must turn off
  // FLAG_inline_new and FLAG_use_allocation_folding.
  inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);

  // This event is triggered after an object is moved to a new place.
  inline void OnMoveEvent(HeapObject* target, HeapObject* source,
                          int size_in_bytes);

  bool deserialization_complete() const { return deserialization_complete_; }

  bool HasLowAllocationRate();
  bool HasHighFragmentation();
  bool HasHighFragmentation(intptr_t used, intptr_t committed);

  void SetOptimizeForLatency() { optimize_for_memory_usage_ = false; }
  void SetOptimizeForMemoryUsage();
  bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }

  // ===========================================================================
  // Initialization. ===========================================================
  // ===========================================================================

  // Configure heap size in MB before setup. Return false if the heap has been
  // set up already.
  bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
                     int max_executable_size, size_t code_range_size);
  bool ConfigureHeapDefault();

  // Prepares the heap, setting up memory areas that are needed in the isolate
  // without actually creating any objects.
  bool SetUp();

  // Bootstraps the object heap with the core set of objects required to run.
  // Returns whether it succeeded.
  bool CreateHeapObjects();

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp();

  // ===========================================================================
  // Getters for spaces. =======================================================
  // ===========================================================================

  // Returns the start address and the current allocation top of the new
  // space.
  Address NewSpaceStart() { return new_space_.start(); }
  Address NewSpaceTop() { return new_space_.top(); }

  NewSpace* new_space() { return &new_space_; }
  OldSpace* old_space() { return old_space_; }
  OldSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  LargeObjectSpace* lo_space() { return lo_space_; }

  PagedSpace* paged_space(int idx) {
    switch (idx) {
      case OLD_SPACE:
        return old_space();
      case MAP_SPACE:
        return map_space();
      case CODE_SPACE:
        return code_space();
      case NEW_SPACE:
      case LO_SPACE:
        UNREACHABLE();
    }
    return NULL;
  }

  Space* space(int idx) {
    switch (idx) {
      case NEW_SPACE:
        return new_space();
      case LO_SPACE:
        return lo_space();
      default:
        return paged_space(idx);
    }
  }
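
  // For example, space(OLD_SPACE) and paged_space(OLD_SPACE) both yield
  // old_space(); only NEW_SPACE and LO_SPACE need the non-paged branch above.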

  // Returns the name of the space.
  const char* GetSpaceName(int idx);

  // ===========================================================================
  // Getters to other components. ==============================================
  // ===========================================================================

  GCTracer* tracer() { return tracer_; }

  PromotionQueue* promotion_queue() { return &promotion_queue_; }

  inline Isolate* isolate();

  MarkCompactCollector* mark_compact_collector() {
    return mark_compact_collector_;
  }

  // ===========================================================================
  // Root set access. ==========================================================
  // ===========================================================================

// Heap root getters.
#define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

// Utility type maps.
#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define STRING_ACCESSOR(name, str) inline String* name();
  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR

#define SYMBOL_ACCESSOR(name) inline Symbol* name();
  PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

#define SYMBOL_ACCESSOR(name, description) inline Symbol* name();
  PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
  WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

  Object* root(RootListIndex index) { return roots_[index]; }
  Handle<Object> root_handle(RootListIndex index) {
    return Handle<Object>(&roots_[index]);
  }

  // Generated code can embed this address to get access to the roots.
  Object** roots_array_start() { return roots_; }

  // Sets the code_stubs root (only used when expanding the dictionary).
  void SetRootCodeStubs(UnseededNumberDictionary* value) {
    roots_[kCodeStubsRootIndex] = value;
  }

  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
  void SetRootNonMonomorphicCache(UnseededNumberDictionary* value) {
    roots_[kNonMonomorphicCacheRootIndex] = value;
  }

  void SetRootMaterializedObjects(FixedArray* objects) {
    roots_[kMaterializedObjectsRootIndex] = objects;
  }

  void SetRootScriptList(Object* value) {
    roots_[kScriptListRootIndex] = value;
  }

  void SetRootStringTable(StringTable* value) {
    roots_[kStringTableRootIndex] = value;
  }

  void SetRootNoScriptSharedFunctionInfos(Object* value) {
    roots_[kNoScriptSharedFunctionInfosRootIndex] = value;
  }

  // Set the stack limit in the roots_ array. Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  void SetStackLimits();

  // Generated code can treat direct references to this root as constant.
  bool RootCanBeTreatedAsConstant(RootListIndex root_index);

  Map* MapForFixedTypedArray(ExternalArrayType array_type);
  RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);

  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);

  void RegisterStrongRoots(Object** start, Object** end);
  void UnregisterStrongRoots(Object** start);

  // ===========================================================================
  // Inline allocation. ========================================================
  // ===========================================================================

  // Indicates whether inline bump-pointer allocation has been disabled.
  bool inline_allocation_disabled() { return inline_allocation_disabled_; }

  // Switch whether inline bump-pointer allocation should be used.
  void EnableInlineAllocation();
  void DisableInlineAllocation();

  // ===========================================================================
  // Methods triggering GCs. ===================================================
  // ===========================================================================

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  inline bool CollectGarbage(
      AllocationSpace space, const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
  // non-zero, then the slower precise sweeper is used, which leaves the heap
  // in a state where we can iterate over the heap visiting all objects.
  void CollectAllGarbage(
      int flags = kFinalizeIncrementalMarkingMask, const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Last-resort GC; should try to squeeze out as much garbage as possible.
  void CollectAllAvailableGarbage(const char* gc_reason = NULL);
  // Reports an external memory pressure event; either performs a major GC or
  // completes incremental marking in order to free external resources.
  void ReportExternalMemoryPressure(const char* gc_reason = NULL);

  // Invoked when GC was requested via the stack guard.
  void HandleGCRequest();

  // ===========================================================================
  // Iterators. ================================================================
  // ===========================================================================

  // Iterates over all roots in the heap.
  void IterateRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all strong roots in the heap.
  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over entries in the smi roots list. Only interesting to the
  // serializer/deserializer, since GC does not care about smis.
  void IterateSmiRoots(ObjectVisitor* v);
  // Iterates over all the other roots in the heap.
  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
  // Iterate over pointers to the from-semispace of new space found within
  // |target|, which has the given size.
  void IteratePointersToFromSpace(HeapObject* target, int size,
                                  ObjectSlotCallback callback);

  void IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
                                         Address end, bool record_slots,
                                         ObjectSlotCallback callback);

  // ===========================================================================
  // Store buffer API. =========================================================
  // ===========================================================================

  // Write barrier support for object[offset] = o;
  inline void RecordWrite(Object* object, int offset, Object* o);
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1057 | |
| 1058 | Address* store_buffer_top_address() { |
| 1059 | return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]); |
| 1060 | } |
| 1061 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame^] | 1062 | void ClearRecordedSlot(HeapObject* object, Object** slot); |
| 1063 | void ClearRecordedSlotRange(HeapObject* object, Object** start, Object** end); |
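
// Hedged example of the barrier protocol: after a raw in-object store, the
// write barrier must be informed so the store buffer can record a possible
// old-to-new pointer (kValueOffset is a hypothetical field offset):
//
//   WRITE_FIELD(host, kValueOffset, value);        // raw store
//   heap->RecordWrite(host, kValueOffset, value);  // barrier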
| 1064 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1065 | // =========================================================================== |
| 1066 | // Incremental marking API. ================================================== |
| 1067 | // =========================================================================== |
| 1068 | |
| 1069 | // Starts incremental marking and ensures that the idle time handler can
| 1070 | // perform incremental steps.
| 1071 | void StartIdleIncrementalMarking(); |
| 1072 | |
| 1073 | // Starts incremental marking assuming incremental marking is currently |
| 1074 | // stopped. |
| 1075 | void StartIncrementalMarking(int gc_flags = kNoGCFlags, |
| 1076 | const GCCallbackFlags gc_callback_flags = |
| 1077 | GCCallbackFlags::kNoGCCallbackFlags, |
| 1078 | const char* reason = nullptr); |
| 1079 | |
| 1080 | void FinalizeIncrementalMarkingIfComplete(const char* comment); |
| 1081 | |
| 1082 | bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms); |
| 1083 | |
| 1084 | IncrementalMarking* incremental_marking() { return incremental_marking_; } |
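
// A plausible driver loop (sketch only; IsComplete() lives on
// IncrementalMarking, and who performs the actual steps is assumed here):
//
//   heap->StartIncrementalMarking(Heap::kNoGCFlags);
//   while (!heap->incremental_marking()->IsComplete()) {
//     // ... let the idle time handler or allocation observers do steps ...
//   }
//   heap->FinalizeIncrementalMarkingIfComplete("example: marking done");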
| 1085 | |
| 1086 | // =========================================================================== |
| 1087 | // External string table API. ================================================ |
| 1088 | // =========================================================================== |
| 1089 | |
| 1090 | // Registers an external string. |
| 1091 | inline void RegisterExternalString(String* string); |
| 1092 | |
| 1093 | // Finalizes an external string by deleting the associated external |
| 1094 | // data and clearing the resource pointer. |
| 1095 | inline void FinalizeExternalString(String* string); |
| 1096 | |
| 1097 | // =========================================================================== |
| 1098 | // Methods checking/returning the space of a given object/address. =========== |
| 1099 | // =========================================================================== |
| 1100 | |
| 1101 | // Returns whether the object resides in new space. |
| 1102 | inline bool InNewSpace(Object* object); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1103 | inline bool InFromSpace(Object* object); |
| 1104 | inline bool InToSpace(Object* object); |
| 1105 | |
| 1106 | // Returns whether the object resides in old space. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1107 | inline bool InOldSpace(Object* object); |
| 1108 | |
| 1109 | // Checks whether an address/object is in the heap (including auxiliary
| 1110 | // areas and unused areas).
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1111 | bool Contains(HeapObject* value); |
| 1112 | |
| 1113 | // Checks whether an address/object is in a space.
| 1114 | // Currently used by tests, serialization and heap verification only. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1115 | bool InSpace(HeapObject* value, AllocationSpace space); |
| 1116 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame^] | 1117 | // Slow methods that can be used for verification as they can also be used |
| 1118 | // with off-heap Addresses. |
| 1119 | bool ContainsSlow(Address addr); |
| 1120 | bool InSpaceSlow(Address addr, AllocationSpace space); |
| 1121 | inline bool InNewSpaceSlow(Address address); |
| 1122 | inline bool InOldSpaceSlow(Address address); |
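
// Typical use of these predicates is in assertions; a sketch:
//
//   DCHECK(heap->Contains(obj));
//   if (heap->InNewSpace(obj)) {
//     DCHECK(heap->InFromSpace(obj) || heap->InToSpace(obj));
//   }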
| 1123 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1124 | // =========================================================================== |
| 1125 | // Object statistics tracking. =============================================== |
| 1126 | // =========================================================================== |
| 1127 | |
| 1128 | // Returns the number of buckets used by object statistics tracking during a |
| 1129 | // major GC. Note that the following methods fail gracefully when the bounds |
| 1130 | // are exceeded.
| 1131 | size_t NumberOfTrackedHeapObjectTypes(); |
| 1132 | |
| 1133 | // Returns object statistics about count and size at the last major GC. |
| 1134 | // Objects are grouped into buckets that roughly resemble existing
| 1135 | // instance types. |
| 1136 | size_t ObjectCountAtLastGC(size_t index); |
| 1137 | size_t ObjectSizeAtLastGC(size_t index); |
| 1138 | |
| 1139 | // Retrieves names of buckets used by object statistics tracking. |
| 1140 | bool GetObjectTypeName(size_t index, const char** object_type, |
| 1141 | const char** object_sub_type); |
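
// Sketch of dumping the per-bucket statistics recorded at the last major GC
// (PrintF is V8's printf wrapper; the format strings are illustrative):
//
//   for (size_t i = 0; i < heap->NumberOfTrackedHeapObjectTypes(); ++i) {
//     const char* type;
//     const char* sub_type;
//     if (!heap->GetObjectTypeName(i, &type, &sub_type)) continue;
//     PrintF("%s/%s: count=%zu size=%zu\n", type, sub_type,
//            heap->ObjectCountAtLastGC(i), heap->ObjectSizeAtLastGC(i));
//   }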
| 1142 | |
| 1143 | // =========================================================================== |
| 1144 | // GC statistics. ============================================================ |
| 1145 | // =========================================================================== |
| 1146 | |
| 1147 | // Returns the maximum amount of memory reserved for the heap. For |
| 1148 | // the young generation, we reserve 4 times the amount needed for a |
| 1149 | // semi space. The young generation consists of two semi spaces and |
| 1150 | // we reserve twice the amount needed for those in order to ensure |
| 1151 | // that new space can be aligned to its size. |
| 1152 | intptr_t MaxReserved() { |
| 1153 | return 4 * reserved_semispace_size_ + max_old_generation_size_; |
| 1154 | } |
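
// Worked example with illustrative (not default) sizes: if
// reserved_semispace_size_ is 8 MB and max_old_generation_size_ is 700 MB,
// MaxReserved() yields 4 * 8 MB + 700 MB = 732 MB.
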
| 1155 | int MaxSemiSpaceSize() { return max_semi_space_size_; } |
| 1156 | int ReservedSemiSpaceSize() { return reserved_semispace_size_; } |
| 1157 | int InitialSemiSpaceSize() { return initial_semispace_size_; } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1158 | intptr_t MaxOldGenerationSize() { return max_old_generation_size_; } |
| 1159 | intptr_t MaxExecutableSize() { return max_executable_size_; } |
| 1160 | |
| 1161 | // Returns the capacity of the heap in bytes w/o growing. Heap grows when |
| 1162 | // more spaces are needed until it reaches the limit. |
| 1163 | intptr_t Capacity(); |
| 1164 | |
| 1165 | // Returns the amount of memory currently committed for the heap. |
| 1166 | intptr_t CommittedMemory(); |
| 1167 | |
| 1168 | // Returns the amount of memory currently committed for the old space. |
| 1169 | intptr_t CommittedOldGenerationMemory(); |
| 1170 | |
| 1171 | // Returns the amount of executable memory currently committed for the heap. |
| 1172 | intptr_t CommittedMemoryExecutable(); |
| 1173 | |
| 1174 | // Returns the amount of physical memory currently committed for the heap.
| 1175 | size_t CommittedPhysicalMemory(); |
| 1176 | |
| 1177 | // Returns the maximum amount of memory ever committed for the heap. |
| 1178 | intptr_t MaximumCommittedMemory() { return maximum_committed_; } |
| 1179 | |
| 1180 | // Updates the maximum committed memory for the heap. Should be called |
| 1181 | // whenever a space grows. |
| 1182 | void UpdateMaximumCommitted(); |
| 1183 | |
| 1184 | // Returns the available bytes in space w/o growing. |
| 1185 | // Heap doesn't guarantee that it can allocate an object that requires |
| 1186 | // all available bytes. Check MaxHeapObjectSize() instead. |
| 1187 | intptr_t Available(); |
| 1188 | |
| 1189 | // Returns the size of all objects residing in the heap.
| 1190 | intptr_t SizeOfObjects(); |
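
// Hedged sketch relating the accessors above, e.g. for computing heap
// overhead at an arbitrary point in time:
//
//   intptr_t used = heap->SizeOfObjects();
//   intptr_t committed = heap->CommittedMemory();
//   double overhead_ratio =
//       1.0 - static_cast<double>(used) / static_cast<double>(committed);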
| 1191 | |
| 1192 | void UpdateSurvivalStatistics(int start_new_space_size); |
| 1193 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame^] | 1194 | inline void IncrementPromotedObjectsSize(intptr_t object_size) { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1195 | DCHECK_GE(object_size, 0); |
| 1196 | promoted_objects_size_ += object_size; |
| 1197 | } |
| 1198 | inline intptr_t promoted_objects_size() { return promoted_objects_size_; } |
| 1199 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame^] | 1200 | inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1201 | DCHECK_GE(object_size, 0); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1202 | semi_space_copied_object_size_ += object_size; |
| 1203 | } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1204 | inline intptr_t semi_space_copied_object_size() { |
| 1205 | return semi_space_copied_object_size_; |
| 1206 | } |
| 1207 | |
| 1208 | inline intptr_t SurvivedNewSpaceObjectSize() { |
| 1209 | return promoted_objects_size_ + semi_space_copied_object_size_; |
| 1210 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1211 | |
| 1212 | inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; } |
| 1213 | |
| 1214 | inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; } |
| 1215 | |
| 1216 | inline void IncrementNodesPromoted() { nodes_promoted_++; } |
| 1217 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame^] | 1218 | inline void IncrementYoungSurvivorsCounter(intptr_t survived) { |
| 1219 | DCHECK_GE(survived, 0); |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1220 | survived_last_scavenge_ = survived; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1221 | survived_since_last_expansion_ += survived; |
| 1222 | } |
| 1223 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1224 | inline intptr_t PromotedTotalSize() { |
| 1225 | int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize(); |
| 1226 | if (total > std::numeric_limits<intptr_t>::max()) { |
| 1227 | // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations. |
| 1228 | return std::numeric_limits<intptr_t>::max(); |
| 1229 | } |
| 1230 | if (total < 0) return 0; |
| 1231 | return static_cast<intptr_t>(total); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1232 | } |
| 1233 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1234 | void UpdateNewSpaceAllocationCounter() { |
| 1235 | new_space_allocation_counter_ = NewSpaceAllocationCounter(); |
| 1236 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1237 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1238 | size_t NewSpaceAllocationCounter() { |
| 1239 | return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC(); |
| 1240 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1241 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1242 | // This should be used only for testing. |
| 1243 | void set_new_space_allocation_counter(size_t new_value) { |
| 1244 | new_space_allocation_counter_ = new_value; |
| 1245 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1246 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1247 | void UpdateOldGenerationAllocationCounter() { |
| 1248 | old_generation_allocation_counter_ = OldGenerationAllocationCounter(); |
| 1249 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1250 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1251 | size_t OldGenerationAllocationCounter() { |
| 1252 | return old_generation_allocation_counter_ + PromotedSinceLastGC(); |
| 1253 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1254 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1255 | // This should be used only for testing. |
| 1256 | void set_old_generation_allocation_counter(size_t new_value) { |
| 1257 | old_generation_allocation_counter_ = new_value; |
| 1258 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1259 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1260 | size_t PromotedSinceLastGC() { |
| 1261 | return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_; |
| 1262 | } |
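
// The counters above make it possible to measure bytes allocated between
// two program points; a sketch (assumes no intervening counter resets):
//
//   size_t young_before = heap->NewSpaceAllocationCounter();
//   size_t old_before = heap->OldGenerationAllocationCounter();
//   // ... run workload ...
//   size_t young_bytes = heap->NewSpaceAllocationCounter() - young_before;
//   size_t old_bytes = heap->OldGenerationAllocationCounter() - old_before;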
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1263 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1264 | int gc_count() const { return gc_count_; } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1265 | |
| 1266 | // Returns the size of objects residing in non new spaces. |
| 1267 | intptr_t PromotedSpaceSizeOfObjects(); |
| 1268 | |
| 1269 | double total_regexp_code_generated() { return total_regexp_code_generated_; } |
| 1270 | void IncreaseTotalRegexpCodeGenerated(int size) { |
| 1271 | total_regexp_code_generated_ += size; |
| 1272 | } |
| 1273 | |
| 1274 | void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) { |
| 1275 | if (is_crankshafted) { |
| 1276 | crankshaft_codegen_bytes_generated_ += size; |
| 1277 | } else { |
| 1278 | full_codegen_bytes_generated_ += size; |
| 1279 | } |
| 1280 | } |
| 1281 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1282 | // =========================================================================== |
| 1283 | // Prologue/epilogue callback methods. =======================================
| 1284 | // =========================================================================== |
| 1285 | |
| 1286 | void AddGCPrologueCallback(v8::Isolate::GCCallback callback, |
| 1287 | GCType gc_type_filter, bool pass_isolate = true); |
| 1288 | void RemoveGCPrologueCallback(v8::Isolate::GCCallback callback); |
| 1289 | |
| 1290 | void AddGCEpilogueCallback(v8::Isolate::GCCallback callback, |
| 1291 | GCType gc_type_filter, bool pass_isolate = true); |
| 1292 | void RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback); |
| 1293 | |
| 1294 | void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags); |
| 1295 | void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags); |
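
// Registration sketch, assuming the v8::Isolate::GCCallback signature from
// include/v8.h (isolate, GC type, callback flags):
//
//   static void OnFullGC(v8::Isolate* isolate, v8::GCType type,
//                        v8::GCCallbackFlags flags) {
//     // e.g. sample heap statistics here.
//   }
//
//   heap->AddGCPrologueCallback(OnFullGC, v8::kGCTypeMarkSweepCompact);
//   // ...
//   heap->RemoveGCPrologueCallback(OnFullGC);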
| 1296 | |
| 1297 | // =========================================================================== |
| 1298 | // Allocation methods. ======================================================= |
| 1299 | // =========================================================================== |
| 1300 | |
| 1301 | // Creates a filler object and returns a heap object immediately after it. |
| 1302 | MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object, |
| 1303 | int filler_size); |
| 1304 | |
| 1305 | // Creates a filler object if needed for alignment and returns a heap object |
| 1306 | // immediately after it. If any space is left after the returned object, |
| 1307 | // another filler object is created so the over-allocated memory is iterable.
| 1308 | MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object, |
| 1309 | int object_size, |
| 1310 | int allocation_size, |
| 1311 | AllocationAlignment alignment); |
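
// Worked example (assuming a 32-bit target where kPointerSize is 4 and
// kDoubleSize is 8): to double-align a 12-byte object, the caller requests
// allocation_size = 12 + 4 bytes; depending on the address returned,
// AlignWithFiller puts the one-word filler either before the object or in
// the leftover word after it, keeping the heap iterable.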
| 1312 | |
| 1313 | // =========================================================================== |
| 1314 | // ArrayBuffer tracking. ===================================================== |
| 1315 | // =========================================================================== |
| 1316 | |
| 1317 | void RegisterNewArrayBuffer(JSArrayBuffer* buffer); |
| 1318 | void UnregisterArrayBuffer(JSArrayBuffer* buffer); |
| 1319 | |
| 1320 | inline ArrayBufferTracker* array_buffer_tracker() { |
| 1321 | return array_buffer_tracker_; |
| 1322 | } |
| 1323 | |
| 1324 | // =========================================================================== |
| 1325 | // Allocation site tracking. ================================================= |
| 1326 | // =========================================================================== |
| 1327 | |
| 1328 | // Updates the AllocationSite of a given {object}. If the global pretenuring
| 1329 | // storage is passed as {pretenuring_feedback} the memento found count on
| 1330 | // the corresponding allocation site is immediately updated and an entry
| 1331 | // in the hash map is created. Otherwise the entry (including the count
| 1332 | // value) is cached on the local pretenuring feedback.
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame^] | 1333 | template <UpdateAllocationSiteMode mode> |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1334 | inline void UpdateAllocationSite(HeapObject* object, |
| 1335 | HashMap* pretenuring_feedback); |
| 1336 | |
| 1337 | // Removes an entry from the global pretenuring storage. |
| 1338 | inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site); |
| 1339 | |
| 1340 | // Merges local pretenuring feedback into the global one. Note that this |
| 1341 | // method needs to be called after evacuation, as allocation sites may be |
| 1342 | // evacuated and this method resolves forward pointers accordingly. |
| 1343 | void MergeAllocationSitePretenuringFeedback( |
| 1344 | const HashMap& local_pretenuring_feedback); |
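
// Sketch of the cached-feedback flow (kCached as the mode and the HashMap
// construction details are assumptions here):
//
//   HashMap local_feedback(HashMap::PointersMatch, kInitialFeedbackCapacity);
//   heap->UpdateAllocationSite<kCached>(candidate, &local_feedback);
//   // ... after evacuation ...
//   heap->MergeAllocationSitePretenuringFeedback(local_feedback);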
| 1345 | |
| 1346 | // ============================================================================= |
| 1347 | |
| 1348 | #ifdef VERIFY_HEAP |
| 1349 | // Verify the heap is in its normal state before or after a GC. |
| 1350 | void Verify(); |
| 1351 | #endif |
| 1352 | |
| 1353 | #ifdef DEBUG |
| 1354 | void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; } |
| 1355 | |
| 1356 | void TracePathToObjectFrom(Object* target, Object* root); |
| 1357 | void TracePathToObject(Object* target); |
| 1358 | void TracePathToGlobal(); |
| 1359 | |
| 1360 | void Print(); |
| 1361 | void PrintHandles(); |
| 1362 | |
| 1363 | // Report heap statistics. |
| 1364 | void ReportHeapStatistics(const char* title); |
| 1365 | void ReportCodeStatistics(const char* title); |
| 1366 | #endif |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame^] | 1367 | #ifdef ENABLE_SLOW_DCHECKS |
| 1368 | int CountHandlesForObject(Object* object); |
| 1369 | #endif |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1370 | |
| 1371 | private: |
| 1372 | class PretenuringScope; |
| 1373 | class UnmapFreeMemoryTask; |
| 1374 | |
| 1375 | // The external string table is the place where all external strings are
| 1376 | // registered. We need to keep track of such strings to properly
| 1377 | // finalize them. |
| 1378 | class ExternalStringTable { |
| 1379 | public: |
| 1380 | // Registers an external string. |
| 1381 | inline void AddString(String* string); |
| 1382 | |
| 1383 | inline void Iterate(ObjectVisitor* v); |
| 1384 | |
| 1385 | // Restores internal invariant and gets rid of collected strings. |
| 1386 | // Must be called after each Iterate() that modified the strings. |
| 1387 | void CleanUp(); |
| 1388 | |
| 1389 | // Destroys all allocated memory. |
| 1390 | void TearDown(); |
| 1391 | |
| 1392 | private: |
| 1393 | explicit ExternalStringTable(Heap* heap) : heap_(heap) {} |
| 1394 | |
| 1395 | inline void Verify(); |
| 1396 | |
| 1397 | inline void AddOldString(String* string); |
| 1398 | |
| 1399 | // Notifies the table that only a prefix of the new list is valid. |
| 1400 | inline void ShrinkNewStrings(int position); |
| 1401 | |
| 1402 | // To speed up scavenge collections, new space strings are kept
| 1403 | // separate from old space strings. |
| 1404 | List<Object*> new_space_strings_; |
| 1405 | List<Object*> old_space_strings_; |
| 1406 | |
| 1407 | Heap* heap_; |
| 1408 | |
| 1409 | friend class Heap; |
| 1410 | |
| 1411 | DISALLOW_COPY_AND_ASSIGN(ExternalStringTable); |
| 1412 | }; |
| 1413 | |
| 1414 | struct StrongRootsList; |
| 1415 | |
| 1416 | struct StringTypeTable { |
| 1417 | InstanceType type; |
| 1418 | int size; |
| 1419 | RootListIndex index; |
| 1420 | }; |
| 1421 | |
| 1422 | struct ConstantStringTable { |
| 1423 | const char* contents; |
| 1424 | RootListIndex index; |
| 1425 | }; |
| 1426 | |
| 1427 | struct StructTable { |
| 1428 | InstanceType type; |
| 1429 | int size; |
| 1430 | RootListIndex index; |
| 1431 | }; |
| 1432 | |
| 1433 | struct GCCallbackPair { |
| 1434 | GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type, |
| 1435 | bool pass_isolate) |
| 1436 | : callback(callback), gc_type(gc_type), pass_isolate(pass_isolate) {} |
| 1437 | |
| 1438 | bool operator==(const GCCallbackPair& other) const { |
| 1439 | return other.callback == callback; |
| 1440 | } |
| 1441 | |
| 1442 | v8::Isolate::GCCallback callback; |
| 1443 | GCType gc_type; |
| 1444 | bool pass_isolate; |
| 1445 | }; |
| 1446 | |
| 1447 | typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap, |
| 1448 | Object** pointer); |
| 1449 | |
| 1450 | static const int kInitialStringTableSize = 2048; |
| 1451 | static const int kInitialEvalCacheSize = 64; |
| 1452 | static const int kInitialNumberStringCacheSize = 256; |
| 1453 | |
| 1454 | static const int kRememberedUnmappedPages = 128; |
| 1455 | |
| 1456 | static const StringTypeTable string_type_table[]; |
| 1457 | static const ConstantStringTable constant_string_table[]; |
| 1458 | static const StructTable struct_table[]; |
| 1459 | |
| 1460 | static const int kYoungSurvivalRateHighThreshold = 90; |
| 1461 | static const int kYoungSurvivalRateAllowedDeviation = 15; |
| 1462 | static const int kOldSurvivalRateLowThreshold = 10; |
| 1463 | |
| 1464 | static const int kMaxMarkCompactsInIdleRound = 7; |
| 1465 | static const int kIdleScavengeThreshold = 5; |
| 1466 | |
| 1467 | static const int kInitialFeedbackCapacity = 256; |
| 1468 | |
| 1469 | Heap(); |
| 1470 | |
| 1471 | static String* UpdateNewSpaceReferenceInExternalStringTableEntry( |
| 1472 | Heap* heap, Object** pointer); |
| 1473 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1474 | // Selects the proper allocation space based on the pretenuring decision. |
| 1475 | static AllocationSpace SelectSpace(PretenureFlag pretenure) { |
| 1476 | return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE; |
| 1477 | } |
| 1478 | |
| 1479 | #define ROOT_ACCESSOR(type, name, camel_name) \ |
| 1480 | inline void set_##name(type* value); |
| 1481 | ROOT_LIST(ROOT_ACCESSOR) |
| 1482 | #undef ROOT_ACCESSOR |
| 1483 | |
| 1484 | StoreBuffer* store_buffer() { return &store_buffer_; } |
| 1485 | |
| 1486 | void set_current_gc_flags(int flags) { |
| 1487 | current_gc_flags_ = flags; |
| 1488 | DCHECK(!ShouldFinalizeIncrementalMarking() || |
| 1489 | !ShouldAbortIncrementalMarking()); |
| 1490 | } |
| 1491 | |
| 1492 | inline bool ShouldReduceMemory() const { |
| 1493 | return current_gc_flags_ & kReduceMemoryFootprintMask; |
| 1494 | } |
| 1495 | |
| 1496 | inline bool ShouldAbortIncrementalMarking() const { |
| 1497 | return current_gc_flags_ & kAbortIncrementalMarkingMask; |
| 1498 | } |
| 1499 | |
| 1500 | inline bool ShouldFinalizeIncrementalMarking() const { |
| 1501 | return current_gc_flags_ & kFinalizeIncrementalMarkingMask; |
| 1502 | } |
| 1503 | |
| 1504 | void PreprocessStackTraces(); |
| 1505 | |
| 1506 | // Checks whether a global GC is necessary |
| 1507 | GarbageCollector SelectGarbageCollector(AllocationSpace space, |
| 1508 | const char** reason); |
| 1509 | |
| 1510 | // Make sure there is a filler value behind the top of the new space |
| 1511 | // so that the GC does not confuse some uninitialized/stale memory
| 1512 | // with the allocation memento of the object at the top.
| 1513 | void EnsureFillerObjectAtTop(); |
| 1514 | |
| 1515 | // Ensure that we have swept all spaces in such a way that we can iterate |
| 1516 | // over all objects. May cause a GC. |
| 1517 | void MakeHeapIterable(); |
| 1518 | |
| 1519 | // Performs a garbage collection operation.
| 1520 | // Returns whether there is a chance that another major GC could |
| 1521 | // collect more garbage. |
| 1522 | bool CollectGarbage( |
| 1523 | GarbageCollector collector, const char* gc_reason, |
| 1524 | const char* collector_reason, |
| 1525 | const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
| 1526 | |
| 1527 | // Performs garbage collection |
| 1528 | // Returns whether there is a chance another major GC could |
| 1529 | // collect more garbage. |
| 1530 | bool PerformGarbageCollection( |
| 1531 | GarbageCollector collector, |
| 1532 | const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
| 1533 | |
| 1534 | inline void UpdateOldSpaceLimits(); |
| 1535 | |
| 1536 | // Initializes a JSObject based on its map. |
| 1537 | void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties, |
| 1538 | Map* map); |
| 1539 | |
| 1540 | // Initializes JSObject body starting at given offset. |
| 1541 | void InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset); |
| 1542 | |
| 1543 | void InitializeAllocationMemento(AllocationMemento* memento, |
| 1544 | AllocationSite* allocation_site); |
| 1545 | |
| 1546 | bool CreateInitialMaps(); |
| 1547 | void CreateInitialObjects(); |
| 1548 | |
| 1549 | // These two Create*EntryStub functions are here and forced to not be inlined
| 1550 | // because of a gcc-4.4 bug that assigns wrong vtable entries. |
| 1551 | NO_INLINE(void CreateJSEntryStub()); |
| 1552 | NO_INLINE(void CreateJSConstructEntryStub()); |
| 1553 | |
| 1554 | void CreateFixedStubs(); |
| 1555 | |
| 1556 | HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size); |
| 1557 | |
| 1558 | // Commits from space if it is uncommitted. |
| 1559 | void EnsureFromSpaceIsCommitted(); |
| 1560 | |
| 1561 | // Uncommit unused semi space. |
| 1562 | bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); } |
| 1563 | |
| 1564 | // Fill in bogus values in from space |
| 1565 | void ZapFromSpace(); |
| 1566 | |
| 1567 | // Deopts all code that contains allocation instructions which are tenured or
| 1568 | // not tenured. Moreover, it clears the pretenuring allocation site statistics.
| 1569 | void ResetAllAllocationSitesDependentCode(PretenureFlag flag); |
| 1570 | |
| 1571 | // Evaluates local pretenuring for the old space and calls |
| 1572 | // ResetAllTenuredAllocationSitesDependentCode if too many objects died in |
| 1573 | // the old space. |
| 1574 | void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc); |
| 1575 | |
| 1576 | // Record statistics before and after garbage collection. |
| 1577 | void ReportStatisticsBeforeGC(); |
| 1578 | void ReportStatisticsAfterGC(); |
| 1579 | |
| 1580 | // Computes the length of the full-sized number string cache.
| 1581 | int FullSizeNumberStringCacheLength(); |
| 1582 | // Flush the number to string cache. |
| 1583 | void FlushNumberStringCache(); |
| 1584 | |
| 1585 | // TODO(hpayer): Allocation site pretenuring may make this method obsolete. |
| 1586 | // Re-visit incremental marking heuristics. |
| 1587 | bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; } |
| 1588 | |
| 1589 | void ConfigureInitialOldGenerationSize(); |
| 1590 | |
| 1591 | bool HasLowYoungGenerationAllocationRate(); |
| 1592 | bool HasLowOldGenerationAllocationRate(); |
| 1593 | double YoungGenerationMutatorUtilization(); |
| 1594 | double OldGenerationMutatorUtilization(); |
| 1595 | |
| 1596 | void ReduceNewSpaceSize(); |
| 1597 | |
| 1598 | bool TryFinalizeIdleIncrementalMarking( |
| 1599 | double idle_time_in_ms, size_t size_of_objects, |
| 1600 | size_t mark_compact_speed_in_bytes_per_ms); |
| 1601 | |
| 1602 | GCIdleTimeHeapState ComputeHeapState(); |
| 1603 | |
| 1604 | bool PerformIdleTimeAction(GCIdleTimeAction action, |
| 1605 | GCIdleTimeHeapState heap_state, |
| 1606 | double deadline_in_ms); |
| 1607 | |
| 1608 | void IdleNotificationEpilogue(GCIdleTimeAction action, |
| 1609 | GCIdleTimeHeapState heap_state, double start_ms, |
| 1610 | double deadline_in_ms); |
| 1611 | |
| 1612 | inline void UpdateAllocationsHash(HeapObject* object); |
| 1613 | inline void UpdateAllocationsHash(uint32_t value); |
| 1614 | void PrintAlloctionsHash(); |
| 1615 | |
| 1616 | void AddToRingBuffer(const char* string); |
| 1617 | void GetFromRingBuffer(char* buffer); |
| 1618 | |
| 1619 | void CompactRetainedMaps(ArrayList* retained_maps); |
| 1620 | |
| 1621 | // Attempt to over-approximate the weak closure by marking object groups and |
| 1622 | // implicit references from global handles, but don't atomically complete |
| 1623 | // marking. If we continue to mark incrementally, we might have marked |
| 1624 | // objects that die later. |
| 1625 | void FinalizeIncrementalMarking(const char* gc_reason); |
| 1626 | |
| 1627 | // Returns the timer used for a given GC type. |
| 1628 | // - GCScavenger: young generation GC |
| 1629 | // - GCCompactor: full GC |
| 1630 | // - GCFinalizeMC: finalization of incremental full GC
| 1631 | // - GCFinalizeMCReduceMemory: finalization of incremental full GC with |
| 1632 | // memory reduction |
| 1633 | HistogramTimer* GCTypeTimer(GarbageCollector collector); |
| 1634 | |
| 1635 | // =========================================================================== |
| 1636 | // Pretenuring. ============================================================== |
| 1637 | // =========================================================================== |
| 1638 | |
| 1639 | // Pretenuring decisions are made based on feedback collected during new space |
| 1640 | // evacuation. Note that between feedback collection and calling this method |
| 1641 | // objects in old space must not move.
| 1642 | void ProcessPretenuringFeedback(); |
| 1643 | |
| 1644 | // =========================================================================== |
| 1645 | // Actual GC. ================================================================ |
| 1646 | // =========================================================================== |
| 1647 | |
| 1648 | // Code that should be run before and after each GC. Includes some |
| 1649 | // reporting/verification activities when compiled with DEBUG set. |
| 1650 | void GarbageCollectionPrologue(); |
| 1651 | void GarbageCollectionEpilogue(); |
| 1652 | |
| 1653 | // Performs a major collection in the whole heap. |
| 1654 | void MarkCompact(); |
| 1655 | |
| 1656 | // Code to be run before and after mark-compact. |
| 1657 | void MarkCompactPrologue(); |
| 1658 | void MarkCompactEpilogue(); |
| 1659 | |
| 1660 | // Performs a minor collection in the new generation.
| 1661 | void Scavenge(); |
| 1662 | |
| 1663 | Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); |
| 1664 | |
| 1665 | void UpdateNewSpaceReferencesInExternalStringTable( |
| 1666 | ExternalStringTableUpdaterCallback updater_func); |
| 1667 | |
| 1668 | void UpdateReferencesInExternalStringTable( |
| 1669 | ExternalStringTableUpdaterCallback updater_func); |
| 1670 | |
| 1671 | void ProcessAllWeakReferences(WeakObjectRetainer* retainer); |
| 1672 | void ProcessYoungWeakReferences(WeakObjectRetainer* retainer); |
| 1673 | void ProcessNativeContexts(WeakObjectRetainer* retainer); |
| 1674 | void ProcessAllocationSites(WeakObjectRetainer* retainer); |
| 1675 | |
| 1676 | // =========================================================================== |
| 1677 | // GC statistics. ============================================================ |
| 1678 | // =========================================================================== |
| 1679 | |
| 1680 | inline intptr_t OldGenerationSpaceAvailable() { |
| 1681 | return old_generation_allocation_limit_ - PromotedTotalSize(); |
| 1682 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1683 | |
| 1684 | // Returns maximum GC pause. |
| 1685 | double get_max_gc_pause() { return max_gc_pause_; } |
| 1686 | |
| 1687 | // Returns maximum size of objects alive after GC. |
| 1688 | intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; } |
| 1689 | |
| 1690 | // Returns minimal interval between two subsequent collections. |
| 1691 | double get_min_in_mutator() { return min_in_mutator_; } |
| 1692 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1693 | // Update GC statistics that are tracked on the Heap. |
| 1694 | void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator, |
| 1695 | double marking_time); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1696 | |
| 1697 | bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; } |
| 1698 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1699 | // =========================================================================== |
| 1700 | // Growing strategy. ========================================================= |
| 1701 | // =========================================================================== |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1702 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1703 | // Decrease the allocation limit if the new limit based on the given |
| 1704 | // parameters is lower than the current limit. |
| 1705 | void DampenOldGenerationAllocationLimit(intptr_t old_gen_size, |
| 1706 | double gc_speed, |
| 1707 | double mutator_speed); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1708 | |
| 1709 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1710 | // Calculates the allocation limit based on a given growing factor and a |
| 1711 | // given old generation size. |
| 1712 | intptr_t CalculateOldGenerationAllocationLimit(double factor, |
| 1713 | intptr_t old_gen_size); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1714 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1715 | // Sets the allocation limit to trigger the next full garbage collection. |
| 1716 | void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed, |
| 1717 | double mutator_speed); |
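
// Illustrative numbers only: with a growing factor of 1.5 and an old
// generation size of 256 MB, CalculateOldGenerationAllocationLimit() yields
// roughly 1.5 * 256 MB = 384 MB (the real computation also clamps the
// result against the configured maximum old generation size).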
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1718 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1719 | // =========================================================================== |
| 1720 | // Idle notification. ======================================================== |
| 1721 | // =========================================================================== |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1722 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1723 | bool RecentIdleNotificationHappened(); |
| 1724 | void ScheduleIdleScavengeIfNeeded(int bytes_allocated); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1725 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1726 | // =========================================================================== |
| 1727 | // HeapIterator helpers. ===================================================== |
| 1728 | // =========================================================================== |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1729 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1730 | void heap_iterator_start() { heap_iterator_depth_++; } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1731 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1732 | void heap_iterator_end() { heap_iterator_depth_--; } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1733 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1734 | bool in_heap_iterator() { return heap_iterator_depth_ > 0; } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1735 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1736 | // =========================================================================== |
| 1737 | // Allocation methods. ======================================================= |
| 1738 | // =========================================================================== |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1739 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1740 | // Returns a deep copy of the JavaScript object. |
| 1741 | // Properties and elements are copied too. |
| 1742 | // Optionally takes an AllocationSite to be appended in an AllocationMemento. |
| 1743 | MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source, |
| 1744 | AllocationSite* site = NULL); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1745 | |
| 1746 | // Allocates a JS Map in the heap. |
| 1747 | MUST_USE_RESULT AllocationResult |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1748 | AllocateMap(InstanceType instance_type, int instance_size, |
| 1749 | ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1750 | |
| 1751 | // Allocates and initializes a new JavaScript object based on a |
| 1752 | // constructor. |
| 1753 | // If allocation_site is non-null, then a memento is emitted after the object |
| 1754 | // that points to the site. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1755 | MUST_USE_RESULT AllocationResult AllocateJSObject( |
| 1756 | JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED, |
| 1757 | AllocationSite* allocation_site = NULL); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1758 | |
| 1759 | // Allocates and initializes a new JavaScript object based on a map. |
| 1760 | // Passing an allocation site means that a memento will be created that |
| 1761 | // points to the site. |
| 1762 | MUST_USE_RESULT AllocationResult |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1763 | AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED, |
| 1764 | AllocationSite* allocation_site = NULL); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1765 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1766 | // Allocates a HeapNumber from value. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1767 | MUST_USE_RESULT AllocationResult |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1768 | AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE, |
| 1769 | PretenureFlag pretenure = NOT_TENURED); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1770 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1771 | // Allocates SIMD values from the given lane values. |
| 1772 | #define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \ |
| 1773 | AllocationResult Allocate##Type(lane_type lanes[lane_count], \ |
| 1774 | PretenureFlag pretenure = NOT_TENURED); |
| 1775 | SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION) |
| 1776 | #undef SIMD_ALLOCATE_DECLARATION |
| 1777 | |
| 1778 | // Allocates a byte array of the specified length |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1779 | MUST_USE_RESULT AllocationResult |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1780 | AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED); |
| 1781 | |
| 1782 | // Allocates a bytecode array with given contents. |
| 1783 | MUST_USE_RESULT AllocationResult |
| 1784 | AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size, |
| 1785 | int parameter_count, FixedArray* constant_pool); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1786 | |
| 1787 | // Copy the code and scope info part of the code object, but insert |
| 1788 | // the provided data as the relocation information. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1789 | MUST_USE_RESULT AllocationResult CopyCode(Code* code, |
| 1790 | Vector<byte> reloc_info); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1791 | |
| 1792 | MUST_USE_RESULT AllocationResult CopyCode(Code* code); |
| 1793 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame^] | 1794 | MUST_USE_RESULT AllocationResult |
| 1795 | CopyBytecodeArray(BytecodeArray* bytecode_array); |
| 1796 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1797 | // Allocates a fixed array initialized with undefined values |
| 1798 | MUST_USE_RESULT AllocationResult |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1799 | AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1800 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1801 | // Allocate an uninitialized object. The memory is non-executable if the |
| 1802 | // hardware and OS allow. This is the single choke-point for allocations |
| 1803 | // performed by the runtime and should not be bypassed (to extend this to |
| 1804 | // inlined allocations, use the Heap::DisableInlineAllocation() support). |
| 1805 | MUST_USE_RESULT inline AllocationResult AllocateRaw( |
| 1806 | int size_in_bytes, AllocationSpace space, |
| 1807 | AllocationAlignment alignment = kWordAligned);
| 1808 | |
| 1809 | // Allocates a heap object based on the map. |
| 1810 | MUST_USE_RESULT AllocationResult |
| 1811 | Allocate(Map* map, AllocationSpace space, |
| 1812 | AllocationSite* allocation_site = NULL); |
| 1813 | |
| 1814 | // Allocates a partial map for bootstrapping. |
| 1815 | MUST_USE_RESULT AllocationResult |
| 1816 | AllocatePartialMap(InstanceType instance_type, int instance_size); |
| 1817 | |
| 1818 | // Allocate a block of memory in the given space (filled with a filler). |
| 1819 | // Used as a fall-back for generated code when the space is full. |
| 1820 | MUST_USE_RESULT AllocationResult |
| 1821 | AllocateFillerObject(int size, bool double_align, AllocationSpace space); |
| 1822 | |
| 1823 | // Allocate an uninitialized fixed array. |
| 1824 | MUST_USE_RESULT AllocationResult |
| 1825 | AllocateRawFixedArray(int length, PretenureFlag pretenure); |
| 1826 | |
| 1827 | // Allocate an uninitialized fixed double array. |
| 1828 | MUST_USE_RESULT AllocationResult |
| 1829 | AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure); |
| 1830 | |
| 1831 | // Allocate an initialized fixed array with the given filler value. |
| 1832 | MUST_USE_RESULT AllocationResult |
| 1833 | AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure, |
| 1834 | Object* filler); |
| 1835 | |
| 1836 | // Allocates and partially initializes a String. There are two String
| 1837 | // encodings: one-byte and two-byte. These functions allocate a string of |
| 1838 | // the given length and set its map and length fields. The characters of |
| 1839 | // the string are uninitialized. |
| 1840 | MUST_USE_RESULT AllocationResult |
| 1841 | AllocateRawOneByteString(int length, PretenureFlag pretenure); |
| 1842 | MUST_USE_RESULT AllocationResult |
| 1843 | AllocateRawTwoByteString(int length, PretenureFlag pretenure); |
| 1844 | |
| 1845 | // Allocates an internalized string in old space based on the character |
| 1846 | // stream. |
| 1847 | MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8( |
| 1848 | Vector<const char> str, int chars, uint32_t hash_field); |
| 1849 | |
| 1850 | MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString( |
| 1851 | Vector<const uint8_t> str, uint32_t hash_field); |
| 1852 | |
| 1853 | MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString( |
| 1854 | Vector<const uc16> str, uint32_t hash_field); |
| 1855 | |
| 1856 | template <bool is_one_byte, typename T> |
| 1857 | MUST_USE_RESULT AllocationResult |
| 1858 | AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field); |
| 1859 | |
| 1860 | template <typename T> |
| 1861 | MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl( |
| 1862 | T t, int chars, uint32_t hash_field); |
| 1863 | |
| 1864 | // Allocates an uninitialized fixed array. It must be filled by the caller. |
| 1865 | MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length); |
| 1866 | |
| 1867 | // Make a copy of src and return it. |
| 1868 | MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src); |
| 1869 | |
| 1870 | // Make a copy of src, also grow the copy, and return the copy. |
| 1871 | MUST_USE_RESULT AllocationResult |
| 1872 | CopyFixedArrayAndGrow(FixedArray* src, int grow_by, PretenureFlag pretenure); |
| 1873 | |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame^] | 1874 | // Make a copy of src, also grow the copy, and return the copy. |
| 1875 | MUST_USE_RESULT AllocationResult CopyFixedArrayUpTo(FixedArray* src, |
| 1876 | int new_len, |
| 1877 | PretenureFlag pretenure); |
| 1878 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1879 | // Make a copy of src, set the map, and return the copy. |
| 1880 | MUST_USE_RESULT AllocationResult |
| 1881 | CopyFixedArrayWithMap(FixedArray* src, Map* map); |
| 1882 | |
| 1883 | // Make a copy of src and return it. |
| 1884 | MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray( |
| 1885 | FixedDoubleArray* src); |
| 1886 | |
| 1887 | // Computes a single character string where the character has the given code.
| 1888 | // A cache is used for one-byte (Latin1) codes. |
| 1889 | MUST_USE_RESULT AllocationResult |
| 1890 | LookupSingleCharacterStringFromCode(uint16_t code); |
| 1891 | |
| 1892 | // Allocate a symbol in old space. |
| 1893 | MUST_USE_RESULT AllocationResult AllocateSymbol(); |
| 1894 | |
| 1895 | // Allocates an external array of the specified length and type. |
| 1896 | MUST_USE_RESULT AllocationResult AllocateFixedTypedArrayWithExternalPointer( |
| 1897 | int length, ExternalArrayType array_type, void* external_pointer, |
| 1898 | PretenureFlag pretenure); |
| 1899 | |
| 1900 | // Allocates a fixed typed array of the specified length and type. |
| 1901 | MUST_USE_RESULT AllocationResult |
| 1902 | AllocateFixedTypedArray(int length, ExternalArrayType array_type, |
| 1903 | bool initialize, PretenureFlag pretenure); |
| 1904 | |
| 1905 | // Make a copy of src and return it. |
| 1906 | MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src); |
| 1907 | |
| 1908 | // Make a copy of src, set the map, and return the copy. |
| 1909 | MUST_USE_RESULT AllocationResult |
| 1910 | CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map); |
| 1911 | |
| 1912 | // Allocates a fixed double array with uninitialized values.
| 1913 | MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray( |
| 1914 | int length, PretenureFlag pretenure = NOT_TENURED); |
| 1915 | |
| 1916 | // Allocate empty fixed array. |
| 1917 | MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray(); |
| 1918 | |
| 1919 | // Allocate empty fixed typed array of given type. |
| 1920 | MUST_USE_RESULT AllocationResult |
| 1921 | AllocateEmptyFixedTypedArray(ExternalArrayType array_type); |
| 1922 | |
| 1923 | // Allocate a tenured simple cell. |
| 1924 | MUST_USE_RESULT AllocationResult AllocateCell(Object* value); |
| 1925 | |
| 1926 | // Allocate a tenured JS global property cell initialized with the hole. |
| 1927 | MUST_USE_RESULT AllocationResult AllocatePropertyCell(); |
| 1928 | |
| 1929 | MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value); |
| 1930 | |
| 1931 | MUST_USE_RESULT AllocationResult AllocateTransitionArray(int capacity); |
| 1932 | |
| 1933 | // Allocates a new utility object in the old generation. |
| 1934 | MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type); |
| 1935 | |
| 1936 | // Allocates a new foreign object. |
| 1937 | MUST_USE_RESULT AllocationResult |
| 1938 | AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED); |
| 1939 | |
| 1940 | MUST_USE_RESULT AllocationResult |
| 1941 | AllocateCode(int object_size, bool immovable); |
| 1942 | |
| 1943 | MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key); |
| 1944 | |
| 1945 | MUST_USE_RESULT AllocationResult InternalizeString(String* str); |
| 1946 | |
| 1947 | // =========================================================================== |
| 1948 | |
| 1949 | void set_force_oom(bool value) { force_oom_ = value; } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1950 | |
| 1951 | // The amount of external memory registered through the API kept alive |
| 1952 | // by global handles.
| 1953 | int64_t amount_of_external_allocated_memory_; |
| 1954 | |
| 1955 | // Caches the amount of external memory registered at the last global gc. |
| 1956 | int64_t amount_of_external_allocated_memory_at_last_global_gc_; |
| 1957 | |
| 1958 | // This can be calculated directly from a pointer to the heap; however, it is |
| 1959 | // more expedient to get at the isolate directly from within Heap methods. |
| 1960 | Isolate* isolate_; |
| 1961 | |
| 1962 | Object* roots_[kRootListLength]; |
| 1963 | |
| 1964 | size_t code_range_size_; |
| 1965 | int reserved_semispace_size_; |
| 1966 | int max_semi_space_size_; |
| 1967 | int initial_semispace_size_; |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1968 | int target_semispace_size_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1969 | intptr_t max_old_generation_size_; |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1970 | intptr_t initial_old_generation_size_; |
| 1971 | bool old_generation_size_configured_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1972 | intptr_t max_executable_size_; |
| 1973 | intptr_t maximum_committed_; |
| 1974 | |
| 1975 | // For keeping track of how much data has survived |
| 1976 | // scavenge since last new space expansion. |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame^] | 1977 | intptr_t survived_since_last_expansion_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1978 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1979 | // ... and since the last scavenge. |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame^] | 1980 | intptr_t survived_last_scavenge_; |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1981 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1982 | // This is not the depth of nested AlwaysAllocateScopes but rather a single
| 1983 | // count, as scopes can be acquired from multiple tasks (read: threads). |
| 1984 | AtomicNumber<size_t> always_allocate_scope_count_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1985 | |
| 1986 | // For keeping track of context disposals. |
| 1987 | int contexts_disposed_; |
| 1988 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1989 | // The length of the retained_maps array at the time of context disposal. |
| 1990 | // This separates maps in the retained_maps array that were created before |
| 1991 | // and after context disposal. |
| 1992 | int number_of_disposed_maps_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1993 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1994 | int global_ic_age_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1995 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1996 | NewSpace new_space_; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1997 | OldSpace* old_space_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1998 | OldSpace* code_space_; |
| 1999 | MapSpace* map_space_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2000 | LargeObjectSpace* lo_space_; |
| 2001 | HeapState gc_state_; |
| 2002 | int gc_post_processing_depth_; |
| 2003 | Address new_space_top_after_last_gc_; |
| 2004 | |
| 2005 | // Returns the amount of external memory registered since last global gc. |
| 2006 | int64_t PromotedExternalMemorySize(); |
| 2007 | |
| 2008 | // How many "runtime allocations" happened. |
| 2009 | uint32_t allocations_count_; |
| 2010 | |
| 2011 | // Running hash over allocations performed. |
| 2012 | uint32_t raw_allocations_hash_; |
| 2013 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2014 | // How many mark-sweep collections happened. |
| 2015 | unsigned int ms_count_; |
| 2016 | |
| 2017 | // How many GCs happened.
| 2018 | unsigned int gc_count_; |
| 2019 | |
| 2020 | // For post mortem debugging. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2021 | int remembered_unmapped_pages_index_; |
| 2022 | Address remembered_unmapped_pages_[kRememberedUnmappedPages]; |
| 2023 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2024 | #ifdef DEBUG |
| 2025 | // If the --gc-interval flag is set to a positive value, this |
| 2026 | // variable holds the number of allocations that
| 2027 | // remain until the next failure and garbage collection.
| 2028 | int allocation_timeout_; |
| 2029 | #endif // DEBUG |
| 2030 | |
| 2031 | // Limit that triggers a global GC on the next (normally caused) GC. This |
| 2032 | // is checked when we have already decided to do a GC to help determine |
| 2033 | // which collector to invoke, before expanding a paged space in the old |
| 2034 | // generation and on every allocation in large object space. |
| 2035 | intptr_t old_generation_allocation_limit_; |
| 2036 | |
| 2037 | // Indicates that an allocation has failed in the old generation since the |
| 2038 | // last GC. |
| 2039 | bool old_gen_exhausted_; |
| 2040 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2041 | // Indicates that memory usage is more important than latency. |
| 2042 | // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed. |
| 2043 | bool optimize_for_memory_usage_; |
| 2044 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2045 | // Indicates that inline bump-pointer allocation has been globally disabled |
| 2046 | // for all spaces. This is used to disable allocations in generated code. |
| 2047 | bool inline_allocation_disabled_; |
| 2048 | |
| 2049 | // Weak list heads, threaded through the objects. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2050 | // List heads are initialized lazily and contain the undefined_value at start. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2051 | Object* native_contexts_list_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2052 | Object* allocation_sites_list_; |
| 2053 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2054 | // List of encountered weak collections (JSWeakMap and JSWeakSet) during |
| 2055 | // marking. It is initialized during marking, destroyed after marking and |
| 2056 | // contains Smi(0) while marking is not active. |
| 2057 | Object* encountered_weak_collections_; |
| 2058 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 2059 | Object* encountered_weak_cells_; |
| 2060 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2061 | Object* encountered_transition_arrays_; |
| 2062 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 2063 | List<GCCallbackPair> gc_epilogue_callbacks_; |
| 2064 | List<GCCallbackPair> gc_prologue_callbacks_; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2065 | |
  // Total RegExp code ever generated.
  double total_regexp_code_generated_;

  int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];

  GCTracer* tracer_;

  int high_survival_rate_period_length_;
  intptr_t promoted_objects_size_;
  double promotion_ratio_;
  double promotion_rate_;
  intptr_t semi_space_copied_object_size_;
  intptr_t previous_semi_space_copied_object_size_;
  double semi_space_copied_rate_;
  int nodes_died_in_new_space_;
  int nodes_copied_in_new_space_;
  int nodes_promoted_;

  // This is the pretenuring trigger for allocation sites that are in maybe
  // tenure state. When we switch to the maximum new space size we deoptimize
  // the code that belongs to the allocation site and derive the lifetime
  // of the allocation site.
  unsigned int maximum_size_scavenges_;

  // Maximum GC pause.
  double max_gc_pause_;

  // Total time spent in GC.
  double total_gc_time_ms_;

  // Maximum size of objects alive after GC.
  intptr_t max_alive_after_gc_;

  // Minimal interval between two subsequent collections.
  double min_in_mutator_;

  // Cumulative GC time spent in marking.
  double marking_time_;

  // Cumulative GC time spent in sweeping.
  double sweeping_time_;

  // Last time an idle notification happened.
  double last_idle_notification_time_;

  // Last time a garbage collection happened.
  double last_gc_time_;

  Scavenger* scavenge_collector_;

  MarkCompactCollector* mark_compact_collector_;

  StoreBuffer store_buffer_;

  IncrementalMarking* incremental_marking_;

  GCIdleTimeHandler* gc_idle_time_handler_;

  MemoryReducer* memory_reducer_;

  ObjectStats* object_stats_;

  ScavengeJob* scavenge_job_;

  AllocationObserver* idle_scavenge_observer_;

  // These two counters are monotonically increasing and never reset.
  size_t full_codegen_bytes_generated_;
  size_t crankshaft_codegen_bytes_generated_;

  // This counter is increased before each GC and never reset.
  // To account for the bytes allocated since the last GC, use the
  // NewSpaceAllocationCounter() function.
  size_t new_space_allocation_counter_;

  // This counter is increased before each GC and never reset. To
  // account for the bytes allocated since the last GC, use the
  // OldGenerationAllocationCounter() function.
  size_t old_generation_allocation_counter_;
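
  // Illustrative sketch (the accessors themselves are declared earlier in
  // this class; this is the likely shape, not a quoted implementation): the
  // monotonic counter is combined with the bytes the space has handed out
  // since the last GC, e.g.
  //
  //   size_t NewSpaceAllocationCounter() {
  //     return new_space_allocation_counter_ +
  //            new_space()->AllocatedSinceLastGC();
  //   }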

  // The size of objects in old generation after the last MarkCompact GC.
  size_t old_generation_size_at_last_gc_;

  // If the --deopt_every_n_garbage_collections flag is set to a positive
  // value, this variable holds the number of garbage collections since the
  // last deoptimization triggered by garbage collection.
  int gcs_since_last_deopt_;

  // The feedback storage is used to store allocation sites (keys) and how
  // often they have been visited (values) by finding a memento behind an
  // object. The storage is only alive temporarily during a GC. The invariant
  // is that all pointers in this map are already fixed, i.e., they do not
  // point to forwarding pointers.
  HashMap* global_pretenuring_feedback_;
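
  // Illustrative sketch (assumed shape, not the exact V8 code) of how a GC
  // pass would record feedback after finding a memento behind an object;
  // the ObjectHash() helper name is an assumption:
  //
  //   HashMap::Entry* e = global_pretenuring_feedback_->LookupOrInsert(
  //       site, ObjectHash(site->address()));
  //   e->value =
  //       reinterpret_cast<void*>(reinterpret_cast<intptr_t>(e->value) + 1);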

  char trace_ring_buffer_[kTraceRingBufferSize];
  // If it's not full then the data is from 0 to ring_buffer_end_. If it's
  // full then the data is from ring_buffer_end_ to the end of the buffer and
  // from 0 to ring_buffer_end_.
  bool ring_buffer_full_;
  size_t ring_buffer_end_;
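
  // Illustrative sketch of the invariant above (hypothetical helper with
  // access to these members): reconstructing the trace in chronological
  // order, oldest data first.
  //
  //   if (ring_buffer_full_) {
  //     memcpy(out, trace_ring_buffer_ + ring_buffer_end_,
  //            kTraceRingBufferSize - ring_buffer_end_);
  //     out += kTraceRingBufferSize - ring_buffer_end_;
  //   }
  //   memcpy(out, trace_ring_buffer_, ring_buffer_end_);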

  // Shared state read by the scavenge collector and set by ScavengeObject.
  PromotionQueue promotion_queue_;

  // Flag is set when the heap has been configured. The heap can be repeatedly
  // configured through the API until it is set up.
  bool configured_;

  // Currently set GC flags that are respected by all GC components.
  int current_gc_flags_;

  // Currently set GC callback flags that are used to pass information between
  // the embedder and V8's GC.
  GCCallbackFlags current_gc_callback_flags_;

  ExternalStringTable external_string_table_;

  MemoryChunk* chunks_queued_for_free_;

  size_t concurrent_unmapping_tasks_active_;

  base::Semaphore pending_unmapping_tasks_semaphore_;

  base::Mutex relocation_mutex_;

  int gc_callbacks_depth_;

  bool deserialization_complete_;

  StrongRootsList* strong_roots_list_;

  ArrayBufferTracker* array_buffer_tracker_;

  // The depth of HeapIterator nestings.
  int heap_iterator_depth_;

  // Used for testing purposes.
  bool force_oom_;

  // Classes in "heap" can be friends.
  friend class AlwaysAllocateScope;
  friend class GCCallbacksScope;
  friend class GCTracer;
  friend class HeapIterator;
  friend class IdleScavengeObserver;
  friend class IncrementalMarking;
  friend class IteratePointersToFromSpaceVisitor;
  friend class MarkCompactCollector;
  friend class MarkCompactMarkingVisitor;
  friend class NewSpace;
  friend class ObjectStatsVisitor;
  friend class Page;
  friend class Scavenger;
  friend class StoreBuffer;

  // The allocator interface.
  friend class Factory;

  // The Isolate constructs us.
  friend class Isolate;

  // Used in cctest.
  friend class HeapTester;

  DISALLOW_COPY_AND_ASSIGN(Heap);
};


class HeapStats {
 public:
  static const int kStartMarker = 0xDECADE00;
  static const int kEndMarker = 0xDECADE01;

  int* start_marker;                    // 0
  int* new_space_size;                  // 1
  int* new_space_capacity;              // 2
  intptr_t* old_space_size;             // 3
  intptr_t* old_space_capacity;         // 4
  intptr_t* code_space_size;            // 5
  intptr_t* code_space_capacity;        // 6
  intptr_t* map_space_size;             // 7
  intptr_t* map_space_capacity;         // 8
  intptr_t* lo_space_size;              // 9
  int* global_handle_count;             // 10
  int* weak_global_handle_count;        // 11
  int* pending_global_handle_count;     // 12
  int* near_death_global_handle_count;  // 13
  int* free_global_handle_count;        // 14
  intptr_t* memory_allocator_size;      // 15
  intptr_t* memory_allocator_capacity;  // 16
  int* objects_per_type;                // 17
  int* size_per_type;                   // 18
  int* os_error;                        // 19
  char* last_few_messages;              // 20
  char* js_stacktrace;                  // 21
  int* end_marker;                      // 22
};
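
// Illustrative usage sketch (hypothetical caller): for post-mortem debugging
// a crash handler points the slots at stack-allocated storage so the values
// survive in a minidump, then asks the heap to fill them in. RecordStats()
// is assumed to be the Heap method that populates this record.
//
//   HeapStats heap_stats;
//   int start_marker;
//   heap_stats.start_marker = &start_marker;
//   // ... wire up the remaining slots ...
//   isolate->heap()->RecordStats(&heap_stats, true /* take_snapshot */);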


class AlwaysAllocateScope {
 public:
  explicit inline AlwaysAllocateScope(Isolate* isolate);
  inline ~AlwaysAllocateScope();

 private:
  Heap* heap_;
};


// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have
// to point into the heap to a location that has a map pointer at its first
// word. Caveat: Heap::Contains is an approximation because it can return
// true for objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitor {
 public:
  inline void VisitPointers(Object** start, Object** end) override;
};


// Verify that all objects are Smis.
class VerifySmisVisitor : public ObjectVisitor {
 public:
  inline void VisitPointers(Object** start, Object** end) override;
};


// Space iterator for iterating over all spaces of the heap. Returns each
// space in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
 public:
  explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
  Space* next();

 private:
  Heap* heap_;
  int counter_;
};
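
// Illustrative usage sketch of the iterator protocol shared by AllSpaces and
// the OldSpaces/PagedSpaces variants below: next() returns null when the
// iteration is exhausted.
//
//   AllSpaces spaces(heap);
//   while (Space* space = spaces.next()) {
//     // ... inspect *space ...
//   }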


// Space iterator for iterating over all old spaces of the heap: Old space
// and code space. Returns each space in turn, and null when it is done.
class OldSpaces BASE_EMBEDDED {
 public:
  explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
  OldSpace* next();

 private:
  Heap* heap_;
  int counter_;
};


// Space iterator for iterating over all the paged spaces of the heap: old
// space, code space and map space. Returns each space in turn, and null
// when it is done.
class PagedSpaces BASE_EMBEDDED {
 public:
  explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
  PagedSpace* next();

 private:
  Heap* heap_;
  int counter_;
};


// Space iterator for iterating over all spaces of the heap.
// For each space an object iterator is provided. The deallocation of the
// returned object iterators is handled by the space iterator.
class SpaceIterator : public Malloced {
 public:
  explicit SpaceIterator(Heap* heap);
  virtual ~SpaceIterator();

  bool has_next();
  ObjectIterator* next();

 private:
  ObjectIterator* CreateIterator();

  Heap* heap_;
  int current_space_;         // from enum AllocationSpace.
  ObjectIterator* iterator_;  // object iterator for the current space.
};


// A HeapIterator provides iteration over the whole heap. It aggregates the
// specific iterators for the different spaces, as each of those can iterate
// over only one space.
//
// HeapIterator ensures there is no allocation during its lifetime (using an
// embedded DisallowHeapAllocation instance).
//
// HeapIterator can skip free-list nodes (that is, de-allocated heap objects
// that still remain in the heap). As the implementation of free-node
// filtering uses GC marks, it can't be used during the MS/MC GC phases.
// Also, it is forbidden to interrupt iteration in this mode, as this will
// leave heap objects marked (and thus, unusable).
class HeapIterator BASE_EMBEDDED {
 public:
  enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };

  explicit HeapIterator(Heap* heap,
                        HeapObjectsFiltering filtering = kNoFiltering);
  ~HeapIterator();

  HeapObject* next();

 private:
  struct MakeHeapIterableHelper {
    explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); }
  };

  HeapObject* NextObject();

  // The following two fields need to be declared in this order. Initialization
  // order guarantees that we first make the heap iterable (which may involve
  // allocations) and only then lock it down by not allowing further
  // allocations.
  MakeHeapIterableHelper make_heap_iterable_helper_;
  DisallowHeapAllocation no_heap_allocation_;

  Heap* heap_;
  HeapObjectsFiltering filtering_;
  HeapObjectsFilter* filter_;
  // Space iterator for iterating all the spaces.
  SpaceIterator* space_iterator_;
  // Object iterator for the space currently being iterated.
  ObjectIterator* object_iterator_;
};
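
// Illustrative usage sketch: enumerating every live object in the heap.
// next() returns null once the iteration is exhausted.
//
//   HeapIterator it(heap, HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = it.next(); obj != nullptr; obj = it.next()) {
//     // ... inspect *obj; no allocation is allowed in this loop ...
//   }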


// Cache for mapping (map, property name) into field offset.
// Cleared at startup and prior to mark sweep collection.
class KeyedLookupCache {
 public:
  // Lookup field offset for (map, name). If absent, -1 is returned.
  int Lookup(Handle<Map> map, Handle<Name> name);

  // Update an element in the cache.
  void Update(Handle<Map> map, Handle<Name> name, int field_offset);

  // Clear the cache.
  void Clear();

  static const int kLength = 256;
  static const int kCapacityMask = kLength - 1;
  static const int kMapHashShift = 5;
  static const int kHashMask = -4;  // Zero the last two bits.
  static const int kEntriesPerBucket = 4;
  static const int kEntryLength = 2;
  static const int kMapIndex = 0;
  static const int kKeyIndex = 1;
  static const int kNotFound = -1;

  // kEntriesPerBucket should be a power of 2.
  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);

 private:
  KeyedLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].map = NULL;
      keys_[i].name = NULL;
      field_offsets_[i] = kNotFound;
    }
  }

  static inline int Hash(Handle<Map> map, Handle<Name> name);

  // Get the address of the keys and field_offsets arrays. Used in
  // generated code to perform cache lookups.
  Address keys_address() { return reinterpret_cast<Address>(&keys_); }

  Address field_offsets_address() {
    return reinterpret_cast<Address>(&field_offsets_);
  }

  struct Key {
    Map* map;
    Name* name;
  };

  Key keys_[kLength];
  int field_offsets_[kLength];

  friend class ExternalReference;
  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
};
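
// Illustrative sketch of the probing scheme implied by the constants above
// (assumed shape, not the exact V8 code): Hash() yields a bucket-aligned
// index (kHashMask zeroes the low two bits), and a lookup scans the
// kEntriesPerBucket consecutive entries of that bucket.
//
//   int index = Hash(map, name);  // a multiple of kEntriesPerBucket
//   for (int i = 0; i < kEntriesPerBucket; i++) {
//     Key& key = keys_[index + i];
//     if ((key.map == *map) && key.name->Equals(*name)) {
//       return field_offsets_[index + i];
//     }
//   }
//   return kNotFound;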


// Cache for mapping (map, property name) into descriptor index.
// The cache contains both positive and negative results.
// Descriptor index equals kNotFound means the property is absent.
// Cleared at startup and prior to any gc.
class DescriptorLookupCache {
 public:
  // Lookup descriptor index for (map, name).
  // If absent, kAbsent is returned.
  inline int Lookup(Map* source, Name* name);

  // Update an element in the cache.
  inline void Update(Map* source, Name* name, int result);

  // Clear the cache.
  void Clear();

  static const int kAbsent = -2;

 private:
  DescriptorLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].source = NULL;
      keys_[i].name = NULL;
      results_[i] = kAbsent;
    }
  }

  static inline int Hash(Object* source, Name* name);

  static const int kLength = 64;
  struct Key {
    Map* source;
    Name* name;
  };

  Key keys_[kLength];
  int results_[kLength];

  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
};


// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() {}

  // Return whether this object should be retained. If NULL is returned the
  // object has no references. Otherwise the address of the retained object
  // should be returned, as the object may have been moved in some GC
  // situations.
  virtual Object* RetainAs(Object* object) = 0;
};
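
// Example (illustrative only, not part of V8): a retainer that keeps every
// weak object alive at its current, possibly updated, address.
//
//   class KeepAliveRetainer : public WeakObjectRetainer {
//    public:
//     Object* RetainAs(Object* object) override { return object; }
//   };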


#ifdef DEBUG
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
// object to the search target object.
class PathTracer : public ObjectVisitor {
 public:
  enum WhatToFind {
    FIND_ALL,   // Will find all matches.
    FIND_FIRST  // Will stop the search after first match.
  };

  // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
  static const int kMarkTag = 2;

  // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
  // after the first match. If FIND_ALL is specified, then tracing will be
  // done for all matches.
  PathTracer(Object* search_target, WhatToFind what_to_find,
             VisitMode visit_mode)
      : search_target_(search_target),
        found_target_(false),
        found_target_in_trace_(false),
        what_to_find_(what_to_find),
        visit_mode_(visit_mode),
        object_stack_(20),
        no_allocation() {}

  void VisitPointers(Object** start, Object** end) override;

  void Reset();
  void TracePathFrom(Object** root);

  bool found() const { return found_target_; }

  static Object* const kAnyGlobalObject;

 protected:
  class MarkVisitor;
  class UnmarkVisitor;

  void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
  void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
  virtual void ProcessResults();

  Object* search_target_;
  bool found_target_;
  bool found_target_in_trace_;
  WhatToFind what_to_find_;
  VisitMode visit_mode_;
  List<Object*> object_stack_;

  DisallowHeapAllocation no_allocation;  // i.e. no gc allowed.

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
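
// Illustrative usage sketch (DEBUG builds only; assumed driver code): trace
// retention paths from the strong roots to a target object.
//
//   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
//   heap->IterateRoots(&tracer, VISIT_ONLY_STRONG);
//   if (tracer.found()) { /* at least one path was traced */ }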
#endif  // DEBUG

// -----------------------------------------------------------------------------
// Allows observation of allocations.
class AllocationObserver {
 public:
  explicit AllocationObserver(intptr_t step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {
    DCHECK(step_size >= kPointerSize);
  }
  virtual ~AllocationObserver() {}

  // Called each time the observed space does an allocation step. This may be
  // more frequent than the step_size we are monitoring (e.g., when there are
  // multiple observers, or when a page or space boundary is encountered).
  void AllocationStep(int bytes_allocated, Address soon_object, size_t size) {
    bytes_to_next_step_ -= bytes_allocated;
    if (bytes_to_next_step_ <= 0) {
      Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
           size);
      step_size_ = GetNextStepSize();
      bytes_to_next_step_ = step_size_;
    }
  }
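
  // Worked example of the arithmetic above: with step_size_ == 100 and two
  // successive allocations of 60 bytes each, the second allocation drives
  // bytes_to_next_step_ to -20, so Step() is invoked with
  // bytes_allocated == 100 - (-20) == 120, and the counter resets to 100.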

 protected:
  intptr_t step_size() const { return step_size_; }
  intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }

  // Pure virtual method provided by the subclasses that gets called when at
  // least step_size bytes have been allocated. soon_object is the address just
  // allocated (but not yet initialized). size is the size of the object as
  // requested (i.e. w/o the alignment fillers). Some complexities to be aware
  // of:
  // 1) soon_object will be nullptr in cases where we end up observing an
  //    allocation that happens to be a filler space (e.g. page boundaries).
  // 2) size is the requested size at the time of allocation. Right-trimming
  //    may change the object size dynamically.
  // 3) soon_object may actually be the first object in an allocation-folding
  //    group. In such a case size is the size of the group rather than the
  //    first object.
  virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;

  // Subclasses can override this method to make step size dynamic.
  virtual intptr_t GetNextStepSize() { return step_size_; }

  intptr_t step_size_;
  intptr_t bytes_to_next_step_;

 private:
  friend class LargeObjectSpace;
  friend class NewSpace;
  friend class PagedSpace;
  DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};
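
// Example (illustrative sketch, not part of V8): a subclass that samples
// roughly every 64KB of allocation. Step() receives the exact byte count
// since the last step, which may overshoot the configured step size;
// RecordSample() is a hypothetical helper.
//
//   class SamplingObserver : public AllocationObserver {
//    public:
//     SamplingObserver() : AllocationObserver(64 * KB) {}
//
//    protected:
//     void Step(int bytes_allocated, Address soon_object,
//               size_t size) override {
//       // soon_object is nullptr when the step was triggered by a filler.
//       RecordSample(bytes_allocated, soon_object, size);
//     }
//   };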

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_H_