// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_H_
#define V8_HEAP_HEAP_H_

#include <cmath>

#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/counters.h"
#include "src/globals.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
#include "src/list.h"
#include "src/splay-tree-inl.h"

namespace v8 {
namespace internal {

// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V)                                                   \
  V(Map, byte_array_map, ByteArrayMap)                                        \
  V(Map, free_space_map, FreeSpaceMap)                                        \
  V(Map, one_pointer_filler_map, OnePointerFillerMap)                         \
  V(Map, two_pointer_filler_map, TwoPointerFillerMap)                         \
  /* Cluster the most popular ones in a few cache lines here at the top. */   \
  V(Smi, store_buffer_top, StoreBufferTop)                                    \
  V(Oddball, undefined_value, UndefinedValue)                                 \
  V(Oddball, the_hole_value, TheHoleValue)                                    \
  V(Oddball, null_value, NullValue)                                           \
  V(Oddball, true_value, TrueValue)                                           \
  V(Oddball, false_value, FalseValue)                                         \
  V(Oddball, uninitialized_value, UninitializedValue)                         \
  V(Oddball, exception, Exception)                                            \
  V(Map, cell_map, CellMap)                                                   \
  V(Map, global_property_cell_map, GlobalPropertyCellMap)                     \
  V(Map, shared_function_info_map, SharedFunctionInfoMap)                     \
  V(Map, meta_map, MetaMap)                                                   \
  V(Map, heap_number_map, HeapNumberMap)                                      \
  V(Map, mutable_heap_number_map, MutableHeapNumberMap)                       \
  V(Map, native_context_map, NativeContextMap)                                \
  V(Map, fixed_array_map, FixedArrayMap)                                      \
  V(Map, code_map, CodeMap)                                                   \
  V(Map, scope_info_map, ScopeInfoMap)                                        \
  V(Map, fixed_cow_array_map, FixedCOWArrayMap)                               \
  V(Map, fixed_double_array_map, FixedDoubleArrayMap)                         \
  V(Map, constant_pool_array_map, ConstantPoolArrayMap)                       \
  V(Map, weak_cell_map, WeakCellMap)                                          \
  V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel)     \
  V(Map, hash_table_map, HashTableMap)                                        \
  V(Map, ordered_hash_table_map, OrderedHashTableMap)                         \
  V(FixedArray, empty_fixed_array, EmptyFixedArray)                           \
  V(ByteArray, empty_byte_array, EmptyByteArray)                              \
  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)            \
  V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray)     \
  V(Oddball, arguments_marker, ArgumentsMarker)                               \
  /* The roots above this line should be boring from a GC point of view. */   \
  /* This means they are never in new space and never on a page that is */    \
  /* being compacted. */                                                      \
  V(FixedArray, number_string_cache, NumberStringCache)                       \
  V(Object, instanceof_cache_function, InstanceofCacheFunction)               \
  V(Object, instanceof_cache_map, InstanceofCacheMap)                         \
  V(Object, instanceof_cache_answer, InstanceofCacheAnswer)                   \
  V(FixedArray, single_character_string_cache, SingleCharacterStringCache)    \
  V(FixedArray, string_split_cache, StringSplitCache)                         \
  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache)                   \
  V(Oddball, termination_exception, TerminationException)                     \
  V(Smi, hash_seed, HashSeed)                                                 \
  V(Map, symbol_map, SymbolMap)                                               \
  V(Map, string_map, StringMap)                                               \
  V(Map, one_byte_string_map, OneByteStringMap)                               \
  V(Map, cons_string_map, ConsStringMap)                                      \
  V(Map, cons_one_byte_string_map, ConsOneByteStringMap)                      \
  V(Map, sliced_string_map, SlicedStringMap)                                  \
  V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap)                  \
  V(Map, external_string_map, ExternalStringMap)                              \
  V(Map, external_string_with_one_byte_data_map,                              \
    ExternalStringWithOneByteDataMap)                                         \
  V(Map, external_one_byte_string_map, ExternalOneByteStringMap)              \
  V(Map, native_source_string_map, NativeSourceStringMap)                     \
  V(Map, short_external_string_map, ShortExternalStringMap)                   \
  V(Map, short_external_string_with_one_byte_data_map,                        \
    ShortExternalStringWithOneByteDataMap)                                    \
  V(Map, internalized_string_map, InternalizedStringMap)                      \
  V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap)      \
  V(Map, external_internalized_string_map, ExternalInternalizedStringMap)     \
  V(Map, external_internalized_string_with_one_byte_data_map,                 \
    ExternalInternalizedStringWithOneByteDataMap)                             \
  V(Map, external_one_byte_internalized_string_map,                           \
    ExternalOneByteInternalizedStringMap)                                     \
  V(Map, short_external_internalized_string_map,                              \
    ShortExternalInternalizedStringMap)                                       \
  V(Map, short_external_internalized_string_with_one_byte_data_map,           \
    ShortExternalInternalizedStringWithOneByteDataMap)                        \
  V(Map, short_external_one_byte_internalized_string_map,                     \
    ShortExternalOneByteInternalizedStringMap)                                \
  V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap)   \
  V(Map, undetectable_string_map, UndetectableStringMap)                      \
  V(Map, undetectable_one_byte_string_map, UndetectableOneByteStringMap)      \
  V(Map, external_int8_array_map, ExternalInt8ArrayMap)                       \
  V(Map, external_uint8_array_map, ExternalUint8ArrayMap)                     \
  V(Map, external_int16_array_map, ExternalInt16ArrayMap)                     \
  V(Map, external_uint16_array_map, ExternalUint16ArrayMap)                   \
  V(Map, external_int32_array_map, ExternalInt32ArrayMap)                     \
  V(Map, external_uint32_array_map, ExternalUint32ArrayMap)                   \
  V(Map, external_float32_array_map, ExternalFloat32ArrayMap)                 \
  V(Map, external_float64_array_map, ExternalFloat64ArrayMap)                 \
  V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap)      \
  V(ExternalArray, empty_external_int8_array, EmptyExternalInt8Array)         \
  V(ExternalArray, empty_external_uint8_array, EmptyExternalUint8Array)       \
  V(ExternalArray, empty_external_int16_array, EmptyExternalInt16Array)       \
  V(ExternalArray, empty_external_uint16_array, EmptyExternalUint16Array)     \
  V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array)       \
  V(ExternalArray, empty_external_uint32_array, EmptyExternalUint32Array)     \
  V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array)   \
  V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array)   \
  V(ExternalArray, empty_external_uint8_clamped_array,                        \
    EmptyExternalUint8ClampedArray)                                           \
  V(Map, fixed_uint8_array_map, FixedUint8ArrayMap)                           \
  V(Map, fixed_int8_array_map, FixedInt8ArrayMap)                             \
  V(Map, fixed_uint16_array_map, FixedUint16ArrayMap)                         \
  V(Map, fixed_int16_array_map, FixedInt16ArrayMap)                           \
  V(Map, fixed_uint32_array_map, FixedUint32ArrayMap)                         \
  V(Map, fixed_int32_array_map, FixedInt32ArrayMap)                           \
  V(Map, fixed_float32_array_map, FixedFloat32ArrayMap)                       \
  V(Map, fixed_float64_array_map, FixedFloat64ArrayMap)                       \
  V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap)            \
  V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array)       \
  V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array)         \
  V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array)     \
  V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array)       \
  V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array)     \
  V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array)       \
  V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array)   \
  V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array)   \
  V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array,                     \
    EmptyFixedUint8ClampedArray)                                              \
  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)           \
  V(Map, function_context_map, FunctionContextMap)                            \
  V(Map, catch_context_map, CatchContextMap)                                  \
  V(Map, with_context_map, WithContextMap)                                    \
  V(Map, block_context_map, BlockContextMap)                                  \
  V(Map, module_context_map, ModuleContextMap)                                \
  V(Map, script_context_map, ScriptContextMap)                                \
  V(Map, script_context_table_map, ScriptContextTableMap)                     \
  V(Map, undefined_map, UndefinedMap)                                         \
  V(Map, the_hole_map, TheHoleMap)                                            \
  V(Map, null_map, NullMap)                                                   \
  V(Map, boolean_map, BooleanMap)                                             \
  V(Map, uninitialized_map, UninitializedMap)                                 \
  V(Map, arguments_marker_map, ArgumentsMarkerMap)                            \
  V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap)  \
  V(Map, exception_map, ExceptionMap)                                         \
  V(Map, termination_exception_map, TerminationExceptionMap)                  \
  V(Map, message_object_map, JSMessageObjectMap)                              \
  V(Map, foreign_map, ForeignMap)                                             \
  V(HeapNumber, nan_value, NanValue)                                          \
  V(HeapNumber, infinity_value, InfinityValue)                                \
  V(HeapNumber, minus_zero_value, MinusZeroValue)                             \
  V(Map, neander_map, NeanderMap)                                             \
  V(JSObject, message_listeners, MessageListeners)                            \
  V(UnseededNumberDictionary, code_stubs, CodeStubs)                          \
  V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache)     \
  V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache)       \
  V(Code, js_entry_code, JsEntryCode)                                         \
  V(Code, js_construct_entry_code, JsConstructEntryCode)                      \
  V(FixedArray, natives_source_cache, NativesSourceCache)                     \
  V(Script, empty_script, EmptyScript)                                        \
  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames)         \
  V(Cell, undefined_cell, UndefineCell)                                       \
  V(JSObject, observation_state, ObservationState)                            \
  V(Map, external_map, ExternalMap)                                           \
  V(Object, symbol_registry, SymbolRegistry)                                  \
  V(SeededNumberDictionary, empty_slow_element_dictionary,                    \
    EmptySlowElementDictionary)                                               \
  V(FixedArray, materialized_objects, MaterializedObjects)                    \
  V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad)       \
  V(FixedArray, microtask_queue, MicrotaskQueue)

// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V)                                                      \
  V(Smi, stack_limit, StackLimit)                                             \
  V(Smi, real_stack_limit, RealStackLimit)                                    \
  V(Smi, last_script_id, LastScriptId)                                        \
  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)    \
  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)          \
  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)                \
  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)

#define ROOT_LIST(V)  \
  STRONG_ROOT_LIST(V) \
  SMI_ROOT_LIST(V)    \
  V(StringTable, string_table, StringTable)
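
// For illustration: ROOT_LIST and the lists above are X-macros. A client
// supplies its own V(...) macro and applies a list to it to generate one
// declaration per entry. For example, the RootListIndex enum further down
// feeds STRONG_ROOT_LIST the macro
//
//   #define ROOT_INDEX_DECLARATION(type, name, camel_name)
//     k##camel_name##RootIndex,
//
// which expands to kByteArrayMapRootIndex, kFreeSpaceMapRootIndex, ..., so
// root declarations, accessors and indices all stay in sync with one list.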

#define INTERNALIZED_STRING_LIST(V)                        \
  V(Object_string, "Object")                               \
  V(proto_string, "__proto__")                             \
  V(arguments_string, "arguments")                         \
  V(Arguments_string, "Arguments")                         \
  V(caller_string, "caller")                               \
  V(boolean_string, "boolean")                             \
  V(Boolean_string, "Boolean")                             \
  V(callee_string, "callee")                               \
  V(constructor_string, "constructor")                     \
  V(dot_result_string, ".result")                          \
  V(eval_string, "eval")                                   \
  V(empty_string, "")                                      \
  V(function_string, "function")                           \
  V(Function_string, "Function")                           \
  V(length_string, "length")                               \
  V(name_string, "name")                                   \
  V(null_string, "null")                                   \
  V(number_string, "number")                               \
  V(Number_string, "Number")                               \
  V(nan_string, "NaN")                                     \
  V(source_string, "source")                               \
  V(source_url_string, "source_url")                       \
  V(source_mapping_url_string, "source_mapping_url")       \
  V(global_string, "global")                               \
  V(ignore_case_string, "ignoreCase")                      \
  V(multiline_string, "multiline")                         \
  V(sticky_string, "sticky")                               \
  V(harmony_regexps_string, "harmony_regexps")             \
  V(input_string, "input")                                 \
  V(index_string, "index")                                 \
  V(last_index_string, "lastIndex")                        \
  V(object_string, "object")                               \
  V(prototype_string, "prototype")                         \
  V(string_string, "string")                               \
  V(String_string, "String")                               \
  V(symbol_string, "symbol")                               \
  V(Symbol_string, "Symbol")                               \
  V(Map_string, "Map")                                     \
  V(Set_string, "Set")                                     \
  V(WeakMap_string, "WeakMap")                             \
  V(WeakSet_string, "WeakSet")                             \
  V(for_string, "for")                                     \
  V(for_api_string, "for_api")                             \
  V(for_intern_string, "for_intern")                       \
  V(private_api_string, "private_api")                     \
  V(private_intern_string, "private_intern")               \
  V(Date_string, "Date")                                   \
  V(char_at_string, "CharAt")                              \
  V(undefined_string, "undefined")                         \
  V(value_of_string, "valueOf")                            \
  V(stack_string, "stack")                                 \
  V(toJSON_string, "toJSON")                               \
  V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic")   \
  V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
  V(stack_overflow_string, "kStackOverflowBoilerplate")    \
  V(illegal_access_string, "illegal access")               \
  V(cell_value_string, "%cell_value")                      \
  V(illegal_argument_string, "illegal argument")           \
  V(identity_hash_string, "v8::IdentityHash")              \
  V(closure_string, "(closure)")                           \
  V(dot_string, ".")                                       \
  V(compare_ic_string, "==")                               \
  V(strict_compare_ic_string, "===")                       \
  V(infinity_string, "Infinity")                           \
  V(minus_infinity_string, "-Infinity")                    \
  V(query_colon_string, "(?:)")                            \
  V(Generator_string, "Generator")                         \
  V(throw_string, "throw")                                 \
  V(done_string, "done")                                   \
  V(value_string, "value")                                 \
  V(next_string, "next")                                   \
  V(byte_length_string, "byteLength")                      \
  V(byte_offset_string, "byteOffset")                      \
  V(minus_zero_string, "-0")                               \
  V(Array_string, "Array")                                 \
  V(Error_string, "Error")                                 \
  V(RegExp_string, "RegExp")

#define PRIVATE_SYMBOL_LIST(V)      \
  V(nonextensible_symbol)           \
  V(sealed_symbol)                  \
  V(frozen_symbol)                  \
  V(nonexistent_symbol)             \
  V(elements_transition_symbol)     \
  V(prototype_users_symbol)         \
  V(observed_symbol)                \
  V(uninitialized_symbol)           \
  V(megamorphic_symbol)             \
  V(premonomorphic_symbol)          \
  V(generic_symbol)                 \
  V(stack_trace_symbol)             \
  V(detailed_stack_trace_symbol)    \
  V(normal_ic_symbol)               \
  V(home_object_symbol)             \
  V(intl_initialized_marker_symbol) \
  V(intl_impl_object_symbol)        \
  V(promise_debug_marker_symbol)    \
  V(promise_has_handler_symbol)     \
  V(class_script_symbol)            \
  V(class_start_position_symbol)    \
  V(class_end_position_symbol)

#define PUBLIC_SYMBOL_LIST(V)                                    \
  V(has_instance_symbol, symbolHasInstance, Symbol.hasInstance)  \
  V(is_concat_spreadable_symbol, symbolIsConcatSpreadable,       \
    Symbol.isConcatSpreadable)                                   \
  V(is_regexp_symbol, symbolIsRegExp, Symbol.isRegExp)           \
  V(iterator_symbol, symbolIterator, Symbol.iterator)            \
  V(to_string_tag_symbol, symbolToStringTag, Symbol.toStringTag) \
  V(unscopables_symbol, symbolUnscopables, Symbol.unscopables)

// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is not complete.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
  V(ByteArrayMap)                       \
  V(FreeSpaceMap)                       \
  V(OnePointerFillerMap)                \
  V(TwoPointerFillerMap)                \
  V(UndefinedValue)                     \
  V(TheHoleValue)                       \
  V(NullValue)                          \
  V(TrueValue)                          \
  V(FalseValue)                         \
  V(UninitializedValue)                 \
  V(CellMap)                            \
  V(GlobalPropertyCellMap)              \
  V(SharedFunctionInfoMap)              \
  V(MetaMap)                            \
  V(HeapNumberMap)                      \
  V(MutableHeapNumberMap)               \
  V(NativeContextMap)                   \
  V(FixedArrayMap)                      \
  V(CodeMap)                            \
  V(ScopeInfoMap)                       \
  V(FixedCOWArrayMap)                   \
  V(FixedDoubleArrayMap)                \
  V(ConstantPoolArrayMap)               \
  V(WeakCellMap)                        \
  V(NoInterceptorResultSentinel)        \
  V(HashTableMap)                       \
  V(OrderedHashTableMap)                \
  V(EmptyFixedArray)                    \
  V(EmptyByteArray)                     \
  V(EmptyDescriptorArray)               \
  V(EmptyConstantPoolArray)             \
  V(ArgumentsMarker)                    \
  V(SymbolMap)                          \
  V(SloppyArgumentsElementsMap)         \
  V(FunctionContextMap)                 \
  V(CatchContextMap)                    \
  V(WithContextMap)                     \
  V(BlockContextMap)                    \
  V(ModuleContextMap)                   \
  V(ScriptContextMap)                   \
  V(UndefinedMap)                       \
  V(TheHoleMap)                         \
  V(NullMap)                            \
  V(BooleanMap)                         \
  V(UninitializedMap)                   \
  V(ArgumentsMarkerMap)                 \
  V(JSMessageObjectMap)                 \
  V(ForeignMap)                         \
  V(NeanderMap)                         \
  PRIVATE_SYMBOL_LIST(V)
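
// Illustrative sketch (hypothetical helper, not part of this header): since
// the roots above never move and are never collected, storing one of them
// into another object cannot create a reference the GC has to track, so the
// write barrier may be skipped:
//
//   void StoreImmortalRoot(Heap* heap, Object** slot) {
//     *slot = heap->empty_fixed_array();  // EmptyFixedArray is in the list.
//     // No Heap::RecordWrite() needed: the value is immortal immovable.
//   }
//
// RootIsImmortalImmovable(int root_index), declared further down, is the
// predicate form of this list.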

// Forward declarations.
class HeapStats;
class Isolate;
class WeakObjectRetainer;


typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
                                                      Object** pointer);

class StoreBufferRebuilder {
 public:
  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer) {}

  void Callback(MemoryChunk* page, StoreBufferEvent event);

 private:
  StoreBuffer* store_buffer_;

  // We record in this variable how full the store buffer was when we started
  // iterating over the current page, finding pointers to new space. If the
  // store buffer overflows again we can exempt the page from the store buffer
  // by rewinding to this point instead of having to search the store buffer.
  Object*** start_of_current_page_;
  // The current page we are scanning in the store buffer iterator.
  MemoryChunk* current_page_;
};


// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
 public:
  explicit PromotionQueue(Heap* heap)
      : front_(NULL),
        rear_(NULL),
        limit_(NULL),
        emergency_stack_(0),
        heap_(heap) {}

  void Initialize();

  void Destroy() {
    DCHECK(is_empty());
    delete emergency_stack_;
    emergency_stack_ = NULL;
  }

  Page* GetHeadPage() {
    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
  }

  void SetNewLimit(Address limit) {
    limit_ = reinterpret_cast<intptr_t*>(limit);

    if (limit_ <= rear_) {
      return;
    }

    RelocateQueueHead();
  }

  bool IsBelowPromotionQueue(Address to_space_top) {
    // If the given to-space top pointer and the head of the promotion queue
    // are not on the same page, then the to-space objects are below the
    // promotion queue.
    if (GetHeadPage() != Page::FromAddress(to_space_top)) {
      return true;
    }
    // If the to-space top pointer is less than or equal to the promotion
    // queue head, then the to-space objects are below the promotion queue.
    return reinterpret_cast<intptr_t*>(to_space_top) <= rear_;
  }

  bool is_empty() {
    return (front_ == rear_) &&
           (emergency_stack_ == NULL || emergency_stack_->length() == 0);
  }

  inline void insert(HeapObject* target, int size);

  void remove(HeapObject** target, int* size) {
    DCHECK(!is_empty());
    if (front_ == rear_) {
      Entry e = emergency_stack_->RemoveLast();
      *target = e.obj_;
      *size = e.size_;
      return;
    }

    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
      NewSpacePage* front_page =
          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
      DCHECK(!front_page->prev_page()->is_anchor());
      front_ = reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
    }
    *target = reinterpret_cast<HeapObject*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    // Assert no underflow.
    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
                                reinterpret_cast<Address>(front_));
  }

 private:
  // The front of the queue is higher in the memory page chain than the rear.
  intptr_t* front_;
  intptr_t* rear_;
  intptr_t* limit_;

  static const int kEntrySizeInWords = 2;

  struct Entry {
    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) {}

    HeapObject* obj_;
    int size_;
  };
  List<Entry>* emergency_stack_;

  Heap* heap_;

  void RelocateQueueHead();

  DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};
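
// Illustrative note on the queue layout above: each entry occupies
// kEntrySizeInWords == 2 words and the queue grows downwards in memory, so
// insert() conceptually performs
//
//   *(--rear_) = reinterpret_cast<intptr_t>(target);
//   *(--rear_) = size;
//
// which is why remove() reads the object pointer first and the size second
// as it walks front_ downwards. (A sketch inferred from remove(); insert()
// itself is defined elsewhere.)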


typedef void (*ScavengingCallback)(Map* map, HeapObject** slot,
                                   HeapObject* object);

// The external string table is where all external strings are registered.
// We need to keep track of such strings in order to finalize them properly.
class ExternalStringTable {
 public:
  // Registers an external string.
  inline void AddString(String* string);

  inline void Iterate(ObjectVisitor* v);

  // Restores internal invariant and gets rid of collected strings.
  // Must be called after each Iterate() that modified the strings.
  void CleanUp();

  // Destroys all allocated memory.
  void TearDown();

 private:
  explicit ExternalStringTable(Heap* heap) : heap_(heap) {}

  friend class Heap;

  inline void Verify();

  inline void AddOldString(String* string);

  // Notifies the table that only a prefix of the new list is valid.
  inline void ShrinkNewStrings(int position);
  // To speed up scavenge collections, new-space strings are kept
  // separate from old-space strings.
  List<Object*> new_space_strings_;
  List<Object*> old_space_strings_;

  Heap* heap_;

  DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
};


enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};


class Heap {
 public:
  // Configure heap size in MB before setup. Return false if the heap has been
  // set up already.
  bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
                     int max_executable_size, size_t code_range_size);
  bool ConfigureHeapDefault();

  // Prepares the heap, setting up memory areas that are needed in the isolate
  // without actually creating any objects.
  bool SetUp();

  // Bootstraps the object heap with the core set of objects required to run.
  // Returns whether it succeeded.
  bool CreateHeapObjects();

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Set the stack limit in the roots_ array. Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  void SetStackLimits();

  // Notifies the heap that it is ok to start marking or other activities that
  // should not happen during deserialization.
  void NotifyDeserializationComplete();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp();

  // Returns the maximum amount of memory reserved for the heap. For
  // the young generation, we reserve 4 times the amount needed for a
  // semi space. The young generation consists of two semi spaces and
  // we reserve twice the amount needed for those in order to ensure
  // that new space can be aligned to its size.
  intptr_t MaxReserved() {
    return 4 * reserved_semispace_size_ + max_old_generation_size_;
  }
  int MaxSemiSpaceSize() { return max_semi_space_size_; }
  int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
  int InitialSemiSpaceSize() { return initial_semispace_size_; }
  int TargetSemiSpaceSize() { return target_semispace_size_; }
  intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
  intptr_t MaxExecutableSize() { return max_executable_size_; }

  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
  // more spaces are needed until it reaches the limit.
  intptr_t Capacity();

  // Returns the amount of memory currently committed for the heap.
  intptr_t CommittedMemory();

  // Returns the amount of executable memory currently committed for the heap.
  intptr_t CommittedMemoryExecutable();

  // Returns the amount of physical memory currently committed for the heap.
  size_t CommittedPhysicalMemory();

  // Returns the maximum amount of memory ever committed for the heap.
  intptr_t MaximumCommittedMemory() { return maximum_committed_; }

  // Updates the maximum committed memory for the heap. Should be called
  // whenever a space grows.
  void UpdateMaximumCommitted();

  // Returns the available bytes in space w/o growing.
  // Heap doesn't guarantee that it can allocate an object that requires
  // all available bytes. Check MaxHeapObjectSize() instead.
  intptr_t Available();

  // Returns the size of all objects residing in the heap.
  intptr_t SizeOfObjects();

  // Return the starting address and a mask for the new space. And-masking an
  // address with the mask will result in the start address of the new space
  // for all addresses in either semispace.
  Address NewSpaceStart() { return new_space_.start(); }
  uintptr_t NewSpaceMask() { return new_space_.mask(); }
  Address NewSpaceTop() { return new_space_.top(); }

  NewSpace* new_space() { return &new_space_; }
  OldSpace* old_pointer_space() { return old_pointer_space_; }
  OldSpace* old_data_space() { return old_data_space_; }
  OldSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  CellSpace* cell_space() { return cell_space_; }
  PropertyCellSpace* property_cell_space() { return property_cell_space_; }
  LargeObjectSpace* lo_space() { return lo_space_; }
  PagedSpace* paged_space(int idx) {
    switch (idx) {
      case OLD_POINTER_SPACE:
        return old_pointer_space();
      case OLD_DATA_SPACE:
        return old_data_space();
      case MAP_SPACE:
        return map_space();
      case CELL_SPACE:
        return cell_space();
      case PROPERTY_CELL_SPACE:
        return property_cell_space();
      case CODE_SPACE:
        return code_space();
      case NEW_SPACE:
      case LO_SPACE:
        UNREACHABLE();
    }
    return NULL;
  }

  bool always_allocate() { return always_allocate_scope_depth_ != 0; }
  Address always_allocate_scope_depth_address() {
    return reinterpret_cast<Address>(&always_allocate_scope_depth_);
  }

  Address* NewSpaceAllocationTopAddress() {
    return new_space_.allocation_top_address();
  }
  Address* NewSpaceAllocationLimitAddress() {
    return new_space_.allocation_limit_address();
  }

  Address* OldPointerSpaceAllocationTopAddress() {
    return old_pointer_space_->allocation_top_address();
  }
  Address* OldPointerSpaceAllocationLimitAddress() {
    return old_pointer_space_->allocation_limit_address();
  }

  Address* OldDataSpaceAllocationTopAddress() {
    return old_data_space_->allocation_top_address();
  }
  Address* OldDataSpaceAllocationLimitAddress() {
    return old_data_space_->allocation_limit_address();
  }

  // Returns a deep copy of the JavaScript object.
  // Properties and elements are copied too.
  // Optionally takes an AllocationSite to be appended in an AllocationMemento.
  MUST_USE_RESULT AllocationResult
      CopyJSObject(JSObject* source, AllocationSite* site = NULL);

  // Clear the Instanceof cache (used when a prototype changes).
  inline void ClearInstanceofCache();

  // Iterates the whole code space to clear all ICs of the given kind.
  void ClearAllICsByKind(Code::Kind kind);

  // For use during bootup.
  void RepairFreeListsAfterBoot();

  template <typename T>
  static inline bool IsOneByte(T t, int chars);

  // Moves len elements within a given array from src_index to dst_index.
  void MoveElements(FixedArray* array, int dst_index, int src_index, int len);

  // Sloppy mode arguments object size.
  static const int kSloppyArgumentsObjectSize =
      JSObject::kHeaderSize + 2 * kPointerSize;
  // Strict mode arguments have no callee, so they are smaller.
  static const int kStrictArgumentsObjectSize =
      JSObject::kHeaderSize + 1 * kPointerSize;
  // Indices for direct access into argument objects.
  static const int kArgumentsLengthIndex = 0;
  // callee is only valid in sloppy mode.
  static const int kArgumentsCalleeIndex = 1;

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  inline void FinalizeExternalString(String* string);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when introducing gaps within pages.
  void CreateFillerObjectAt(Address addr, int size);

  bool CanMoveObjectStart(HeapObject* object);

  // Indicates whether live bytes adjustment is triggered from within the GC
  // code or from mutator code.
  enum InvocationMode { FROM_GC, FROM_MUTATOR };

  // Maintain consistency of live bytes during incremental marking.
  void AdjustLiveBytes(Address address, int by, InvocationMode mode);

  // Trim the given array from the left. Note that this relocates the object
  // start and hence is only valid if there is only a single reference to it.
  FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Trim the given array from the right.
  template<Heap::InvocationMode mode>
  void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Converts the given boolean condition to a JavaScript boolean value.
  inline Object* ToBoolean(bool condition);

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  inline bool CollectGarbage(
      AllocationSpace space, const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  static const int kNoGCFlags = 0;
  static const int kReduceMemoryFootprintMask = 1;
  static const int kAbortIncrementalMarkingMask = 2;

  // Making the heap iterable requires us to abort incremental marking.
  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

  // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
  // non-zero, then the slower precise sweeper is used, which leaves the heap
  // in a state where we can iterate over the heap visiting all objects.
  void CollectAllGarbage(
      int flags, const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
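
  // Illustrative usage sketch (hypothetical call site): to force a full GC
  // that leaves the heap iterable, e.g. before walking all objects, a caller
  // would do
  //
  //   heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
  //                           "heap iteration requested");
  //
  // while plain memory pressure would pass kNoGCFlags or
  // kReduceMemoryFootprintMask with an appropriate reason string.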

  // Last hope GC, should try to squeeze as much as possible.
  void CollectAllAvailableGarbage(const char* gc_reason = NULL);

  // Check whether the heap is currently iterable.
  bool IsHeapIterable();

  // Notify the heap that a context has been disposed.
  int NotifyContextDisposed(bool dependant_context);

  inline void increment_scan_on_scavenge_pages() {
    scan_on_scavenge_pages_++;
    if (FLAG_gc_verbose) {
      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
    }
  }

  inline void decrement_scan_on_scavenge_pages() {
    scan_on_scavenge_pages_--;
    if (FLAG_gc_verbose) {
      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
    }
  }

  PromotionQueue* promotion_queue() { return &promotion_queue_; }

  void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
                             GCType gc_type_filter, bool pass_isolate = true);
  void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);

  void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
                             GCType gc_type_filter, bool pass_isolate = true);
  void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);

  // Heap root getters. We have versions with and without type::cast() here.
  // You can't use type::cast during GC because the assert fails.
  // TODO(1490): Try removing the unchecked accessors, now that GC marking does
  // not corrupt the map.
#define ROOT_ACCESSOR(type, name, camel_name)                                 \
  type* name() { return type::cast(roots_[k##camel_name##RootIndex]); }       \
  type* raw_unchecked_##name() {                                              \
    return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]);         \
  }
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
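
  // For illustration, the expansion of ROOT_ACCESSOR for the root list entry
  // V(Oddball, undefined_value, UndefinedValue) is
  //
  //   Oddball* undefined_value() {
  //     return Oddball::cast(roots_[kUndefinedValueRootIndex]);
  //   }
  //   Oddball* raw_unchecked_undefined_value() {
  //     return reinterpret_cast<Oddball*>(roots_[kUndefinedValueRootIndex]);
  //   }
  //
  // The raw_unchecked_ form skips type::cast() so it remains usable during
  // GC, when the checked cast's assert could fail.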

// Utility type maps
#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
  Map* name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define STRING_ACCESSOR(name, str) \
  String* name() { return String::cast(roots_[k##name##RootIndex]); }
  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR

#define SYMBOL_ACCESSOR(name) \
  Symbol* name() { return Symbol::cast(roots_[k##name##RootIndex]); }
  PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

#define SYMBOL_ACCESSOR(name, varname, description) \
  Symbol* name() { return Symbol::cast(roots_[k##name##RootIndex]); }
  PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

  // The hidden_string is special: it has the same content as the empty
  // string, but does not compare equal to it.
  String* hidden_string() { return hidden_string_; }

  void set_native_contexts_list(Object* object) {
    native_contexts_list_ = object;
  }
  Object* native_contexts_list() const { return native_contexts_list_; }

  void set_array_buffers_list(Object* object) { array_buffers_list_ = object; }
  Object* array_buffers_list() const { return array_buffers_list_; }

  void set_allocation_sites_list(Object* object) {
    allocation_sites_list_ = object;
  }
  Object* allocation_sites_list() { return allocation_sites_list_; }

  // Used in CreateAllocationSiteStub and the (de)serializer.
  Object** allocation_sites_list_address() { return &allocation_sites_list_; }

  Object* weak_object_to_code_table() { return weak_object_to_code_table_; }

  void set_encountered_weak_collections(Object* weak_collection) {
    encountered_weak_collections_ = weak_collection;
  }
  Object* encountered_weak_collections() const {
    return encountered_weak_collections_;
  }

  void set_encountered_weak_cells(Object* weak_cell) {
    encountered_weak_cells_ = weak_cell;
  }
  Object* encountered_weak_cells() const { return encountered_weak_cells_; }

  // Number of mark-sweeps.
  unsigned int ms_count() { return ms_count_; }

  // Iterates over all roots in the heap.
  void IterateRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all strong roots in the heap.
  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over entries in the smi roots list. Only interesting to the
  // serializer/deserializer, since GC does not care about smis.
  void IterateSmiRoots(ObjectVisitor* v);
  // Iterates over all the other roots in the heap.
  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);

  // Iterates over pointers to the from-semispace of new space found in the
  // memory interval from start to end.
  void IterateAndMarkPointersToFromSpace(Address start, Address end,
                                         ObjectSlotCallback callback);

  // Returns whether the object resides in new space.
  inline bool InNewSpace(Object* object);
  inline bool InNewSpace(Address address);
  inline bool InNewSpacePage(Address address);
  inline bool InFromSpace(Object* object);
  inline bool InToSpace(Object* object);

  // Returns whether the object resides in old pointer space.
  inline bool InOldPointerSpace(Address address);
  inline bool InOldPointerSpace(Object* object);

  // Returns whether the object resides in old data space.
  inline bool InOldDataSpace(Address address);
  inline bool InOldDataSpace(Object* object);

  // Checks whether an address/object is in the heap (including the auxiliary
  // area and unused area).
  bool Contains(Address addr);
  bool Contains(HeapObject* value);

  // Checks whether an address/object is in a given space.
  // Currently used by tests, serialization and heap verification only.
  bool InSpace(Address addr, AllocationSpace space);
  bool InSpace(HeapObject* value, AllocationSpace space);

  // Finds out which space an object should get promoted to based on its type.
  inline OldSpace* TargetSpace(HeapObject* object);
  static inline AllocationSpace TargetSpaceId(InstanceType type);

  // Checks whether the given object is allowed to be migrated from its
  // current space into the given destination space. Used for debugging.
  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);

  // Sets the stub_cache_ (only used when expanding the dictionary).
  void public_set_code_stubs(UnseededNumberDictionary* value) {
    roots_[kCodeStubsRootIndex] = value;
  }

  // Support for computing object sizes for old objects during GCs. Returns
  // a function that is guaranteed to be safe for computing object sizes in
  // the current GC phase.
  HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
    return gc_safe_size_of_old_object_;
  }

  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
  void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
    roots_[kNonMonomorphicCacheRootIndex] = value;
  }

  void public_set_empty_script(Script* script) {
    roots_[kEmptyScriptRootIndex] = script;
  }

  void public_set_store_buffer_top(Address* top) {
    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
  }

  void public_set_materialized_objects(FixedArray* objects) {
    roots_[kMaterializedObjectsRootIndex] = objects;
  }

  // Generated code can embed this address to get access to the roots.
  Object** roots_array_start() { return roots_; }

  Address* store_buffer_top_address() {
    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
  }

  static bool RootIsImmortalImmovable(int root_index);

#ifdef VERIFY_HEAP
  // Verify the heap is in its normal state before or after a GC.
  void Verify();


  bool weak_embedded_objects_verification_enabled() {
    return no_weak_object_verification_scope_depth_ == 0;
  }
#endif

#ifdef DEBUG
  void Print();
  void PrintHandles();

  void OldPointerSpaceCheckStoreBuffer();
  void MapSpaceCheckStoreBuffer();
  void LargeObjectSpaceCheckStoreBuffer();

  // Report heap statistics.
  void ReportHeapStatistics(const char* title);
  void ReportCodeStatistics(const char* title);
#endif

  // Zapping is needed for verify heap, and always done in debug builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  // Number of "runtime allocations" done so far.
  uint32_t allocations_count() { return allocations_count_; }

  // Returns deterministic "time" value in ms. Works only with
  // FLAG_verify_predictable.
  double synthetic_time() { return allocations_count_ / 2.0; }

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  // Write barrier support for address[offset] = o.
  INLINE(void RecordWrite(Address address, int offset));

  // Write barrier support for address[start : start + len[ = o.
  INLINE(void RecordWrites(Address address, int start, int len));

  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
  inline HeapState gc_state() { return gc_state_; }

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

#ifdef DEBUG
  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }

  void TracePathToObjectFrom(Object* target, Object* root);
  void TracePathToObject(Object* target);
  void TracePathToGlobal();
#endif

  // Callback functions passed to Heap::Iterate etc. They copy an object if
  // necessary; the object might be promoted to an old space. The caller must
  // ensure the precondition that the object is (a) a heap object and (b) in
  // the heap's from space.
  static inline void ScavengePointer(HeapObject** p);
  static inline void ScavengeObject(HeapObject** p, HeapObject* object);

  enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };

  // If an object has an AllocationMemento trailing it, return it; otherwise
  // return NULL.
  inline AllocationMemento* FindAllocationMemento(HeapObject* object);

  // An object may have an AllocationSite associated with it through a trailing
  // AllocationMemento. Its feedback should be updated when objects are found
  // in the heap.
  static inline void UpdateAllocationSiteFeedback(HeapObject* object,
                                                  ScratchpadSlotMode mode);

  // Support for partial snapshots. After calling this we have a linear
  // space to write objects in each space.
  struct Chunk {
    uint32_t size;
    Address start;
    Address end;
  };

  typedef List<Chunk> Reservation;

  // Returns false if not able to reserve.
  bool ReserveSpace(Reservation* reservations);

  //
  // Support for the API.
  //

  void CreateApiObjects();

  inline intptr_t PromotedTotalSize() {
    int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
    if (total > kMaxInt) return static_cast<intptr_t>(kMaxInt);
    if (total < 0) return 0;
    return static_cast<intptr_t>(total);
  }

  inline intptr_t OldGenerationSpaceAvailable() {
    return old_generation_allocation_limit_ - PromotedTotalSize();
  }

  inline intptr_t OldGenerationCapacityAvailable() {
    return max_old_generation_size_ - PromotedTotalSize();
  }

  static const intptr_t kMinimumOldGenerationAllocationLimit =
      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);

  static const int kPointerMultiplier = i::kPointerSize / 4;

  // The new space size has to be a power of 2. Sizes are in MB.
  static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;

  // The old space size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeMediumMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;

  // The executable size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
  static const int kMaxExecutableSizeMediumMemoryDevice =
      192 * kPointerMultiplier;
  static const int kMaxExecutableSizeHighMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxExecutableSizeHugeMemoryDevice =
      256 * kPointerMultiplier;
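
  // Worked example for the constants above: kPointerMultiplier is
  // i::kPointerSize / 4, i.e. 1 on 32-bit and 2 on 64-bit builds, so e.g.
  // kMaxOldSpaceSizeHugeMemoryDevice is 700 MB on 32-bit targets and 1400 MB
  // on 64-bit targets.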

  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
                                        int freed_global_handles);

  // Indicates whether inline bump-pointer allocation has been disabled.
  bool inline_allocation_disabled() { return inline_allocation_disabled_; }

  // Switch whether inline bump-pointer allocation should be used.
  void EnableInlineAllocation();
  void DisableInlineAllocation();

  // Implements the corresponding V8 API function.
  bool IdleNotification(double deadline_in_seconds);
  bool IdleNotification(int idle_time_in_ms);

  // Declare all the root indices. This defines the root list order.
  enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
    INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
#undef STRING_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
    PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name, varname, description) k##name##RootIndex,
    PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
    STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
    kStringTableRootIndex,

#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
    kRootListLength,
    kStrongRootListLength = kStringTableRootIndex,
    kSmiRootsStart = kStringTableRootIndex + 1
  };
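
  // Note on the layout fixed by the enum above: strong roots come first,
  // then internalized strings, private and public symbols, struct maps, the
  // string table, and finally the smi roots. Everything before the string
  // table is strong, hence kStrongRootListLength == kStringTableRootIndex,
  // and kSmiRootsStart is the first index visited by IterateSmiRoots()
  // rather than by the GC.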

  Object* root(RootListIndex index) { return roots_[index]; }

  STATIC_ASSERT(kUndefinedValueRootIndex ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
  STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
  STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
  STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);

  // Generated code can embed direct references to non-writable roots if
  // they are in new space.
  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
  // Generated code can treat direct references to this root as constant.
  bool RootCanBeTreatedAsConstant(RootListIndex root_index);

  Map* MapForFixedTypedArray(ExternalArrayType array_type);
  RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);

  Map* MapForExternalArrayType(ExternalArrayType array_type);
  RootListIndex RootIndexForExternalArrayType(ExternalArrayType array_type);

  RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
  ExternalArray* EmptyExternalArrayForMap(Map* map);
  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  // Copy block of memory from src to dst. Size of block should be aligned
  // by pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  // Optimized version of memmove for blocks with pointer size aligned sizes
  // and pointer size aligned addresses.
  static inline void MoveBlock(Address dst, Address src, int byte_size);

  // Check new space expansion criteria and expand semispaces if it was hit.
  void CheckNewSpaceExpansionCriteria();

  inline void IncrementPromotedObjectsSize(int object_size) {
    DCHECK(object_size > 0);
    promoted_objects_size_ += object_size;
  }

  inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
    DCHECK(object_size > 0);
    semi_space_copied_object_size_ += object_size;
  }

  inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }

  inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }

  inline void IncrementNodesPromoted() { nodes_promoted_++; }

  inline void IncrementYoungSurvivorsCounter(int survived) {
    DCHECK(survived >= 0);
    survived_last_scavenge_ = survived;
    survived_since_last_expansion_ += survived;
  }

  inline bool NextGCIsLikelyToBeFull() {
    if (FLAG_gc_global) return true;

    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;

    intptr_t adjusted_allocation_limit =
        old_generation_allocation_limit_ - new_space_.Capacity();

    if (PromotedTotalSize() >= adjusted_allocation_limit) return true;

    return false;
  }
| 1218 | |
| 1219 | void UpdateNewSpaceReferencesInExternalStringTable( |
| 1220 | ExternalStringTableUpdaterCallback updater_func); |
| 1221 | |
| 1222 | void UpdateReferencesInExternalStringTable( |
| 1223 | ExternalStringTableUpdaterCallback updater_func); |
| 1224 | |
| 1225 | void ProcessWeakReferences(WeakObjectRetainer* retainer); |
| 1226 | |
| 1227 | void VisitExternalResources(v8::ExternalResourceVisitor* visitor); |
| 1228 | |
| 1229 |   // An object should be promoted if it has survived a
| 1230 | // scavenge operation. |
| 1231 | inline bool ShouldBePromoted(Address old_address, int object_size); |
| 1232 | |
| 1233 | void ClearJSFunctionResultCaches(); |
| 1234 | |
| 1235 | void ClearNormalizedMapCaches(); |
| 1236 | |
| 1237 | GCTracer* tracer() { return &tracer_; } |
| 1238 | |
| 1239 |   // Returns the size of objects residing in spaces other than new space.
| 1240 | intptr_t PromotedSpaceSizeOfObjects(); |
| 1241 | |
| 1242 | double total_regexp_code_generated() { return total_regexp_code_generated_; } |
| 1243 | void IncreaseTotalRegexpCodeGenerated(int size) { |
| 1244 | total_regexp_code_generated_ += size; |
| 1245 | } |
| 1246 | |
| 1247 | void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) { |
| 1248 | if (is_crankshafted) { |
| 1249 | crankshaft_codegen_bytes_generated_ += size; |
| 1250 | } else { |
| 1251 | full_codegen_bytes_generated_ += size; |
| 1252 | } |
| 1253 | } |
| 1254 | |
| 1255 | // Update GC statistics that are tracked on the Heap. |
| 1256 | void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator, |
| 1257 | double marking_time); |
| 1258 | |
| 1259 | // Returns maximum GC pause. |
| 1260 | double get_max_gc_pause() { return max_gc_pause_; } |
| 1261 | |
| 1262 | // Returns maximum size of objects alive after GC. |
| 1263 | intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; } |
| 1264 | |
| 1265 | // Returns minimal interval between two subsequent collections. |
| 1266 | double get_min_in_mutator() { return min_in_mutator_; } |
| 1267 | |
| 1268 | MarkCompactCollector* mark_compact_collector() { |
| 1269 | return &mark_compact_collector_; |
| 1270 | } |
| 1271 | |
| 1272 | StoreBuffer* store_buffer() { return &store_buffer_; } |
| 1273 | |
| 1274 | Marking* marking() { return &marking_; } |
| 1275 | |
| 1276 | IncrementalMarking* incremental_marking() { return &incremental_marking_; } |
| 1277 | |
| 1278 | ExternalStringTable* external_string_table() { |
| 1279 | return &external_string_table_; |
| 1280 | } |
| 1281 | |
| 1282 | // Returns the current sweep generation. |
| 1283 | int sweep_generation() { return sweep_generation_; } |
| 1284 | |
| 1285 | inline Isolate* isolate(); |
| 1286 | |
| 1287 | void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags); |
| 1288 | void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags); |
| 1289 | |
| 1290 | inline bool OldGenerationAllocationLimitReached(); |
| 1291 | |
| 1292 | inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) { |
| 1293 | scavenging_visitors_table_.GetVisitor(map)(map, slot, obj); |
| 1294 | } |
| 1295 | |
| 1296 | void QueueMemoryChunkForFree(MemoryChunk* chunk); |
| 1297 | void FreeQueuedChunks(); |
| 1298 | |
| 1299 | int gc_count() const { return gc_count_; } |
| 1300 | |
| 1301 |   bool RecentIdleNotificationHappened();
| 1302 |
| 1303 |   // Completely clear the Instanceof cache (to stop it keeping objects alive
| 1304 | // around a GC). |
| 1305 | inline void CompletelyClearInstanceofCache(); |
| 1306 | |
| 1307 | // The roots that have an index less than this are always in old space. |
| 1308 | static const int kOldSpaceRoots = 0x20; |
| 1309 | |
| 1310 | uint32_t HashSeed() { |
| 1311 | uint32_t seed = static_cast<uint32_t>(hash_seed()->value()); |
| 1312 | DCHECK(FLAG_randomize_hashes || seed == 0); |
| 1313 | return seed; |
| 1314 | } |
| 1315 | |
| 1316 | void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) { |
| 1317 | DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0)); |
| 1318 | set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset)); |
| 1319 | } |
| 1320 | |
| 1321 | void SetConstructStubDeoptPCOffset(int pc_offset) { |
| 1322 | DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0)); |
| 1323 | set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset)); |
| 1324 | } |
| 1325 | |
| 1326 | void SetGetterStubDeoptPCOffset(int pc_offset) { |
| 1327 | DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0)); |
| 1328 | set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset)); |
| 1329 | } |
| 1330 | |
| 1331 | void SetSetterStubDeoptPCOffset(int pc_offset) { |
| 1332 | DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0)); |
| 1333 | set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset)); |
| 1334 | } |
| 1335 | |
| 1336 |   // For post-mortem debugging.
| 1337 | void RememberUnmappedPage(Address page, bool compacted); |
| 1338 | |
| 1339 | // Global inline caching age: it is incremented on some GCs after context |
| 1340 | // disposal. We use it to flush inline caches. |
| 1341 | int global_ic_age() { return global_ic_age_; } |
| 1342 | |
| 1343 | void AgeInlineCaches() { |
| 1344 | global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax; |
| 1345 | } |
| 1346 | |
| 1347 | bool flush_monomorphic_ics() { return flush_monomorphic_ics_; } |
| 1348 | |
| 1349 | int64_t amount_of_external_allocated_memory() { |
| 1350 | return amount_of_external_allocated_memory_; |
| 1351 | } |
| 1352 | |
| 1353 | void DeoptMarkedAllocationSites(); |
| 1354 | |
| 1355 | bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; } |
| 1356 | |
| 1357 | bool DeoptMaybeTenuredAllocationSites() { |
| 1358 | return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0; |
| 1359 | } |
| 1360 | |
| 1361 | // ObjectStats are kept in two arrays, counts and sizes. Related stats are |
| 1362 | // stored in a contiguous linear buffer. Stats groups are stored one after |
| 1363 | // another. |
| 1364 | enum { |
| 1365 | FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1, |
| 1366 | FIRST_FIXED_ARRAY_SUB_TYPE = |
| 1367 | FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS, |
| 1368 | FIRST_CODE_AGE_SUB_TYPE = |
| 1369 | FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1, |
| 1370 | OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1 |
| 1371 | }; |
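  // Index layout sketch for object_counts_/object_sizes_, derived from the
  // constants above:
  //   [0 .. LAST_TYPE]                 per-InstanceType stats
  //   [FIRST_CODE_KIND_SUB_TYPE ..]    per-Code::Kind stats
  //   [FIRST_FIXED_ARRAY_SUB_TYPE ..]  per fixed array sub-type stats
  //   [FIRST_CODE_AGE_SUB_TYPE ..]     per code age stats
  // For example, RecordFixedArraySubTypeStats(s, n) below bumps the entries
  // at index FIRST_FIXED_ARRAY_SUB_TYPE + s in both arrays.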
| 1372 | |
| 1373 | void RecordObjectStats(InstanceType type, size_t size) { |
| 1374 | DCHECK(type <= LAST_TYPE); |
| 1375 | object_counts_[type]++; |
| 1376 | object_sizes_[type] += size; |
| 1377 | } |
| 1378 | |
| 1379 | void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) { |
| 1380 | int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type; |
| 1381 | int code_age_index = |
| 1382 | FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge; |
| 1383 | DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE && |
| 1384 | code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE); |
| 1385 | DCHECK(code_age_index >= FIRST_CODE_AGE_SUB_TYPE && |
| 1386 | code_age_index < OBJECT_STATS_COUNT); |
| 1387 | object_counts_[code_sub_type_index]++; |
| 1388 | object_sizes_[code_sub_type_index] += size; |
| 1389 | object_counts_[code_age_index]++; |
| 1390 | object_sizes_[code_age_index] += size; |
| 1391 | } |
| 1392 | |
| 1393 | void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) { |
| 1394 | DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE); |
| 1395 | object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++; |
| 1396 | object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size; |
| 1397 | } |
| 1398 | |
| 1399 | void CheckpointObjectStats(); |
| 1400 | |
| 1401 | // We don't use a LockGuard here since we want to lock the heap |
| 1402 | // only when FLAG_concurrent_recompilation is true. |
| 1403 | class RelocationLock { |
| 1404 | public: |
| 1405 | explicit RelocationLock(Heap* heap) : heap_(heap) { |
| 1406 | heap_->relocation_mutex_.Lock(); |
| 1407 | } |
| 1408 |
| 1410 | ~RelocationLock() { heap_->relocation_mutex_.Unlock(); } |
| 1411 | |
| 1412 | private: |
| 1413 | Heap* heap_; |
| 1414 | }; |
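  // Minimal usage sketch (hypothetical call site); the mutex is held for the
  // lifetime of the scoped local:
  //
  //   {
  //     Heap::RelocationLock relocation_lock(heap);
  //     // ... work that must not overlap with code relocation ...
  //   }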
| 1415 | |
| 1416 | void AddWeakObjectToCodeDependency(Handle<Object> obj, |
| 1417 | Handle<DependentCode> dep); |
| 1418 | |
| 1419 | DependentCode* LookupWeakObjectToCodeDependency(Handle<Object> obj); |
| 1420 | |
| 1421 | void InitializeWeakObjectToCodeTable() { |
| 1422 | set_weak_object_to_code_table(undefined_value()); |
| 1423 | } |
| 1424 | |
| 1425 | void EnsureWeakObjectToCodeTable(); |
| 1426 | |
| 1427 | static void FatalProcessOutOfMemory(const char* location, |
| 1428 | bool take_snapshot = false); |
| 1429 | |
| 1430 | // This event is triggered after successful allocation of a new object made |
| 1431 |   // by the runtime. Allocations of target space for object evacuation do not
| 1432 | // trigger the event. In order to track ALL allocations one must turn off |
| 1433 | // FLAG_inline_new and FLAG_use_allocation_folding. |
| 1434 | inline void OnAllocationEvent(HeapObject* object, int size_in_bytes); |
| 1435 | |
| 1436 |   // This event is triggered after an object is moved to a new place.
| 1437 | inline void OnMoveEvent(HeapObject* target, HeapObject* source, |
| 1438 | int size_in_bytes); |
| 1439 | |
| 1440 |   bool deserialization_complete() const { return deserialization_complete_; }
| 1441 |
| 1442 |  protected:
| 1443 | // Methods made available to tests. |
| 1444 | |
| 1445 | // Allocates a JS Map in the heap. |
| 1446 | MUST_USE_RESULT AllocationResult |
| 1447 | AllocateMap(InstanceType instance_type, int instance_size, |
| 1448 | ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); |
| 1449 | |
| 1450 | // Allocates and initializes a new JavaScript object based on a |
| 1451 | // constructor. |
| 1452 |   // If allocation_site is non-null, then a memento pointing to the site is
| 1453 |   // emitted after the object.
| 1454 | MUST_USE_RESULT AllocationResult |
| 1455 | AllocateJSObject(JSFunction* constructor, |
| 1456 | PretenureFlag pretenure = NOT_TENURED, |
| 1457 | AllocationSite* allocation_site = NULL); |
| 1458 | |
| 1459 | // Allocates and initializes a new JavaScript object based on a map. |
| 1460 | // Passing an allocation site means that a memento will be created that |
| 1461 | // points to the site. |
| 1462 | MUST_USE_RESULT AllocationResult |
| 1463 | AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED, |
| 1464 | bool alloc_props = true, |
| 1465 | AllocationSite* allocation_site = NULL); |
| 1466 | |
| 1467 |   // Allocates a HeapNumber from value.
| 1468 | MUST_USE_RESULT AllocationResult |
| 1469 | AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE, |
| 1470 | PretenureFlag pretenure = NOT_TENURED); |
| 1471 | |
| 1472 | // Allocate a byte array of the specified length |
| 1473 | MUST_USE_RESULT AllocationResult |
| 1474 | AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED); |
| 1475 | |
| 1476 | // Copy the code and scope info part of the code object, but insert |
| 1477 | // the provided data as the relocation information. |
| 1478 | MUST_USE_RESULT AllocationResult |
| 1479 | CopyCode(Code* code, Vector<byte> reloc_info); |
| 1480 | |
| 1481 | MUST_USE_RESULT AllocationResult CopyCode(Code* code); |
| 1482 | |
| 1483 | // Allocates a fixed array initialized with undefined values |
| 1484 | MUST_USE_RESULT AllocationResult |
| 1485 | AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED); |
| 1486 | |
| 1487 | private: |
| 1488 | Heap(); |
| 1489 | |
| 1490 |   // The amount of external memory registered through the API and kept alive
| 1491 |   // by global handles.
| 1492 | int64_t amount_of_external_allocated_memory_; |
| 1493 | |
| 1494 | // Caches the amount of external memory registered at the last global gc. |
| 1495 | int64_t amount_of_external_allocated_memory_at_last_global_gc_; |
| 1496 | |
| 1497 | // This can be calculated directly from a pointer to the heap; however, it is |
| 1498 | // more expedient to get at the isolate directly from within Heap methods. |
| 1499 | Isolate* isolate_; |
| 1500 | |
| 1501 | Object* roots_[kRootListLength]; |
| 1502 | |
| 1503 | size_t code_range_size_; |
| 1504 | int reserved_semispace_size_; |
| 1505 | int max_semi_space_size_; |
| 1506 | int initial_semispace_size_; |
| 1507 |   int target_semispace_size_;
| 1508 |   intptr_t max_old_generation_size_;
| 1509 |   intptr_t initial_old_generation_size_;
| 1510 |   bool old_generation_size_configured_;
| 1511 |   intptr_t max_executable_size_;
| 1512 | intptr_t maximum_committed_; |
| 1513 | |
| 1514 | // For keeping track of how much data has survived |
| 1515 | // scavenge since last new space expansion. |
| 1516 | int survived_since_last_expansion_; |
| 1517 | |
| 1518 |   // ... and since the last scavenge.
| 1519 |   int survived_last_scavenge_;
| 1520 |
| 1521 |   // For keeping track of when to flush RegExp code.
| 1522 | int sweep_generation_; |
| 1523 | |
| 1524 | int always_allocate_scope_depth_; |
| 1525 | |
| 1526 | // For keeping track of context disposals. |
| 1527 | int contexts_disposed_; |
| 1528 | |
| 1529 | int global_ic_age_; |
| 1530 | |
| 1531 | bool flush_monomorphic_ics_; |
| 1532 | |
| 1533 | int scan_on_scavenge_pages_; |
| 1534 | |
| 1535 | NewSpace new_space_; |
| 1536 | OldSpace* old_pointer_space_; |
| 1537 | OldSpace* old_data_space_; |
| 1538 | OldSpace* code_space_; |
| 1539 | MapSpace* map_space_; |
| 1540 | CellSpace* cell_space_; |
| 1541 | PropertyCellSpace* property_cell_space_; |
| 1542 | LargeObjectSpace* lo_space_; |
| 1543 | HeapState gc_state_; |
| 1544 | int gc_post_processing_depth_; |
| 1545 | Address new_space_top_after_last_gc_; |
| 1546 | |
| 1547 | // Returns the amount of external memory registered since last global gc. |
| 1548 | int64_t PromotedExternalMemorySize(); |
| 1549 | |
| 1550 | // How many "runtime allocations" happened. |
| 1551 | uint32_t allocations_count_; |
| 1552 | |
| 1553 | // Running hash over allocations performed. |
| 1554 | uint32_t raw_allocations_hash_; |
| 1555 | |
| 1556 | // Countdown counter, dumps allocation hash when 0. |
| 1557 | uint32_t dump_allocations_hash_countdown_; |
| 1558 | |
| 1559 | // How many mark-sweep collections happened. |
| 1560 | unsigned int ms_count_; |
| 1561 | |
| 1562 |   // How many GCs have happened.
| 1563 | unsigned int gc_count_; |
| 1564 | |
| 1565 |   // For post-mortem debugging.
| 1566 | static const int kRememberedUnmappedPages = 128; |
| 1567 | int remembered_unmapped_pages_index_; |
| 1568 | Address remembered_unmapped_pages_[kRememberedUnmappedPages]; |
| 1569 | |
| 1570 | // Total length of the strings we failed to flatten since the last GC. |
| 1571 | int unflattened_strings_length_; |
| 1572 | |
| 1573 | #define ROOT_ACCESSOR(type, name, camel_name) \ |
| 1574 | inline void set_##name(type* value) { \ |
| 1575 | /* The deserializer makes use of the fact that these common roots are */ \ |
| 1576 | /* never in new space and never on a page that is being compacted. */ \ |
| 1577 | DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \ |
| 1578 | roots_[k##camel_name##RootIndex] = value; \ |
| 1579 | } |
| 1580 | ROOT_LIST(ROOT_ACCESSOR) |
| 1581 | #undef ROOT_ACCESSOR |
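  // Illustrative expansion (sketch): for a hypothetical root list entry
  // V(Map, foo_map, FooMap), ROOT_ACCESSOR above generates:
  //   inline void set_foo_map(Map* value) {
  //     DCHECK(kFooMapRootIndex >= kOldSpaceRoots || !InNewSpace(value));
  //     roots_[kFooMapRootIndex] = value;
  //   }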
| 1582 | |
| 1583 | #ifdef DEBUG |
| 1584 | // If the --gc-interval flag is set to a positive value, this |
| 1585 |   // variable holds the number of allocations that remain until the
| 1586 |   // next failure and garbage collection.
| 1587 | int allocation_timeout_; |
| 1588 | #endif // DEBUG |
| 1589 | |
| 1590 | // Limit that triggers a global GC on the next (normally caused) GC. This |
| 1591 | // is checked when we have already decided to do a GC to help determine |
| 1592 | // which collector to invoke, before expanding a paged space in the old |
| 1593 | // generation and on every allocation in large object space. |
| 1594 | intptr_t old_generation_allocation_limit_; |
| 1595 | |
| 1596 | // Indicates that an allocation has failed in the old generation since the |
| 1597 | // last GC. |
| 1598 | bool old_gen_exhausted_; |
| 1599 | |
| 1600 | // Indicates that inline bump-pointer allocation has been globally disabled |
| 1601 | // for all spaces. This is used to disable allocations in generated code. |
| 1602 | bool inline_allocation_disabled_; |
| 1603 | |
| 1604 | // Weak list heads, threaded through the objects. |
| 1605 |   // List heads are initialized lazily and contain the undefined_value at start.
| 1606 | Object* native_contexts_list_; |
| 1607 | Object* array_buffers_list_; |
| 1608 | Object* allocation_sites_list_; |
| 1609 | |
| 1610 | // WeakHashTable that maps objects embedded in optimized code to dependent |
| 1611 |   // code list. It is initialized lazily and contains the undefined_value at
| 1612 | // start. |
| 1613 | Object* weak_object_to_code_table_; |
| 1614 | |
| 1615 | // List of encountered weak collections (JSWeakMap and JSWeakSet) during |
| 1616 | // marking. It is initialized during marking, destroyed after marking and |
| 1617 | // contains Smi(0) while marking is not active. |
| 1618 | Object* encountered_weak_collections_; |
| 1619 | |
| 1620 |   Object* encountered_weak_cells_;
| 1621 |
| 1622 |   StoreBufferRebuilder store_buffer_rebuilder_;
| 1623 | |
| 1624 | struct StringTypeTable { |
| 1625 | InstanceType type; |
| 1626 | int size; |
| 1627 | RootListIndex index; |
| 1628 | }; |
| 1629 | |
| 1630 | struct ConstantStringTable { |
| 1631 | const char* contents; |
| 1632 | RootListIndex index; |
| 1633 | }; |
| 1634 | |
| 1635 | struct StructTable { |
| 1636 | InstanceType type; |
| 1637 | int size; |
| 1638 | RootListIndex index; |
| 1639 | }; |
| 1640 | |
| 1641 | static const StringTypeTable string_type_table[]; |
| 1642 | static const ConstantStringTable constant_string_table[]; |
| 1643 | static const StructTable struct_table[]; |
| 1644 | |
| 1645 | // The special hidden string which is an empty string, but does not match |
| 1646 | // any string when looked up in properties. |
| 1647 | String* hidden_string_; |
| 1648 | |
| 1649 | // GC callback function, called before and after mark-compact GC. |
| 1650 | // Allocations in the callback function are disallowed. |
| 1651 | struct GCPrologueCallbackPair { |
| 1652 | GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback, |
| 1653 | GCType gc_type, bool pass_isolate) |
| 1654 | : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {} |
| 1655 | bool operator==(const GCPrologueCallbackPair& pair) const { |
| 1656 | return pair.callback == callback; |
| 1657 | } |
| 1658 | v8::Isolate::GCPrologueCallback callback; |
| 1659 | GCType gc_type; |
| 1660 | // TODO(dcarney): remove variable |
| 1661 | bool pass_isolate_; |
| 1662 | }; |
| 1663 | List<GCPrologueCallbackPair> gc_prologue_callbacks_; |
| 1664 | |
| 1665 | struct GCEpilogueCallbackPair { |
| 1666 | GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback, |
| 1667 | GCType gc_type, bool pass_isolate) |
| 1668 | : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {} |
| 1669 | bool operator==(const GCEpilogueCallbackPair& pair) const { |
| 1670 | return pair.callback == callback; |
| 1671 | } |
| 1672 | v8::Isolate::GCPrologueCallback callback; |
| 1673 | GCType gc_type; |
| 1674 | // TODO(dcarney): remove variable |
| 1675 | bool pass_isolate_; |
| 1676 | }; |
| 1677 | List<GCEpilogueCallbackPair> gc_epilogue_callbacks_; |
| 1678 | |
| 1679 | // Support for computing object sizes during GC. |
| 1680 | HeapObjectCallback gc_safe_size_of_old_object_; |
| 1681 | static int GcSafeSizeOfOldObject(HeapObject* object); |
| 1682 | |
| 1683 | // Update the GC state. Called from the mark-compact collector. |
| 1684 | void MarkMapPointersAsEncoded(bool encoded) { |
| 1685 | DCHECK(!encoded); |
| 1686 | gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject; |
| 1687 | } |
| 1688 | |
| 1689 | // Code that should be run before and after each GC. Includes some |
| 1690 | // reporting/verification activities when compiled with DEBUG set. |
| 1691 | void GarbageCollectionPrologue(); |
| 1692 | void GarbageCollectionEpilogue(); |
| 1693 | |
| 1694 | // Pretenuring decisions are made based on feedback collected during new |
| 1695 |   // space evacuation. Note that between feedback collection and calling this
| 1696 |   // method, objects in old space must not move.
| 1697 | // Right now we only process pretenuring feedback in high promotion mode. |
| 1698 | void ProcessPretenuringFeedback(); |
| 1699 | |
| 1700 | // Checks whether a global GC is necessary |
| 1701 | GarbageCollector SelectGarbageCollector(AllocationSpace space, |
| 1702 | const char** reason); |
| 1703 | |
| 1704 |   // Make sure there is a filler value behind the top of the new space
| 1705 |   // so that the GC does not confuse some uninitialized/stale memory
| 1706 |   // with the allocation memento of the object at the top.
| 1707 | void EnsureFillerObjectAtTop(); |
| 1708 | |
| 1709 | // Ensure that we have swept all spaces in such a way that we can iterate |
| 1710 | // over all objects. May cause a GC. |
| 1711 | void MakeHeapIterable(); |
| 1712 | |
| 1713 |   // Performs a garbage collection operation.
| 1714 | // Returns whether there is a chance that another major GC could |
| 1715 | // collect more garbage. |
| 1716 | bool CollectGarbage( |
| 1717 | GarbageCollector collector, const char* gc_reason, |
| 1718 | const char* collector_reason, |
| 1719 | const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
| 1720 | |
| 1721 | // Performs garbage collection |
| 1722 | // Returns whether there is a chance another major GC could |
| 1723 | // collect more garbage. |
| 1724 | bool PerformGarbageCollection( |
| 1725 | GarbageCollector collector, |
| 1726 | const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); |
| 1727 | |
| 1728 | inline void UpdateOldSpaceLimits(); |
| 1729 | |
| 1730 | // Selects the proper allocation space depending on the given object |
| 1731 | // size, pretenuring decision, and preferred old-space. |
| 1732 | static AllocationSpace SelectSpace(int object_size, |
| 1733 | AllocationSpace preferred_old_space, |
| 1734 | PretenureFlag pretenure) { |
| 1735 | DCHECK(preferred_old_space == OLD_POINTER_SPACE || |
| 1736 | preferred_old_space == OLD_DATA_SPACE); |
| 1737 | if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE; |
| 1738 | return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE; |
| 1739 | } |
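  // Worked examples of the selection logic above (sizes are illustrative):
  //   SelectSpace(64, OLD_DATA_SPACE, TENURED)      -> OLD_DATA_SPACE
  //   SelectSpace(64, OLD_DATA_SPACE, NOT_TENURED)  -> NEW_SPACE
  //   any size > Page::kMaxRegularHeapObjectSize    -> LO_SPACE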
| 1740 | |
| 1741 |   HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);
| 1742 |
| 1743 |   // Allocate an uninitialized object. The memory is non-executable if the
| 1744 | // hardware and OS allow. This is the single choke-point for allocations |
| 1745 | // performed by the runtime and should not be bypassed (to extend this to |
| 1746 | // inlined allocations, use the Heap::DisableInlineAllocation() support). |
| 1747 | MUST_USE_RESULT inline AllocationResult AllocateRaw( |
| 1748 | int size_in_bytes, AllocationSpace space, AllocationSpace retry_space); |
| 1749 | |
| 1750 | // Allocates a heap object based on the map. |
| 1751 | MUST_USE_RESULT AllocationResult |
| 1752 | Allocate(Map* map, AllocationSpace space, |
| 1753 | AllocationSite* allocation_site = NULL); |
| 1754 | |
| 1755 | // Allocates a partial map for bootstrapping. |
| 1756 | MUST_USE_RESULT AllocationResult |
| 1757 | AllocatePartialMap(InstanceType instance_type, int instance_size); |
| 1758 | |
| 1759 | // Initializes a JSObject based on its map. |
| 1760 | void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties, |
| 1761 | Map* map); |
| 1762 | void InitializeAllocationMemento(AllocationMemento* memento, |
| 1763 | AllocationSite* allocation_site); |
| 1764 | |
| 1765 | // Allocate a block of memory in the given space (filled with a filler). |
| 1766 | // Used as a fall-back for generated code when the space is full. |
| 1767 | MUST_USE_RESULT AllocationResult |
| 1768 | AllocateFillerObject(int size, bool double_align, AllocationSpace space); |
| 1769 | |
| 1770 | // Allocate an uninitialized fixed array. |
| 1771 | MUST_USE_RESULT AllocationResult |
| 1772 | AllocateRawFixedArray(int length, PretenureFlag pretenure); |
| 1773 | |
| 1774 | // Allocate an uninitialized fixed double array. |
| 1775 | MUST_USE_RESULT AllocationResult |
| 1776 | AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure); |
| 1777 | |
| 1778 | // Allocate an initialized fixed array with the given filler value. |
| 1779 | MUST_USE_RESULT AllocationResult |
| 1780 | AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure, |
| 1781 | Object* filler); |
| 1782 | |
| 1783 |   // Allocates and partially initializes a String. There are two String
| 1784 | // encodings: one-byte and two-byte. These functions allocate a string of |
| 1785 | // the given length and set its map and length fields. The characters of |
| 1786 | // the string are uninitialized. |
| 1787 | MUST_USE_RESULT AllocationResult |
| 1788 | AllocateRawOneByteString(int length, PretenureFlag pretenure); |
| 1789 | MUST_USE_RESULT AllocationResult |
| 1790 | AllocateRawTwoByteString(int length, PretenureFlag pretenure); |
| 1791 | |
| 1792 | bool CreateInitialMaps(); |
| 1793 | void CreateInitialObjects(); |
| 1794 | |
| 1795 | // Allocates an internalized string in old space based on the character |
| 1796 | // stream. |
| 1797 | MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8( |
| 1798 | Vector<const char> str, int chars, uint32_t hash_field); |
| 1799 | |
| 1800 | MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString( |
| 1801 | Vector<const uint8_t> str, uint32_t hash_field); |
| 1802 | |
| 1803 | MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString( |
| 1804 | Vector<const uc16> str, uint32_t hash_field); |
| 1805 | |
| 1806 | template <bool is_one_byte, typename T> |
| 1807 | MUST_USE_RESULT AllocationResult |
| 1808 | AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field); |
| 1809 | |
| 1810 | template <typename T> |
| 1811 | MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl( |
| 1812 | T t, int chars, uint32_t hash_field); |
| 1813 | |
| 1814 | // Allocates an uninitialized fixed array. It must be filled by the caller. |
| 1815 | MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length); |
| 1816 | |
| 1817 | // Make a copy of src and return it. Returns |
| 1818 | // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. |
| 1819 | MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src); |
| 1820 | |
| 1821 | // Make a copy of src, set the map, and return the copy. Returns |
| 1822 | // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. |
| 1823 | MUST_USE_RESULT AllocationResult |
| 1824 | CopyFixedArrayWithMap(FixedArray* src, Map* map); |
| 1825 | |
| 1826 | // Make a copy of src and return it. Returns |
| 1827 | // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. |
| 1828 | MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray( |
| 1829 | FixedDoubleArray* src); |
| 1830 | |
| 1831 | // Make a copy of src and return it. Returns |
| 1832 | // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. |
| 1833 | MUST_USE_RESULT inline AllocationResult CopyConstantPoolArray( |
| 1834 | ConstantPoolArray* src); |
| 1835 | |
| 1836 | |
| 1837 |   // Computes a single character string where the character has the given code.
| 1838 | // A cache is used for one-byte (Latin1) codes. |
| 1839 | MUST_USE_RESULT AllocationResult |
| 1840 | LookupSingleCharacterStringFromCode(uint16_t code); |
| 1841 | |
| 1842 | // Allocate a symbol in old space. |
| 1843 | MUST_USE_RESULT AllocationResult AllocateSymbol(); |
| 1844 | |
| 1845 | // Make a copy of src, set the map, and return the copy. |
| 1846 | MUST_USE_RESULT AllocationResult |
| 1847 | CopyConstantPoolArrayWithMap(ConstantPoolArray* src, Map* map); |
| 1848 | |
| 1849 | MUST_USE_RESULT AllocationResult AllocateConstantPoolArray( |
| 1850 | const ConstantPoolArray::NumberOfEntries& small); |
| 1851 | |
| 1852 | MUST_USE_RESULT AllocationResult AllocateExtendedConstantPoolArray( |
| 1853 | const ConstantPoolArray::NumberOfEntries& small, |
| 1854 | const ConstantPoolArray::NumberOfEntries& extended); |
| 1855 | |
| 1856 | // Allocates an external array of the specified length and type. |
| 1857 | MUST_USE_RESULT AllocationResult |
| 1858 | AllocateExternalArray(int length, ExternalArrayType array_type, |
| 1859 | void* external_pointer, PretenureFlag pretenure); |
| 1860 | |
| 1861 | // Allocates a fixed typed array of the specified length and type. |
| 1862 | MUST_USE_RESULT AllocationResult |
| 1863 | AllocateFixedTypedArray(int length, ExternalArrayType array_type, |
| 1864 | PretenureFlag pretenure); |
| 1865 | |
| 1866 | // Make a copy of src and return it. |
| 1867 | MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src); |
| 1868 | |
| 1869 | // Make a copy of src, set the map, and return the copy. |
| 1870 | MUST_USE_RESULT AllocationResult |
| 1871 | CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map); |
| 1872 | |
| 1873 |   // Allocates a fixed double array with uninitialized values.
| 1874 | MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray( |
| 1875 | int length, PretenureFlag pretenure = NOT_TENURED); |
| 1876 | |
| 1877 |   // These two Create*EntryStub functions are here and forced to not be inlined
| 1878 |   // because of a gcc-4.4 bug that assigns wrong vtable entries.
| 1879 | NO_INLINE(void CreateJSEntryStub()); |
| 1880 | NO_INLINE(void CreateJSConstructEntryStub()); |
| 1881 | |
| 1882 | void CreateFixedStubs(); |
| 1883 | |
| 1884 | // Allocate empty fixed array. |
| 1885 | MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray(); |
| 1886 | |
| 1887 | // Allocate empty external array of given type. |
| 1888 | MUST_USE_RESULT AllocationResult |
| 1889 | AllocateEmptyExternalArray(ExternalArrayType array_type); |
| 1890 | |
| 1891 | // Allocate empty fixed typed array of given type. |
| 1892 | MUST_USE_RESULT AllocationResult |
| 1893 | AllocateEmptyFixedTypedArray(ExternalArrayType array_type); |
| 1894 | |
| 1895 | // Allocate empty constant pool array. |
| 1896 | MUST_USE_RESULT AllocationResult AllocateEmptyConstantPoolArray(); |
| 1897 | |
| 1898 | // Allocate a tenured simple cell. |
| 1899 | MUST_USE_RESULT AllocationResult AllocateCell(Object* value); |
| 1900 | |
| 1901 | // Allocate a tenured JS global property cell initialized with the hole. |
| 1902 | MUST_USE_RESULT AllocationResult AllocatePropertyCell(); |
| 1903 | |
| 1904 |   MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);
| 1905 |
| 1906 |   // Allocates a new utility object in the old generation.
| 1907 | MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type); |
| 1908 | |
| 1909 | // Allocates a new foreign object. |
| 1910 | MUST_USE_RESULT AllocationResult |
| 1911 | AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED); |
| 1912 | |
| 1913 | MUST_USE_RESULT AllocationResult |
| 1914 | AllocateCode(int object_size, bool immovable); |
| 1915 | |
| 1916 | MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key); |
| 1917 | |
| 1918 | MUST_USE_RESULT AllocationResult InternalizeString(String* str); |
| 1919 | |
| 1920 | // Performs a minor collection in new generation. |
| 1921 | void Scavenge(); |
| 1922 | |
| 1923 | // Commits from space if it is uncommitted. |
| 1924 | void EnsureFromSpaceIsCommitted(); |
| 1925 | |
| 1926 | // Uncommit unused semi space. |
| 1927 | bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); } |
| 1928 | |
| 1929 | // Fill in bogus values in from space |
| 1930 | void ZapFromSpace(); |
| 1931 | |
| 1932 | static String* UpdateNewSpaceReferenceInExternalStringTableEntry( |
| 1933 | Heap* heap, Object** pointer); |
| 1934 | |
| 1935 | Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); |
| 1936 | static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page, |
| 1937 | StoreBufferEvent event); |
| 1938 | |
| 1939 | // Performs a major collection in the whole heap. |
| 1940 | void MarkCompact(); |
| 1941 | |
| 1942 | // Code to be run before and after mark-compact. |
| 1943 | void MarkCompactPrologue(); |
| 1944 |   void MarkCompactEpilogue();
| 1945 |
| 1946 | void ProcessNativeContexts(WeakObjectRetainer* retainer); |
| 1947 | void ProcessArrayBuffers(WeakObjectRetainer* retainer); |
| 1948 | void ProcessAllocationSites(WeakObjectRetainer* retainer); |
| 1949 | |
| 1950 |   // Deopts all code that contains allocation instructions which are tenured or
| 1951 |   // not tenured. Moreover, it clears the pretenuring allocation site statistics.
| 1952 | void ResetAllAllocationSitesDependentCode(PretenureFlag flag); |
| 1953 | |
| 1954 | // Evaluates local pretenuring for the old space and calls |
| 1955 | // ResetAllTenuredAllocationSitesDependentCode if too many objects died in |
| 1956 | // the old space. |
| 1957 | void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc); |
| 1958 | |
| 1959 | // Called on heap tear-down. |
| 1960 | void TearDownArrayBuffers(); |
| 1961 | |
| 1962 | // Record statistics before and after garbage collection. |
| 1963 | void ReportStatisticsBeforeGC(); |
| 1964 | void ReportStatisticsAfterGC(); |
| 1965 | |
| 1966 | // Slow part of scavenge object. |
| 1967 | static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); |
| 1968 | |
| 1969 | // Total RegExp code ever generated |
| 1970 | double total_regexp_code_generated_; |
| 1971 | |
| 1972 | GCTracer tracer_; |
| 1973 | |
| 1974 |   // Returns the length of the full-sized number string cache.
| 1975 | int FullSizeNumberStringCacheLength(); |
| 1976 | // Flush the number to string cache. |
| 1977 | void FlushNumberStringCache(); |
| 1978 | |
| 1979 | // Sets used allocation sites entries to undefined. |
| 1980 | void FlushAllocationSitesScratchpad(); |
| 1981 | |
| 1982 | // Initializes the allocation sites scratchpad with undefined values. |
| 1983 | void InitializeAllocationSitesScratchpad(); |
| 1984 | |
| 1985 | // Adds an allocation site to the scratchpad if there is space left. |
| 1986 | void AddAllocationSiteToScratchpad(AllocationSite* site, |
| 1987 | ScratchpadSlotMode mode); |
| 1988 | |
| 1989 | void UpdateSurvivalStatistics(int start_new_space_size); |
| 1990 | |
| 1991 | static const int kYoungSurvivalRateHighThreshold = 90; |
| 1992 | static const int kYoungSurvivalRateAllowedDeviation = 15; |
| 1993 | |
| 1994 | static const int kOldSurvivalRateLowThreshold = 10; |
| 1995 | |
| 1996 | int high_survival_rate_period_length_; |
| 1997 | intptr_t promoted_objects_size_; |
| 1998 |   double promotion_ratio_;
| 1999 |   double promotion_rate_;
| 2000 |   intptr_t semi_space_copied_object_size_;
| 2001 |   intptr_t previous_semi_space_copied_object_size_;
| 2002 |   double semi_space_copied_rate_;
| 2003 | int nodes_died_in_new_space_; |
| 2004 | int nodes_copied_in_new_space_; |
| 2005 | int nodes_promoted_; |
| 2006 | |
| 2007 |   // This is the pretenuring trigger for allocation sites that are in the
| 2008 |   // maybe-tenure state. When we switch to the maximum new space size we deoptimize
| 2009 | // the code that belongs to the allocation site and derive the lifetime |
| 2010 | // of the allocation site. |
| 2011 | unsigned int maximum_size_scavenges_; |
| 2012 | |
| 2013 | // TODO(hpayer): Allocation site pretenuring may make this method obsolete. |
| 2014 | // Re-visit incremental marking heuristics. |
| 2015 | bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; } |
| 2016 | |
| 2017 |   void ConfigureInitialOldGenerationSize();
| 2018 |
| 2019 |   void SelectScavengingVisitorsTable();
| 2020 | |
| 2021 | void IdleMarkCompact(const char* message); |
| 2022 | |
| 2023 |   bool TryFinalizeIdleIncrementalMarking(
| 2024 |       double idle_time_in_ms, size_t size_of_objects,
| 2025 |       size_t mark_compact_speed_in_bytes_per_ms);
| 2026 |
| 2027 | bool WorthActivatingIncrementalMarking(); |
| 2028 | |
| 2029 | void ClearObjectStats(bool clear_last_time_stats = false); |
| 2030 | |
| 2031 | void set_weak_object_to_code_table(Object* value) { |
| 2032 | DCHECK(!InNewSpace(value)); |
| 2033 | weak_object_to_code_table_ = value; |
| 2034 | } |
| 2035 | |
| 2036 | Object** weak_object_to_code_table_address() { |
| 2037 | return &weak_object_to_code_table_; |
| 2038 | } |
| 2039 | |
| 2040 | inline void UpdateAllocationsHash(HeapObject* object); |
| 2041 | inline void UpdateAllocationsHash(uint32_t value); |
| 2042 | inline void PrintAlloctionsHash(); |
| 2043 | |
| 2044 | static const int kInitialStringTableSize = 2048; |
| 2045 | static const int kInitialEvalCacheSize = 64; |
| 2046 | static const int kInitialNumberStringCacheSize = 256; |
| 2047 | |
| 2048 | // Object counts and used memory by InstanceType |
| 2049 | size_t object_counts_[OBJECT_STATS_COUNT]; |
| 2050 | size_t object_counts_last_time_[OBJECT_STATS_COUNT]; |
| 2051 | size_t object_sizes_[OBJECT_STATS_COUNT]; |
| 2052 | size_t object_sizes_last_time_[OBJECT_STATS_COUNT]; |
| 2053 | |
| 2054 | // Maximum GC pause. |
| 2055 | double max_gc_pause_; |
| 2056 | |
| 2057 | // Total time spent in GC. |
| 2058 | double total_gc_time_ms_; |
| 2059 | |
| 2060 | // Maximum size of objects alive after GC. |
| 2061 | intptr_t max_alive_after_gc_; |
| 2062 | |
| 2063 | // Minimal interval between two subsequent collections. |
| 2064 | double min_in_mutator_; |
| 2065 | |
| 2066 | // Cumulative GC time spent in marking |
| 2067 | double marking_time_; |
| 2068 | |
| 2069 | // Cumulative GC time spent in sweeping |
| 2070 | double sweeping_time_; |
| 2071 | |
| 2072 |   // Last time an idle notification happened.
| 2073 |   double last_idle_notification_time_;
| 2074 |
| 2075 |   MarkCompactCollector mark_compact_collector_;
| 2076 | |
| 2077 | StoreBuffer store_buffer_; |
| 2078 | |
| 2079 | Marking marking_; |
| 2080 | |
| 2081 | IncrementalMarking incremental_marking_; |
| 2082 | |
| 2083 | GCIdleTimeHandler gc_idle_time_handler_; |
| 2084 | unsigned int gc_count_at_last_idle_gc_; |
| 2085 | |
| 2086 |   // These two counters are monotonically increasing and never reset.
| 2087 | size_t full_codegen_bytes_generated_; |
| 2088 | size_t crankshaft_codegen_bytes_generated_; |
| 2089 | |
| 2090 | // If the --deopt_every_n_garbage_collections flag is set to a positive value, |
| 2091 | // this variable holds the number of garbage collections since the last |
| 2092 | // deoptimization triggered by garbage collection. |
| 2093 | int gcs_since_last_deopt_; |
| 2094 | |
| 2095 | #ifdef VERIFY_HEAP |
| 2096 | int no_weak_object_verification_scope_depth_; |
| 2097 | #endif |
| 2098 | |
| 2099 | static const int kAllocationSiteScratchpadSize = 256; |
| 2100 | int allocation_sites_scratchpad_length_; |
| 2101 | |
| 2102 | static const int kMaxMarkCompactsInIdleRound = 7; |
| 2103 | static const int kIdleScavengeThreshold = 5; |
| 2104 | |
| 2105 | // Shared state read by the scavenge collector and set by ScavengeObject. |
| 2106 | PromotionQueue promotion_queue_; |
| 2107 | |
| 2108 | // Flag is set when the heap has been configured. The heap can be repeatedly |
| 2109 | // configured through the API until it is set up. |
| 2110 | bool configured_; |
| 2111 | |
| 2112 | ExternalStringTable external_string_table_; |
| 2113 | |
| 2114 | VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_; |
| 2115 | |
| 2116 | MemoryChunk* chunks_queued_for_free_; |
| 2117 | |
| 2118 | base::Mutex relocation_mutex_; |
| 2119 | |
| 2120 | int gc_callbacks_depth_; |
| 2121 | |
| 2122 |   bool deserialization_complete_;
| 2123 |
| 2124 |   friend class AlwaysAllocateScope;
| 2125 |   friend class Deserializer;
| 2126 |   friend class Factory;
| 2127 | friend class GCCallbacksScope; |
| 2128 | friend class GCTracer; |
| 2129 | friend class HeapIterator; |
| 2130 | friend class Isolate; |
| 2131 | friend class MarkCompactCollector; |
| 2132 | friend class MarkCompactMarkingVisitor; |
| 2133 | friend class MapCompact; |
| 2134 | #ifdef VERIFY_HEAP |
| 2135 | friend class NoWeakObjectVerificationScope; |
| 2136 | #endif |
| 2137 | friend class Page; |
| 2138 | |
| 2139 | DISALLOW_COPY_AND_ASSIGN(Heap); |
| 2140 | }; |
| 2141 | |
| 2142 | |
| 2143 | class HeapStats { |
| 2144 | public: |
| 2145 | static const int kStartMarker = 0xDECADE00; |
| 2146 | static const int kEndMarker = 0xDECADE01; |
| 2147 | |
| 2148 | int* start_marker; // 0 |
| 2149 | int* new_space_size; // 1 |
| 2150 | int* new_space_capacity; // 2 |
| 2151 | intptr_t* old_pointer_space_size; // 3 |
| 2152 | intptr_t* old_pointer_space_capacity; // 4 |
| 2153 | intptr_t* old_data_space_size; // 5 |
| 2154 | intptr_t* old_data_space_capacity; // 6 |
| 2155 | intptr_t* code_space_size; // 7 |
| 2156 | intptr_t* code_space_capacity; // 8 |
| 2157 | intptr_t* map_space_size; // 9 |
| 2158 | intptr_t* map_space_capacity; // 10 |
| 2159 | intptr_t* cell_space_size; // 11 |
| 2160 | intptr_t* cell_space_capacity; // 12 |
| 2161 | intptr_t* lo_space_size; // 13 |
| 2162 | int* global_handle_count; // 14 |
| 2163 | int* weak_global_handle_count; // 15 |
| 2164 | int* pending_global_handle_count; // 16 |
| 2165 | int* near_death_global_handle_count; // 17 |
| 2166 | int* free_global_handle_count; // 18 |
| 2167 | intptr_t* memory_allocator_size; // 19 |
| 2168 | intptr_t* memory_allocator_capacity; // 20 |
| 2169 | int* objects_per_type; // 21 |
| 2170 | int* size_per_type; // 22 |
| 2171 | int* os_error; // 23 |
| 2172 | int* end_marker; // 24 |
| 2173 | intptr_t* property_cell_space_size; // 25 |
| 2174 | intptr_t* property_cell_space_capacity; // 26 |
| 2175 | }; |
| 2176 | |
| 2177 | |
| 2178 | class AlwaysAllocateScope { |
| 2179 | public: |
| 2180 | explicit inline AlwaysAllocateScope(Isolate* isolate); |
| 2181 | inline ~AlwaysAllocateScope(); |
| 2182 | |
| 2183 | private: |
| 2184 | // Implicitly disable artificial allocation failures. |
| 2185 | Heap* heap_; |
| 2186 | DisallowAllocationFailure daf_; |
| 2187 | }; |
| 2188 | |
| 2189 | |
| 2190 | #ifdef VERIFY_HEAP |
| 2191 | class NoWeakObjectVerificationScope { |
| 2192 | public: |
| 2193 | inline NoWeakObjectVerificationScope(); |
| 2194 | inline ~NoWeakObjectVerificationScope(); |
| 2195 | }; |
| 2196 | #endif |
| 2197 | |
| 2198 | |
| 2199 | class GCCallbacksScope { |
| 2200 | public: |
| 2201 | explicit inline GCCallbacksScope(Heap* heap); |
| 2202 | inline ~GCCallbacksScope(); |
| 2203 | |
| 2204 | inline bool CheckReenter(); |
| 2205 | |
| 2206 | private: |
| 2207 | Heap* heap_; |
| 2208 | }; |
| 2209 | |
| 2210 | |
| 2211 | // Visitor class to verify interior pointers in spaces that do not contain |
| 2212 | // or care about intergenerational references. All heap object pointers have to |
| 2213 | // point into the heap to a location that has a map pointer at its first word. |
| 2214 | // Caveat: Heap::Contains is an approximation because it can return true for |
| 2215 | // objects in a heap space but above the allocation pointer. |
| 2216 | class VerifyPointersVisitor : public ObjectVisitor { |
| 2217 | public: |
| 2218 | inline void VisitPointers(Object** start, Object** end); |
| 2219 | }; |
| 2220 | |
| 2221 | |
| 2222 | // Verify that all objects are Smis. |
| 2223 | class VerifySmisVisitor : public ObjectVisitor { |
| 2224 | public: |
| 2225 | inline void VisitPointers(Object** start, Object** end); |
| 2226 | }; |
| 2227 | |
| 2228 | |
| 2229 | // Space iterator for iterating over all spaces of the heap. Returns each space |
| 2230 | // in turn, and null when it is done. |
| 2231 | class AllSpaces BASE_EMBEDDED { |
| 2232 | public: |
| 2233 | explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {} |
| 2234 | Space* next(); |
| 2235 | |
| 2236 | private: |
| 2237 | Heap* heap_; |
| 2238 | int counter_; |
| 2239 | }; |
| 2240 | |
| 2241 | |
| 2242 | // Space iterator for iterating over all old spaces of the heap: Old pointer |
| 2243 | // space, old data space and code space. Returns each space in turn, and null |
| 2244 | // when it is done. |
| 2245 | class OldSpaces BASE_EMBEDDED { |
| 2246 | public: |
| 2247 | explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {} |
| 2248 | OldSpace* next(); |
| 2249 | |
| 2250 | private: |
| 2251 | Heap* heap_; |
| 2252 | int counter_; |
| 2253 | }; |
| 2254 | |
| 2255 | |
| 2256 | // Space iterator for iterating over all the paged spaces of the heap: Map |
| 2257 | // space, old pointer space, old data space, code space and cell space. Returns |
| 2258 | // each space in turn, and null when it is done. |
| 2259 | class PagedSpaces BASE_EMBEDDED { |
| 2260 | public: |
| 2261 | explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {} |
| 2262 | PagedSpace* next(); |
| 2263 | |
| 2264 | private: |
| 2265 | Heap* heap_; |
| 2266 | int counter_; |
| 2267 | }; |
| 2268 | |
| 2269 | |
| 2270 | // Space iterator for iterating over all spaces of the heap. |
| 2271 | // For each space an object iterator is provided. The deallocation of the |
| 2272 | // returned object iterators is handled by the space iterator. |
| 2273 | class SpaceIterator : public Malloced { |
| 2274 | public: |
| 2275 | explicit SpaceIterator(Heap* heap); |
| 2276 | SpaceIterator(Heap* heap, HeapObjectCallback size_func); |
| 2277 | virtual ~SpaceIterator(); |
| 2278 | |
| 2279 | bool has_next(); |
| 2280 | ObjectIterator* next(); |
| 2281 | |
| 2282 | private: |
| 2283 | ObjectIterator* CreateIterator(); |
| 2284 | |
| 2285 | Heap* heap_; |
| 2286 | int current_space_; // from enum AllocationSpace. |
| 2287 | ObjectIterator* iterator_; // object iterator for the current space. |
| 2288 | HeapObjectCallback size_func_; |
| 2289 | }; |
| 2290 | |
| 2291 | |
| 2292 | // A HeapIterator provides iteration over the whole heap. It |
| 2293 | // aggregates the specific iterators for the different spaces as |
| 2294 | // each of these can iterate over only a single space.
| 2295 | // |
| 2296 | // HeapIterator ensures there is no allocation during its lifetime |
| 2297 | // (using an embedded DisallowHeapAllocation instance). |
| 2298 | // |
| 2299 | // HeapIterator can skip free list nodes (that is, de-allocated heap |
| 2300 | // objects that still remain in the heap). As the implementation of free
| 2301 | // node filtering uses GC marks, it can't be used during MS/MC GC
| 2302 | // phases. Also, it is forbidden to interrupt iteration in this mode, |
| 2303 | // as this will leave heap objects marked (and thus, unusable). |
| 2304 | class HeapObjectsFilter; |
| 2305 | |
| 2306 | class HeapIterator BASE_EMBEDDED { |
| 2307 | public: |
| 2308 | enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable }; |
| 2309 | |
| 2310 | explicit HeapIterator(Heap* heap); |
| 2311 | HeapIterator(Heap* heap, HeapObjectsFiltering filtering); |
| 2312 | ~HeapIterator(); |
| 2313 | |
| 2314 | HeapObject* next(); |
| 2315 | void reset(); |
| 2316 | |
| 2317 | private: |
| 2318 | struct MakeHeapIterableHelper { |
| 2319 | explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); } |
| 2320 | }; |
| 2321 | |
| 2322 | // Perform the initialization. |
| 2323 | void Init(); |
| 2324 | // Perform all necessary shutdown (destruction) work. |
| 2325 | void Shutdown(); |
| 2326 | HeapObject* NextObject(); |
| 2327 | |
| 2328 | MakeHeapIterableHelper make_heap_iterable_helper_; |
| 2329 | DisallowHeapAllocation no_heap_allocation_; |
| 2330 | Heap* heap_; |
| 2331 | HeapObjectsFiltering filtering_; |
| 2332 | HeapObjectsFilter* filter_; |
| 2333 | // Space iterator for iterating all the spaces. |
| 2334 | SpaceIterator* space_iterator_; |
| 2335 | // Object iterator for the space currently being iterated. |
| 2336 | ObjectIterator* object_iterator_; |
| 2337 | }; |
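// Typical traversal sketch (hypothetical call site; assumes next() returns
// NULL once the iteration is exhausted, mirroring the space iterators above):
//
//   HeapIterator iterator(heap);
//   for (HeapObject* obj = iterator.next(); obj != NULL;
//        obj = iterator.next()) {
//     // ... inspect obj; no heap allocation is allowed meanwhile ...
//   }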
| 2338 | |
| 2339 | |
| 2340 | // Cache for mapping (map, property name) into field offset. |
| 2341 | // Cleared at startup and prior to mark sweep collection. |
| 2342 | class KeyedLookupCache { |
| 2343 | public: |
| 2344 | // Lookup field offset for (map, name). If absent, -1 is returned. |
| 2345 | int Lookup(Handle<Map> map, Handle<Name> name); |
| 2346 | |
| 2347 | // Update an element in the cache. |
| 2348 | void Update(Handle<Map> map, Handle<Name> name, int field_offset); |
| 2349 | |
| 2350 | // Clear the cache. |
| 2351 | void Clear(); |
| 2352 | |
| 2353 | static const int kLength = 256; |
| 2354 | static const int kCapacityMask = kLength - 1; |
| 2355 | static const int kMapHashShift = 5; |
| 2356 | static const int kHashMask = -4; // Zero the last two bits. |
| 2357 | static const int kEntriesPerBucket = 4; |
| 2358 | static const int kEntryLength = 2; |
| 2359 | static const int kMapIndex = 0; |
| 2360 | static const int kKeyIndex = 1; |
| 2361 | static const int kNotFound = -1; |
| 2362 | |
| 2363 | // kEntriesPerBucket should be a power of 2. |
| 2364 | STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0); |
| 2365 | STATIC_ASSERT(kEntriesPerBucket == -kHashMask); |
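  // Bucket math sketch, derived from the constants above: with kLength == 256
  // and kEntriesPerBucket == 4 there are 64 buckets; masking an index with
  // kHashMask (-4) zeroes the entry-within-bucket bits, so a lookup starts at
  // the first entry of a bucket and probes at most kEntriesPerBucket slots.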
| 2366 | |
| 2367 | private: |
| 2368 | KeyedLookupCache() { |
| 2369 | for (int i = 0; i < kLength; ++i) { |
| 2370 | keys_[i].map = NULL; |
| 2371 | keys_[i].name = NULL; |
| 2372 | field_offsets_[i] = kNotFound; |
| 2373 | } |
| 2374 | } |
| 2375 | |
| 2376 | static inline int Hash(Handle<Map> map, Handle<Name> name); |
| 2377 | |
| 2378 | // Get the address of the keys and field_offsets arrays. Used in |
| 2379 | // generated code to perform cache lookups. |
| 2380 | Address keys_address() { return reinterpret_cast<Address>(&keys_); } |
| 2381 | |
| 2382 | Address field_offsets_address() { |
| 2383 | return reinterpret_cast<Address>(&field_offsets_); |
| 2384 | } |
| 2385 | |
| 2386 | struct Key { |
| 2387 | Map* map; |
| 2388 | Name* name; |
| 2389 | }; |
| 2390 | |
| 2391 | Key keys_[kLength]; |
| 2392 | int field_offsets_[kLength]; |
| 2393 | |
| 2394 | friend class ExternalReference; |
| 2395 | friend class Isolate; |
| 2396 | DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache); |
| 2397 | }; |
| 2398 | |
| 2399 | |
| 2400 | // Cache for mapping (map, property name) into descriptor index. |
| 2401 | // The cache contains both positive and negative results. |
| 2402 | // Descriptor index equals kNotFound means the property is absent. |
| 2403 | // Cleared at startup and prior to any gc. |
| 2404 | class DescriptorLookupCache { |
| 2405 | public: |
| 2406 | // Lookup descriptor index for (map, name). |
| 2407 | // If absent, kAbsent is returned. |
| 2408 | int Lookup(Map* source, Name* name) { |
| 2409 | if (!name->IsUniqueName()) return kAbsent; |
| 2410 | int index = Hash(source, name); |
| 2411 | Key& key = keys_[index]; |
| 2412 | if ((key.source == source) && (key.name == name)) return results_[index]; |
| 2413 | return kAbsent; |
| 2414 | } |
| 2415 | |
| 2416 | // Update an element in the cache. |
| 2417 | void Update(Map* source, Name* name, int result) { |
| 2418 | DCHECK(result != kAbsent); |
| 2419 | if (name->IsUniqueName()) { |
| 2420 | int index = Hash(source, name); |
| 2421 | Key& key = keys_[index]; |
| 2422 | key.source = source; |
| 2423 | key.name = name; |
| 2424 | results_[index] = result; |
| 2425 | } |
| 2426 | } |
| 2427 | |
| 2428 | // Clear the cache. |
| 2429 | void Clear(); |
| 2430 | |
| 2431 | static const int kAbsent = -2; |
| 2432 | |
| 2433 | private: |
| 2434 | DescriptorLookupCache() { |
| 2435 | for (int i = 0; i < kLength; ++i) { |
| 2436 | keys_[i].source = NULL; |
| 2437 | keys_[i].name = NULL; |
| 2438 | results_[i] = kAbsent; |
| 2439 | } |
| 2440 | } |
| 2441 | |
| 2442 | static int Hash(Object* source, Name* name) { |
| 2443 | // Uses only lower 32 bits if pointers are larger. |
| 2444 | uint32_t source_hash = |
| 2445 | static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >> |
| 2446 | kPointerSizeLog2; |
| 2447 | uint32_t name_hash = |
| 2448 | static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> |
| 2449 | kPointerSizeLog2; |
| 2450 | return (source_hash ^ name_hash) % kLength; |
| 2451 | } |
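  // E.g. on a 64-bit target (kPointerSizeLog2 == 3) the shift above drops the
  // three alignment zero bits of each pointer before the XOR, so neighbouring
  // maps and names still spread across the kLength (64) cache slots.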
| 2452 | |
| 2453 | static const int kLength = 64; |
| 2454 | struct Key { |
| 2455 | Map* source; |
| 2456 | Name* name; |
| 2457 | }; |
| 2458 | |
| 2459 | Key keys_[kLength]; |
| 2460 | int results_[kLength]; |
| 2461 | |
| 2462 | friend class Isolate; |
| 2463 | DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache); |
| 2464 | }; |
| 2465 | |
| 2466 | |
| 2467 | class RegExpResultsCache { |
| 2468 | public: |
| 2469 | enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS }; |
| 2470 | |
| 2471 | // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi. |
| 2472 | // On success, the returned result is guaranteed to be a COW-array. |
| 2473 | static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern, |
| 2474 | ResultsCacheType type); |
| 2475 | // Attempt to add value_array to the cache specified by type. On success, |
| 2476 | // value_array is turned into a COW-array. |
| 2477 | static void Enter(Isolate* isolate, Handle<String> key_string, |
| 2478 | Handle<Object> key_pattern, Handle<FixedArray> value_array, |
| 2479 | ResultsCacheType type); |
| 2480 | static void Clear(FixedArray* cache); |
| 2481 | static const int kRegExpResultsCacheSize = 0x100; |
| 2482 | |
| 2483 | private: |
| 2484 | static const int kArrayEntriesPerCacheEntry = 4; |
| 2485 | static const int kStringOffset = 0; |
| 2486 | static const int kPatternOffset = 1; |
| 2487 | static const int kArrayOffset = 2; |
| 2488 | }; |


// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() {}

  // Return whether this object should be retained. If NULL is returned, the
  // object has no remaining references. Otherwise the address of the retained
  // object should be returned, as in some GC situations the object may have
  // been moved.
  virtual Object* RetainAs(Object* object) = 0;
};
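
// A sketch of a concrete retainer (illustrative; IsAlive() stands in for
// whatever liveness test the current GC phase uses). NULL drops the object;
// a forwarding address is returned when the object has been moved:
//
//   class LiveObjectRetainer : public WeakObjectRetainer {
//    public:
//     virtual Object* RetainAs(Object* object) {
//       HeapObject* heap_object = HeapObject::cast(object);
//       if (!IsAlive(heap_object)) return NULL;  // no references remain
//       MapWord map_word = heap_object->map_word();
//       if (map_word.IsForwardingAddress()) {
//         return map_word.ToForwardingAddress();  // object was relocated
//       }
//       return object;  // retained in place
//     }
//   };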


// Intrusive object marking uses the least significant bit of a heap
// object's map word to mark objects. Normally all map words have the
// least significant bit set because they contain a tagged map pointer.
// If the bit is not set, the object is marked. All objects must be
// unmarked before resuming JavaScript execution.
class IntrusiveMarking {
 public:
  static bool IsMarked(HeapObject* object) {
    return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
  }

  static void ClearMark(HeapObject* object) {
    uintptr_t map_word = object->map_word().ToRawValue();
    object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
    DCHECK(!IsMarked(object));
  }

  static void SetMark(HeapObject* object) {
    uintptr_t map_word = object->map_word().ToRawValue();
    object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
    DCHECK(IsMarked(object));
  }

  static Map* MapOfMarkedObject(HeapObject* object) {
    uintptr_t map_word = object->map_word().ToRawValue();
    return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
  }

  static int SizeOfMarkedObject(HeapObject* object) {
    return object->SizeFromMap(MapOfMarkedObject(object));
  }

 private:
  static const uintptr_t kNotMarkedBit = 0x1;
  STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);  // NOLINT
};
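
// Usage sketch (illustrative): clearing the tag bit marks an object, yet
// its map and size stay recoverable through MapOfMarkedObject(). Marks
// must be cleared again before JavaScript runs:
//
//   IntrusiveMarking::SetMark(object);
//   DCHECK(IntrusiveMarking::IsMarked(object));
//   int size = IntrusiveMarking::SizeOfMarkedObject(object);
//   IntrusiveMarking::ClearMark(object);  // restore the tagged map word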


#ifdef DEBUG
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
// object to the search target object.
class PathTracer : public ObjectVisitor {
 public:
  enum WhatToFind {
    FIND_ALL,   // Will find all matches.
    FIND_FIRST  // Will stop the search after the first match.
  };

  // Tags 0, 1, and 3 are used. Tag 2 is used for marking visited HeapObjects.
  static const int kMarkTag = 2;

  // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
  // after the first match. If FIND_ALL is specified, then tracing will be
  // done for all matches.
  PathTracer(Object* search_target, WhatToFind what_to_find,
             VisitMode visit_mode)
      : search_target_(search_target),
        found_target_(false),
        found_target_in_trace_(false),
        what_to_find_(what_to_find),
        visit_mode_(visit_mode),
        object_stack_(20),
        no_allocation() {}

  virtual void VisitPointers(Object** start, Object** end);

  void Reset();
  void TracePathFrom(Object** root);

  bool found() const { return found_target_; }

  static Object* const kAnyGlobalObject;

 protected:
  class MarkVisitor;
  class UnmarkVisitor;

  void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
  void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
  virtual void ProcessResults();

  Object* search_target_;
  bool found_target_;
  bool found_target_in_trace_;
  WhatToFind what_to_find_;
  VisitMode visit_mode_;
  List<Object*> object_stack_;

  DisallowHeapAllocation no_allocation;  // i.e. no GC allowed.

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
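
// Example (debug builds only; the root slot used here is assumed): trace
// how the target is reached, stopping at the first retaining path found.
//
//   PathTracer tracer(target, PathTracer::FIND_FIRST, VISIT_ALL);
//   tracer.TracePathFrom(&root_slot);  // root_slot is some Object* slot
//   if (tracer.found()) {
//     // A path from root_slot to target exists and has been traced.
//   }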
#endif  // DEBUG
}
}  // namespace v8::internal

#endif  // V8_HEAP_HEAP_H_