// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_H_
#define V8_HEAP_HEAP_H_

#include <cmath>
#include <map>

// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "include/v8.h"
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
// TODO(mstarzinger): Two more includes to kill!
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
#include "src/list.h"

namespace v8 {
namespace internal {

using v8::MemoryPressureLevel;

// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V)                                                  \
  V(Map, byte_array_map, ByteArrayMap)                                       \
  V(Map, free_space_map, FreeSpaceMap)                                       \
  V(Map, one_pointer_filler_map, OnePointerFillerMap)                        \
  V(Map, two_pointer_filler_map, TwoPointerFillerMap)                        \
  /* Cluster the most popular ones in a few cache lines here at the top. */ \
  V(Oddball, undefined_value, UndefinedValue)                                \
  V(Oddball, the_hole_value, TheHoleValue)                                   \
  V(Oddball, null_value, NullValue)                                          \
  V(Oddball, true_value, TrueValue)                                          \
  V(Oddball, false_value, FalseValue)                                        \
  V(String, empty_string, empty_string)                                      \
  V(Oddball, uninitialized_value, UninitializedValue)                        \
  V(Map, cell_map, CellMap)                                                  \
  V(Map, global_property_cell_map, GlobalPropertyCellMap)                    \
  V(Map, shared_function_info_map, SharedFunctionInfoMap)                    \
  V(Map, meta_map, MetaMap)                                                  \
  V(Map, heap_number_map, HeapNumberMap)                                     \
  V(Map, mutable_heap_number_map, MutableHeapNumberMap)                      \
  V(Map, float32x4_map, Float32x4Map)                                        \
  V(Map, int32x4_map, Int32x4Map)                                            \
  V(Map, uint32x4_map, Uint32x4Map)                                          \
  V(Map, bool32x4_map, Bool32x4Map)                                          \
  V(Map, int16x8_map, Int16x8Map)                                            \
  V(Map, uint16x8_map, Uint16x8Map)                                          \
  V(Map, bool16x8_map, Bool16x8Map)                                          \
  V(Map, int8x16_map, Int8x16Map)                                            \
  V(Map, uint8x16_map, Uint8x16Map)                                          \
  V(Map, bool8x16_map, Bool8x16Map)                                          \
  V(Map, native_context_map, NativeContextMap)                               \
  V(Map, fixed_array_map, FixedArrayMap)                                     \
  V(Map, code_map, CodeMap)                                                  \
  V(Map, scope_info_map, ScopeInfoMap)                                       \
  V(Map, fixed_cow_array_map, FixedCOWArrayMap)                              \
  V(Map, fixed_double_array_map, FixedDoubleArrayMap)                        \
  V(Map, weak_cell_map, WeakCellMap)                                         \
  V(Map, transition_array_map, TransitionArrayMap)                           \
  V(Map, one_byte_string_map, OneByteStringMap)                              \
  V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap)     \
  V(Map, function_context_map, FunctionContextMap)                           \
  V(FixedArray, empty_fixed_array, EmptyFixedArray)                          \
  V(ByteArray, empty_byte_array, EmptyByteArray)                             \
  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)           \
  /* The roots above this line should be boring from a GC point of view. */ \
  /* This means they are never in new space and never on a page that is */  \
  /* being compacted. */                                                     \
  V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel)    \
  V(Oddball, arguments_marker, ArgumentsMarker)                              \
  V(Oddball, exception, Exception)                                           \
  V(Oddball, termination_exception, TerminationException)                    \
  V(Oddball, optimized_out, OptimizedOut)                                    \
  V(Oddball, stale_register, StaleRegister)                                  \
  V(FixedArray, number_string_cache, NumberStringCache)                      \
  V(Object, instanceof_cache_function, InstanceofCacheFunction)              \
  V(Object, instanceof_cache_map, InstanceofCacheMap)                        \
  V(Object, instanceof_cache_answer, InstanceofCacheAnswer)                  \
  V(FixedArray, single_character_string_cache, SingleCharacterStringCache)   \
  V(FixedArray, string_split_cache, StringSplitCache)                        \
  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache)                  \
  V(Smi, hash_seed, HashSeed)                                                \
  V(Map, hash_table_map, HashTableMap)                                       \
  V(Map, ordered_hash_table_map, OrderedHashTableMap)                        \
  V(Map, symbol_map, SymbolMap)                                              \
  V(Map, string_map, StringMap)                                              \
  V(Map, cons_one_byte_string_map, ConsOneByteStringMap)                     \
  V(Map, cons_string_map, ConsStringMap)                                     \
  V(Map, sliced_string_map, SlicedStringMap)                                 \
  V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap)                 \
  V(Map, external_string_map, ExternalStringMap)                             \
  V(Map, external_string_with_one_byte_data_map,                             \
    ExternalStringWithOneByteDataMap)                                        \
  V(Map, external_one_byte_string_map, ExternalOneByteStringMap)             \
  V(Map, native_source_string_map, NativeSourceStringMap)                    \
  V(Map, short_external_string_map, ShortExternalStringMap)                  \
  V(Map, short_external_string_with_one_byte_data_map,                       \
    ShortExternalStringWithOneByteDataMap)                                   \
  V(Map, internalized_string_map, InternalizedStringMap)                     \
  V(Map, external_internalized_string_map, ExternalInternalizedStringMap)    \
  V(Map, external_internalized_string_with_one_byte_data_map,                \
    ExternalInternalizedStringWithOneByteDataMap)                            \
  V(Map, external_one_byte_internalized_string_map,                          \
    ExternalOneByteInternalizedStringMap)                                    \
  V(Map, short_external_internalized_string_map,                             \
    ShortExternalInternalizedStringMap)                                      \
  V(Map, short_external_internalized_string_with_one_byte_data_map,          \
    ShortExternalInternalizedStringWithOneByteDataMap)                       \
  V(Map, short_external_one_byte_internalized_string_map,                    \
    ShortExternalOneByteInternalizedStringMap)                               \
  V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap)  \
  V(Map, fixed_uint8_array_map, FixedUint8ArrayMap)                          \
  V(Map, fixed_int8_array_map, FixedInt8ArrayMap)                            \
  V(Map, fixed_uint16_array_map, FixedUint16ArrayMap)                        \
  V(Map, fixed_int16_array_map, FixedInt16ArrayMap)                          \
  V(Map, fixed_uint32_array_map, FixedUint32ArrayMap)                        \
  V(Map, fixed_int32_array_map, FixedInt32ArrayMap)                          \
  V(Map, fixed_float32_array_map, FixedFloat32ArrayMap)                      \
  V(Map, fixed_float64_array_map, FixedFloat64ArrayMap)                      \
  V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap)           \
  V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array)      \
  V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array)        \
  V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array)    \
  V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array)      \
  V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array)    \
  V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array)      \
  V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array)  \
  V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array)  \
  V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array,                    \
    EmptyFixedUint8ClampedArray)                                             \
  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)          \
  V(Map, catch_context_map, CatchContextMap)                                 \
  V(Map, with_context_map, WithContextMap)                                   \
  V(Map, debug_evaluate_context_map, DebugEvaluateContextMap)                \
  V(Map, block_context_map, BlockContextMap)                                 \
  V(Map, module_context_map, ModuleContextMap)                               \
  V(Map, script_context_map, ScriptContextMap)                               \
  V(Map, script_context_table_map, ScriptContextTableMap)                    \
  V(Map, undefined_map, UndefinedMap)                                        \
  V(Map, the_hole_map, TheHoleMap)                                           \
  V(Map, null_map, NullMap)                                                  \
  V(Map, boolean_map, BooleanMap)                                            \
  V(Map, uninitialized_map, UninitializedMap)                                \
  V(Map, arguments_marker_map, ArgumentsMarkerMap)                           \
  V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap) \
  V(Map, exception_map, ExceptionMap)                                        \
  V(Map, termination_exception_map, TerminationExceptionMap)                 \
  V(Map, optimized_out_map, OptimizedOutMap)                                 \
  V(Map, stale_register_map, StaleRegisterMap)                               \
  V(Map, message_object_map, JSMessageObjectMap)                             \
  V(Map, foreign_map, ForeignMap)                                            \
  V(Map, neander_map, NeanderMap)                                            \
  V(Map, external_map, ExternalMap)                                          \
  V(HeapNumber, nan_value, NanValue)                                         \
  V(HeapNumber, infinity_value, InfinityValue)                               \
  V(HeapNumber, minus_zero_value, MinusZeroValue)                            \
  V(HeapNumber, minus_infinity_value, MinusInfinityValue)                    \
  V(JSObject, message_listeners, MessageListeners)                           \
  V(UnseededNumberDictionary, code_stubs, CodeStubs)                         \
  V(Code, js_entry_code, JsEntryCode)                                        \
  V(Code, js_construct_entry_code, JsConstructEntryCode)                     \
  V(FixedArray, natives_source_cache, NativesSourceCache)                    \
  V(FixedArray, experimental_natives_source_cache,                           \
    ExperimentalNativesSourceCache)                                          \
  V(FixedArray, extra_natives_source_cache, ExtraNativesSourceCache)         \
  V(FixedArray, experimental_extra_natives_source_cache,                     \
    ExperimentalExtraNativesSourceCache)                                     \
  V(Script, empty_script, EmptyScript)                                       \
  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames)        \
  V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary)  \
  V(Cell, undefined_cell, UndefinedCell)                                     \
  V(Object, symbol_registry, SymbolRegistry)                                 \
  V(Object, script_list, ScriptList)                                         \
  V(SeededNumberDictionary, empty_slow_element_dictionary,                   \
    EmptySlowElementDictionary)                                              \
  V(FixedArray, materialized_objects, MaterializedObjects)                   \
  V(FixedArray, microtask_queue, MicrotaskQueue)                             \
  V(TypeFeedbackVector, dummy_vector, DummyVector)                           \
  V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap)         \
  V(FixedArray, detached_contexts, DetachedContexts)                         \
  V(ArrayList, retained_maps, RetainedMaps)                                  \
  V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable)         \
  V(PropertyCell, array_protector, ArrayProtector)                           \
  V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector)       \
  V(PropertyCell, empty_property_cell, EmptyPropertyCell)                    \
  V(Object, weak_stack_trace_list, WeakStackTraceList)                       \
  V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos)     \
  V(Map, bytecode_array_map, BytecodeArrayMap)                               \
  V(WeakCell, empty_weak_cell, EmptyWeakCell)                                \
  V(PropertyCell, has_instance_protector, HasInstanceProtector)              \
  V(Cell, species_protector, SpeciesProtector)

// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V)                                                     \
  V(Smi, stack_limit, StackLimit)                                            \
  V(Smi, real_stack_limit, RealStackLimit)                                   \
  V(Smi, last_script_id, LastScriptId)                                       \
  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)   \
  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)         \
  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)               \
  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)               \
  V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)

#define ROOT_LIST(V)  \
  STRONG_ROOT_LIST(V) \
  SMI_ROOT_LIST(V)    \
  V(StringTable, string_table, StringTable)

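// For illustration, these lists are consumed as X-macros: a client defines V
// and instantiates a list, e.g.
//   #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
//   STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
// turns V(Map, byte_array_map, ByteArrayMap) into kByteArrayMapRootIndex,
// which is exactly how Heap::RootListIndex below is populated.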

// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is not exhaustive.
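// For illustration, this is what lets barrier-sensitive code ask
//   if (Heap::RootIsImmortalImmovable(root_index)) { /* skip write barrier */ }
// before storing one of these roots into another object: the referenced value
// never moves, so no remembered-set entry is needed.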
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
  V(ByteArrayMap)                       \
  V(BytecodeArrayMap)                   \
  V(FreeSpaceMap)                       \
  V(OnePointerFillerMap)                \
  V(TwoPointerFillerMap)                \
  V(UndefinedValue)                     \
  V(TheHoleValue)                       \
  V(NullValue)                          \
  V(TrueValue)                          \
  V(FalseValue)                         \
  V(UninitializedValue)                 \
  V(CellMap)                            \
  V(GlobalPropertyCellMap)              \
  V(SharedFunctionInfoMap)              \
  V(MetaMap)                            \
  V(HeapNumberMap)                      \
  V(MutableHeapNumberMap)               \
  V(Float32x4Map)                       \
  V(Int32x4Map)                         \
  V(Uint32x4Map)                        \
  V(Bool32x4Map)                        \
  V(Int16x8Map)                         \
  V(Uint16x8Map)                        \
  V(Bool16x8Map)                        \
  V(Int8x16Map)                         \
  V(Uint8x16Map)                        \
  V(Bool8x16Map)                        \
  V(NativeContextMap)                   \
  V(FixedArrayMap)                      \
  V(CodeMap)                            \
  V(ScopeInfoMap)                       \
  V(FixedCOWArrayMap)                   \
  V(FixedDoubleArrayMap)                \
  V(WeakCellMap)                        \
  V(TransitionArrayMap)                 \
  V(NoInterceptorResultSentinel)        \
  V(HashTableMap)                       \
  V(OrderedHashTableMap)                \
  V(EmptyFixedArray)                    \
  V(EmptyByteArray)                     \
  V(EmptyDescriptorArray)               \
  V(ArgumentsMarker)                    \
  V(SymbolMap)                          \
  V(SloppyArgumentsElementsMap)         \
  V(FunctionContextMap)                 \
  V(CatchContextMap)                    \
  V(WithContextMap)                     \
  V(BlockContextMap)                    \
  V(ModuleContextMap)                   \
  V(ScriptContextMap)                   \
  V(UndefinedMap)                       \
  V(TheHoleMap)                         \
  V(NullMap)                            \
  V(BooleanMap)                         \
  V(UninitializedMap)                   \
  V(ArgumentsMarkerMap)                 \
  V(JSMessageObjectMap)                 \
  V(ForeignMap)                         \
  V(NeanderMap)                         \
  V(NanValue)                           \
  V(InfinityValue)                      \
  V(MinusZeroValue)                     \
  V(MinusInfinityValue)                 \
  V(EmptyWeakCell)                      \
  V(empty_string)                       \
  PRIVATE_SYMBOL_LIST(V)

// Forward declarations.
class AllocationObserver;
class ArrayBufferTracker;
class GCIdleTimeAction;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class Isolate;
class MemoryReducer;
class ObjectStats;
class Scavenger;
class ScavengeJob;
class WeakObjectRetainer;

typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
// The last page in to-space is used for the promotion queue. On conflict
// during scavenge, the promotion queue is allocated externally and all
// entries are copied to the external queue.
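// For illustration (simplified), the scavenger drives this queue roughly as
//   promotion_queue()->insert(target, size, was_marked_black);
//   ...
//   promotion_queue()->remove(&target, &size, &was_marked_black);
// pushing entries while objects are promoted and popping them again when the
// promoted objects are scanned for pointers back into new space.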
class PromotionQueue {
 public:
  explicit PromotionQueue(Heap* heap)
      : front_(NULL),
        rear_(NULL),
        limit_(NULL),
        emergency_stack_(0),
        heap_(heap) {}

  void Initialize();

  void Destroy() {
    DCHECK(is_empty());
    delete emergency_stack_;
    emergency_stack_ = NULL;
  }

  Page* GetHeadPage() {
    return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
  }

  void SetNewLimit(Address limit) {
    // If we are already using an emergency stack, we can ignore it.
    if (emergency_stack_) return;

    // If the limit is not on the same page, we can ignore it.
    if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;

    limit_ = reinterpret_cast<struct Entry*>(limit);

    if (limit_ <= rear_) {
      return;
    }

    RelocateQueueHead();
  }

  bool IsBelowPromotionQueue(Address to_space_top) {
    // If an emergency stack is used, the to-space address cannot interfere
    // with the promotion queue.
    if (emergency_stack_) return true;

    // If the given to-space top pointer and the head of the promotion queue
    // are not on the same page, then the to-space objects are below the
    // promotion queue.
    if (GetHeadPage() != Page::FromAddress(to_space_top)) {
      return true;
    }
    // If the to-space top pointer is less than or equal to the promotion
    // queue head, then the to-space objects are below the promotion queue.
    return reinterpret_cast<struct Entry*>(to_space_top) <= rear_;
  }

  bool is_empty() {
    return (front_ == rear_) &&
           (emergency_stack_ == NULL || emergency_stack_->length() == 0);
  }

  inline void insert(HeapObject* target, int32_t size, bool was_marked_black);

  void remove(HeapObject** target, int32_t* size, bool* was_marked_black) {
    DCHECK(!is_empty());
    if (front_ == rear_) {
      Entry e = emergency_stack_->RemoveLast();
      *target = e.obj_;
      *size = e.size_;
      *was_marked_black = e.was_marked_black_;
      return;
    }

    struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
    *target = entry->obj_;
    *size = entry->size_;
    *was_marked_black = entry->was_marked_black_;

    // Assert no underflow.
    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
                                reinterpret_cast<Address>(front_));
  }

 private:
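  // Each entry pairs a promoted object with its size and its incremental
  // marking colour; size_ and was_marked_black_ share a single 32-bit word
  // via bit-fields.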
  struct Entry {
    Entry(HeapObject* obj, int32_t size, bool was_marked_black)
        : obj_(obj), size_(size), was_marked_black_(was_marked_black) {}

    HeapObject* obj_;
    int32_t size_ : 31;
    bool was_marked_black_ : 1;
  };

  void RelocateQueueHead();

  // The front of the queue is higher in the memory page chain than the rear.
  struct Entry* front_;
  struct Entry* rear_;
  struct Entry* limit_;

  List<Entry>* emergency_stack_;

  Heap* heap_;

  DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};


enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};

enum class ClearRecordedSlots { kYes, kNo };

class Heap {
 public:
  // Declare all the root indices. This defines the root list order.
  enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
    INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
#undef STRING_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
    PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name, description) k##name##RootIndex,
    PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
    WELL_KNOWN_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
    STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
    kStringTableRootIndex,

#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
    kRootListLength,
    kStrongRootListLength = kStringTableRootIndex,
    kSmiRootsStart = kStringTableRootIndex + 1
  };

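  // For illustration: the enumerators follow list order, so the first
  // STRONG_ROOT_LIST entry yields kByteArrayMapRootIndex == 0, the smi roots
  // start right after the string table (kSmiRootsStart), and kRootListLength
  // counts the whole root list.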
  enum FindMementoMode { kForRuntime, kForGC };

  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };

  // Indicates whether live bytes adjustment is triggered
  // - from within GC code before sweeping has started (SEQUENTIAL_TO_SWEEPER),
  // - or from GC or mutator code while the sweeper may be running concurrently
  //   (CONCURRENT_TO_SWEEPER).
  enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };

  enum UpdateAllocationSiteMode { kGlobal, kCached };

  // Taking this lock prevents the GC from entering a phase that relocates
  // object references.
  class RelocationLock {
   public:
    explicit RelocationLock(Heap* heap) : heap_(heap) {
      heap_->relocation_mutex_.Lock();
    }

    ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }

   private:
    Heap* heap_;
  };

  // Support for partial snapshots. After calling this we have a linear
  // space to write objects in each space.
  struct Chunk {
    uint32_t size;
    Address start;
    Address end;
  };
  typedef List<Chunk> Reservation;

  static const intptr_t kMinimumOldGenerationAllocationLimit =
      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);

  static const int kInitalOldGenerationLimitFactor = 2;

#if V8_OS_ANDROID
  // Don't apply pointer multiplier on Android since it has no swap space and
  // should instead adapt its heap size based on available physical memory.
  static const int kPointerMultiplier = 1;
#else
  static const int kPointerMultiplier = i::kPointerSize / 4;
#endif

  // The new space size has to be a power of 2. Sizes are in MB.
  static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;

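  // For illustration: on a 64-bit, non-Android target i::kPointerSize is 8,
  // so kPointerMultiplier is 2 and kMaxSemiSpaceSizeHighMemoryDevice works
  // out to 16 MB, i.e. twice the value used on a 32-bit build.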
  // The old space size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeMediumMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;

  // The executable size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
  static const int kMaxExecutableSizeMediumMemoryDevice =
      192 * kPointerMultiplier;
  static const int kMaxExecutableSizeHighMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxExecutableSizeHugeMemoryDevice =
      256 * kPointerMultiplier;

  static const int kTraceRingBufferSize = 512;
  static const int kStacktraceBufferSize = 512;

  static const double kMinHeapGrowingFactor;
  static const double kMaxHeapGrowingFactor;
  static const double kMaxHeapGrowingFactorMemoryConstrained;
  static const double kMaxHeapGrowingFactorIdle;
  static const double kTargetMutatorUtilization;

  static const int kNoGCFlags = 0;
  static const int kReduceMemoryFootprintMask = 1;
  static const int kAbortIncrementalMarkingMask = 2;
  static const int kFinalizeIncrementalMarkingMask = 4;

  // Making the heap iterable requires us to abort incremental marking.
  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

  // The roots that have an index less than this are always in old space.
  static const int kOldSpaceRoots = 0x20;

  // The minimum size of a HeapObject on the heap.
  static const int kMinObjectSizeInWords = 2;

  STATIC_ASSERT(kUndefinedValueRootIndex ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(kTheHoleValueRootIndex == Internals::kTheHoleValueRootIndex);
  STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
  STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
  STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
  STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);

  // Calculates the maximum amount of filler that could be required by the
  // given alignment.
  static int GetMaximumFillToAlign(AllocationAlignment alignment);
  // Calculates the actual amount of filler required for a given address at the
  // given alignment.
  static int GetFillToAlign(Address address, AllocationAlignment alignment);

  template <typename T>
  static inline bool IsOneByte(T t, int chars);

  static void FatalProcessOutOfMemory(const char* location,
                                      bool is_heap_oom = false);

  static bool RootIsImmortalImmovable(int root_index);

  // Checks whether the space is valid.
  static bool IsValidAllocationSpace(AllocationSpace space);

  // Generated code can embed direct references to non-writable roots if
  // they are in new space.
  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);

  // Zapping is needed for verify heap, and always done in debug builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  static double HeapGrowingFactor(double gc_speed, double mutator_speed);

  // Copy a block of memory from src to dst. The size of the block must be a
  // multiple of the pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  // Determines a static visitor id based on the given {map} that can then be
  // stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
  static int GetStaticVisitorIdForMap(Map* map);

  // Notifies the heap that it is ok to start marking or other activities that
  // should not happen during deserialization.
608 void NotifyDeserializationComplete();
609
610 intptr_t old_generation_allocation_limit() const {
611 return old_generation_allocation_limit_;
612 }
613
614 bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
615
616 Address* NewSpaceAllocationTopAddress() {
617 return new_space_.allocation_top_address();
618 }
619 Address* NewSpaceAllocationLimitAddress() {
620 return new_space_.allocation_limit_address();
621 }
622
623 Address* OldSpaceAllocationTopAddress() {
624 return old_space_->allocation_top_address();
625 }
626 Address* OldSpaceAllocationLimitAddress() {
627 return old_space_->allocation_limit_address();
628 }
629
Ben Murdochc5610432016-08-08 18:44:38 +0100630 bool CanExpandOldGeneration(int size) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000631 if (force_oom_) return false;
Ben Murdochc5610432016-08-08 18:44:38 +0100632 return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000633 }
634
635 // Clear the Instanceof cache (used when a prototype changes).
636 inline void ClearInstanceofCache();
637
638 // FreeSpace objects have a null map after deserialization. Update the map.
639 void RepairFreeListsAfterDeserialization();
640
641 // Move len elements within a given array from src_index index to dst_index
642 // index.
643 void MoveElements(FixedArray* array, int dst_index, int src_index, int len);
644
645 // Initialize a filler object to keep the ability to iterate over the heap
Ben Murdochda12d292016-06-02 14:46:10 +0100646 // when introducing gaps within pages. If slots could have been recorded in
647 // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
648 // pass ClearRecordedSlots::kNo.
649 void CreateFillerObjectAt(Address addr, int size, ClearRecordedSlots mode);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000650
651 bool CanMoveObjectStart(HeapObject* object);
652
653 // Maintain consistency of live bytes during incremental marking.
654 void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode);
655
656 // Trim the given array from the left. Note that this relocates the object
657 // start and hence is only valid if there is only a single reference to it.
658 FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
659
660 // Trim the given array from the right.
661 template<Heap::InvocationMode mode>
662 void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
663
664 // Converts the given boolean condition to JavaScript boolean value.
Ben Murdochda12d292016-06-02 14:46:10 +0100665 inline Oddball* ToBoolean(bool condition);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000666
667 // Check whether the heap is currently iterable.
668 bool IsHeapIterable();
669
670 // Notify the heap that a context has been disposed.
671 int NotifyContextDisposed(bool dependant_context);
672
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000673 void set_native_contexts_list(Object* object) {
674 native_contexts_list_ = object;
675 }
676 Object* native_contexts_list() const { return native_contexts_list_; }
677
678 void set_allocation_sites_list(Object* object) {
679 allocation_sites_list_ = object;
680 }
681 Object* allocation_sites_list() { return allocation_sites_list_; }
682
683 // Used in CreateAllocationSiteStub and the (de)serializer.
684 Object** allocation_sites_list_address() { return &allocation_sites_list_; }
685
686 void set_encountered_weak_collections(Object* weak_collection) {
687 encountered_weak_collections_ = weak_collection;
688 }
689 Object* encountered_weak_collections() const {
690 return encountered_weak_collections_;
691 }
692
693 void set_encountered_weak_cells(Object* weak_cell) {
694 encountered_weak_cells_ = weak_cell;
695 }
696 Object* encountered_weak_cells() const { return encountered_weak_cells_; }
697
698 void set_encountered_transition_arrays(Object* transition_array) {
699 encountered_transition_arrays_ = transition_array;
700 }
701 Object* encountered_transition_arrays() const {
702 return encountered_transition_arrays_;
703 }
704
705 // Number of mark-sweeps.
706 int ms_count() const { return ms_count_; }
707
  // Checks whether the given object is allowed to be migrated from its
  // current space into the given destination space. Used for debugging.
  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);

  void CheckHandleCount();

  // Number of "runtime allocations" done so far.
  uint32_t allocations_count() { return allocations_count_; }

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  inline HeapState gc_state() { return gc_state_; }

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

  // If an object has an AllocationMemento trailing it, return it, otherwise
  // return NULL;
  template <FindMementoMode mode>
  inline AllocationMemento* FindAllocationMemento(HeapObject* object);
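  // For illustration, the memento mode is chosen at the call site, e.g.
  //   AllocationMemento* memento = FindAllocationMemento<kForGC>(object);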

  // Returns false if not able to reserve.
  bool ReserveSpace(Reservation* reservations);

  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);

  bool UsingEmbedderHeapTracer();

  void TracePossibleWrapper(JSObject* js_object);

  void RegisterExternallyReferencedObject(Object** object);

  //
  // Support for the API.
  //

  void CreateApiObjects();

  // Implements the corresponding V8 API function.
  bool IdleNotification(double deadline_in_seconds);
  bool IdleNotification(int idle_time_in_ms);

  void MemoryPressureNotification(MemoryPressureLevel level,
                                  bool is_isolate_locked);
  void CheckMemoryPressure();

  double MonotonicallyIncreasingTimeInMs();

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  // Check new space expansion criteria and expand semispaces if they were hit.
  void CheckNewSpaceExpansionCriteria();

  inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) {
    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;

    intptr_t adjusted_allocation_limit = limit - new_space_.Capacity();

    if (PromotedTotalSize() >= adjusted_allocation_limit) return true;

    if (HighMemoryPressure()) return true;

    return false;
  }

  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);

  // An object should be promoted if the object has survived a
  // scavenge operation.
  inline bool ShouldBePromoted(Address old_address, int object_size);

  void ClearNormalizedMapCaches();

  void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);

  inline bool OldGenerationAllocationLimitReached();

  // Completely clear the Instanceof cache (to stop it keeping objects alive
  // around a GC).
  inline void CompletelyClearInstanceofCache();

  inline uint32_t HashSeed();

  inline int NextScriptId();

  inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
  inline void SetConstructStubDeoptPCOffset(int pc_offset);
  inline void SetGetterStubDeoptPCOffset(int pc_offset);
  inline void SetSetterStubDeoptPCOffset(int pc_offset);
  inline void SetInterpreterEntryReturnPCOffset(int pc_offset);

  // For post mortem debugging.
  void RememberUnmappedPage(Address page, bool compacted);

  // Global inline caching age: it is incremented on some GCs after context
  // disposal. We use it to flush inline caches.
  int global_ic_age() { return global_ic_age_; }

  void AgeInlineCaches() {
    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
  }

  int64_t amount_of_external_allocated_memory() {
    return amount_of_external_allocated_memory_;
  }

  void update_amount_of_external_allocated_memory(int64_t delta) {
    amount_of_external_allocated_memory_ += delta;
  }

  void DeoptMarkedAllocationSites();

  bool DeoptMaybeTenuredAllocationSites() {
    return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
  }

  void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
                                     Handle<DependentCode> dep);

  DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);

  void AddRetainedMap(Handle<Map> map);

  // This event is triggered after successful allocation of a new object made
  // by runtime. Allocations of target space for object evacuation do not
  // trigger the event. In order to track ALL allocations one must turn off
  // FLAG_inline_new and FLAG_use_allocation_folding.
  inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);

  // This event is triggered after an object is moved to a new place.
  inline void OnMoveEvent(HeapObject* target, HeapObject* source,
                          int size_in_bytes);

  bool deserialization_complete() const { return deserialization_complete_; }

  bool HasLowAllocationRate();
  bool HasHighFragmentation();
  bool HasHighFragmentation(intptr_t used, intptr_t committed);

  void SetOptimizeForLatency() { optimize_for_memory_usage_ = false; }
  void SetOptimizeForMemoryUsage();
  bool ShouldOptimizeForMemoryUsage() {
    return optimize_for_memory_usage_ || HighMemoryPressure();
  }
  bool HighMemoryPressure() {
    return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
  }

  // ===========================================================================
  // Initialization. ===========================================================
  // ===========================================================================

  // Configure heap size in MB before setup. Return false if the heap has been
  // set up already.
  bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
                     int max_executable_size, size_t code_range_size);
  bool ConfigureHeapDefault();

  // Prepares the heap, setting up memory areas that are needed in the isolate
  // without actually creating any objects.
  bool SetUp();

  // Bootstraps the object heap with the core set of objects required to run.
  // Returns whether it succeeded.
  bool CreateHeapObjects();

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp();

  // ===========================================================================
  // Getters for spaces. =======================================================
  // ===========================================================================

  Address NewSpaceTop() { return new_space_.top(); }

  NewSpace* new_space() { return &new_space_; }
  OldSpace* old_space() { return old_space_; }
  OldSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  LargeObjectSpace* lo_space() { return lo_space_; }

  PagedSpace* paged_space(int idx) {
    switch (idx) {
      case OLD_SPACE:
        return old_space();
      case MAP_SPACE:
        return map_space();
      case CODE_SPACE:
        return code_space();
      case NEW_SPACE:
      case LO_SPACE:
        UNREACHABLE();
    }
    return NULL;
  }

  Space* space(int idx) {
    switch (idx) {
      case NEW_SPACE:
        return new_space();
      case LO_SPACE:
        return lo_space();
      default:
        return paged_space(idx);
    }
  }

  // Returns name of the space.
  const char* GetSpaceName(int idx);

  // ===========================================================================
  // Getters to other components. ==============================================
  // ===========================================================================

  GCTracer* tracer() { return tracer_; }

  MemoryAllocator* memory_allocator() { return memory_allocator_; }

  PromotionQueue* promotion_queue() { return &promotion_queue_; }

  inline Isolate* isolate();

  MarkCompactCollector* mark_compact_collector() {
    return mark_compact_collector_;
  }

  // ===========================================================================
  // Root set access. ==========================================================
  // ===========================================================================

  // Heap root getters.
#define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
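  // For illustration, ROOT_LIST(ROOT_ACCESSOR) above declares one getter per
  // root, e.g. "inline Oddball* undefined_value();" and
  // "inline Map* heap_number_map();".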

  // Utility type maps.
#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define STRING_ACCESSOR(name, str) inline String* name();
  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR

#define SYMBOL_ACCESSOR(name) inline Symbol* name();
  PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

#define SYMBOL_ACCESSOR(name, description) inline Symbol* name();
  PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
  WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

  Object* root(RootListIndex index) { return roots_[index]; }
  Handle<Object> root_handle(RootListIndex index) {
    return Handle<Object>(&roots_[index]);
  }

  // Generated code can embed this address to get access to the roots.
  Object** roots_array_start() { return roots_; }

  // Sets the stub_cache_ (only used when expanding the dictionary).
  void SetRootCodeStubs(UnseededNumberDictionary* value) {
    roots_[kCodeStubsRootIndex] = value;
  }

  void SetRootMaterializedObjects(FixedArray* objects) {
    roots_[kMaterializedObjectsRootIndex] = objects;
  }

  void SetRootScriptList(Object* value) {
    roots_[kScriptListRootIndex] = value;
  }

  void SetRootStringTable(StringTable* value) {
    roots_[kStringTableRootIndex] = value;
  }

  void SetRootNoScriptSharedFunctionInfos(Object* value) {
    roots_[kNoScriptSharedFunctionInfosRootIndex] = value;
  }

  // Set the stack limit in the roots_ array. Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  void SetStackLimits();

  // The stack limit is thread-dependent. To be able to reproduce the same
  // snapshot blob, we need to reset it before serializing.
  void ClearStackLimits();

  // Generated code can treat direct references to this root as constant.
  bool RootCanBeTreatedAsConstant(RootListIndex root_index);

  Map* MapForFixedTypedArray(ExternalArrayType array_type);
  RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);

  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);

  void RegisterStrongRoots(Object** start, Object** end);
  void UnregisterStrongRoots(Object** start);

  // ===========================================================================
  // Inline allocation. ========================================================
  // ===========================================================================

  // Indicates whether inline bump-pointer allocation has been disabled.
  bool inline_allocation_disabled() { return inline_allocation_disabled_; }

  // Switch whether inline bump-pointer allocation should be used.
  void EnableInlineAllocation();
  void DisableInlineAllocation();

  // ===========================================================================
  // Methods triggering GCs. ===================================================
  // ===========================================================================

  // Performs garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  inline bool CollectGarbage(
      AllocationSpace space, const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
  // non-zero, then the slower precise sweeper is used, which leaves the heap
  // in a state where we can iterate over the heap visiting all objects.
  void CollectAllGarbage(
      int flags = kFinalizeIncrementalMarkingMask, const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
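  // For illustration, a caller requesting a full, finalizing collection would
  // write something like
  //   heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
  //                           "external memory pressure");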

  // Last hope GC, should try to squeeze as much as possible.
  void CollectAllAvailableGarbage(const char* gc_reason = NULL);

  // Reports an external memory pressure event; either performs a major GC or
  // completes incremental marking in order to free external resources.
  void ReportExternalMemoryPressure(const char* gc_reason = NULL);

  // Invoked when GC was requested via the stack guard.
  void HandleGCRequest();

  // ===========================================================================
  // Iterators. ================================================================
  // ===========================================================================

  // Iterates over all roots in the heap.
  void IterateRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all strong roots in the heap.
  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over entries in the smi roots list. Only interesting to the
  // serializer/deserializer, since GC does not care about smis.
  void IterateSmiRoots(ObjectVisitor* v);
  // Iterates over all the other roots in the heap.
  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);

  // Iterate pointers of promoted objects.
  void IteratePromotedObject(HeapObject* target, int size,
                             bool was_marked_black,
                             ObjectSlotCallback callback);

  void IteratePromotedObjectPointers(HeapObject* object, Address start,
                                     Address end, bool record_slots,
                                     ObjectSlotCallback callback);

  // ===========================================================================
  // Store buffer API. =========================================================
  // ===========================================================================

  // Write barrier support for object[offset] = o;
  inline void RecordWrite(Object* object, int offset, Object* o);
  inline void RecordFixedArrayElements(FixedArray* array, int offset,
                                       int length);
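  // For illustration: a store of the form object[offset] = o is followed by
  // RecordWrite(object, offset, o), so that any old-to-new pointer created by
  // the store is recorded in the store buffer and found by the next scavenge.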

  Address* store_buffer_top_address() { return store_buffer()->top_address(); }

  void ClearRecordedSlot(HeapObject* object, Object** slot);
  void ClearRecordedSlotRange(Address start, Address end);

  // ===========================================================================
  // Incremental marking API. ==================================================
  // ===========================================================================

  // Start incremental marking and ensure that idle time handler can perform
  // incremental steps.
  void StartIdleIncrementalMarking();

  // Starts incremental marking assuming incremental marking is currently
  // stopped.
  void StartIncrementalMarking(int gc_flags = kNoGCFlags,
                               const GCCallbackFlags gc_callback_flags =
                                   GCCallbackFlags::kNoGCCallbackFlags,
                               const char* reason = nullptr);
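  // For illustration, a typical call site passes explicit flags and a reason:
  //   heap->StartIncrementalMarking(Heap::kReduceMemoryFootprintMask,
  //                                 kNoGCCallbackFlags, "idle notification");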

  void FinalizeIncrementalMarkingIfComplete(const char* comment);

  bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms);

  void RegisterReservationsForBlackAllocation(Reservation* reservations);

  IncrementalMarking* incremental_marking() { return incremental_marking_; }

  // ===========================================================================
  // External string table API. ================================================
  // ===========================================================================

  // Registers an external string.
  inline void RegisterExternalString(String* string);

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  inline void FinalizeExternalString(String* string);

  // ===========================================================================
  // Methods checking/returning the space of a given object/address. ===========
  // ===========================================================================

  // Returns whether the object resides in new space.
  inline bool InNewSpace(Object* object);
  inline bool InFromSpace(Object* object);
  inline bool InToSpace(Object* object);

  // Returns whether the object resides in old space.
  inline bool InOldSpace(Object* object);

  // Checks whether an address/object is in the heap (including auxiliary
  // areas and unused areas).
  bool Contains(HeapObject* value);

  // Checks whether an address/object is in a space.
  // Currently used by tests, serialization and heap verification only.
  bool InSpace(HeapObject* value, AllocationSpace space);

  // Slow methods that can be used for verification as they can also be used
  // with off-heap Addresses.
  bool ContainsSlow(Address addr);
  bool InSpaceSlow(Address addr, AllocationSpace space);
  inline bool InNewSpaceSlow(Address address);
  inline bool InOldSpaceSlow(Address address);

  // ===========================================================================
  // Object statistics tracking. ===============================================
  // ===========================================================================

  // Returns the number of buckets used by object statistics tracking during a
  // major GC. Note, however, that the following methods fail gracefully when
  // the bounds are exceeded.
  size_t NumberOfTrackedHeapObjectTypes();

  // Returns object statistics about count and size at the last major GC.
  // Objects are being grouped into buckets that roughly resemble existing
  // instance types.
  size_t ObjectCountAtLastGC(size_t index);
  size_t ObjectSizeAtLastGC(size_t index);

  // Retrieves names of buckets used by object statistics tracking.
  bool GetObjectTypeName(size_t index, const char** object_type,
                         const char** object_sub_type);

  // ===========================================================================
  // GC statistics. ============================================================
  // ===========================================================================

  // Returns the maximum amount of memory reserved for the heap.
  intptr_t MaxReserved() {
    return 2 * max_semi_space_size_ + max_old_generation_size_;
  }
  int MaxSemiSpaceSize() { return max_semi_space_size_; }
  int InitialSemiSpaceSize() { return initial_semispace_size_; }
  intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
  intptr_t MaxExecutableSize() { return max_executable_size_; }

  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
  // more spaces are needed until it reaches the limit.
  intptr_t Capacity();

  // Returns the capacity of the old generation.
  intptr_t OldGenerationCapacity();

  // Returns the amount of memory currently committed for the heap.
  intptr_t CommittedMemory();

  // Returns the amount of memory currently committed for the old space.
  intptr_t CommittedOldGenerationMemory();

  // Returns the amount of executable memory currently committed for the heap.
  intptr_t CommittedMemoryExecutable();

  // Returns the amount of physical memory currently committed for the heap.
1200 size_t CommittedPhysicalMemory();
1201
1202 // Returns the maximum amount of memory ever committed for the heap.
1203 intptr_t MaximumCommittedMemory() { return maximum_committed_; }
1204
1205 // Updates the maximum committed memory for the heap. Should be called
1206 // whenever a space grows.
1207 void UpdateMaximumCommitted();
1208
1209 // Returns the available bytes in space w/o growing.
1210 // Heap doesn't guarantee that it can allocate an object that requires
1211 // all available bytes. Check MaxHeapObjectSize() instead.
1212 intptr_t Available();
1213
  // Returns the size of all objects residing in the heap.
1215 intptr_t SizeOfObjects();
1216
1217 void UpdateSurvivalStatistics(int start_new_space_size);
1218
Ben Murdoch097c5b22016-05-18 11:27:45 +01001219 inline void IncrementPromotedObjectsSize(intptr_t object_size) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001220 DCHECK_GE(object_size, 0);
1221 promoted_objects_size_ += object_size;
1222 }
1223 inline intptr_t promoted_objects_size() { return promoted_objects_size_; }
1224
Ben Murdoch097c5b22016-05-18 11:27:45 +01001225 inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001226 DCHECK_GE(object_size, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001227 semi_space_copied_object_size_ += object_size;
1228 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001229 inline intptr_t semi_space_copied_object_size() {
1230 return semi_space_copied_object_size_;
1231 }
1232
1233 inline intptr_t SurvivedNewSpaceObjectSize() {
1234 return promoted_objects_size_ + semi_space_copied_object_size_;
1235 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001236
1237 inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
1238
1239 inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }
1240
1241 inline void IncrementNodesPromoted() { nodes_promoted_++; }
1242
Ben Murdoch097c5b22016-05-18 11:27:45 +01001243 inline void IncrementYoungSurvivorsCounter(intptr_t survived) {
1244 DCHECK_GE(survived, 0);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001245 survived_last_scavenge_ = survived;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001246 survived_since_last_expansion_ += survived;
1247 }
1248
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001249 inline intptr_t PromotedTotalSize() {
1250 int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
1251 if (total > std::numeric_limits<intptr_t>::max()) {
1252 // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations.
1253 return std::numeric_limits<intptr_t>::max();
1254 }
1255 if (total < 0) return 0;
1256 return static_cast<intptr_t>(total);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001257 }
1258
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001259 void UpdateNewSpaceAllocationCounter() {
1260 new_space_allocation_counter_ = NewSpaceAllocationCounter();
1261 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001262
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001263 size_t NewSpaceAllocationCounter() {
1264 return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
1265 }
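  //
  // Sketch: measuring new-space allocation across a region of code (the
  // workload function is hypothetical):
  //
  //   size_t before = heap->NewSpaceAllocationCounter();
  //   RunWorkload();
  //   size_t allocated_bytes = heap->NewSpaceAllocationCounter() - before;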
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001266
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001267 // This should be used only for testing.
1268 void set_new_space_allocation_counter(size_t new_value) {
1269 new_space_allocation_counter_ = new_value;
1270 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001271
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001272 void UpdateOldGenerationAllocationCounter() {
1273 old_generation_allocation_counter_ = OldGenerationAllocationCounter();
1274 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001275
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001276 size_t OldGenerationAllocationCounter() {
1277 return old_generation_allocation_counter_ + PromotedSinceLastGC();
1278 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001279
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001280 // This should be used only for testing.
1281 void set_old_generation_allocation_counter(size_t new_value) {
1282 old_generation_allocation_counter_ = new_value;
1283 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001284
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001285 size_t PromotedSinceLastGC() {
1286 return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_;
1287 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001288
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001289 int gc_count() const { return gc_count_; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001290
  // Returns the size of objects residing in spaces other than new space.
1292 intptr_t PromotedSpaceSizeOfObjects();
1293
1294 double total_regexp_code_generated() { return total_regexp_code_generated_; }
1295 void IncreaseTotalRegexpCodeGenerated(int size) {
1296 total_regexp_code_generated_ += size;
1297 }
1298
1299 void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
1300 if (is_crankshafted) {
1301 crankshaft_codegen_bytes_generated_ += size;
1302 } else {
1303 full_codegen_bytes_generated_ += size;
1304 }
1305 }
1306
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001307 // ===========================================================================
  // Prologue/epilogue callback methods. ======================================
1309 // ===========================================================================
1310
1311 void AddGCPrologueCallback(v8::Isolate::GCCallback callback,
1312 GCType gc_type_filter, bool pass_isolate = true);
1313 void RemoveGCPrologueCallback(v8::Isolate::GCCallback callback);
1314
1315 void AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
1316 GCType gc_type_filter, bool pass_isolate = true);
1317 void RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback);
1318
1319 void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
1320 void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
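  //
  // Sketch of registering a prologue callback that fires only for full GCs
  // (the callback body is illustrative):
  //
  //   static void OnMarkSweep(v8::Isolate* isolate, GCType type,
  //                           GCCallbackFlags flags) {
  //     // e.g. reset embedder-side caches before the heap is collected
  //   }
  //   ...
  //   heap->AddGCPrologueCallback(OnMarkSweep, kGCTypeMarkSweepCompact);
  //   ...
  //   heap->RemoveGCPrologueCallback(OnMarkSweep);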
1321
1322 // ===========================================================================
1323 // Allocation methods. =======================================================
1324 // ===========================================================================
1325
1326 // Creates a filler object and returns a heap object immediately after it.
1327 MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
1328 int filler_size);
1329
1330 // Creates a filler object if needed for alignment and returns a heap object
1331 // immediately after it. If any space is left after the returned object,
  // another filler object is created so the over-allocated memory is iterable.
1333 MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object,
1334 int object_size,
1335 int allocation_size,
1336 AllocationAlignment alignment);
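  //
  // Sketch of the intended use when an allocation was over-sized to leave
  // room for alignment (the sizes and the raw result are illustrative):
  //
  //   int object_size = 24;
  //   int allocation_size = object_size + kPointerSize;  // alignment slack
  //   HeapObject* raw = /* freshly allocated, possibly misaligned */;
  //   HeapObject* object = heap->AlignWithFiller(raw, object_size,
  //                                              allocation_size,
  //                                              kDoubleAligned);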
1337
1338 // ===========================================================================
1339 // ArrayBuffer tracking. =====================================================
1340 // ===========================================================================
1341
1342 void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
1343 void UnregisterArrayBuffer(JSArrayBuffer* buffer);
1344
1345 inline ArrayBufferTracker* array_buffer_tracker() {
1346 return array_buffer_tracker_;
1347 }
1348
1349 // ===========================================================================
1350 // Allocation site tracking. =================================================
1351 // ===========================================================================
1352
  // Updates the AllocationSite of a given {object}. If the global pretenuring
  // storage is passed as {pretenuring_feedback}, the memento found count on
  // the corresponding allocation site is immediately updated and an entry
  // in the hash map is created. Otherwise the entry (including the count
  // value) is cached on the local pretenuring feedback.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001358 template <UpdateAllocationSiteMode mode>
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001359 inline void UpdateAllocationSite(HeapObject* object,
1360 HashMap* pretenuring_feedback);
1361
1362 // Removes an entry from the global pretenuring storage.
1363 inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);
1364
1365 // Merges local pretenuring feedback into the global one. Note that this
1366 // method needs to be called after evacuation, as allocation sites may be
1367 // evacuated and this method resolves forward pointers accordingly.
1368 void MergeAllocationSitePretenuringFeedback(
1369 const HashMap& local_pretenuring_feedback);
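  //
  // Rough flow, as a sketch (GC-internal; names of locals are illustrative):
  // during evacuation each task records feedback into its own map via
  //   UpdateAllocationSite<mode>(object, &local_pretenuring_feedback);
  // and after evacuation the main thread folds it back with
  //   MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback);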
1370
1371// =============================================================================
1372
1373#ifdef VERIFY_HEAP
1374 // Verify the heap is in its normal state before or after a GC.
1375 void Verify();
1376#endif
1377
1378#ifdef DEBUG
1379 void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
1380
1381 void TracePathToObjectFrom(Object* target, Object* root);
1382 void TracePathToObject(Object* target);
1383 void TracePathToGlobal();
1384
1385 void Print();
1386 void PrintHandles();
1387
1388 // Report heap statistics.
1389 void ReportHeapStatistics(const char* title);
1390 void ReportCodeStatistics(const char* title);
1391#endif
1392
1393 private:
1394 class PretenuringScope;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001395
  // The external string table is where all external strings are
  // registered. We need to keep track of such strings to properly
1398 // finalize them.
1399 class ExternalStringTable {
1400 public:
1401 // Registers an external string.
1402 inline void AddString(String* string);
1403
1404 inline void Iterate(ObjectVisitor* v);
1405
1406 // Restores internal invariant and gets rid of collected strings.
1407 // Must be called after each Iterate() that modified the strings.
1408 void CleanUp();
1409
1410 // Destroys all allocated memory.
1411 void TearDown();
1412
1413 private:
1414 explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
1415
1416 inline void Verify();
1417
1418 inline void AddOldString(String* string);
1419
1420 // Notifies the table that only a prefix of the new list is valid.
1421 inline void ShrinkNewStrings(int position);
1422
    // To speed up scavenge collections, new space strings are kept
    // separate from old space strings.
1425 List<Object*> new_space_strings_;
1426 List<Object*> old_space_strings_;
1427
1428 Heap* heap_;
1429
1430 friend class Heap;
1431
1432 DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
1433 };
1434
1435 struct StrongRootsList;
1436
1437 struct StringTypeTable {
1438 InstanceType type;
1439 int size;
1440 RootListIndex index;
1441 };
1442
1443 struct ConstantStringTable {
1444 const char* contents;
1445 RootListIndex index;
1446 };
1447
1448 struct StructTable {
1449 InstanceType type;
1450 int size;
1451 RootListIndex index;
1452 };
1453
1454 struct GCCallbackPair {
1455 GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type,
1456 bool pass_isolate)
1457 : callback(callback), gc_type(gc_type), pass_isolate(pass_isolate) {}
1458
1459 bool operator==(const GCCallbackPair& other) const {
1460 return other.callback == callback;
1461 }
1462
1463 v8::Isolate::GCCallback callback;
1464 GCType gc_type;
1465 bool pass_isolate;
1466 };
1467
1468 typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
1469 Object** pointer);
1470
1471 static const int kInitialStringTableSize = 2048;
1472 static const int kInitialEvalCacheSize = 64;
1473 static const int kInitialNumberStringCacheSize = 256;
1474
1475 static const int kRememberedUnmappedPages = 128;
1476
1477 static const StringTypeTable string_type_table[];
1478 static const ConstantStringTable constant_string_table[];
1479 static const StructTable struct_table[];
1480
1481 static const int kYoungSurvivalRateHighThreshold = 90;
1482 static const int kYoungSurvivalRateAllowedDeviation = 15;
1483 static const int kOldSurvivalRateLowThreshold = 10;
1484
1485 static const int kMaxMarkCompactsInIdleRound = 7;
1486 static const int kIdleScavengeThreshold = 5;
1487
1488 static const int kInitialFeedbackCapacity = 256;
1489
1490 Heap();
1491
1492 static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
1493 Heap* heap, Object** pointer);
1494
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001495 // Selects the proper allocation space based on the pretenuring decision.
1496 static AllocationSpace SelectSpace(PretenureFlag pretenure) {
1497 return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
1498 }
1499
1500#define ROOT_ACCESSOR(type, name, camel_name) \
1501 inline void set_##name(type* value);
1502 ROOT_LIST(ROOT_ACCESSOR)
1503#undef ROOT_ACCESSOR
1504
1505 StoreBuffer* store_buffer() { return &store_buffer_; }
1506
1507 void set_current_gc_flags(int flags) {
1508 current_gc_flags_ = flags;
1509 DCHECK(!ShouldFinalizeIncrementalMarking() ||
1510 !ShouldAbortIncrementalMarking());
1511 }
1512
1513 inline bool ShouldReduceMemory() const {
1514 return current_gc_flags_ & kReduceMemoryFootprintMask;
1515 }
1516
1517 inline bool ShouldAbortIncrementalMarking() const {
1518 return current_gc_flags_ & kAbortIncrementalMarkingMask;
1519 }
1520
1521 inline bool ShouldFinalizeIncrementalMarking() const {
1522 return current_gc_flags_ & kFinalizeIncrementalMarkingMask;
1523 }
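  //
  // Sketch: a memory-reducing, non-incremental full GC is typically requested
  // by combining the corresponding flag bits before collecting:
  //
  //   set_current_gc_flags(kReduceMemoryFootprintMask |
  //                        kAbortIncrementalMarkingMask);
  //   DCHECK(ShouldReduceMemory());
  //   DCHECK(ShouldAbortIncrementalMarking());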
1524
1525 void PreprocessStackTraces();
1526
1527 // Checks whether a global GC is necessary
1528 GarbageCollector SelectGarbageCollector(AllocationSpace space,
1529 const char** reason);
1530
  // Make sure there is a filler value behind the top of the new space
  // so that the GC does not confuse some uninitialized/stale memory
  // with the allocation memento of the object at the top.
1534 void EnsureFillerObjectAtTop();
1535
1536 // Ensure that we have swept all spaces in such a way that we can iterate
1537 // over all objects. May cause a GC.
1538 void MakeHeapIterable();
1539
  // Performs a garbage collection operation.
1541 // Returns whether there is a chance that another major GC could
1542 // collect more garbage.
1543 bool CollectGarbage(
1544 GarbageCollector collector, const char* gc_reason,
1545 const char* collector_reason,
1546 const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
1547
1548 // Performs garbage collection
1549 // Returns whether there is a chance another major GC could
1550 // collect more garbage.
1551 bool PerformGarbageCollection(
1552 GarbageCollector collector,
1553 const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
1554
1555 inline void UpdateOldSpaceLimits();
1556
1557 // Initializes a JSObject based on its map.
1558 void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
1559 Map* map);
1560
1561 // Initializes JSObject body starting at given offset.
1562 void InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset);
1563
1564 void InitializeAllocationMemento(AllocationMemento* memento,
1565 AllocationSite* allocation_site);
1566
1567 bool CreateInitialMaps();
1568 void CreateInitialObjects();
1569
  // These Create*EntryStub functions are here and forced to not be inlined
  // because of a gcc-4.4 bug that assigns wrong vtable entries.
1572 NO_INLINE(void CreateJSEntryStub());
1573 NO_INLINE(void CreateJSConstructEntryStub());
1574
1575 void CreateFixedStubs();
1576
1577 HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);
1578
1579 // Commits from space if it is uncommitted.
1580 void EnsureFromSpaceIsCommitted();
1581
1582 // Uncommit unused semi space.
1583 bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
1584
1585 // Fill in bogus values in from space
1586 void ZapFromSpace();
1587
  // Deopts all code that contains allocation instructions that are tenured or
  // not tenured. Moreover, it clears the pretenuring allocation site statistics.
1590 void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
1591
1592 // Evaluates local pretenuring for the old space and calls
1593 // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
1594 // the old space.
1595 void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
1596
1597 // Record statistics before and after garbage collection.
1598 void ReportStatisticsBeforeGC();
1599 void ReportStatisticsAfterGC();
1600
1601 // Creates and installs the full-sized number string cache.
1602 int FullSizeNumberStringCacheLength();
1603 // Flush the number to string cache.
1604 void FlushNumberStringCache();
1605
1606 // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
1607 // Re-visit incremental marking heuristics.
1608 bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
1609
1610 void ConfigureInitialOldGenerationSize();
1611
1612 bool HasLowYoungGenerationAllocationRate();
1613 bool HasLowOldGenerationAllocationRate();
1614 double YoungGenerationMutatorUtilization();
1615 double OldGenerationMutatorUtilization();
1616
1617 void ReduceNewSpaceSize();
1618
1619 bool TryFinalizeIdleIncrementalMarking(
1620 double idle_time_in_ms, size_t size_of_objects,
1621 size_t mark_compact_speed_in_bytes_per_ms);
1622
1623 GCIdleTimeHeapState ComputeHeapState();
1624
1625 bool PerformIdleTimeAction(GCIdleTimeAction action,
1626 GCIdleTimeHeapState heap_state,
1627 double deadline_in_ms);
1628
1629 void IdleNotificationEpilogue(GCIdleTimeAction action,
1630 GCIdleTimeHeapState heap_state, double start_ms,
1631 double deadline_in_ms);
1632
1633 inline void UpdateAllocationsHash(HeapObject* object);
1634 inline void UpdateAllocationsHash(uint32_t value);
1635 void PrintAlloctionsHash();
1636
1637 void AddToRingBuffer(const char* string);
1638 void GetFromRingBuffer(char* buffer);
1639
1640 void CompactRetainedMaps(ArrayList* retained_maps);
1641
Ben Murdochda12d292016-06-02 14:46:10 +01001642 void CollectGarbageOnMemoryPressure(const char* source);
1643
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001644 // Attempt to over-approximate the weak closure by marking object groups and
1645 // implicit references from global handles, but don't atomically complete
1646 // marking. If we continue to mark incrementally, we might have marked
1647 // objects that die later.
1648 void FinalizeIncrementalMarking(const char* gc_reason);
1649
1650 // Returns the timer used for a given GC type.
1651 // - GCScavenger: young generation GC
1652 // - GCCompactor: full GC
  // - GCFinalizeMC: finalization of incremental full GC
1654 // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
1655 // memory reduction
1656 HistogramTimer* GCTypeTimer(GarbageCollector collector);
1657
1658 // ===========================================================================
1659 // Pretenuring. ==============================================================
1660 // ===========================================================================
1661
  // Pretenuring decisions are made based on feedback collected during new
  // space evacuation. Note that between feedback collection and calling this
  // method objects in old space must not move.
1665 void ProcessPretenuringFeedback();
1666
1667 // ===========================================================================
1668 // Actual GC. ================================================================
1669 // ===========================================================================
1670
1671 // Code that should be run before and after each GC. Includes some
1672 // reporting/verification activities when compiled with DEBUG set.
1673 void GarbageCollectionPrologue();
1674 void GarbageCollectionEpilogue();
1675
1676 // Performs a major collection in the whole heap.
1677 void MarkCompact();
1678
1679 // Code to be run before and after mark-compact.
1680 void MarkCompactPrologue();
1681 void MarkCompactEpilogue();
1682
  // Performs a minor collection in the new generation.
1684 void Scavenge();
1685
1686 Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
1687
1688 void UpdateNewSpaceReferencesInExternalStringTable(
1689 ExternalStringTableUpdaterCallback updater_func);
1690
1691 void UpdateReferencesInExternalStringTable(
1692 ExternalStringTableUpdaterCallback updater_func);
1693
1694 void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
1695 void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
1696 void ProcessNativeContexts(WeakObjectRetainer* retainer);
1697 void ProcessAllocationSites(WeakObjectRetainer* retainer);
Ben Murdochda12d292016-06-02 14:46:10 +01001698 void ProcessWeakListRoots(WeakObjectRetainer* retainer);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001699
1700 // ===========================================================================
1701 // GC statistics. ============================================================
1702 // ===========================================================================
1703
1704 inline intptr_t OldGenerationSpaceAvailable() {
1705 return old_generation_allocation_limit_ - PromotedTotalSize();
1706 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001707
1708 // Returns maximum GC pause.
1709 double get_max_gc_pause() { return max_gc_pause_; }
1710
1711 // Returns maximum size of objects alive after GC.
1712 intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
1713
1714 // Returns minimal interval between two subsequent collections.
1715 double get_min_in_mutator() { return min_in_mutator_; }
1716
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001717 // Update GC statistics that are tracked on the Heap.
1718 void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
1719 double marking_time);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001720
1721 bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
1722
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001723 // ===========================================================================
1724 // Growing strategy. =========================================================
1725 // ===========================================================================
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001726
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001727 // Decrease the allocation limit if the new limit based on the given
1728 // parameters is lower than the current limit.
1729 void DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
1730 double gc_speed,
1731 double mutator_speed);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001732
1733
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001734 // Calculates the allocation limit based on a given growing factor and a
1735 // given old generation size.
1736 intptr_t CalculateOldGenerationAllocationLimit(double factor,
1737 intptr_t old_gen_size);
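  //
  // Conceptually (a sketch that ignores the clamping the implementation
  // applies against the configured maximum and the current heap size):
  //
  //   intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);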
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001738
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001739 // Sets the allocation limit to trigger the next full garbage collection.
1740 void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed,
1741 double mutator_speed);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001742
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001743 // ===========================================================================
1744 // Idle notification. ========================================================
1745 // ===========================================================================
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001746
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001747 bool RecentIdleNotificationHappened();
1748 void ScheduleIdleScavengeIfNeeded(int bytes_allocated);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001749
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001750 // ===========================================================================
1751 // HeapIterator helpers. =====================================================
1752 // ===========================================================================
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001753
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001754 void heap_iterator_start() { heap_iterator_depth_++; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001755
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001756 void heap_iterator_end() { heap_iterator_depth_--; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001757
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001758 bool in_heap_iterator() { return heap_iterator_depth_ > 0; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001759
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001760 // ===========================================================================
1761 // Allocation methods. =======================================================
1762 // ===========================================================================
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001763
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001764 // Returns a deep copy of the JavaScript object.
1765 // Properties and elements are copied too.
1766 // Optionally takes an AllocationSite to be appended in an AllocationMemento.
1767 MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
1768 AllocationSite* site = NULL);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001769
1770 // Allocates a JS Map in the heap.
1771 MUST_USE_RESULT AllocationResult
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001772 AllocateMap(InstanceType instance_type, int instance_size,
1773 ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001774
1775 // Allocates and initializes a new JavaScript object based on a
1776 // constructor.
1777 // If allocation_site is non-null, then a memento is emitted after the object
1778 // that points to the site.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001779 MUST_USE_RESULT AllocationResult AllocateJSObject(
1780 JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED,
1781 AllocationSite* allocation_site = NULL);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001782
1783 // Allocates and initializes a new JavaScript object based on a map.
1784 // Passing an allocation site means that a memento will be created that
1785 // points to the site.
1786 MUST_USE_RESULT AllocationResult
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001787 AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
1788 AllocationSite* allocation_site = NULL);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001789
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001790 // Allocates a HeapNumber from value.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001791 MUST_USE_RESULT AllocationResult
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001792 AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
1793 PretenureFlag pretenure = NOT_TENURED);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001794
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001795// Allocates SIMD values from the given lane values.
1796#define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \
1797 AllocationResult Allocate##Type(lane_type lanes[lane_count], \
1798 PretenureFlag pretenure = NOT_TENURED);
1799 SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION)
1800#undef SIMD_ALLOCATE_DECLARATION
1801
1802 // Allocates a byte array of the specified length
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001803 MUST_USE_RESULT AllocationResult
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001804 AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
1805
1806 // Allocates a bytecode array with given contents.
1807 MUST_USE_RESULT AllocationResult
1808 AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size,
1809 int parameter_count, FixedArray* constant_pool);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001810
1811 // Copy the code and scope info part of the code object, but insert
1812 // the provided data as the relocation information.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001813 MUST_USE_RESULT AllocationResult CopyCode(Code* code,
1814 Vector<byte> reloc_info);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001815
1816 MUST_USE_RESULT AllocationResult CopyCode(Code* code);
1817
Ben Murdoch097c5b22016-05-18 11:27:45 +01001818 MUST_USE_RESULT AllocationResult
1819 CopyBytecodeArray(BytecodeArray* bytecode_array);
1820
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001821 // Allocates a fixed array initialized with undefined values
1822 MUST_USE_RESULT AllocationResult
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001823 AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001824
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001825 // Allocate an uninitialized object. The memory is non-executable if the
1826 // hardware and OS allow. This is the single choke-point for allocations
1827 // performed by the runtime and should not be bypassed (to extend this to
1828 // inlined allocations, use the Heap::DisableInlineAllocation() support).
1829 MUST_USE_RESULT inline AllocationResult AllocateRaw(
1830 int size_in_bytes, AllocationSpace space,
      AllocationAlignment alignment = kWordAligned);
1832
1833 // Allocates a heap object based on the map.
1834 MUST_USE_RESULT AllocationResult
1835 Allocate(Map* map, AllocationSpace space,
1836 AllocationSite* allocation_site = NULL);
1837
1838 // Allocates a partial map for bootstrapping.
1839 MUST_USE_RESULT AllocationResult
1840 AllocatePartialMap(InstanceType instance_type, int instance_size);
1841
1842 // Allocate a block of memory in the given space (filled with a filler).
1843 // Used as a fall-back for generated code when the space is full.
1844 MUST_USE_RESULT AllocationResult
1845 AllocateFillerObject(int size, bool double_align, AllocationSpace space);
1846
1847 // Allocate an uninitialized fixed array.
1848 MUST_USE_RESULT AllocationResult
1849 AllocateRawFixedArray(int length, PretenureFlag pretenure);
1850
1851 // Allocate an uninitialized fixed double array.
1852 MUST_USE_RESULT AllocationResult
1853 AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);
1854
1855 // Allocate an initialized fixed array with the given filler value.
1856 MUST_USE_RESULT AllocationResult
1857 AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure,
1858 Object* filler);
1859
  // Allocates and partially initializes a String. There are two String
1861 // encodings: one-byte and two-byte. These functions allocate a string of
1862 // the given length and set its map and length fields. The characters of
1863 // the string are uninitialized.
1864 MUST_USE_RESULT AllocationResult
1865 AllocateRawOneByteString(int length, PretenureFlag pretenure);
1866 MUST_USE_RESULT AllocationResult
1867 AllocateRawTwoByteString(int length, PretenureFlag pretenure);
1868
1869 // Allocates an internalized string in old space based on the character
1870 // stream.
1871 MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
1872 Vector<const char> str, int chars, uint32_t hash_field);
1873
1874 MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
1875 Vector<const uint8_t> str, uint32_t hash_field);
1876
1877 MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
1878 Vector<const uc16> str, uint32_t hash_field);
1879
1880 template <bool is_one_byte, typename T>
1881 MUST_USE_RESULT AllocationResult
1882 AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);
1883
1884 template <typename T>
1885 MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
1886 T t, int chars, uint32_t hash_field);
1887
1888 // Allocates an uninitialized fixed array. It must be filled by the caller.
1889 MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);
1890
1891 // Make a copy of src and return it.
1892 MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);
1893
1894 // Make a copy of src, also grow the copy, and return the copy.
1895 MUST_USE_RESULT AllocationResult
1896 CopyFixedArrayAndGrow(FixedArray* src, int grow_by, PretenureFlag pretenure);
1897
Ben Murdoch097c5b22016-05-18 11:27:45 +01001898 // Make a copy of src, also grow the copy, and return the copy.
1899 MUST_USE_RESULT AllocationResult CopyFixedArrayUpTo(FixedArray* src,
1900 int new_len,
1901 PretenureFlag pretenure);
1902
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001903 // Make a copy of src, set the map, and return the copy.
1904 MUST_USE_RESULT AllocationResult
1905 CopyFixedArrayWithMap(FixedArray* src, Map* map);
1906
1907 // Make a copy of src and return it.
1908 MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
1909 FixedDoubleArray* src);
1910
1911 // Computes a single character string where the character has code.
1912 // A cache is used for one-byte (Latin1) codes.
1913 MUST_USE_RESULT AllocationResult
1914 LookupSingleCharacterStringFromCode(uint16_t code);
1915
1916 // Allocate a symbol in old space.
1917 MUST_USE_RESULT AllocationResult AllocateSymbol();
1918
1919 // Allocates an external array of the specified length and type.
1920 MUST_USE_RESULT AllocationResult AllocateFixedTypedArrayWithExternalPointer(
1921 int length, ExternalArrayType array_type, void* external_pointer,
1922 PretenureFlag pretenure);
1923
1924 // Allocates a fixed typed array of the specified length and type.
1925 MUST_USE_RESULT AllocationResult
1926 AllocateFixedTypedArray(int length, ExternalArrayType array_type,
1927 bool initialize, PretenureFlag pretenure);
1928
1929 // Make a copy of src and return it.
1930 MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);
1931
1932 // Make a copy of src, set the map, and return the copy.
1933 MUST_USE_RESULT AllocationResult
1934 CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);
1935
  // Allocates a fixed double array with uninitialized values.
1937 MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
1938 int length, PretenureFlag pretenure = NOT_TENURED);
1939
1940 // Allocate empty fixed array.
1941 MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
1942
1943 // Allocate empty fixed typed array of given type.
1944 MUST_USE_RESULT AllocationResult
1945 AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
1946
1947 // Allocate a tenured simple cell.
1948 MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
1949
1950 // Allocate a tenured JS global property cell initialized with the hole.
1951 MUST_USE_RESULT AllocationResult AllocatePropertyCell();
1952
1953 MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);
1954
1955 MUST_USE_RESULT AllocationResult AllocateTransitionArray(int capacity);
1956
1957 // Allocates a new utility object in the old generation.
1958 MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);
1959
1960 // Allocates a new foreign object.
1961 MUST_USE_RESULT AllocationResult
1962 AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);
1963
1964 MUST_USE_RESULT AllocationResult
1965 AllocateCode(int object_size, bool immovable);
1966
1967 MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);
1968
1969 MUST_USE_RESULT AllocationResult InternalizeString(String* str);
1970
1971 // ===========================================================================
1972
1973 void set_force_oom(bool value) { force_oom_ = value; }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001974
  // The amount of external memory registered through the API and kept alive
  // by global handles.
1977 int64_t amount_of_external_allocated_memory_;
1978
1979 // Caches the amount of external memory registered at the last global gc.
1980 int64_t amount_of_external_allocated_memory_at_last_global_gc_;
1981
1982 // This can be calculated directly from a pointer to the heap; however, it is
1983 // more expedient to get at the isolate directly from within Heap methods.
1984 Isolate* isolate_;
1985
1986 Object* roots_[kRootListLength];
1987
1988 size_t code_range_size_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001989 int max_semi_space_size_;
1990 int initial_semispace_size_;
1991 intptr_t max_old_generation_size_;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001992 intptr_t initial_old_generation_size_;
1993 bool old_generation_size_configured_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001994 intptr_t max_executable_size_;
1995 intptr_t maximum_committed_;
1996
1997 // For keeping track of how much data has survived
1998 // scavenge since last new space expansion.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001999 intptr_t survived_since_last_expansion_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002000
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002001 // ... and since the last scavenge.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002002 intptr_t survived_last_scavenge_;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002003
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002004 // This is not the depth of nested AlwaysAllocateScope's but rather a single
2005 // count, as scopes can be acquired from multiple tasks (read: threads).
Ben Murdochc5610432016-08-08 18:44:38 +01002006 base::AtomicNumber<size_t> always_allocate_scope_count_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002007
  // Stores the memory pressure level that was set by
  // MemoryPressureNotification and reset by a mark-compact garbage collection.
Ben Murdochc5610432016-08-08 18:44:38 +01002010 base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
Ben Murdochda12d292016-06-02 14:46:10 +01002011
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002012 // For keeping track of context disposals.
2013 int contexts_disposed_;
2014
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002015 // The length of the retained_maps array at the time of context disposal.
2016 // This separates maps in the retained_maps array that were created before
2017 // and after context disposal.
2018 int number_of_disposed_maps_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002019
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002020 int global_ic_age_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002021
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002022 NewSpace new_space_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002023 OldSpace* old_space_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002024 OldSpace* code_space_;
2025 MapSpace* map_space_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002026 LargeObjectSpace* lo_space_;
2027 HeapState gc_state_;
2028 int gc_post_processing_depth_;
2029 Address new_space_top_after_last_gc_;
2030
2031 // Returns the amount of external memory registered since last global gc.
2032 int64_t PromotedExternalMemorySize();
2033
2034 // How many "runtime allocations" happened.
2035 uint32_t allocations_count_;
2036
2037 // Running hash over allocations performed.
2038 uint32_t raw_allocations_hash_;
2039
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002040 // How many mark-sweep collections happened.
2041 unsigned int ms_count_;
2042
2043 // How many gc happened.
2044 unsigned int gc_count_;
2045
2046 // For post mortem debugging.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002047 int remembered_unmapped_pages_index_;
2048 Address remembered_unmapped_pages_[kRememberedUnmappedPages];
2049
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002050#ifdef DEBUG
  // If the --gc-interval flag is set to a positive value, this
  // variable holds the value indicating the number of allocations
  // remaining until the next failure and garbage collection.
2054 int allocation_timeout_;
2055#endif // DEBUG
2056
2057 // Limit that triggers a global GC on the next (normally caused) GC. This
2058 // is checked when we have already decided to do a GC to help determine
2059 // which collector to invoke, before expanding a paged space in the old
2060 // generation and on every allocation in large object space.
2061 intptr_t old_generation_allocation_limit_;
2062
2063 // Indicates that an allocation has failed in the old generation since the
2064 // last GC.
2065 bool old_gen_exhausted_;
2066
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002067 // Indicates that memory usage is more important than latency.
2068 // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed.
2069 bool optimize_for_memory_usage_;
2070
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002071 // Indicates that inline bump-pointer allocation has been globally disabled
2072 // for all spaces. This is used to disable allocations in generated code.
2073 bool inline_allocation_disabled_;
2074
2075 // Weak list heads, threaded through the objects.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002076 // List heads are initialized lazily and contain the undefined_value at start.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002077 Object* native_contexts_list_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002078 Object* allocation_sites_list_;
2079
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002080 // List of encountered weak collections (JSWeakMap and JSWeakSet) during
2081 // marking. It is initialized during marking, destroyed after marking and
2082 // contains Smi(0) while marking is not active.
2083 Object* encountered_weak_collections_;
2084
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002085 Object* encountered_weak_cells_;
2086
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002087 Object* encountered_transition_arrays_;
2088
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002089 List<GCCallbackPair> gc_epilogue_callbacks_;
2090 List<GCCallbackPair> gc_prologue_callbacks_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002091
2092 // Total RegExp code ever generated
2093 double total_regexp_code_generated_;
2094
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002095 int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002096
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002097 GCTracer* tracer_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002098
2099 int high_survival_rate_period_length_;
2100 intptr_t promoted_objects_size_;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002101 double promotion_ratio_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002102 double promotion_rate_;
2103 intptr_t semi_space_copied_object_size_;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002104 intptr_t previous_semi_space_copied_object_size_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002105 double semi_space_copied_rate_;
2106 int nodes_died_in_new_space_;
2107 int nodes_copied_in_new_space_;
2108 int nodes_promoted_;
2109
2110 // This is the pretenuring trigger for allocation sites that are in maybe
  // tenure state. When we switch to the maximum new space size, we deoptimize
2112 // the code that belongs to the allocation site and derive the lifetime
2113 // of the allocation site.
2114 unsigned int maximum_size_scavenges_;
2115
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002116 // Maximum GC pause.
2117 double max_gc_pause_;
2118
2119 // Total time spent in GC.
2120 double total_gc_time_ms_;
2121
2122 // Maximum size of objects alive after GC.
2123 intptr_t max_alive_after_gc_;
2124
2125 // Minimal interval between two subsequent collections.
2126 double min_in_mutator_;
2127
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002128 // Cumulative GC time spent in marking.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002129 double marking_time_;
2130
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002131 // Cumulative GC time spent in sweeping.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002132 double sweeping_time_;
2133
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002134 // Last time an idle notification happened.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002135 double last_idle_notification_time_;
2136
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002137 // Last time a garbage collection happened.
2138 double last_gc_time_;
2139
2140 Scavenger* scavenge_collector_;
2141
2142 MarkCompactCollector* mark_compact_collector_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002143
Ben Murdochc5610432016-08-08 18:44:38 +01002144 MemoryAllocator* memory_allocator_;
2145
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002146 StoreBuffer store_buffer_;
2147
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002148 IncrementalMarking* incremental_marking_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002149
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002150 GCIdleTimeHandler* gc_idle_time_handler_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002151
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002152 MemoryReducer* memory_reducer_;
2153
2154 ObjectStats* object_stats_;
2155
2156 ScavengeJob* scavenge_job_;
2157
Ben Murdoch097c5b22016-05-18 11:27:45 +01002158 AllocationObserver* idle_scavenge_observer_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002159
  // These two counters are monotonically increasing and never reset.
2161 size_t full_codegen_bytes_generated_;
2162 size_t crankshaft_codegen_bytes_generated_;
2163
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002164 // This counter is increased before each GC and never reset.
2165 // To account for the bytes allocated since the last GC, use the
2166 // NewSpaceAllocationCounter() function.
2167 size_t new_space_allocation_counter_;
2168
2169 // This counter is increased before each GC and never reset. To
2170 // account for the bytes allocated since the last GC, use the
2171 // OldGenerationAllocationCounter() function.
2172 size_t old_generation_allocation_counter_;
2173
2174 // The size of objects in old generation after the last MarkCompact GC.
2175 size_t old_generation_size_at_last_gc_;
2176
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002177 // If the --deopt_every_n_garbage_collections flag is set to a positive value,
2178 // this variable holds the number of garbage collections since the last
2179 // deoptimization triggered by garbage collection.
2180 int gcs_since_last_deopt_;
2181
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002182 // The feedback storage is used to store allocation sites (keys) and how often
2183 // they have been visited (values) by finding a memento behind an object. The
  // storage is only alive temporarily during a GC. The invariant is that all
2185 // pointers in this map are already fixed, i.e., they do not point to
2186 // forwarding pointers.
2187 HashMap* global_pretenuring_feedback_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002188
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002189 char trace_ring_buffer_[kTraceRingBufferSize];
2190 // If it's not full then the data is from 0 to ring_buffer_end_. If it's
2191 // full then the data is from ring_buffer_end_ to the end of the buffer and
2192 // from 0 to ring_buffer_end_.
2193 bool ring_buffer_full_;
2194 size_t ring_buffer_end_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002195
2196 // Shared state read by the scavenge collector and set by ScavengeObject.
2197 PromotionQueue promotion_queue_;
2198
2199 // Flag is set when the heap has been configured. The heap can be repeatedly
2200 // configured through the API until it is set up.
2201 bool configured_;
2202
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002203 // Currently set GC flags that are respected by all GC components.
2204 int current_gc_flags_;
2205
2206 // Currently set GC callback flags that are used to pass information between
2207 // the embedder and V8's GC.
2208 GCCallbackFlags current_gc_callback_flags_;
2209
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002210 ExternalStringTable external_string_table_;
2211
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002212 base::Mutex relocation_mutex_;
2213
2214 int gc_callbacks_depth_;
2215
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002216 bool deserialization_complete_;
2217
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002218 StrongRootsList* strong_roots_list_;
2219
2220 ArrayBufferTracker* array_buffer_tracker_;
2221
2222 // The depth of HeapIterator nestings.
2223 int heap_iterator_depth_;
2224
2225 // Used for testing purposes.
2226 bool force_oom_;
2227
2228 // Classes in "heap" can be friends.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002229 friend class AlwaysAllocateScope;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002230 friend class GCCallbacksScope;
2231 friend class GCTracer;
2232 friend class HeapIterator;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002233 friend class IdleScavengeObserver;
2234 friend class IncrementalMarking;
Ben Murdochda12d292016-06-02 14:46:10 +01002235 friend class IteratePromotedObjectsVisitor;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002236 friend class MarkCompactCollector;
2237 friend class MarkCompactMarkingVisitor;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002238 friend class NewSpace;
2239 friend class ObjectStatsVisitor;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002240 friend class Page;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002241 friend class Scavenger;
2242 friend class StoreBuffer;
Ben Murdochc5610432016-08-08 18:44:38 +01002243 friend class TestMemoryAllocatorScope;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002244
2245 // The allocator interface.
2246 friend class Factory;
2247
2248 // The Isolate constructs us.
2249 friend class Isolate;
2250
2251 // Used in cctest.
2252 friend class HeapTester;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002253
2254 DISALLOW_COPY_AND_ASSIGN(Heap);
2255};
2256
2257
2258class HeapStats {
2259 public:
2260 static const int kStartMarker = 0xDECADE00;
2261 static const int kEndMarker = 0xDECADE01;
2262
2263 int* start_marker; // 0
2264 int* new_space_size; // 1
2265 int* new_space_capacity; // 2
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002266 intptr_t* old_space_size; // 3
2267 intptr_t* old_space_capacity; // 4
2268 intptr_t* code_space_size; // 5
2269 intptr_t* code_space_capacity; // 6
2270 intptr_t* map_space_size; // 7
2271 intptr_t* map_space_capacity; // 8
2272 intptr_t* lo_space_size; // 9
2273 int* global_handle_count; // 10
2274 int* weak_global_handle_count; // 11
2275 int* pending_global_handle_count; // 12
2276 int* near_death_global_handle_count; // 13
2277 int* free_global_handle_count; // 14
2278 intptr_t* memory_allocator_size; // 15
2279 intptr_t* memory_allocator_capacity; // 16
2280 int* objects_per_type; // 17
2281 int* size_per_type; // 18
2282 int* os_error; // 19
2283 char* last_few_messages; // 20
2284 char* js_stacktrace; // 21
2285 int* end_marker; // 22
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002286};
2287
2288
2289class AlwaysAllocateScope {
2290 public:
2291 explicit inline AlwaysAllocateScope(Isolate* isolate);
2292 inline ~AlwaysAllocateScope();
2293
2294 private:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002295 Heap* heap_;
2296};
2297
2298
2299// Visitor class to verify interior pointers in spaces that do not contain
2300// or care about intergenerational references. All heap object pointers have to
2301// point into the heap to a location that has a map pointer at its first word.
2302// Caveat: Heap::Contains is an approximation because it can return true for
2303// objects in a heap space but above the allocation pointer.
2304class VerifyPointersVisitor : public ObjectVisitor {
2305 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002306 inline void VisitPointers(Object** start, Object** end) override;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002307};
2308
2309
2310// Verify that all objects are Smis.
2311class VerifySmisVisitor : public ObjectVisitor {
2312 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002313 inline void VisitPointers(Object** start, Object** end) override;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002314};
2315
2316
2317// Space iterator for iterating over all spaces of the heap. Returns each space
2318// in turn, and null when it is done.
2319class AllSpaces BASE_EMBEDDED {
2320 public:
2321 explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
2322 Space* next();
2323
2324 private:
2325 Heap* heap_;
2326 int counter_;
2327};
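// A typical iteration pattern, as a sketch (the heap pointer and the total
// accumulator are assumed):
//
//   AllSpaces spaces(heap);
//   while (Space* space = spaces.next()) {
//     total_committed += space->CommittedMemory();
//   }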
2328
2329
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002330// Space iterator for iterating over all old spaces of the heap: Old space
2331// and code space. Returns each space in turn, and null when it is done.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002332class OldSpaces BASE_EMBEDDED {
2333 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002334 explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002335 OldSpace* next();
2336
2337 private:
2338 Heap* heap_;
2339 int counter_;
2340};
2341
2342
2343// Space iterator for iterating over all the paged spaces of the heap: Map
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002344// space, old space, code space and cell space. Returns
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002345// each space in turn, and null when it is done.
2346class PagedSpaces BASE_EMBEDDED {
2347 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002348 explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002349 PagedSpace* next();
2350
2351 private:
2352 Heap* heap_;
2353 int counter_;
2354};
2355
2356
2357// Space iterator for iterating over all spaces of the heap.
2358// For each space an object iterator is provided. The deallocation of the
2359// returned object iterators is handled by the space iterator.
2360class SpaceIterator : public Malloced {
2361 public:
2362 explicit SpaceIterator(Heap* heap);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002363 virtual ~SpaceIterator();
2364
2365 bool has_next();
2366 ObjectIterator* next();
2367
2368 private:
2369 ObjectIterator* CreateIterator();
2370
2371 Heap* heap_;
2372 int current_space_; // from enum AllocationSpace.
2373 ObjectIterator* iterator_; // object iterator for the current space.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002374};
2375
2376
2377// A HeapIterator provides iteration over the whole heap. It
2378// aggregates the specific iterators for the different spaces as
2379// these can only iterate over one space only.
2380//
2381// HeapIterator ensures there is no allocation during its lifetime
2382// (using an embedded DisallowHeapAllocation instance).
2383//
2384// HeapIterator can skip free list nodes (that is, de-allocated heap
// objects that still remain in the heap). As the implementation of free
2386// nodes filtering uses GC marks, it can't be used during MS/MC GC
2387// phases. Also, it is forbidden to interrupt iteration in this mode,
2388// as this will leave heap objects marked (and thus, unusable).
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002389class HeapIterator BASE_EMBEDDED {
2390 public:
2391 enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
2392
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002393 explicit HeapIterator(Heap* heap,
2394 HeapObjectsFiltering filtering = kNoFiltering);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002395 ~HeapIterator();
2396
2397 HeapObject* next();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002398
2399 private:
2400 struct MakeHeapIterableHelper {
2401 explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); }
2402 };
2403
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002404 HeapObject* NextObject();
2405
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002406 // The following two fields need to be declared in this order. Initialization
2407 // order guarantees that we first make the heap iterable (which may involve
2408 // allocations) and only then lock it down by not allowing further
2409 // allocations.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002410 MakeHeapIterableHelper make_heap_iterable_helper_;
2411 DisallowHeapAllocation no_heap_allocation_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002412
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002413 Heap* heap_;
2414 HeapObjectsFiltering filtering_;
2415 HeapObjectsFilter* filter_;
2416 // Space iterator for iterating all the spaces.
2417 SpaceIterator* space_iterator_;
2418 // Object iterator for the space currently being iterated.
2419 ObjectIterator* object_iterator_;
2420};
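// Sketch of walking every live object in the heap; allocation is disallowed
// for the lifetime of the iterator:
//
//   HeapIterator it(heap, HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // inspect obj here; do not allocate
//   }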
2421
2422
2423// Cache for mapping (map, property name) into field offset.
2424// Cleared at startup and prior to mark sweep collection.
2425class KeyedLookupCache {
2426 public:
2427 // Lookup field offset for (map, name). If absent, -1 is returned.
2428 int Lookup(Handle<Map> map, Handle<Name> name);
2429
2430 // Update an element in the cache.
2431 void Update(Handle<Map> map, Handle<Name> name, int field_offset);
2432
2433 // Clear the cache.
2434 void Clear();
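  //
  // Typical use, as a sketch (the slow-path helper is hypothetical):
  //
  //   int offset = cache->Lookup(map, name);
  //   if (offset == kNotFound) {
  //     offset = ComputeFieldOffsetSlow(map, name);
  //     cache->Update(map, name, offset);
  //   }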
2435
2436 static const int kLength = 256;
2437 static const int kCapacityMask = kLength - 1;
2438 static const int kMapHashShift = 5;
2439 static const int kHashMask = -4; // Zero the last two bits.
2440 static const int kEntriesPerBucket = 4;
2441 static const int kEntryLength = 2;
2442 static const int kMapIndex = 0;
2443 static const int kKeyIndex = 1;
2444 static const int kNotFound = -1;
2445
2446 // kEntriesPerBucket should be a power of 2.
2447 STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
2448 STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
2449
2450 private:
2451 KeyedLookupCache() {
2452 for (int i = 0; i < kLength; ++i) {
2453 keys_[i].map = NULL;
2454 keys_[i].name = NULL;
2455 field_offsets_[i] = kNotFound;
2456 }
2457 }
2458
2459 static inline int Hash(Handle<Map> map, Handle<Name> name);
2460
2461 // Get the address of the keys and field_offsets arrays. Used in
2462 // generated code to perform cache lookups.
2463 Address keys_address() { return reinterpret_cast<Address>(&keys_); }
2464
2465 Address field_offsets_address() {
2466 return reinterpret_cast<Address>(&field_offsets_);
2467 }
2468
2469 struct Key {
2470 Map* map;
2471 Name* name;
2472 };
2473
2474 Key keys_[kLength];
2475 int field_offsets_[kLength];
2476
2477 friend class ExternalReference;
2478 friend class Isolate;
2479 DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
2480};


// Cache for mapping (map, property name) into descriptor index.
// The cache contains both positive and negative results.
// A descriptor index equal to kAbsent means the property is absent.
// Cleared at startup and prior to any GC.
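//
// A rough usage sketch (illustrative only; the isolate accessor and the
// slow-path search below are assumptions, not part of this header):
//
//   DescriptorLookupCache* cache = isolate->descriptor_lookup_cache();
//   int number = cache->Lookup(map, name);
//   if (number == DescriptorLookupCache::kAbsent) {
//     number = /* search the map's instance descriptors for name */;
//     cache->Update(map, name, number);
//   }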
class DescriptorLookupCache {
 public:
  // Lookup descriptor index for (map, name).
  // If absent, kAbsent is returned.
  inline int Lookup(Map* source, Name* name);

  // Update an element in the cache.
  inline void Update(Map* source, Name* name, int result);

  // Clear the cache.
  void Clear();

  static const int kAbsent = -2;

 private:
  DescriptorLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].source = NULL;
      keys_[i].name = NULL;
      results_[i] = kAbsent;
    }
  }

  static inline int Hash(Object* source, Name* name);

  static const int kLength = 64;
  struct Key {
    Map* source;
    Name* name;
  };

  Key keys_[kLength];
  int results_[kLength];

  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
};


// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() {}

  // Returns the object that should be retained in place of |object|; in some
  // GC situations the object has been moved, so the (possibly new) address is
  // returned. Returning NULL means the object has no remaining references and
  // should not be retained. (A sketch of an implementation follows this
  // class.)
  virtual Object* RetainAs(Object* object) = 0;
};
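
// A minimal sketch of a retainer (illustrative only, not part of this
// header): keep every object alive and report its current address unchanged.
//
//   class KeepAllRetainer : public WeakObjectRetainer {
//    public:
//     Object* RetainAs(Object* object) override { return object; }
//   };
//
// Real retainers return NULL for objects that should be dropped, e.g. when
// the referenced object is no longer otherwise reachable.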


#ifdef DEBUG
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
// object to the search target object.
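//
// A debug-only usage sketch (illustrative only; search_target, the root slot
// and the VISIT_ALL mode are assumptions, not checked by this header):
//
//   PathTracer tracer(search_target, PathTracer::FIND_FIRST, VISIT_ALL);
//   tracer.TracePathFrom(&root_slot);
//   if (tracer.found()) {
//     // A path from root_slot to search_target was traced.
//   }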
class PathTracer : public ObjectVisitor {
 public:
  enum WhatToFind {
    FIND_ALL,   // Will find all matches.
    FIND_FIRST  // Will stop the search after first match.
  };

  // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
  static const int kMarkTag = 2;

  // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
  // after the first match. If FIND_ALL is specified, then tracing will be
  // done for all matches.
  PathTracer(Object* search_target, WhatToFind what_to_find,
             VisitMode visit_mode)
      : search_target_(search_target),
        found_target_(false),
        found_target_in_trace_(false),
        what_to_find_(what_to_find),
        visit_mode_(visit_mode),
        object_stack_(20),
        no_allocation() {}

  void VisitPointers(Object** start, Object** end) override;

  void Reset();
  void TracePathFrom(Object** root);

  bool found() const { return found_target_; }

  static Object* const kAnyGlobalObject;

 protected:
  class MarkVisitor;
  class UnmarkVisitor;

  void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
  void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
  virtual void ProcessResults();

  Object* search_target_;
  bool found_target_;
  bool found_target_in_trace_;
  WhatToFind what_to_find_;
  VisitMode visit_mode_;
  List<Object*> object_stack_;

  DisallowHeapAllocation no_allocation;  // i.e. no gc allowed.

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
#endif  // DEBUG

// -----------------------------------------------------------------------------
// Allows observation of allocations.
class AllocationObserver {
 public:
  explicit AllocationObserver(intptr_t step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {
    DCHECK(step_size >= kPointerSize);
  }
  virtual ~AllocationObserver() {}

  // Called each time the observed space performs an allocation step. This may
  // happen more frequently than once per step_size bytes (e.g. when there are
  // multiple observers, or when a page or space boundary is encountered).
  void AllocationStep(int bytes_allocated, Address soon_object, size_t size) {
    bytes_to_next_step_ -= bytes_allocated;
    if (bytes_to_next_step_ <= 0) {
      Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
           size);
      step_size_ = GetNextStepSize();
      bytes_to_next_step_ = step_size_;
    }
  }

 protected:
  intptr_t step_size() const { return step_size_; }
  intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }

  // Pure virtual method implemented by subclasses; it gets called when at
  // least step_size bytes have been allocated (a sketch of a subclass follows
  // this class). soon_object is the address just allocated (but not yet
  // initialized). size is the size of the object as requested (i.e. without
  // the alignment fillers). Some complexities to be aware of:
  // 1) soon_object will be nullptr in cases where we end up observing an
  //    allocation that happens to be a filler space (e.g. page boundaries).
  // 2) size is the requested size at the time of allocation. Right-trimming
  //    may change the object size dynamically.
  // 3) soon_object may actually be the first object in an allocation-folding
  //    group. In such a case size is the size of the group rather than the
  //    first object.
  virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;

  // Subclasses can override this method to make the step size dynamic.
  virtual intptr_t GetNextStepSize() { return step_size_; }

  intptr_t step_size_;
  intptr_t bytes_to_next_step_;

 private:
  friend class LargeObjectSpace;
  friend class NewSpace;
  friend class PagedSpace;
  DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};
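
// A minimal sketch of an observer (illustrative only, not part of this
// header): count how often roughly one megabyte of allocation has been
// observed. The MB constant is assumed to come from src/globals.h.
//
//   class SampleCountingObserver : public AllocationObserver {
//    public:
//     SampleCountingObserver() : AllocationObserver(MB), samples_(0) {}
//
//    protected:
//     void Step(int bytes_allocated, Address soon_object,
//               size_t size) override {
//       // Invoked once at least MB bytes have been allocated since the last
//       // step; soon_object may be nullptr (see the comment on Step above).
//       samples_++;
//     }
//
//    private:
//     int samples_;
//   };
//
// An observer is registered with a space so that the space invokes its
// AllocationStep() on every allocation step it performs.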

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_H_