// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_H_
#define V8_HEAP_HEAP_H_

#include <cmath>
#include <map>

// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "include/v8.h"
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
// TODO(mstarzinger): Two more includes to kill!
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
#include "src/list.h"

namespace v8 {
namespace internal {

using v8::MemoryPressureLevel;

// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V)                                                    \
  /* Cluster the most popular ones in a few cache lines here at the top. */    \
  /* The first 32 entries are most often used in the startup snapshot and */   \
  /* can use a shorter representation in the serialization format. */          \
  V(Map, free_space_map, FreeSpaceMap)                                         \
  V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
  V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
  V(Oddball, uninitialized_value, UninitializedValue)                          \
  V(Oddball, undefined_value, UndefinedValue)                                  \
  V(Oddball, the_hole_value, TheHoleValue)                                     \
  V(Oddball, null_value, NullValue)                                            \
  V(Oddball, true_value, TrueValue)                                            \
  V(Oddball, false_value, FalseValue)                                          \
  V(String, empty_string, empty_string)                                        \
  V(Map, meta_map, MetaMap)                                                    \
  V(Map, byte_array_map, ByteArrayMap)                                         \
  V(Map, fixed_array_map, FixedArrayMap)                                       \
  V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
  V(Map, hash_table_map, HashTableMap)                                         \
  V(Map, symbol_map, SymbolMap)                                                \
  V(Map, one_byte_string_map, OneByteStringMap)                                \
  V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap)       \
  V(Map, scope_info_map, ScopeInfoMap)                                         \
  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
  V(Map, code_map, CodeMap)                                                    \
  V(Map, function_context_map, FunctionContextMap)                             \
  V(Map, cell_map, CellMap)                                                    \
  V(Map, weak_cell_map, WeakCellMap)                                           \
  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
  V(Map, foreign_map, ForeignMap)                                              \
  V(Map, heap_number_map, HeapNumberMap)                                       \
  V(Map, transition_array_map, TransitionArrayMap)                             \
  V(FixedArray, empty_literals_array, EmptyLiteralsArray)                      \
  V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
  V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap)           \
  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
  /* Entries beyond the first 32 */                                            \
  /* The roots above this line should be boring from a GC point of view. */    \
  /* This means they are never in new space and never on a page that is */     \
  /* being compacted. */                                                       \
  /* Oddballs */                                                               \
  V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel)      \
  V(Oddball, arguments_marker, ArgumentsMarker)                                \
  V(Oddball, exception, Exception)                                             \
  V(Oddball, termination_exception, TerminationException)                      \
  V(Oddball, optimized_out, OptimizedOut)                                      \
  V(Oddball, stale_register, StaleRegister)                                    \
  /* Context maps */                                                           \
  V(Map, native_context_map, NativeContextMap)                                 \
  V(Map, module_context_map, ModuleContextMap)                                 \
  V(Map, script_context_map, ScriptContextMap)                                 \
  V(Map, block_context_map, BlockContextMap)                                   \
  V(Map, catch_context_map, CatchContextMap)                                   \
  V(Map, with_context_map, WithContextMap)                                     \
  V(Map, debug_evaluate_context_map, DebugEvaluateContextMap)                  \
  V(Map, script_context_table_map, ScriptContextTableMap)                      \
  /* Maps */                                                                   \
  V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
  V(Map, mutable_heap_number_map, MutableHeapNumberMap)                        \
  V(Map, ordered_hash_table_map, OrderedHashTableMap)                          \
  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)            \
  V(Map, message_object_map, JSMessageObjectMap)                               \
  V(Map, neander_map, NeanderMap)                                              \
  V(Map, external_map, ExternalMap)                                            \
  V(Map, bytecode_array_map, BytecodeArrayMap)                                 \
  /* String maps */                                                            \
  V(Map, native_source_string_map, NativeSourceStringMap)                      \
  V(Map, string_map, StringMap)                                                \
  V(Map, cons_one_byte_string_map, ConsOneByteStringMap)                       \
  V(Map, cons_string_map, ConsStringMap)                                       \
  V(Map, sliced_string_map, SlicedStringMap)                                   \
  V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap)                   \
  V(Map, external_string_map, ExternalStringMap)                               \
  V(Map, external_string_with_one_byte_data_map,                               \
    ExternalStringWithOneByteDataMap)                                          \
  V(Map, external_one_byte_string_map, ExternalOneByteStringMap)               \
  V(Map, short_external_string_map, ShortExternalStringMap)                    \
  V(Map, short_external_string_with_one_byte_data_map,                         \
    ShortExternalStringWithOneByteDataMap)                                     \
  V(Map, internalized_string_map, InternalizedStringMap)                       \
  V(Map, external_internalized_string_map, ExternalInternalizedStringMap)      \
  V(Map, external_internalized_string_with_one_byte_data_map,                  \
    ExternalInternalizedStringWithOneByteDataMap)                              \
  V(Map, external_one_byte_internalized_string_map,                            \
    ExternalOneByteInternalizedStringMap)                                      \
  V(Map, short_external_internalized_string_map,                               \
    ShortExternalInternalizedStringMap)                                        \
  V(Map, short_external_internalized_string_with_one_byte_data_map,            \
    ShortExternalInternalizedStringWithOneByteDataMap)                         \
  V(Map, short_external_one_byte_internalized_string_map,                      \
    ShortExternalOneByteInternalizedStringMap)                                 \
  V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap)    \
  /* Array element maps */                                                     \
  V(Map, fixed_uint8_array_map, FixedUint8ArrayMap)                            \
  V(Map, fixed_int8_array_map, FixedInt8ArrayMap)                              \
  V(Map, fixed_uint16_array_map, FixedUint16ArrayMap)                          \
  V(Map, fixed_int16_array_map, FixedInt16ArrayMap)                            \
  V(Map, fixed_uint32_array_map, FixedUint32ArrayMap)                          \
  V(Map, fixed_int32_array_map, FixedInt32ArrayMap)                            \
  V(Map, fixed_float32_array_map, FixedFloat32ArrayMap)                        \
  V(Map, fixed_float64_array_map, FixedFloat64ArrayMap)                        \
  V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap)             \
  V(Map, float32x4_map, Float32x4Map)                                          \
  V(Map, int32x4_map, Int32x4Map)                                              \
  V(Map, uint32x4_map, Uint32x4Map)                                            \
  V(Map, bool32x4_map, Bool32x4Map)                                            \
  V(Map, int16x8_map, Int16x8Map)                                              \
  V(Map, uint16x8_map, Uint16x8Map)                                            \
  V(Map, bool16x8_map, Bool16x8Map)                                            \
  V(Map, int8x16_map, Int8x16Map)                                              \
  V(Map, uint8x16_map, Uint8x16Map)                                            \
  V(Map, bool8x16_map, Bool8x16Map)                                            \
  /* Canonical empty values */                                                 \
  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
  V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array)        \
  V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array)          \
  V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array)      \
  V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array)        \
  V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array)      \
  V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array)        \
  V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array)    \
  V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array)    \
  V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array,                      \
    EmptyFixedUint8ClampedArray)                                               \
  V(Script, empty_script, EmptyScript)                                         \
  V(Cell, undefined_cell, UndefinedCell)                                       \
  V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
  V(SeededNumberDictionary, empty_slow_element_dictionary,                     \
    EmptySlowElementDictionary)                                                \
  V(TypeFeedbackVector, dummy_vector, DummyVector)                             \
  V(PropertyCell, empty_property_cell, EmptyPropertyCell)                      \
  V(WeakCell, empty_weak_cell, EmptyWeakCell)                                  \
  /* Protectors */                                                             \
  V(PropertyCell, array_protector, ArrayProtector)                             \
  V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector)         \
  V(PropertyCell, has_instance_protector, HasInstanceProtector)                \
  V(Cell, species_protector, SpeciesProtector)                                 \
  /* Special numbers */                                                        \
  V(HeapNumber, nan_value, NanValue)                                           \
  V(HeapNumber, infinity_value, InfinityValue)                                 \
  V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
  V(HeapNumber, minus_infinity_value, MinusInfinityValue)                      \
  /* Caches */                                                                 \
  V(FixedArray, number_string_cache, NumberStringCache)                        \
  V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
  V(FixedArray, string_split_cache, StringSplitCache)                          \
  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache)                    \
  V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
  V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
  V(Object, instanceof_cache_answer, InstanceofCacheAnswer)                    \
  V(FixedArray, natives_source_cache, NativesSourceCache)                      \
  V(FixedArray, experimental_natives_source_cache,                             \
    ExperimentalNativesSourceCache)                                            \
  V(FixedArray, extra_natives_source_cache, ExtraNativesSourceCache)           \
  V(FixedArray, experimental_extra_natives_source_cache,                       \
    ExperimentalExtraNativesSourceCache)                                       \
  /* Lists and dictionaries */                                                 \
  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames)          \
  V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary)    \
  V(Object, symbol_registry, SymbolRegistry)                                   \
  V(Object, script_list, ScriptList)                                           \
  V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
  V(FixedArray, materialized_objects, MaterializedObjects)                     \
  V(FixedArray, microtask_queue, MicrotaskQueue)                               \
  V(FixedArray, detached_contexts, DetachedContexts)                           \
  V(ArrayList, retained_maps, RetainedMaps)                                    \
  V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable)           \
  V(Object, weak_stack_trace_list, WeakStackTraceList)                         \
  V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos)       \
  V(FixedArray, serialized_templates, SerializedTemplates)                     \
  /* Configured values */                                                      \
  V(JSObject, message_listeners, MessageListeners)                             \
  V(Code, js_entry_code, JsEntryCode)                                          \
  V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
  /* Oddball maps */                                                           \
  V(Map, undefined_map, UndefinedMap)                                          \
  V(Map, the_hole_map, TheHoleMap)                                             \
  V(Map, null_map, NullMap)                                                    \
  V(Map, boolean_map, BooleanMap)                                              \
  V(Map, uninitialized_map, UninitializedMap)                                  \
  V(Map, arguments_marker_map, ArgumentsMarkerMap)                             \
  V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap)   \
  V(Map, exception_map, ExceptionMap)                                          \
  V(Map, termination_exception_map, TerminationExceptionMap)                   \
  V(Map, optimized_out_map, OptimizedOutMap)                                   \
  V(Map, stale_register_map, StaleRegisterMap)

// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V)                                                       \
  V(Smi, stack_limit, StackLimit)                                              \
  V(Smi, real_stack_limit, RealStackLimit)                                     \
  V(Smi, last_script_id, LastScriptId)                                         \
  V(Smi, hash_seed, HashSeed)                                                  \
  /* To distinguish the function templates, so that we can find them in the */ \
  /* function cache of the native context. */                                  \
  V(Smi, next_template_serial_number, NextTemplateSerialNumber)                \
  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)     \
  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)           \
  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)                 \
  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)                 \
  V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)

#define ROOT_LIST(V)  \
  STRONG_ROOT_LIST(V) \
  SMI_ROOT_LIST(V)    \
  V(StringTable, string_table, StringTable)

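// Illustrative expansion of the X-macro pattern (this mirrors the
// ROOT_ACCESSOR use further down in this header): each V(type, name,
// camel_name) entry is stamped out once per invocation.
//
//   #define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
//   ROOT_LIST(ROOT_ACCESSOR)  // declares free_space_map(), hash_seed(), ...
//   #undef ROOT_ACCESSOR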

// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is not complete.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
  V(ByteArrayMap)                       \
  V(BytecodeArrayMap)                   \
  V(FreeSpaceMap)                       \
  V(OnePointerFillerMap)                \
  V(TwoPointerFillerMap)                \
  V(UndefinedValue)                     \
  V(TheHoleValue)                       \
  V(NullValue)                          \
  V(TrueValue)                          \
  V(FalseValue)                         \
  V(UninitializedValue)                 \
  V(CellMap)                            \
  V(GlobalPropertyCellMap)              \
  V(SharedFunctionInfoMap)              \
  V(MetaMap)                            \
  V(HeapNumberMap)                      \
  V(MutableHeapNumberMap)               \
  V(Float32x4Map)                       \
  V(Int32x4Map)                         \
  V(Uint32x4Map)                        \
  V(Bool32x4Map)                        \
  V(Int16x8Map)                         \
  V(Uint16x8Map)                        \
  V(Bool16x8Map)                        \
  V(Int8x16Map)                         \
  V(Uint8x16Map)                        \
  V(Bool8x16Map)                        \
  V(NativeContextMap)                   \
  V(FixedArrayMap)                      \
  V(CodeMap)                            \
  V(ScopeInfoMap)                       \
  V(FixedCOWArrayMap)                   \
  V(FixedDoubleArrayMap)                \
  V(WeakCellMap)                        \
  V(TransitionArrayMap)                 \
  V(NoInterceptorResultSentinel)        \
  V(HashTableMap)                       \
  V(OrderedHashTableMap)                \
  V(EmptyFixedArray)                    \
  V(EmptyByteArray)                     \
  V(EmptyDescriptorArray)               \
  V(ArgumentsMarker)                    \
  V(SymbolMap)                          \
  V(SloppyArgumentsElementsMap)         \
  V(FunctionContextMap)                 \
  V(CatchContextMap)                    \
  V(WithContextMap)                     \
  V(BlockContextMap)                    \
  V(ModuleContextMap)                   \
  V(ScriptContextMap)                   \
  V(UndefinedMap)                       \
  V(TheHoleMap)                         \
  V(NullMap)                            \
  V(BooleanMap)                         \
  V(UninitializedMap)                   \
  V(ArgumentsMarkerMap)                 \
  V(JSMessageObjectMap)                 \
  V(ForeignMap)                         \
  V(NeanderMap)                         \
  V(NanValue)                           \
  V(InfinityValue)                      \
  V(MinusZeroValue)                     \
  V(MinusInfinityValue)                 \
  V(EmptyWeakCell)                      \
  V(empty_string)                       \
  PRIVATE_SYMBOL_LIST(V)

// Forward declarations.
class AllocationObserver;
class ArrayBufferTracker;
class GCIdleTimeAction;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class Isolate;
class MemoryReducer;
class ObjectStats;
class Scavenger;
class ScavengeJob;
class WeakObjectRetainer;

enum PromotionMode { PROMOTE_MARKED, DEFAULT_PROMOTION };

typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
// The last page in to-space is used for the promotion queue. On conflict
// during scavenge, the promotion queue is allocated externally and all
// entries are copied to the external queue.
class PromotionQueue {
 public:
  explicit PromotionQueue(Heap* heap)
      : front_(NULL),
        rear_(NULL),
        limit_(NULL),
        emergency_stack_(0),
        heap_(heap) {}

  void Initialize();

  void Destroy() {
    DCHECK(is_empty());
    delete emergency_stack_;
    emergency_stack_ = NULL;
  }

  Page* GetHeadPage() {
    return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
  }

  void SetNewLimit(Address limit) {
    // If we are already using an emergency stack, we can ignore it.
    if (emergency_stack_) return;

    // If the limit is not on the same page, we can ignore it.
    if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;

    limit_ = reinterpret_cast<struct Entry*>(limit);

    if (limit_ <= rear_) {
      return;
    }

    RelocateQueueHead();
  }

  bool IsBelowPromotionQueue(Address to_space_top) {
    // If an emergency stack is used, the to-space address cannot interfere
    // with the promotion queue.
    if (emergency_stack_) return true;

    // If the given to-space top pointer and the head of the promotion queue
    // are not on the same page, then the to-space objects are below the
    // promotion queue.
    if (GetHeadPage() != Page::FromAddress(to_space_top)) {
      return true;
    }
    // If the to-space top pointer is smaller than or equal to the promotion
    // queue head, then the to-space objects are below the promotion queue.
    return reinterpret_cast<struct Entry*>(to_space_top) <= rear_;
  }

  bool is_empty() {
    return (front_ == rear_) &&
           (emergency_stack_ == NULL || emergency_stack_->length() == 0);
  }

  inline void insert(HeapObject* target, int32_t size, bool was_marked_black);

  void remove(HeapObject** target, int32_t* size, bool* was_marked_black) {
    DCHECK(!is_empty());
    if (front_ == rear_) {
      Entry e = emergency_stack_->RemoveLast();
      *target = e.obj_;
      *size = e.size_;
      *was_marked_black = e.was_marked_black_;
      return;
    }

    struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
    *target = entry->obj_;
    *size = entry->size_;
    *was_marked_black = entry->was_marked_black_;

    // Assert no underflow.
    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
                                reinterpret_cast<Address>(front_));
  }

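  // Illustrative drain loop (hypothetical caller; the real scavenger in
  // src/heap is more involved). The recorded size lets the caller scan the
  // object without re-reading its map:
  //
  //   while (!queue->is_empty()) {
  //     HeapObject* target;
  //     int32_t size;
  //     bool was_marked_black;
  //     queue->remove(&target, &size, &was_marked_black);
  //     // ... visit |size| bytes of |target| ...
  //   }
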
 private:
  struct Entry {
    Entry(HeapObject* obj, int32_t size, bool was_marked_black)
        : obj_(obj), size_(size), was_marked_black_(was_marked_black) {}

    HeapObject* obj_;
    int32_t size_ : 31;
    bool was_marked_black_ : 1;
  };

  void RelocateQueueHead();

  // The front of the queue is higher in the memory page chain than the rear.
  struct Entry* front_;
  struct Entry* rear_;
  struct Entry* limit_;

  List<Entry>* emergency_stack_;

  Heap* heap_;

  DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};


enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};

enum class ClearRecordedSlots { kYes, kNo };

class Heap {
 public:
  // Declare all the root indices. This defines the root list order.
  enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
    INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
#undef STRING_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
    PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

#define SYMBOL_INDEX_DECLARATION(name, description) k##name##RootIndex,
    PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
    WELL_KNOWN_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

// Utility type maps.
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
    STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
    kStringTableRootIndex,

#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
    kRootListLength,
    kStrongRootListLength = kStringTableRootIndex,
    kSmiRootsStart = kStringTableRootIndex + 1
  };
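  // Layout implied by the enum above: strong roots occupy indices
  // [0, kStrongRootListLength), the string table sits at
  // kStringTableRootIndex, and the Smi roots follow from kSmiRootsStart.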

  enum FindMementoMode { kForRuntime, kForGC };

  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };

  // Indicates whether live bytes adjustment is triggered
  // - from within GC code before sweeping started (SEQUENTIAL_TO_SWEEPER),
  // - or from within GC code while sweeping is in progress
  //   (CONCURRENT_TO_SWEEPER),
  // - or from mutator code (CONCURRENT_TO_SWEEPER).
  enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };

  enum UpdateAllocationSiteMode { kGlobal, kCached };

  // Taking this lock prevents the GC from entering a phase that relocates
  // object references.
  class RelocationLock {
   public:
    explicit RelocationLock(Heap* heap) : heap_(heap) {
      heap_->relocation_mutex_.Lock();
    }

    ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }

   private:
    Heap* heap_;
  };
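  // Illustrative RAII use (hypothetical caller): hold the lock for the scope
  // in which raw object addresses must stay valid.
  //
  //   {
  //     Heap::RelocationLock lock(heap);
  //     // Object addresses are stable here; relocating GC phases are blocked.
  //   }  // Lock released on scope exit.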

  // Support for partial snapshots. After calling this we have a linear
  // space to write objects in each space.
  struct Chunk {
    uint32_t size;
    Address start;
    Address end;
  };
  typedef List<Chunk> Reservation;

  static const intptr_t kMinimumOldGenerationAllocationLimit =
      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);

  static const int kInitalOldGenerationLimitFactor = 2;

#if V8_OS_ANDROID
  // Don't apply the pointer multiplier on Android, since it has no swap space
  // and should instead adapt its heap size based on available physical memory.
  static const int kPointerMultiplier = 1;
#else
  static const int kPointerMultiplier = i::kPointerSize / 4;
#endif
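  // For example, on a 64-bit target i::kPointerSize is 8, so
  // kPointerMultiplier is 2 and kMaxSemiSpaceSizeHighMemoryDevice below works
  // out to 16 MB; on a 32-bit target the constants keep their base values.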

  // The new space size has to be a power of 2. Sizes are in MB.
  static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
  static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;

  // The old space size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeMediumMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
  static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;

  // The executable size has to be a multiple of Page::kPageSize.
  // Sizes are in MB.
  static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
  static const int kMaxExecutableSizeMediumMemoryDevice =
      192 * kPointerMultiplier;
  static const int kMaxExecutableSizeHighMemoryDevice =
      256 * kPointerMultiplier;
  static const int kMaxExecutableSizeHugeMemoryDevice =
      256 * kPointerMultiplier;

  static const int kTraceRingBufferSize = 512;
  static const int kStacktraceBufferSize = 512;

  static const double kMinHeapGrowingFactor;
  static const double kMaxHeapGrowingFactor;
  static const double kMaxHeapGrowingFactorMemoryConstrained;
  static const double kMaxHeapGrowingFactorIdle;
  static const double kTargetMutatorUtilization;

  static const int kNoGCFlags = 0;
  static const int kReduceMemoryFootprintMask = 1;
  static const int kAbortIncrementalMarkingMask = 2;
  static const int kFinalizeIncrementalMarkingMask = 4;

  // Making the heap iterable requires us to abort incremental marking.
  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

  // The roots that have an index less than this are always in old space.
  static const int kOldSpaceRoots = 0x20;

  // The minimum size of a HeapObject on the heap.
  static const int kMinObjectSizeInWords = 2;

  STATIC_ASSERT(kUndefinedValueRootIndex ==
                Internals::kUndefinedValueRootIndex);
  STATIC_ASSERT(kTheHoleValueRootIndex == Internals::kTheHoleValueRootIndex);
  STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
  STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
  STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
  STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);

  // Calculates the maximum amount of filler that could be required by the
  // given alignment.
  static int GetMaximumFillToAlign(AllocationAlignment alignment);
  // Calculates the actual amount of filler required for a given address at the
  // given alignment.
  static int GetFillToAlign(Address address, AllocationAlignment alignment);
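  // E.g. for kDoubleAligned allocation on a 32-bit target, an address that is
  // 4 mod 8 needs kPointerSize (4) bytes of one-pointer filler, while an
  // already 8-byte-aligned address needs none; GetMaximumFillToAlign() gives
  // the worst case, which callers use when sizing allocations.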

  template <typename T>
  static inline bool IsOneByte(T t, int chars);

  static void FatalProcessOutOfMemory(const char* location,
                                      bool is_heap_oom = false);

  static bool RootIsImmortalImmovable(int root_index);

  // Checks whether the space is valid.
  static bool IsValidAllocationSpace(AllocationSpace space);

  // Generated code can embed direct references to non-writable roots if
  // they are in new space.
  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);

  // Zapping is needed for heap verification and is always done in debug
  // builds.
  static inline bool ShouldZapGarbage() {
#ifdef DEBUG
    return true;
#else
#ifdef VERIFY_HEAP
    return FLAG_verify_heap;
#else
    return false;
#endif
#endif
  }

  static double HeapGrowingFactor(double gc_speed, double mutator_speed);

  // Copy block of memory from src to dst. Size of block should be aligned
  // by pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  // Determines a static visitor id based on the given {map} that can then be
  // stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
  static int GetStaticVisitorIdForMap(Map* map);

  // We cannot avoid stale handles to left-trimmed objects, but can only make
  // sure all handles still needed are updated. Filter out a stale pointer
  // and clear the slot to allow post processing of handles (needed because
  // the sweeper might actually free the underlying page).
  inline bool PurgeLeftTrimmedObject(Object** object);

  // Notifies the heap that it is ok to start marking or other activities
  // that should not happen during deserialization.
  void NotifyDeserializationComplete();

  intptr_t old_generation_allocation_limit() const {
    return old_generation_allocation_limit_;
  }

  bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }

  Address* NewSpaceAllocationTopAddress() {
    return new_space_.allocation_top_address();
  }
  Address* NewSpaceAllocationLimitAddress() {
    return new_space_.allocation_limit_address();
  }

  Address* OldSpaceAllocationTopAddress() {
    return old_space_->allocation_top_address();
  }
  Address* OldSpaceAllocationLimitAddress() {
    return old_space_->allocation_limit_address();
  }

  bool CanExpandOldGeneration(int size) {
    if (force_oom_) return false;
    return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
  }

  // Clear the Instanceof cache (used when a prototype changes).
  inline void ClearInstanceofCache();

  // FreeSpace objects have a null map after deserialization. Update the map.
  void RepairFreeListsAfterDeserialization();

  // Move len elements within a given array from src_index to dst_index.
  void MoveElements(FixedArray* array, int dst_index, int src_index, int len);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when introducing gaps within pages. If slots could have been recorded in
  // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
  // pass ClearRecordedSlots::kNo.
  void CreateFillerObjectAt(Address addr, int size, ClearRecordedSlots mode);

  bool CanMoveObjectStart(HeapObject* object);

  // Maintain consistency of live bytes during incremental marking.
  void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode);

  // Trim the given array from the left. Note that this relocates the object
  // start and hence is only valid if there is only a single reference to it.
  FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Trim the given array from the right.
  template <Heap::InvocationMode mode>
  void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);

  // Converts the given boolean condition to a JavaScript boolean value.
  inline Oddball* ToBoolean(bool condition);

  // Check whether the heap is currently iterable.
  bool IsHeapIterable();

  // Notify the heap that a context has been disposed.
  int NotifyContextDisposed(bool dependant_context);

  void set_native_contexts_list(Object* object) {
    native_contexts_list_ = object;
  }
  Object* native_contexts_list() const { return native_contexts_list_; }

  void set_allocation_sites_list(Object* object) {
    allocation_sites_list_ = object;
  }
  Object* allocation_sites_list() { return allocation_sites_list_; }

  // Used in CreateAllocationSiteStub and the (de)serializer.
  Object** allocation_sites_list_address() { return &allocation_sites_list_; }

  void set_encountered_weak_collections(Object* weak_collection) {
    encountered_weak_collections_ = weak_collection;
  }
  Object* encountered_weak_collections() const {
    return encountered_weak_collections_;
  }

  void set_encountered_weak_cells(Object* weak_cell) {
    encountered_weak_cells_ = weak_cell;
  }
  Object* encountered_weak_cells() const { return encountered_weak_cells_; }

  void set_encountered_transition_arrays(Object* transition_array) {
    encountered_transition_arrays_ = transition_array;
  }
  Object* encountered_transition_arrays() const {
    return encountered_transition_arrays_;
  }

  // Number of mark-sweeps.
  int ms_count() const { return ms_count_; }

  // Checks whether the given object is allowed to be migrated from its
  // current space into the given destination space. Used for debugging.
  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);

  void CheckHandleCount();

  // Number of "runtime allocations" done so far.
  uint32_t allocations_count() { return allocations_count_; }

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  inline HeapState gc_state() { return gc_state_; }

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

  // If an object has an AllocationMemento trailing it, return it; otherwise
  // return NULL.
  template <FindMementoMode mode>
  inline AllocationMemento* FindAllocationMemento(HeapObject* object);

  // Returns false if not able to reserve.
  bool ReserveSpace(Reservation* reservations);

  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);

  bool UsingEmbedderHeapTracer();

  void TracePossibleWrapper(JSObject* js_object);

  void RegisterExternallyReferencedObject(Object** object);

  //
  // Support for the API.
  //

  void CreateApiObjects();

  // Implements the corresponding V8 API function.
  bool IdleNotification(double deadline_in_seconds);
  bool IdleNotification(int idle_time_in_ms);

  void MemoryPressureNotification(MemoryPressureLevel level,
                                  bool is_isolate_locked);
  void CheckMemoryPressure();

  double MonotonicallyIncreasingTimeInMs();

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  // Check new space expansion criteria and expand semispaces if it was hit.
  void CheckNewSpaceExpansionCriteria();

  inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) {
    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;

    intptr_t adjusted_allocation_limit = limit - new_space_.Capacity();

    if (PromotedTotalSize() >= adjusted_allocation_limit) return true;

    if (HighMemoryPressure()) return true;

    return false;
  }

  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);

  // An object should be promoted if the object has survived a
  // scavenge operation.
  template <PromotionMode promotion_mode>
  inline bool ShouldBePromoted(Address old_address, int object_size);

  inline PromotionMode CurrentPromotionMode();

  void ClearNormalizedMapCaches();

  void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);

  inline bool OldGenerationAllocationLimitReached();

  // Completely clear the Instanceof cache (to stop it keeping objects alive
  // around a GC).
  inline void CompletelyClearInstanceofCache();

  inline uint32_t HashSeed();

  inline int NextScriptId();

  inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
  inline void SetConstructStubDeoptPCOffset(int pc_offset);
  inline void SetGetterStubDeoptPCOffset(int pc_offset);
  inline void SetSetterStubDeoptPCOffset(int pc_offset);
  inline void SetInterpreterEntryReturnPCOffset(int pc_offset);
  inline int GetNextTemplateSerialNumber();

  inline void SetSerializedTemplates(FixedArray* templates);

  // For post mortem debugging.
  void RememberUnmappedPage(Address page, bool compacted);

  // Global inline caching age: it is incremented on some GCs after context
  // disposal. We use it to flush inline caches.
  int global_ic_age() { return global_ic_age_; }

  void AgeInlineCaches() {
    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
  }

  int64_t external_memory() { return external_memory_; }
  void update_external_memory(int64_t delta) { external_memory_ += delta; }

  void update_external_memory_concurrently_freed(intptr_t freed) {
    external_memory_concurrently_freed_.Increment(freed);
  }

  void account_external_memory_concurrently_freed() {
    external_memory_ -= external_memory_concurrently_freed_.Value();
    external_memory_concurrently_freed_.SetValue(0);
  }

  void DeoptMarkedAllocationSites();

  bool DeoptMaybeTenuredAllocationSites() {
    return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
  }

  void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
                                     Handle<DependentCode> dep);

  DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);

  void CompactWeakFixedArrays();

  void AddRetainedMap(Handle<Map> map);

  // This event is triggered after successful allocation of a new object made
  // by runtime. Allocations of target space for object evacuation do not
  // trigger the event. In order to track ALL allocations one must turn off
  // FLAG_inline_new and FLAG_use_allocation_folding.
  inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);

  // This event is triggered after an object is moved to a new place.
  inline void OnMoveEvent(HeapObject* target, HeapObject* source,
                          int size_in_bytes);

  bool deserialization_complete() const { return deserialization_complete_; }

  bool HasLowAllocationRate();
  bool HasHighFragmentation();
  bool HasHighFragmentation(intptr_t used, intptr_t committed);

  void SetOptimizeForLatency() { optimize_for_memory_usage_ = false; }
  void SetOptimizeForMemoryUsage();
  bool ShouldOptimizeForMemoryUsage() {
    return optimize_for_memory_usage_ || HighMemoryPressure();
  }
  bool HighMemoryPressure() {
    return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
  }

  // ===========================================================================
  // Initialization. ===========================================================
  // ===========================================================================

  // Configure heap size in MB before setup. Return false if the heap has been
  // set up already.
  bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
                     int max_executable_size, size_t code_range_size);
  bool ConfigureHeapDefault();

  // Prepares the heap, setting up memory areas that are needed in the isolate
  // without actually creating any objects.
  bool SetUp();

  // Bootstraps the object heap with the core set of objects required to run.
  // Returns whether it succeeded.
  bool CreateHeapObjects();

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp();

  // ===========================================================================
  // Getters for spaces. =======================================================
  // ===========================================================================

  Address NewSpaceTop() { return new_space_.top(); }

  NewSpace* new_space() { return &new_space_; }
  OldSpace* old_space() { return old_space_; }
  OldSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  LargeObjectSpace* lo_space() { return lo_space_; }

  PagedSpace* paged_space(int idx) {
    switch (idx) {
      case OLD_SPACE:
        return old_space();
      case MAP_SPACE:
        return map_space();
      case CODE_SPACE:
        return code_space();
      case NEW_SPACE:
      case LO_SPACE:
        UNREACHABLE();
    }
    return NULL;
  }

  Space* space(int idx) {
    switch (idx) {
      case NEW_SPACE:
        return new_space();
      case LO_SPACE:
        return lo_space();
      default:
        return paged_space(idx);
    }
  }

  // Returns the name of the space.
  const char* GetSpaceName(int idx);
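  // Illustrative iteration over all spaces (hypothetical caller), relying on
  // space() handling NEW_SPACE and LO_SPACE specially:
  //
  //   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
  //     PrintF("%s: %" V8_PTR_PREFIX "d bytes committed\n",
  //            heap->GetSpaceName(i), heap->space(i)->CommittedMemory());
  //   }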

  // ===========================================================================
  // Getters to other components. ==============================================
  // ===========================================================================

  GCTracer* tracer() { return tracer_; }

  MemoryAllocator* memory_allocator() { return memory_allocator_; }

  PromotionQueue* promotion_queue() { return &promotion_queue_; }

  inline Isolate* isolate();

  MarkCompactCollector* mark_compact_collector() {
    return mark_compact_collector_;
  }

  // ===========================================================================
  // Root set access. ==========================================================
  // ===========================================================================

  // Heap root getters.
#define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  // Utility type maps.
#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define STRING_ACCESSOR(name, str) inline String* name();
  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR

#define SYMBOL_ACCESSOR(name) inline Symbol* name();
  PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

#define SYMBOL_ACCESSOR(name, description) inline Symbol* name();
  PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
  WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

  Object* root(RootListIndex index) { return roots_[index]; }
  Handle<Object> root_handle(RootListIndex index) {
    return Handle<Object>(&roots_[index]);
  }

  // Generated code can embed this address to get access to the roots.
  Object** roots_array_start() { return roots_; }

  // Sets the stub_cache_ (only used when expanding the dictionary).
  void SetRootCodeStubs(UnseededNumberDictionary* value) {
    roots_[kCodeStubsRootIndex] = value;
  }

  void SetRootMaterializedObjects(FixedArray* objects) {
    roots_[kMaterializedObjectsRootIndex] = objects;
  }

  void SetRootScriptList(Object* value) {
    roots_[kScriptListRootIndex] = value;
  }

  void SetRootStringTable(StringTable* value) {
    roots_[kStringTableRootIndex] = value;
  }

  void SetRootNoScriptSharedFunctionInfos(Object* value) {
    roots_[kNoScriptSharedFunctionInfosRootIndex] = value;
  }

  // Set the stack limit in the roots_ array. Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  void SetStackLimits();

  // The stack limit is thread-dependent. To be able to reproduce the same
  // snapshot blob, we need to reset it before serializing.
  void ClearStackLimits();

  // Generated code can treat direct references to this root as constant.
  bool RootCanBeTreatedAsConstant(RootListIndex root_index);

  Map* MapForFixedTypedArray(ExternalArrayType array_type);
  RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);

  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);

  void RegisterStrongRoots(Object** start, Object** end);
  void UnregisterStrongRoots(Object** start);

  // ===========================================================================
  // Inline allocation. ========================================================
  // ===========================================================================

  // Indicates whether inline bump-pointer allocation has been disabled.
  bool inline_allocation_disabled() { return inline_allocation_disabled_; }

  // Switch whether inline bump-pointer allocation should be used.
  void EnableInlineAllocation();
  void DisableInlineAllocation();

  // ===========================================================================
  // Methods triggering GCs. ===================================================
  // ===========================================================================

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  inline bool CollectGarbage(
      AllocationSpace space, const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
  // non-zero, then the slower precise sweeper is used, which leaves the heap
  // in a state where we can iterate over the heap visiting all objects.
  void CollectAllGarbage(
      int flags = kFinalizeIncrementalMarkingMask, const char* gc_reason = NULL,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

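  // Typical usage (illustrative): a caller that must visit every live object
  // first forces a full GC that aborts incremental marking:
  //
  //   heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "heap iteration");
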
  // Last hope GC: tries to squeeze out as much memory as possible.
  void CollectAllAvailableGarbage(const char* gc_reason = NULL);

  // Reports an external memory pressure event; either performs a major GC or
  // completes incremental marking in order to free external resources.
  void ReportExternalMemoryPressure(const char* gc_reason = NULL);

  // Invoked when GC was requested via the stack guard.
  void HandleGCRequest();

  // ===========================================================================
  // Iterators. ================================================================
  // ===========================================================================

  // Iterates over all roots in the heap.
  void IterateRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all strong roots in the heap.
  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over entries in the smi roots list. Only interesting to the
  // serializer/deserializer, since GC does not care about smis.
  void IterateSmiRoots(ObjectVisitor* v);
  // Iterates over all the other roots in the heap.
  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);

  // Iterates pointers of promoted objects.
  void IteratePromotedObject(HeapObject* target, int size,
                             bool was_marked_black,
                             ObjectSlotCallback callback);

  void IteratePromotedObjectPointers(HeapObject* object, Address start,
                                     Address end, bool record_slots,
                                     ObjectSlotCallback callback);

  // ===========================================================================
  // Store buffer API. =========================================================
  // ===========================================================================

  // Write barrier support for object[offset] = o;
  inline void RecordWrite(Object* object, int offset, Object* o);
  inline void RecordFixedArrayElements(FixedArray* array, int offset,
                                       int length);
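  // Illustrative barrier use (set_foo and kFooOffset are hypothetical): after
  // a raw field write, the mutator reports the slot so old-to-new pointers
  // reach the store buffer:
  //
  //   object->set_foo(value);                        // raw field write
  //   heap->RecordWrite(object, kFooOffset, value);  // write barrier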

  Address* store_buffer_top_address() { return store_buffer()->top_address(); }

  void ClearRecordedSlot(HeapObject* object, Object** slot);
  void ClearRecordedSlotRange(Address start, Address end);

  // ===========================================================================
  // Incremental marking API. ==================================================
  // ===========================================================================

  // Start incremental marking and ensure that the idle time handler can
  // perform incremental steps.
  void StartIdleIncrementalMarking();

  // Starts incremental marking assuming incremental marking is currently
  // stopped.
  void StartIncrementalMarking(int gc_flags = kNoGCFlags,
                               const GCCallbackFlags gc_callback_flags =
                                   GCCallbackFlags::kNoGCCallbackFlags,
                               const char* reason = nullptr);

  void FinalizeIncrementalMarkingIfComplete(const char* comment);

  bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms);

  void RegisterReservationsForBlackAllocation(Reservation* reservations);

  IncrementalMarking* incremental_marking() { return incremental_marking_; }

  // ===========================================================================
  // External string table API. ================================================
  // ===========================================================================

  // Registers an external string.
  inline void RegisterExternalString(String* string);

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  inline void FinalizeExternalString(String* string);

  // ===========================================================================
  // Methods checking/returning the space of a given object/address. ==========
  // ===========================================================================

  // Returns whether the object resides in new space.
  inline bool InNewSpace(Object* object);
  inline bool InFromSpace(Object* object);
  inline bool InToSpace(Object* object);
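  // Informally, InNewSpace(o) holds exactly when InFromSpace(o) or
  // InToSpace(o) holds, since new space is made up of the two semispaces.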
1173
1174 // Returns whether the object resides in old space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001175 inline bool InOldSpace(Object* object);
1176
1177 // Checks whether an address/object in the heap (including auxiliary
1178 // area and unused area).
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001179 bool Contains(HeapObject* value);
1180
1181 // Checks whether an address/object in a space.
1182 // Currently used by tests, serialization and heap verification only.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001183 bool InSpace(HeapObject* value, AllocationSpace space);
1184
Ben Murdoch097c5b22016-05-18 11:27:45 +01001185 // Slow methods that can be used for verification as they can also be used
1186 // with off-heap Addresses.
1187 bool ContainsSlow(Address addr);
1188 bool InSpaceSlow(Address addr, AllocationSpace space);
1189 inline bool InNewSpaceSlow(Address address);
1190 inline bool InOldSpaceSlow(Address address);
1191
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001192 // ===========================================================================
1193 // Object statistics tracking. ===============================================
1194 // ===========================================================================
1195
1196 // Returns the number of buckets used by object statistics tracking during a
1197 // major GC. Note that the following methods fail gracefully when the bounds
1198 // are exceeded though.
1199 size_t NumberOfTrackedHeapObjectTypes();
1200
1201 // Returns object statistics about count and size at the last major GC.
1202 // Objects are being grouped into buckets that roughly resemble existing
1203 // instance types.
1204 size_t ObjectCountAtLastGC(size_t index);
1205 size_t ObjectSizeAtLastGC(size_t index);
1206
1207 // Retrieves names of buckets used by object statistics tracking.
1208 bool GetObjectTypeName(size_t index, const char** object_type,
1209 const char** object_sub_type);
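
  // Sketch of how the three accessors above combine (illustrative only;
  // assumes PrintF from src/utils.h is in scope):
  //
  //   for (size_t i = 0; i < heap->NumberOfTrackedHeapObjectTypes(); ++i) {
  //     const char* type;
  //     const char* sub_type;
  //     if (heap->GetObjectTypeName(i, &type, &sub_type)) {
  //       PrintF("%s/%s: count=%zu size=%zu\n", type, sub_type,
  //              heap->ObjectCountAtLastGC(i), heap->ObjectSizeAtLastGC(i));
  //     }
  //   }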

  // ===========================================================================
  // Code statistics. ==========================================================
  // ===========================================================================

  // Collect code (Code and BytecodeArray objects) statistics.
  void CollectCodeStatistics();

  // ===========================================================================
  // GC statistics. ============================================================
  // ===========================================================================

  // Returns the maximum amount of memory reserved for the heap.
  intptr_t MaxReserved() {
    return 2 * max_semi_space_size_ + max_old_generation_size_;
  }
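
  // For example (illustrative numbers, not actual defaults): with an 8 MB
  // maximum semi-space and a 700 MB maximum old generation, MaxReserved()
  // yields 2 * 8 + 700 = 716 MB, since new space consists of two equally
  // sized semi-spaces.
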
  int MaxSemiSpaceSize() { return max_semi_space_size_; }
  int InitialSemiSpaceSize() { return initial_semispace_size_; }
  intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
  intptr_t MaxExecutableSize() { return max_executable_size_; }

  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
  // more spaces are needed until it reaches the limit.
  intptr_t Capacity();

  // Returns the capacity of the old generation.
  intptr_t OldGenerationCapacity();

  // Returns the amount of memory currently committed for the heap.
  intptr_t CommittedMemory();

  // Returns the amount of memory currently committed for the old space.
  intptr_t CommittedOldGenerationMemory();

  // Returns the amount of executable memory currently committed for the heap.
  intptr_t CommittedMemoryExecutable();

  // Returns the amount of physical memory currently committed for the heap.
  size_t CommittedPhysicalMemory();

  // Returns the maximum amount of memory ever committed for the heap.
  intptr_t MaximumCommittedMemory() { return maximum_committed_; }

  // Updates the maximum committed memory for the heap. Should be called
  // whenever a space grows.
  void UpdateMaximumCommitted();

  // Returns the available bytes in space w/o growing.
  // Heap doesn't guarantee that it can allocate an object that requires
  // all available bytes. Check MaxHeapObjectSize() instead.
  intptr_t Available();

  // Returns the size of all objects residing in the heap.
  intptr_t SizeOfObjects();

  void UpdateSurvivalStatistics(int start_new_space_size);

  inline void IncrementPromotedObjectsSize(intptr_t object_size) {
    DCHECK_GE(object_size, 0);
    promoted_objects_size_ += object_size;
  }
  inline intptr_t promoted_objects_size() { return promoted_objects_size_; }

  inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) {
    DCHECK_GE(object_size, 0);
    semi_space_copied_object_size_ += object_size;
  }
  inline intptr_t semi_space_copied_object_size() {
    return semi_space_copied_object_size_;
  }

  inline intptr_t SurvivedNewSpaceObjectSize() {
    return promoted_objects_size_ + semi_space_copied_object_size_;
  }

  inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }

  inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }

  inline void IncrementNodesPromoted() { nodes_promoted_++; }

  inline void IncrementYoungSurvivorsCounter(intptr_t survived) {
    DCHECK_GE(survived, 0);
    survived_last_scavenge_ = survived;
    survived_since_last_expansion_ += survived;
  }

  inline intptr_t PromotedTotalSize() {
    int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
    if (total > std::numeric_limits<intptr_t>::max()) {
      // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations.
      return std::numeric_limits<intptr_t>::max();
    }
    if (total < 0) return 0;
    return static_cast<intptr_t>(total);
  }

  void UpdateNewSpaceAllocationCounter() {
    new_space_allocation_counter_ = NewSpaceAllocationCounter();
  }

  size_t NewSpaceAllocationCounter() {
    return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
  }
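
  // Sketch of measuring new-space allocation between two program points
  // (illustrative only; assumes no concurrent allocation):
  //
  //   size_t before = heap->NewSpaceAllocationCounter();
  //   // ... allocate ...
  //   size_t allocated_bytes = heap->NewSpaceAllocationCounter() - before;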

  // This should be used only for testing.
  void set_new_space_allocation_counter(size_t new_value) {
    new_space_allocation_counter_ = new_value;
  }

  void UpdateOldGenerationAllocationCounter() {
    old_generation_allocation_counter_ = OldGenerationAllocationCounter();
  }

  size_t OldGenerationAllocationCounter() {
    return old_generation_allocation_counter_ + PromotedSinceLastGC();
  }

  // This should be used only for testing.
  void set_old_generation_allocation_counter(size_t new_value) {
    old_generation_allocation_counter_ = new_value;
  }

  size_t PromotedSinceLastGC() {
    return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_;
  }

  int gc_count() const { return gc_count_; }

  // Returns the size of objects residing in non-new spaces.
  intptr_t PromotedSpaceSizeOfObjects();

  double total_regexp_code_generated() { return total_regexp_code_generated_; }
  void IncreaseTotalRegexpCodeGenerated(int size) {
    total_regexp_code_generated_ += size;
  }

  void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
    if (is_crankshafted) {
      crankshaft_codegen_bytes_generated_ += size;
    } else {
      full_codegen_bytes_generated_ += size;
    }
  }

  // ===========================================================================
  // Prologue/epilogue callback methods. =======================================
  // ===========================================================================

  void AddGCPrologueCallback(v8::Isolate::GCCallback callback,
                             GCType gc_type_filter, bool pass_isolate = true);
  void RemoveGCPrologueCallback(v8::Isolate::GCCallback callback);

  void AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
                             GCType gc_type_filter, bool pass_isolate = true);
  void RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback);

  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
  void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
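
  // Registration sketch (illustrative only; OnMarkSweepStart is a
  // hypothetical function matching v8::Isolate::GCCallback):
  //
  //   heap->AddGCPrologueCallback(OnMarkSweepStart, kGCTypeMarkSweepCompact);
  //   // ... later ...
  //   heap->RemoveGCPrologueCallback(OnMarkSweepStart);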

  // ===========================================================================
  // Allocation methods. =======================================================
  // ===========================================================================

  // Creates a filler object and returns a heap object immediately after it.
  MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
                                                int filler_size);

  // Creates a filler object if needed for alignment and returns a heap object
  // immediately after it. If any space is left after the returned object,
  // another filler object is created so the over-allocated memory is iterable.
  MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object,
                                              int object_size,
                                              int allocation_size,
                                              AllocationAlignment alignment);
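
  // For example (illustrative): reserving allocation_size = object_size +
  // kPointerSize for a kDoubleAligned object lets AlignWithFiller() spend the
  // extra word either in front of the object (if the raw address was
  // misaligned) or behind it (if it was not), keeping the memory iterable in
  // both cases.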

  // ===========================================================================
  // ArrayBuffer tracking. =====================================================
  // ===========================================================================

  void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
  void UnregisterArrayBuffer(JSArrayBuffer* buffer);

  // ===========================================================================
  // Allocation site tracking. =================================================
  // ===========================================================================

  // Updates the AllocationSite of a given {object}. If the global pretenuring
  // storage is passed as {pretenuring_feedback} the memento found count on
  // the corresponding allocation site is immediately updated and an entry
  // in the hash map is created. Otherwise the entry (including the count
  // value) is cached on the local pretenuring feedback.
  template <UpdateAllocationSiteMode mode>
  inline void UpdateAllocationSite(HeapObject* object,
                                   base::HashMap* pretenuring_feedback);

  // Removes an entry from the global pretenuring storage.
  inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);

  // Merges local pretenuring feedback into the global one. Note that this
  // method needs to be called after evacuation, as allocation sites may be
  // evacuated and this method resolves forward pointers accordingly.
  void MergeAllocationSitePretenuringFeedback(
      const base::HashMap& local_pretenuring_feedback);
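
  // Sketch of the intended flow (illustrative only; the constructor arguments
  // shown are an assumption about base::HashMap, not part of this contract):
  //
  //   base::HashMap local_feedback(base::HashMap::PointersMatch,
  //                                kInitialFeedbackCapacity);
  //   // per object: UpdateAllocationSite<mode>(object, &local_feedback);
  //   heap->MergeAllocationSitePretenuringFeedback(local_feedback);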

// =============================================================================

#ifdef VERIFY_HEAP
  // Verify the heap is in its normal state before or after a GC.
  void Verify();
#endif

#ifdef DEBUG
  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }

  void TracePathToObjectFrom(Object* target, Object* root);
  void TracePathToObject(Object* target);
  void TracePathToGlobal();

  void Print();
  void PrintHandles();

  // Report heap statistics.
  void ReportHeapStatistics(const char* title);
  void ReportCodeStatistics(const char* title);
#endif

 private:
  class PretenuringScope;

  // External strings table is a place where all external strings are
  // registered. We need to keep track of such strings to properly
  // finalize them.
  class ExternalStringTable {
   public:
    // Registers an external string.
    inline void AddString(String* string);

    inline void Iterate(ObjectVisitor* v);

    // Restores internal invariant and gets rid of collected strings.
    // Must be called after each Iterate() that modified the strings.
    void CleanUp();

    // Destroys all allocated memory.
    void TearDown();

   private:
    explicit ExternalStringTable(Heap* heap) : heap_(heap) {}

    inline void Verify();

    inline void AddOldString(String* string);

    // Notifies the table that only a prefix of the new list is valid.
    inline void ShrinkNewStrings(int position);

    // To speed up scavenge collections, new space strings are kept
    // separate from old space strings.
    List<Object*> new_space_strings_;
    List<Object*> old_space_strings_;

    Heap* heap_;

    friend class Heap;

    DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
  };

  struct StrongRootsList;

  struct StringTypeTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  struct ConstantStringTable {
    const char* contents;
    RootListIndex index;
  };

  struct StructTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  struct GCCallbackPair {
    GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type,
                   bool pass_isolate)
        : callback(callback), gc_type(gc_type), pass_isolate(pass_isolate) {}

    bool operator==(const GCCallbackPair& other) const {
      return other.callback == callback;
    }

    v8::Isolate::GCCallback callback;
    GCType gc_type;
    bool pass_isolate;
  };

  typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
                                                        Object** pointer);

  static const int kInitialStringTableSize = 2048;
  static const int kInitialEvalCacheSize = 64;
  static const int kInitialNumberStringCacheSize = 256;

  static const int kRememberedUnmappedPages = 128;

  static const StringTypeTable string_type_table[];
  static const ConstantStringTable constant_string_table[];
  static const StructTable struct_table[];

  static const int kYoungSurvivalRateHighThreshold = 90;
  static const int kYoungSurvivalRateAllowedDeviation = 15;
  static const int kOldSurvivalRateLowThreshold = 10;

  static const int kMaxMarkCompactsInIdleRound = 7;
  static const int kIdleScavengeThreshold = 5;

  static const int kInitialFeedbackCapacity = 256;

  Heap();

  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
      Heap* heap, Object** pointer);

  // Selects the proper allocation space based on the pretenuring decision.
  static AllocationSpace SelectSpace(PretenureFlag pretenure) {
    return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
  }

#define ROOT_ACCESSOR(type, name, camel_name) \
  inline void set_##name(type* value);
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  StoreBuffer* store_buffer() { return &store_buffer_; }

  void set_current_gc_flags(int flags) {
    current_gc_flags_ = flags;
    DCHECK(!ShouldFinalizeIncrementalMarking() ||
           !ShouldAbortIncrementalMarking());
  }

  inline bool ShouldReduceMemory() const {
    return current_gc_flags_ & kReduceMemoryFootprintMask;
  }

  inline bool ShouldAbortIncrementalMarking() const {
    return current_gc_flags_ & kAbortIncrementalMarkingMask;
  }

  inline bool ShouldFinalizeIncrementalMarking() const {
    return current_gc_flags_ & kFinalizeIncrementalMarkingMask;
  }

  void PreprocessStackTraces();

  // Checks whether a global GC is necessary.
  GarbageCollector SelectGarbageCollector(AllocationSpace space,
                                          const char** reason);

  // Make sure there is a filler value behind the top of the new space
  // so that the GC does not confuse some uninitialized/stale memory
  // with the allocation memento of the object at the top.
  void EnsureFillerObjectAtTop();

  // Ensure that we have swept all spaces in such a way that we can iterate
  // over all objects. May cause a GC.
  void MakeHeapIterable();

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  bool CollectGarbage(
      GarbageCollector collector, const char* gc_reason,
      const char* collector_reason,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  // Performs garbage collection.
  // Returns whether there is a chance another major GC could
  // collect more garbage.
  bool PerformGarbageCollection(
      GarbageCollector collector,
      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

  inline void UpdateOldSpaceLimits();

  // Initializes a JSObject based on its map.
  void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
                                 Map* map);

  // Initializes JSObject body starting at given offset.
  void InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset);

  void InitializeAllocationMemento(AllocationMemento* memento,
                                   AllocationSite* allocation_site);

  bool CreateInitialMaps();
  void CreateInitialObjects();

  // These two Create*EntryStub functions are here and forced to not be inlined
  // because of a gcc-4.4 bug that assigns wrong vtable entries.
  NO_INLINE(void CreateJSEntryStub());
  NO_INLINE(void CreateJSConstructEntryStub());

  void CreateFixedStubs();

  HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);

  // Commits from space if it is uncommitted.
  void EnsureFromSpaceIsCommitted();

  // Uncommit unused semi space.
  bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }

  // Fill in bogus values in from space.
  void ZapFromSpace();

  // Deopts all code that contains allocation instructions which are tenured
  // or not tenured, and clears the pretenuring allocation site statistics.
  void ResetAllAllocationSitesDependentCode(PretenureFlag flag);

  // Evaluates local pretenuring for the old space and calls
  // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
  // the old space.
  void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);

  // Record statistics before and after garbage collection.
  void ReportStatisticsBeforeGC();
  void ReportStatisticsAfterGC();

  // Creates and installs the full-sized number string cache.
  int FullSizeNumberStringCacheLength();
  // Flush the number to string cache.
  void FlushNumberStringCache();

  // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
  // Re-visit incremental marking heuristics.
  bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }

  void ConfigureInitialOldGenerationSize();

  bool HasLowYoungGenerationAllocationRate();
  bool HasLowOldGenerationAllocationRate();
  double YoungGenerationMutatorUtilization();
  double OldGenerationMutatorUtilization();

  void ReduceNewSpaceSize();

  bool TryFinalizeIdleIncrementalMarking(
      double idle_time_in_ms, size_t size_of_objects,
      size_t mark_compact_speed_in_bytes_per_ms);

  GCIdleTimeHeapState ComputeHeapState();

  bool PerformIdleTimeAction(GCIdleTimeAction action,
                             GCIdleTimeHeapState heap_state,
                             double deadline_in_ms);

  void IdleNotificationEpilogue(GCIdleTimeAction action,
                                GCIdleTimeHeapState heap_state, double start_ms,
                                double deadline_in_ms);

  inline void UpdateAllocationsHash(HeapObject* object);
  inline void UpdateAllocationsHash(uint32_t value);
  void PrintAlloctionsHash();

  void AddToRingBuffer(const char* string);
  void GetFromRingBuffer(char* buffer);

  void CompactRetainedMaps(ArrayList* retained_maps);

  void CollectGarbageOnMemoryPressure(const char* source);

  // Attempt to over-approximate the weak closure by marking object groups and
  // implicit references from global handles, but don't atomically complete
  // marking. If we continue to mark incrementally, we might have marked
  // objects that die later.
  void FinalizeIncrementalMarking(const char* gc_reason);

  // Returns the timer used for a given GC type.
  // - GCScavenger: young generation GC
  // - GCCompactor: full GC
  // - GCFinalizeMC: finalization of incremental full GC
  // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
  //   memory reduction
  HistogramTimer* GCTypeTimer(GarbageCollector collector);

  // ===========================================================================
  // Pretenuring. ==============================================================
  // ===========================================================================

  // Pretenuring decisions are made based on feedback collected during new
  // space evacuation. Note that between feedback collection and calling this
  // method, objects in old space must not move.
  void ProcessPretenuringFeedback();

  // ===========================================================================
  // Actual GC. ================================================================
  // ===========================================================================

  // Code that should be run before and after each GC. Includes some
  // reporting/verification activities when compiled with DEBUG set.
  void GarbageCollectionPrologue();
  void GarbageCollectionEpilogue();

  // Performs a major collection in the whole heap.
  void MarkCompact();

  // Code to be run before and after mark-compact.
  void MarkCompactPrologue();
  void MarkCompactEpilogue();

  // Performs a minor collection in the new generation.
  void Scavenge();

  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front,
                     PromotionMode promotion_mode);

  void UpdateNewSpaceReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void UpdateReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
  void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
  void ProcessNativeContexts(WeakObjectRetainer* retainer);
  void ProcessAllocationSites(WeakObjectRetainer* retainer);
  void ProcessWeakListRoots(WeakObjectRetainer* retainer);

  // ===========================================================================
  // GC statistics. ============================================================
  // ===========================================================================

  inline intptr_t OldGenerationSpaceAvailable() {
    return old_generation_allocation_limit_ - PromotedTotalSize();
  }
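
  // E.g. (illustrative numbers): with the limit at 128 MB and
  // PromotedTotalSize() at 100 MB, 28 MB of old-generation allocation remain
  // before the next full GC is triggered.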

  // Returns maximum GC pause.
  double get_max_gc_pause() { return max_gc_pause_; }

  // Returns maximum size of objects alive after GC.
  intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }

  // Returns minimal interval between two subsequent collections.
  double get_min_in_mutator() { return min_in_mutator_; }

  // Update GC statistics that are tracked on the Heap.
  void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
                                    double marking_time);

  bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }

  // ===========================================================================
  // Growing strategy. =========================================================
  // ===========================================================================

  // Decrease the allocation limit if the new limit based on the given
  // parameters is lower than the current limit.
  void DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
                                          double gc_speed,
                                          double mutator_speed);

  // Calculates the allocation limit based on a given growing factor and a
  // given old generation size.
  intptr_t CalculateOldGenerationAllocationLimit(double factor,
                                                 intptr_t old_gen_size);

  // Sets the allocation limit to trigger the next full garbage collection.
  void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed,
                                       double mutator_speed);

  // ===========================================================================
  // Idle notification. ========================================================
  // ===========================================================================

  bool RecentIdleNotificationHappened();
  void ScheduleIdleScavengeIfNeeded(int bytes_allocated);

  // ===========================================================================
  // HeapIterator helpers. =====================================================
  // ===========================================================================

  void heap_iterator_start() { heap_iterator_depth_++; }

  void heap_iterator_end() { heap_iterator_depth_--; }

  bool in_heap_iterator() { return heap_iterator_depth_ > 0; }

  // ===========================================================================
  // Allocation methods. =======================================================
  // ===========================================================================

  // Returns a deep copy of the JavaScript object.
  // Properties and elements are copied too.
  // Optionally takes an AllocationSite to be appended in an AllocationMemento.
  MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
                                                AllocationSite* site = NULL);

  // Allocates a JS Map in the heap.
  MUST_USE_RESULT AllocationResult
  AllocateMap(InstanceType instance_type, int instance_size,
              ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);

  // Allocates and initializes a new JavaScript object based on a
  // constructor.
  // If allocation_site is non-null, then a memento is emitted after the object
  // that points to the site.
  MUST_USE_RESULT AllocationResult AllocateJSObject(
      JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED,
      AllocationSite* allocation_site = NULL);

  // Allocates and initializes a new JavaScript object based on a map.
  // Passing an allocation site means that a memento will be created that
  // points to the site.
  MUST_USE_RESULT AllocationResult
  AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
                          AllocationSite* allocation_site = NULL);

  // Allocates a HeapNumber from value.
  MUST_USE_RESULT AllocationResult
  AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
                     PretenureFlag pretenure = NOT_TENURED);

// Allocates SIMD values from the given lane values.
#define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \
  AllocationResult Allocate##Type(lane_type lanes[lane_count], \
                                  PretenureFlag pretenure = NOT_TENURED);
  SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION)
#undef SIMD_ALLOCATE_DECLARATION

  // Allocates a byte array of the specified length.
  MUST_USE_RESULT AllocationResult
  AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);

  // Allocates a bytecode array with given contents.
  MUST_USE_RESULT AllocationResult
  AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size,
                        int parameter_count, FixedArray* constant_pool);

  MUST_USE_RESULT AllocationResult CopyCode(Code* code);

  MUST_USE_RESULT AllocationResult
  CopyBytecodeArray(BytecodeArray* bytecode_array);

  // Allocates a fixed array initialized with undefined values.
  MUST_USE_RESULT AllocationResult
  AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);

  // Allocate an uninitialized object. The memory is non-executable if the
  // hardware and OS allow. This is the single choke-point for allocations
  // performed by the runtime and should not be bypassed (to extend this to
  // inlined allocations, use the Heap::DisableInlineAllocation() support).
  MUST_USE_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationSpace space,
      AllocationAlignment alignment = kWordAligned);

  // Allocates a heap object based on the map.
  MUST_USE_RESULT AllocationResult
  Allocate(Map* map, AllocationSpace space,
           AllocationSite* allocation_site = NULL);

  // Allocates a partial map for bootstrapping.
  MUST_USE_RESULT AllocationResult
  AllocatePartialMap(InstanceType instance_type, int instance_size);

  // Allocate a block of memory in the given space (filled with a filler).
  // Used as a fall-back for generated code when the space is full.
  MUST_USE_RESULT AllocationResult
  AllocateFillerObject(int size, bool double_align, AllocationSpace space);

  // Allocate an uninitialized fixed array.
  MUST_USE_RESULT AllocationResult
  AllocateRawFixedArray(int length, PretenureFlag pretenure);

  // Allocate an uninitialized fixed double array.
  MUST_USE_RESULT AllocationResult
  AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);

  // Allocate an initialized fixed array with the given filler value.
  MUST_USE_RESULT AllocationResult
  AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure,
                               Object* filler);

  // Allocates and partially initializes a String. There are two String
  // encodings: one-byte and two-byte. These functions allocate a string of
  // the given length and set its map and length fields. The characters of
  // the string are uninitialized.
  MUST_USE_RESULT AllocationResult
  AllocateRawOneByteString(int length, PretenureFlag pretenure);
  MUST_USE_RESULT AllocationResult
  AllocateRawTwoByteString(int length, PretenureFlag pretenure);

  // Allocates an internalized string in old space based on the character
  // stream.
  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
      Vector<const char> str, int chars, uint32_t hash_field);

  MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
      Vector<const uint8_t> str, uint32_t hash_field);

  MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
      Vector<const uc16> str, uint32_t hash_field);

  template <bool is_one_byte, typename T>
  MUST_USE_RESULT AllocationResult
  AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);

  template <typename T>
  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
      T t, int chars, uint32_t hash_field);

  // Allocates an uninitialized fixed array. It must be filled by the caller.
  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);

  // Make a copy of src and return it.
  MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);

  // Make a copy of src, also grow the copy, and return the copy.
  MUST_USE_RESULT AllocationResult
  CopyFixedArrayAndGrow(FixedArray* src, int grow_by, PretenureFlag pretenure);

  // Make a copy of the first new_len elements of src and return the copy.
  MUST_USE_RESULT AllocationResult CopyFixedArrayUpTo(FixedArray* src,
                                                      int new_len,
                                                      PretenureFlag pretenure);

  // Make a copy of src, set the map, and return the copy.
  MUST_USE_RESULT AllocationResult
  CopyFixedArrayWithMap(FixedArray* src, Map* map);

  // Make a copy of src and return it.
  MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
      FixedDoubleArray* src);

  // Computes a single character string where the character has the given
  // code. A cache is used for one-byte (Latin1) codes.
  MUST_USE_RESULT AllocationResult
  LookupSingleCharacterStringFromCode(uint16_t code);

  // Allocate a symbol in old space.
  MUST_USE_RESULT AllocationResult AllocateSymbol();

  // Allocates an external array of the specified length and type.
  MUST_USE_RESULT AllocationResult AllocateFixedTypedArrayWithExternalPointer(
      int length, ExternalArrayType array_type, void* external_pointer,
      PretenureFlag pretenure);

  // Allocates a fixed typed array of the specified length and type.
  MUST_USE_RESULT AllocationResult
  AllocateFixedTypedArray(int length, ExternalArrayType array_type,
                          bool initialize, PretenureFlag pretenure);

  // Make a copy of src and return it.
  MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);

  // Make a copy of src, set the map, and return the copy.
  MUST_USE_RESULT AllocationResult
  CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);

  // Allocates a fixed double array with uninitialized values.
  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
      int length, PretenureFlag pretenure = NOT_TENURED);

  // Allocate empty fixed array.
  MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();

  // Allocate empty fixed typed array of given type.
  MUST_USE_RESULT AllocationResult
  AllocateEmptyFixedTypedArray(ExternalArrayType array_type);

  // Allocate a tenured simple cell.
  MUST_USE_RESULT AllocationResult AllocateCell(Object* value);

  // Allocate a tenured JS global property cell initialized with the hole.
  MUST_USE_RESULT AllocationResult AllocatePropertyCell();

  MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);

  MUST_USE_RESULT AllocationResult AllocateTransitionArray(int capacity);

  // Allocates a new utility object in the old generation.
  MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);

  // Allocates a new foreign object.
  MUST_USE_RESULT AllocationResult
  AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);

  MUST_USE_RESULT AllocationResult
  AllocateCode(int object_size, bool immovable);

  MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);

  MUST_USE_RESULT AllocationResult InternalizeString(String* str);

  // ===========================================================================

  void set_force_oom(bool value) { force_oom_ = value; }

  // The amount of external memory registered through the API.
  int64_t external_memory_;

  // The limit at which to trigger memory pressure from the API.
  int64_t external_memory_limit_;

  // Caches the amount of external memory registered at the last MC.
  int64_t external_memory_at_last_mark_compact_;

  // The amount of memory that has been freed concurrently.
  base::AtomicNumber<intptr_t> external_memory_concurrently_freed_;

  // This can be calculated directly from a pointer to the heap; however, it is
  // more expedient to get at the isolate directly from within Heap methods.
  Isolate* isolate_;

  Object* roots_[kRootListLength];

  size_t code_range_size_;
  int max_semi_space_size_;
  int initial_semispace_size_;
  intptr_t max_old_generation_size_;
  intptr_t initial_old_generation_size_;
  bool old_generation_size_configured_;
  intptr_t max_executable_size_;
  intptr_t maximum_committed_;

  // For keeping track of how much data has survived
  // scavenge since last new space expansion.
  intptr_t survived_since_last_expansion_;

  // ... and since the last scavenge.
  intptr_t survived_last_scavenge_;

  // This is not the depth of nested AlwaysAllocateScopes but rather a single
  // count, as scopes can be acquired from multiple tasks (read: threads).
  base::AtomicNumber<size_t> always_allocate_scope_count_;

  // Stores the memory pressure level that is set by MemoryPressureNotification
  // and reset by a mark-compact garbage collection.
  base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;

  // For keeping track of context disposals.
  int contexts_disposed_;

  // The length of the retained_maps array at the time of context disposal.
  // This separates maps in the retained_maps array that were created before
  // and after context disposal.
  int number_of_disposed_maps_;

  int global_ic_age_;

  NewSpace new_space_;
  OldSpace* old_space_;
  OldSpace* code_space_;
  MapSpace* map_space_;
  LargeObjectSpace* lo_space_;
  HeapState gc_state_;
  int gc_post_processing_depth_;
  Address new_space_top_after_last_gc_;

  // Returns the amount of external memory registered since the last global GC.
  int64_t PromotedExternalMemorySize();

  // How many "runtime allocations" happened.
  uint32_t allocations_count_;

  // Running hash over allocations performed.
  uint32_t raw_allocations_hash_;

  // How many mark-sweep collections happened.
  unsigned int ms_count_;

  // How many GCs happened.
  unsigned int gc_count_;

  // For post mortem debugging.
  int remembered_unmapped_pages_index_;
  Address remembered_unmapped_pages_[kRememberedUnmappedPages];

#ifdef DEBUG
  // If the --gc-interval flag is set to a positive value, this
  // variable holds the value indicating the number of allocations
  // that remain until the next failure and garbage collection.
  int allocation_timeout_;
#endif  // DEBUG

  // Limit that triggers a global GC on the next (normally caused) GC. This
  // is checked when we have already decided to do a GC to help determine
  // which collector to invoke, before expanding a paged space in the old
  // generation and on every allocation in large object space.
  intptr_t old_generation_allocation_limit_;

  // Indicates that an allocation has failed in the old generation since the
  // last GC.
  bool old_gen_exhausted_;

  // Indicates that memory usage is more important than latency.
  // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed.
  bool optimize_for_memory_usage_;

  // Indicates that inline bump-pointer allocation has been globally disabled
  // for all spaces. This is used to disable allocations in generated code.
  bool inline_allocation_disabled_;

  // Weak list heads, threaded through the objects.
  // List heads are initialized lazily and contain the undefined_value at start.
  Object* native_contexts_list_;
  Object* allocation_sites_list_;

  // List of encountered weak collections (JSWeakMap and JSWeakSet) during
  // marking. It is initialized during marking, destroyed after marking and
  // contains Smi(0) while marking is not active.
  Object* encountered_weak_collections_;

  Object* encountered_weak_cells_;

  Object* encountered_transition_arrays_;

  List<GCCallbackPair> gc_epilogue_callbacks_;
  List<GCCallbackPair> gc_prologue_callbacks_;

  // Total RegExp code ever generated.
  double total_regexp_code_generated_;

  int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];

  GCTracer* tracer_;

  int high_survival_rate_period_length_;
  intptr_t promoted_objects_size_;
  double promotion_ratio_;
  double promotion_rate_;
  intptr_t semi_space_copied_object_size_;
  intptr_t previous_semi_space_copied_object_size_;
  double semi_space_copied_rate_;
  int nodes_died_in_new_space_;
  int nodes_copied_in_new_space_;
  int nodes_promoted_;

  // This is the pretenuring trigger for allocation sites that are in maybe
  // tenure state. When we switch to the maximum new space size we deoptimize
  // the code that belongs to the allocation site and derive the lifetime
  // of the allocation site.
  unsigned int maximum_size_scavenges_;

  // Maximum GC pause.
  double max_gc_pause_;

  // Total time spent in GC.
  double total_gc_time_ms_;

  // Maximum size of objects alive after GC.
  intptr_t max_alive_after_gc_;

  // Minimal interval between two subsequent collections.
  double min_in_mutator_;

  // Cumulative GC time spent in marking.
  double marking_time_;

  // Cumulative GC time spent in sweeping.
  double sweeping_time_;

  // Last time an idle notification happened.
  double last_idle_notification_time_;

  // Last time a garbage collection happened.
  double last_gc_time_;

  Scavenger* scavenge_collector_;

  MarkCompactCollector* mark_compact_collector_;

  MemoryAllocator* memory_allocator_;

  StoreBuffer store_buffer_;

  IncrementalMarking* incremental_marking_;

  GCIdleTimeHandler* gc_idle_time_handler_;

  MemoryReducer* memory_reducer_;

  ObjectStats* object_stats_;

  ScavengeJob* scavenge_job_;

  AllocationObserver* idle_scavenge_observer_;

  // These two counters are monotonically increasing and never reset.
  size_t full_codegen_bytes_generated_;
  size_t crankshaft_codegen_bytes_generated_;

  // This counter is increased before each GC and never reset.
  // To account for the bytes allocated since the last GC, use the
  // NewSpaceAllocationCounter() function.
  size_t new_space_allocation_counter_;

  // This counter is increased before each GC and never reset. To
  // account for the bytes allocated since the last GC, use the
  // OldGenerationAllocationCounter() function.
  size_t old_generation_allocation_counter_;

  // The size of objects in old generation after the last MarkCompact GC.
  size_t old_generation_size_at_last_gc_;

  // If the --deopt_every_n_garbage_collections flag is set to a positive
  // value, this variable holds the number of garbage collections since the
  // last deoptimization triggered by garbage collection.
  int gcs_since_last_deopt_;

  // The feedback storage is used to store allocation sites (keys) and how
  // often they have been visited (values) by finding a memento behind an
  // object. The storage is only alive temporarily during a GC. The invariant
  // is that all pointers in this map are already fixed, i.e., they do not
  // point to forwarding pointers.
  base::HashMap* global_pretenuring_feedback_;

  char trace_ring_buffer_[kTraceRingBufferSize];
  // If it's not full then the data is from 0 to ring_buffer_end_. If it's
  // full then the data is from ring_buffer_end_ to the end of the buffer and
  // from 0 to ring_buffer_end_.
  bool ring_buffer_full_;
  size_t ring_buffer_end_;
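
  // E.g. (illustrative): with kTraceRingBufferSize == 4, contents "abcd",
  // ring_buffer_full_ == true and ring_buffer_end_ == 1, the logical trace
  // reads "bcd" followed by "a".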

  // Shared state read by the scavenge collector and set by ScavengeObject.
  PromotionQueue promotion_queue_;

  // Flag is set when the heap has been configured. The heap can be repeatedly
  // configured through the API until it is set up.
  bool configured_;

  // Currently set GC flags that are respected by all GC components.
  int current_gc_flags_;

  // Currently set GC callback flags that are used to pass information between
  // the embedder and V8's GC.
  GCCallbackFlags current_gc_callback_flags_;

  ExternalStringTable external_string_table_;

  base::Mutex relocation_mutex_;

  int gc_callbacks_depth_;

  bool deserialization_complete_;

  StrongRootsList* strong_roots_list_;

  // The depth of HeapIterator nestings.
  int heap_iterator_depth_;

  // Used for testing purposes.
  bool force_oom_;

  // Classes in "heap" can be friends.
  friend class AlwaysAllocateScope;
  friend class GCCallbacksScope;
  friend class GCTracer;
  friend class HeapIterator;
  friend class IdleScavengeObserver;
  friend class IncrementalMarking;
  friend class IteratePromotedObjectsVisitor;
  friend class MarkCompactCollector;
  friend class MarkCompactMarkingVisitor;
  friend class NewSpace;
  friend class ObjectStatsCollector;
  friend class Page;
  friend class Scavenger;
  friend class StoreBuffer;
  friend class TestMemoryAllocatorScope;

  // The allocator interface.
  friend class Factory;

  // The Isolate constructs us.
  friend class Isolate;

  // Used in cctest.
  friend class HeapTester;

  DISALLOW_COPY_AND_ASSIGN(Heap);
};


class HeapStats {
 public:
  static const int kStartMarker = 0xDECADE00;
  static const int kEndMarker = 0xDECADE01;

  int* start_marker;                    // 0
  int* new_space_size;                  // 1
  int* new_space_capacity;              // 2
  intptr_t* old_space_size;             // 3
  intptr_t* old_space_capacity;         // 4
  intptr_t* code_space_size;            // 5
  intptr_t* code_space_capacity;        // 6
  intptr_t* map_space_size;             // 7
  intptr_t* map_space_capacity;         // 8
  intptr_t* lo_space_size;              // 9
  int* global_handle_count;             // 10
  int* weak_global_handle_count;        // 11
  int* pending_global_handle_count;     // 12
  int* near_death_global_handle_count;  // 13
  int* free_global_handle_count;        // 14
  intptr_t* memory_allocator_size;      // 15
  intptr_t* memory_allocator_capacity;  // 16
  int* objects_per_type;                // 17
  int* size_per_type;                   // 18
  int* os_error;                        // 19
  char* last_few_messages;              // 20
  char* js_stacktrace;                  // 21
  int* end_marker;                      // 22
};


class AlwaysAllocateScope {
 public:
  explicit inline AlwaysAllocateScope(Isolate* isolate);
  inline ~AlwaysAllocateScope();

 private:
  Heap* heap_;
};


// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor : public ObjectVisitor {
 public:
  inline void VisitPointers(Object** start, Object** end) override;
};


// Verify that all objects are Smis.
class VerifySmisVisitor : public ObjectVisitor {
 public:
  inline void VisitPointers(Object** start, Object** end) override;
};


// Space iterator for iterating over all spaces of the heap. Returns each space
// in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
 public:
  explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
  Space* next();

 private:
  Heap* heap_;
  int counter_;
};
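
// Typical iteration pattern (sketch; `heap` is assumed to be a valid Heap*):
//
//   AllSpaces spaces(heap);
//   while (Space* space = spaces.next()) {
//     // inspect *space
//   }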
2371
2372
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002373// Space iterator for iterating over all old spaces of the heap: Old space
2374// and code space. Returns each space in turn, and null when it is done.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002375class OldSpaces BASE_EMBEDDED {
2376 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002377 explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002378 OldSpace* next();
2379
2380 private:
2381 Heap* heap_;
2382 int counter_;
2383};
2384
2385
2386// Space iterator for iterating over all the paged spaces of the heap: Map
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002387// space, old space, code space and cell space. Returns
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002388// each space in turn, and null when it is done.
2389class PagedSpaces BASE_EMBEDDED {
2390 public:
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002391 explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002392 PagedSpace* next();
2393
2394 private:
2395 Heap* heap_;
2396 int counter_;
2397};


// Space iterator for iterating over all spaces of the heap.
// For each space an object iterator is provided. The deallocation of the
// returned object iterators is handled by the space iterator.
class SpaceIterator : public Malloced {
 public:
  explicit SpaceIterator(Heap* heap);
  virtual ~SpaceIterator();

  bool has_next();
  ObjectIterator* next();

 private:
  ObjectIterator* CreateIterator();

  Heap* heap_;
  int current_space_;         // from enum AllocationSpace.
  ObjectIterator* iterator_;  // object iterator for the current space.
};
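
// Example (sketch): visiting each space's object iterator in turn. As noted
// above, the returned iterators are owned and freed by the SpaceIterator.
//
//   SpaceIterator it(heap);
//   while (it.has_next()) {
//     ObjectIterator* objects = it.next();
//     // ... walk the space's objects via the ObjectIterator interface ...
//   }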


// A HeapIterator provides iteration over the whole heap. It aggregates the
// specific iterators for the different spaces, as each of those can iterate
// over only one space.
//
// HeapIterator ensures there is no allocation during its lifetime (using an
// embedded DisallowHeapAllocation instance).
//
// HeapIterator can skip free list nodes (that is, de-allocated heap objects
// that still remain in the heap). As the implementation of free-node
// filtering uses GC marks, it can't be used during mark-sweep/mark-compact
// GC phases. Also, it is forbidden to interrupt iteration in this mode, as
// this would leave heap objects marked (and thus, unusable).
class HeapIterator BASE_EMBEDDED {
 public:
  enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };

  explicit HeapIterator(Heap* heap,
                        HeapObjectsFiltering filtering = kNoFiltering);
  ~HeapIterator();

  HeapObject* next();

 private:
  struct MakeHeapIterableHelper {
    explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); }
  };

  HeapObject* NextObject();

  // The following two fields need to be declared in this order. Initialization
  // order guarantees that we first make the heap iterable (which may involve
  // allocations) and only then lock it down by not allowing further
  // allocations.
  MakeHeapIterableHelper make_heap_iterable_helper_;
  DisallowHeapAllocation no_heap_allocation_;

  Heap* heap_;
  HeapObjectsFiltering filtering_;
  HeapObjectsFilter* filter_;
  // Space iterator for iterating all the spaces.
  SpaceIterator* space_iterator_;
  // Object iterator for the space currently being iterated.
  ObjectIterator* object_iterator_;
};
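
// Example (sketch): the usual pattern for walking every heap object. next()
// returns null when iteration is exhausted, and no allocation may happen
// while the iterator is alive.
//
//   HeapIterator iterator(heap);
//   for (HeapObject* obj = iterator.next(); obj != nullptr;
//        obj = iterator.next()) {
//     // ... inspect *obj ...
//   }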


// Cache for mapping (map, property name) into a field offset.
// Cleared at startup and prior to mark-sweep collection.
class KeyedLookupCache {
 public:
  // Lookup field offset for (map, name). If absent, kNotFound (-1) is
  // returned.
  int Lookup(Handle<Map> map, Handle<Name> name);

  // Update an element in the cache.
  void Update(Handle<Map> map, Handle<Name> name, int field_offset);

  // Clear the cache.
  void Clear();

  static const int kLength = 256;
  static const int kCapacityMask = kLength - 1;
  static const int kMapHashShift = 5;
  static const int kHashMask = -4;  // Zero the last two bits.
  static const int kEntriesPerBucket = 4;
  static const int kEntryLength = 2;
  static const int kMapIndex = 0;
  static const int kKeyIndex = 1;
  static const int kNotFound = -1;

  // kEntriesPerBucket should be a power of 2.
  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);

 private:
  KeyedLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].map = NULL;
      keys_[i].name = NULL;
      field_offsets_[i] = kNotFound;
    }
  }

  static inline int Hash(Handle<Map> map, Handle<Name> name);

  // Get the address of the keys and field_offsets arrays. Used in
  // generated code to perform cache lookups.
  Address keys_address() { return reinterpret_cast<Address>(&keys_); }

  Address field_offsets_address() {
    return reinterpret_cast<Address>(&field_offsets_);
  }

  struct Key {
    Map* map;
    Name* name;
  };

  Key keys_[kLength];
  int field_offsets_[kLength];

  friend class ExternalReference;
  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
};
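
// Example (sketch): the intended probe-then-fill discipline. The accessor
// isolate->keyed_lookup_cache() is assumed here as the way to reach the
// per-isolate instance (the constructor is private; Isolate is a friend).
//
//   KeyedLookupCache* cache = isolate->keyed_lookup_cache();
//   int offset = cache->Lookup(map, name);
//   if (offset == KeyedLookupCache::kNotFound) {
//     offset = /* ... compute via a slow property lookup ... */;
//     cache->Update(map, name, offset);
//   }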


// Cache for mapping (map, property name) into descriptor index.
// The cache contains both positive and negative results.
// A descriptor index equal to kNotFound means the property is absent.
// Cleared at startup and prior to any gc.
class DescriptorLookupCache {
 public:
  // Lookup descriptor index for (map, name).
  // If absent, kAbsent is returned.
  inline int Lookup(Map* source, Name* name);

  // Update an element in the cache.
  inline void Update(Map* source, Name* name, int result);

  // Clear the cache.
  void Clear();

  static const int kAbsent = -2;

 private:
  DescriptorLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].source = NULL;
      keys_[i].name = NULL;
      results_[i] = kAbsent;
    }
  }

  static inline int Hash(Object* source, Name* name);

  static const int kLength = 64;
  struct Key {
    Map* source;
    Name* name;
  };

  Key keys_[kLength];
  int results_[kLength];

  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
};
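
// Example (sketch): same probe-then-fill discipline as KeyedLookupCache
// above, with isolate->descriptor_lookup_cache() assumed as the per-isolate
// accessor. kAbsent signals "no cache entry", distinct from a cached
// negative result.
//
//   int index = isolate->descriptor_lookup_cache()->Lookup(map, name);
//   if (index == DescriptorLookupCache::kAbsent) {
//     index = /* ... search the map's descriptor array ... */;
//     isolate->descriptor_lookup_cache()->Update(map, name, index);
//   }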


// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() {}

  // Returns whether this object should be retained. If NULL is returned, the
  // object will not be retained. Otherwise the address of the retained object
  // should be returned, as in some GC situations the object may have been
  // moved.
  virtual Object* RetainAs(Object* object) = 0;
};
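
// Example (sketch, hypothetical subclass): the simplest possible retainer,
// which keeps every object alive at its current address. A real retainer
// would return NULL to drop an object, or the object's new address if the GC
// has moved it.
//
//   class KeepAllRetainer : public WeakObjectRetainer {
//    public:
//     Object* RetainAs(Object* object) override { return object; }
//   };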


#ifdef DEBUG
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
// object to the search target object.
class PathTracer : public ObjectVisitor {
 public:
  enum WhatToFind {
    FIND_ALL,   // Will find all matches.
    FIND_FIRST  // Will stop the search after the first match.
  };

  // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObjects.
  static const int kMarkTag = 2;

  // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
  // after the first match. If FIND_ALL is specified, then tracing will be
  // done for all matches.
  PathTracer(Object* search_target, WhatToFind what_to_find,
             VisitMode visit_mode)
      : search_target_(search_target),
        found_target_(false),
        found_target_in_trace_(false),
        what_to_find_(what_to_find),
        visit_mode_(visit_mode),
        object_stack_(20),
        no_allocation() {}

  void VisitPointers(Object** start, Object** end) override;

  void Reset();
  void TracePathFrom(Object** root);

  bool found() const { return found_target_; }

  static Object* const kAnyGlobalObject;

 protected:
  class MarkVisitor;
  class UnmarkVisitor;

  void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
  void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
  virtual void ProcessResults();

  Object* search_target_;
  bool found_target_;
  bool found_target_in_trace_;
  WhatToFind what_to_find_;
  VisitMode visit_mode_;
  List<Object*> object_stack_;

  DisallowHeapAllocation no_allocation;  // i.e. no gc allowed.

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
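
// Example (sketch, debug-only): checking whether a given root still reaches a
// target object. VISIT_ALL is assumed to be one of the VisitMode values from
// globals.h.
//
//   PathTracer tracer(target, PathTracer::FIND_FIRST, VISIT_ALL);
//   tracer.TracePathFrom(&root);
//   bool reached = tracer.found();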
#endif  // DEBUG

// -----------------------------------------------------------------------------
// Allows observation of allocations.
class AllocationObserver {
 public:
  explicit AllocationObserver(intptr_t step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {
    DCHECK(step_size >= kPointerSize);
  }
  virtual ~AllocationObserver() {}

  // Called each time the observed space does an allocation step. This may
  // happen more frequently than every step_size bytes we are monitoring (e.g.
  // when there are multiple observers, or when a page or space boundary is
  // encountered).
  void AllocationStep(int bytes_allocated, Address soon_object, size_t size) {
    bytes_to_next_step_ -= bytes_allocated;
    if (bytes_to_next_step_ <= 0) {
      Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
           size);
      step_size_ = GetNextStepSize();
      bytes_to_next_step_ = step_size_;
    }
  }
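
  // Worked example (a sketch, not normative) of the bookkeeping above: with
  // step_size 512, an allocation of 300 bytes leaves bytes_to_next_step_ at
  // 212; a second 300-byte allocation drives it to -88, so Step() fires with
  // bytes_allocated = 512 - (-88) = 600, i.e. the total allocated since the
  // last step, after which both counters are reset for the next step.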

 protected:
  intptr_t step_size() const { return step_size_; }
  intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }

  // Pure virtual method provided by the subclasses that gets called when at
  // least step_size bytes have been allocated. soon_object is the address just
  // allocated (but not yet initialized) and size is the size of the object as
  // requested (i.e. without the alignment fillers). Some complexities to be
  // aware of:
  // 1) soon_object will be nullptr in cases where we end up observing an
  //    allocation that happens to be a filler space (e.g. page boundaries).
  // 2) size is the requested size at the time of allocation. Right-trimming
  //    may change the object size dynamically.
  // 3) soon_object may actually be the first object in an allocation-folding
  //    group. In such a case size is the size of the group rather than the
  //    size of the first object.
  virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;

  // Subclasses can override this method to make the step size dynamic.
  virtual intptr_t GetNextStepSize() { return step_size_; }

  intptr_t step_size_;
  intptr_t bytes_to_next_step_;

 private:
  friend class LargeObjectSpace;
  friend class NewSpace;
  friend class PagedSpace;
  DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};
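
// Example (sketch, hypothetical subclass): an observer that samples roughly
// every 1 KB of allocation. Note that the bytes_allocated passed to Step() is
// the total allocated since the previous step, so it can exceed the
// configured step size (600 vs. 512 in the worked example above).
//
//   class SamplingObserver : public AllocationObserver {
//    public:
//     SamplingObserver() : AllocationObserver(1024) {}
//
//    protected:
//     void Step(int bytes_allocated, Address soon_object,
//               size_t size) override {
//       // soon_object may be nullptr (e.g. filler at a page boundary).
//     }
//   };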

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_H_