// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_HEAP_H_
#define V8_HEAP_H_

#include <math.h>

#include "allocation.h"
#include "globals.h"
#include "incremental-marking.h"
#include "list.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "spaces.h"
#include "splay-tree-inl.h"
#include "store-buffer.h"
#include "v8-counters.h"
#include "v8globals.h"

namespace v8 {
namespace internal {

// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V)                                                    \
  V(Map, byte_array_map, ByteArrayMap)                                         \
  V(Map, free_space_map, FreeSpaceMap)                                         \
  V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
  V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
  /* Cluster the most popular ones in a few cache lines here at the top.   */ \
  V(Smi, store_buffer_top, StoreBufferTop)                                     \
  V(Oddball, undefined_value, UndefinedValue)                                  \
  V(Oddball, the_hole_value, TheHoleValue)                                     \
  V(Oddball, null_value, NullValue)                                            \
  V(Oddball, true_value, TrueValue)                                            \
  V(Oddball, false_value, FalseValue)                                          \
  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
  V(Map, meta_map, MetaMap)                                                    \
  V(Map, ascii_symbol_map, AsciiSymbolMap)                                     \
  V(Map, ascii_string_map, AsciiStringMap)                                     \
  V(Map, heap_number_map, HeapNumberMap)                                       \
  V(Map, global_context_map, GlobalContextMap)                                 \
  V(Map, fixed_array_map, FixedArrayMap)                                       \
  V(Map, code_map, CodeMap)                                                    \
  V(Map, scope_info_map, ScopeInfoMap)                                         \
  V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
  V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
  V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel)       \
  V(Map, hash_table_map, HashTableMap)                                         \
  V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
  V(String, empty_string, EmptyString)                                         \
  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
  V(Smi, stack_limit, StackLimit)                                              \
  V(Oddball, arguments_marker, ArgumentsMarker)                                \
  /* The first 32 roots above this line should be boring from a GC point of */ \
  /* view.  This means they are never in new space and never on a page that */ \
  /* is being compacted.                                                    */ \
  V(FixedArray, number_string_cache, NumberStringCache)                        \
  V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
  V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
  V(Object, instanceof_cache_answer, InstanceofCacheAnswer)                    \
  V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
  V(FixedArray, string_split_cache, StringSplitCache)                          \
  V(Object, termination_exception, TerminationException)                       \
  V(Smi, hash_seed, HashSeed)                                                  \
  V(Map, string_map, StringMap)                                                \
  V(Map, symbol_map, SymbolMap)                                                \
  V(Map, cons_string_map, ConsStringMap)                                       \
  V(Map, cons_ascii_string_map, ConsAsciiStringMap)                            \
  V(Map, sliced_string_map, SlicedStringMap)                                   \
  V(Map, sliced_ascii_string_map, SlicedAsciiStringMap)                        \
  V(Map, cons_symbol_map, ConsSymbolMap)                                       \
  V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap)                            \
  V(Map, external_symbol_map, ExternalSymbolMap)                               \
  V(Map, external_symbol_with_ascii_data_map, ExternalSymbolWithAsciiDataMap)  \
  V(Map, external_ascii_symbol_map, ExternalAsciiSymbolMap)                    \
  V(Map, external_string_map, ExternalStringMap)                               \
  V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap)  \
  V(Map, external_ascii_string_map, ExternalAsciiStringMap)                    \
  V(Map, short_external_symbol_map, ShortExternalSymbolMap)                    \
  V(Map,                                                                       \
    short_external_symbol_with_ascii_data_map,                                 \
    ShortExternalSymbolWithAsciiDataMap)                                       \
  V(Map, short_external_ascii_symbol_map, ShortExternalAsciiSymbolMap)         \
  V(Map, short_external_string_map, ShortExternalStringMap)                    \
  V(Map,                                                                       \
    short_external_string_with_ascii_data_map,                                 \
    ShortExternalStringWithAsciiDataMap)                                       \
  V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap)         \
  V(Map, undetectable_string_map, UndetectableStringMap)                       \
  V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap)            \
  V(Map, external_pixel_array_map, ExternalPixelArrayMap)                      \
  V(Map, external_byte_array_map, ExternalByteArrayMap)                        \
  V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap)       \
  V(Map, external_short_array_map, ExternalShortArrayMap)                      \
  V(Map, external_unsigned_short_array_map, ExternalUnsignedShortArrayMap)     \
  V(Map, external_int_array_map, ExternalIntArrayMap)                          \
  V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap)         \
  V(Map, external_float_array_map, ExternalFloatArrayMap)                      \
  V(Map, external_double_array_map, ExternalDoubleArrayMap)                    \
  V(Map, non_strict_arguments_elements_map, NonStrictArgumentsElementsMap)     \
  V(Map, function_context_map, FunctionContextMap)                             \
  V(Map, catch_context_map, CatchContextMap)                                   \
  V(Map, with_context_map, WithContextMap)                                     \
  V(Map, block_context_map, BlockContextMap)                                   \
  V(Map, module_context_map, ModuleContextMap)                                 \
  V(Map, oddball_map, OddballMap)                                              \
  V(Map, message_object_map, JSMessageObjectMap)                               \
  V(Map, foreign_map, ForeignMap)                                              \
  V(HeapNumber, nan_value, NanValue)                                           \
  V(HeapNumber, infinity_value, InfinityValue)                                 \
  V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
  V(Map, neander_map, NeanderMap)                                              \
  V(JSObject, message_listeners, MessageListeners)                             \
  V(Foreign, prototype_accessors, PrototypeAccessors)                          \
  V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
  V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache)      \
  V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache)        \
  V(Code, js_entry_code, JsEntryCode)                                          \
  V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
  V(FixedArray, natives_source_cache, NativesSourceCache)                      \
  V(Object, last_script_id, LastScriptId)                                      \
  V(Script, empty_script, EmptyScript)                                         \
  V(Smi, real_stack_limit, RealStackLimit)                                     \
  V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames)        \
  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)     \
  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)

#define ROOT_LIST(V)                                  \
  STRONG_ROOT_LIST(V)                                 \
  V(SymbolTable, symbol_table, SymbolTable)

#define SYMBOL_LIST(V)                                                   \
  V(Array_symbol, "Array")                                               \
  V(Object_symbol, "Object")                                             \
  V(Proto_symbol, "__proto__")                                           \
  V(StringImpl_symbol, "StringImpl")                                     \
  V(arguments_symbol, "arguments")                                       \
  V(Arguments_symbol, "Arguments")                                       \
  V(call_symbol, "call")                                                 \
  V(apply_symbol, "apply")                                               \
  V(caller_symbol, "caller")                                             \
  V(boolean_symbol, "boolean")                                           \
  V(Boolean_symbol, "Boolean")                                           \
  V(callee_symbol, "callee")                                             \
  V(constructor_symbol, "constructor")                                   \
  V(code_symbol, ".code")                                                \
  V(result_symbol, ".result")                                            \
  V(catch_var_symbol, ".catch-var")                                      \
  V(empty_symbol, "")                                                    \
  V(eval_symbol, "eval")                                                 \
  V(function_symbol, "function")                                         \
  V(length_symbol, "length")                                             \
  V(module_symbol, "module")                                             \
  V(name_symbol, "name")                                                 \
  V(native_symbol, "native")                                             \
  V(null_symbol, "null")                                                 \
  V(number_symbol, "number")                                             \
  V(Number_symbol, "Number")                                             \
  V(nan_symbol, "NaN")                                                   \
  V(RegExp_symbol, "RegExp")                                             \
  V(source_symbol, "source")                                             \
  V(global_symbol, "global")                                             \
  V(ignore_case_symbol, "ignoreCase")                                    \
  V(multiline_symbol, "multiline")                                       \
  V(input_symbol, "input")                                               \
  V(index_symbol, "index")                                               \
  V(last_index_symbol, "lastIndex")                                      \
  V(object_symbol, "object")                                             \
  V(prototype_symbol, "prototype")                                       \
  V(string_symbol, "string")                                             \
  V(String_symbol, "String")                                             \
  V(Date_symbol, "Date")                                                 \
  V(this_symbol, "this")                                                 \
  V(to_string_symbol, "toString")                                        \
  V(char_at_symbol, "CharAt")                                            \
  V(undefined_symbol, "undefined")                                       \
  V(value_of_symbol, "valueOf")                                          \
  V(InitializeVarGlobal_symbol, "InitializeVarGlobal")                   \
  V(InitializeConstGlobal_symbol, "InitializeConstGlobal")               \
  V(KeyedLoadElementMonomorphic_symbol,                                  \
    "KeyedLoadElementMonomorphic")                                       \
  V(KeyedStoreElementMonomorphic_symbol,                                 \
    "KeyedStoreElementMonomorphic")                                      \
  V(KeyedStoreAndGrowElementMonomorphic_symbol,                          \
    "KeyedStoreAndGrowElementMonomorphic")                               \
  V(stack_overflow_symbol, "kStackOverflowBoilerplate")                  \
  V(illegal_access_symbol, "illegal access")                             \
  V(out_of_memory_symbol, "out-of-memory")                               \
  V(illegal_execution_state_symbol, "illegal execution state")           \
  V(get_symbol, "get")                                                   \
  V(set_symbol, "set")                                                   \
  V(function_class_symbol, "Function")                                   \
  V(illegal_argument_symbol, "illegal argument")                         \
  V(MakeReferenceError_symbol, "MakeReferenceError")                     \
  V(MakeSyntaxError_symbol, "MakeSyntaxError")                           \
  V(MakeTypeError_symbol, "MakeTypeError")                               \
  V(invalid_lhs_in_assignment_symbol, "invalid_lhs_in_assignment")       \
  V(invalid_lhs_in_for_in_symbol, "invalid_lhs_in_for_in")               \
  V(invalid_lhs_in_postfix_op_symbol, "invalid_lhs_in_postfix_op")       \
  V(invalid_lhs_in_prefix_op_symbol, "invalid_lhs_in_prefix_op")         \
  V(illegal_return_symbol, "illegal_return")                             \
  V(illegal_break_symbol, "illegal_break")                               \
  V(illegal_continue_symbol, "illegal_continue")                         \
  V(unknown_label_symbol, "unknown_label")                               \
  V(redeclaration_symbol, "redeclaration")                               \
  V(failure_symbol, "<failure>")                                         \
  V(space_symbol, " ")                                                   \
  V(exec_symbol, "exec")                                                 \
  V(zero_symbol, "0")                                                    \
  V(global_eval_symbol, "GlobalEval")                                    \
  V(identity_hash_symbol, "v8::IdentityHash")                            \
  V(closure_symbol, "(closure)")                                         \
  V(use_strict, "use strict")                                            \
  V(dot_symbol, ".")                                                     \
  V(anonymous_function_symbol, "(anonymous function)")                   \
  V(compare_ic_symbol, ".compare_ic")                                    \
  V(infinity_symbol, "Infinity")                                         \
  V(minus_infinity_symbol, "-Infinity")                                  \
  V(hidden_stack_trace_symbol, "v8::hidden_stack_trace")

// Forward declarations.
class GCTracer;
class HeapStats;
class Isolate;
class WeakObjectRetainer;


typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
                                                      Object** pointer);

class StoreBufferRebuilder {
 public:
  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
      : store_buffer_(store_buffer) {
  }

  void Callback(MemoryChunk* page, StoreBufferEvent event);

 private:
  StoreBuffer* store_buffer_;

  // We record in this variable how full the store buffer was when we started
  // iterating over the current page, finding pointers to new space.  If the
  // store buffer overflows again we can exempt the page from the store buffer
  // by rewinding to this point instead of having to search the store buffer.
  Object*** start_of_current_page_;
  // The current page we are scanning in the store buffer iterator.
  MemoryChunk* current_page_;
};
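
// Illustrative usage note (not part of the original header): a
// StoreBufferRebuilder is handed to the store buffer as its event callback
// while pointers to new space are being found during a scavenge; a sketch,
// assuming the store buffer accessor of this V8 vintage:
//
//   StoreBufferRebuilder rebuilder(heap->store_buffer());
//   // The store buffer invokes rebuilder.Callback(page, event) as it begins
//   // and ends each page, so a page that overflows the buffer again can be
//   // exempted by rewinding to start_of_current_page_.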


// The all-static Heap captures the interface to the global object heap.
// All JavaScript contexts within this process share the same object heap.

#ifdef DEBUG
class HeapDebugUtils;
#endif


// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
 public:
  explicit PromotionQueue(Heap* heap)
      : front_(NULL),
        rear_(NULL),
        limit_(NULL),
        emergency_stack_(0),
        heap_(heap) { }

  void Initialize();

  void Destroy() {
    ASSERT(is_empty());
    delete emergency_stack_;
    emergency_stack_ = NULL;
  }

  inline void ActivateGuardIfOnTheSamePage();

  Page* GetHeadPage() {
    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
  }

  void SetNewLimit(Address limit) {
    if (!guard_) {
      return;
    }

    ASSERT(GetHeadPage() == Page::FromAllocationTop(limit));
    limit_ = reinterpret_cast<intptr_t*>(limit);

    if (limit_ <= rear_) {
      return;
    }

    RelocateQueueHead();
  }

  bool is_empty() {
    return (front_ == rear_) &&
        (emergency_stack_ == NULL || emergency_stack_->length() == 0);
  }

  inline void insert(HeapObject* target, int size);

  void remove(HeapObject** target, int* size) {
    ASSERT(!is_empty());
    if (front_ == rear_) {
      Entry e = emergency_stack_->RemoveLast();
      *target = e.obj_;
      *size = e.size_;
      return;
    }

    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
      NewSpacePage* front_page =
          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
      ASSERT(!front_page->prev_page()->is_anchor());
      front_ =
          reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
    }
    *target = reinterpret_cast<HeapObject*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    // Assert no underflow.
    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
                                reinterpret_cast<Address>(front_));
  }

 private:
  // The front of the queue is higher in the memory page chain than the rear.
  intptr_t* front_;
  intptr_t* rear_;
  intptr_t* limit_;

  bool guard_;

  static const int kEntrySizeInWords = 2;

  struct Entry {
    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { }

    HeapObject* obj_;
    int size_;
  };
  List<Entry>* emergency_stack_;

  Heap* heap_;

  void RelocateQueueHead();

  DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};
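
// Illustrative only (not in the original header): the scavenger drains the
// queue in FIFO order, two words per entry (object pointer, then size), e.g.:
//
//   PromotionQueue* queue = heap->promotion_queue();  // assumed accessor
//   queue->insert(promoted_object, promoted_object->Size());
//   while (!queue->is_empty()) {
//     HeapObject* target;
//     int size;
//     queue->remove(&target, &size);
//     // Scan [target, target + size) for pointers into new space.
//   }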
382
383
Ben Murdoch3ef787d2012-04-12 10:51:47 +0100384typedef void (*ScavengingCallback)(Map* map,
385 HeapObject** slot,
386 HeapObject* object);
387
388
// The external string table is a place where all external strings are
// registered.  We need to keep track of such strings to properly
// finalize them.
class ExternalStringTable {
 public:
  // Registers an external string.
  inline void AddString(String* string);

  inline void Iterate(ObjectVisitor* v);

  // Restores internal invariant and gets rid of collected strings.
  // Must be called after each Iterate() that modified the strings.
  void CleanUp();

  // Destroys all allocated memory.
  void TearDown();

 private:
  ExternalStringTable() { }

  friend class Heap;

  inline void Verify();

  inline void AddOldString(String* string);

  // Notifies the table that only a prefix of the new list is valid.
  inline void ShrinkNewStrings(int position);

  // To speed up scavenge collections, new space strings are kept
  // separate from old space strings.
  List<Object*> new_space_strings_;
  List<Object*> old_space_strings_;

  Heap* heap_;

  DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
};
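
// Illustrative only (not in the original header): Heap registers each newly
// created external string and, after a GC that may have collected some of
// them, visits and compacts the table:
//
//   external_string_table_.AddString(string);    // at allocation time
//   external_string_table_.Iterate(&visitor);    // during GC
//   external_string_table_.CleanUp();            // after Iterate() mutated it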


enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};
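
// Note (added): DONT_INITIALIZE_ARRAY_ELEMENTS leaves the backing store
// uninitialized, so the caller must fill it before the heap is iterated,
// while INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE writes the hole value into every
// slot; see AllocateJSArrayAndStorage() below.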

class Heap {
 public:
  // Configure heap size before setup. Return false if the heap has been
  // set up already.
  bool ConfigureHeap(int max_semispace_size,
                     intptr_t max_old_gen_size,
                     intptr_t max_executable_size);
  bool ConfigureHeapDefault();

  // Initializes the global object heap. If create_heap_objects is true,
  // also creates the basic non-mutable objects.
  // Returns whether it succeeded.
  bool SetUp(bool create_heap_objects);

  // Destroys all memory allocated by the heap.
  void TearDown();

  // Set the stack limit in the roots_ array.  Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  void SetStackLimits();

  // Returns whether SetUp has been called.
  bool HasBeenSetUp();

  // Returns the maximum amount of memory reserved for the heap.  For
  // the young generation, we reserve 4 times the amount needed for a
  // semi space.  The young generation consists of two semi spaces and
  // we reserve twice the amount needed for those in order to ensure
  // that new space can be aligned to its size.
  intptr_t MaxReserved() {
    return 4 * reserved_semispace_size_ + max_old_generation_size_;
  }
  int MaxSemiSpaceSize() { return max_semispace_size_; }
  int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
  int InitialSemiSpaceSize() { return initial_semispace_size_; }
  intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
  intptr_t MaxExecutableSize() { return max_executable_size_; }

  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
  // more spaces are needed until it reaches the limit.
  intptr_t Capacity();

  // Returns the amount of memory currently committed for the heap.
  intptr_t CommittedMemory();

  // Returns the amount of executable memory currently committed for the heap.
  intptr_t CommittedMemoryExecutable();

  // Returns the available bytes in space w/o growing.
  // Heap doesn't guarantee that it can allocate an object that requires
  // all available bytes. Check MaxHeapObjectSize() instead.
  intptr_t Available();

  // Returns the size of all objects residing in the heap.
  intptr_t SizeOfObjects();

  // Return the starting address and a mask for the new space.  And-masking an
  // address with the mask will result in the start address of the new space
  // for all addresses in either semispace.
  Address NewSpaceStart() { return new_space_.start(); }
  uintptr_t NewSpaceMask() { return new_space_.mask(); }
  Address NewSpaceTop() { return new_space_.top(); }

  NewSpace* new_space() { return &new_space_; }
  OldSpace* old_pointer_space() { return old_pointer_space_; }
  OldSpace* old_data_space() { return old_data_space_; }
  OldSpace* code_space() { return code_space_; }
  MapSpace* map_space() { return map_space_; }
  CellSpace* cell_space() { return cell_space_; }
  LargeObjectSpace* lo_space() { return lo_space_; }

  bool always_allocate() { return always_allocate_scope_depth_ != 0; }
  Address always_allocate_scope_depth_address() {
    return reinterpret_cast<Address>(&always_allocate_scope_depth_);
  }
  bool linear_allocation() {
    return linear_allocation_scope_depth_ != 0;
  }

  Address* NewSpaceAllocationTopAddress() {
    return new_space_.allocation_top_address();
  }
  Address* NewSpaceAllocationLimitAddress() {
    return new_space_.allocation_limit_address();
  }

  // Uncommit unused semi space.
  bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }

  // Allocates and initializes a new JavaScript object based on a
  // constructor.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateJSObject(
      JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
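
  // Illustrative only (not in the original header): all of these allocators
  // share the MaybeObject protocol.  On failure they return
  // Failure::RetryAfterGC without collecting, so callers unwrap the result
  // and propagate failures up to a retry loop that is allowed to GC:
  //
  //   Object* obj;
  //   { MaybeObject* maybe = AllocateJSObject(constructor);
  //     if (!maybe->ToObject(&obj)) return maybe;
  //   }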

  // Allocate a JSArray with no elements.
  MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray(
      ElementsKind elements_kind,
      PretenureFlag pretenure = NOT_TENURED) {
    return AllocateJSArrayAndStorage(elements_kind, 0, 0,
                                     DONT_INITIALIZE_ARRAY_ELEMENTS,
                                     pretenure);
  }

  // Allocate a JSArray with a specified length but elements that are left
  // uninitialized.
  MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage(
      ElementsKind elements_kind,
      int length,
      int capacity,
      ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
      PretenureFlag pretenure = NOT_TENURED);

  // Allocate a JSArray with the given elements array as its backing store.
  MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
      FixedArrayBase* array_base,
      ElementsKind elements_kind,
      PretenureFlag pretenure = NOT_TENURED);

  // Allocates and initializes a new global object based on a constructor.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateGlobalObject(JSFunction* constructor);

  // Returns a deep copy of the JavaScript object.
  // Properties and elements are copied too.
  // Returns failure if allocation failed.
  MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source);

  // Allocates the function prototype.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateFunctionPrototype(JSFunction* function);

  // Allocates a Harmony proxy or function proxy.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateJSProxy(Object* handler,
                                               Object* prototype);

  MUST_USE_RESULT MaybeObject* AllocateJSFunctionProxy(Object* handler,
                                                       Object* call_trap,
                                                       Object* construct_trap,
                                                       Object* prototype);

  // Reinitialize a JSReceiver into an (empty) JS object of respective type
  // and size, but keeping the original prototype.  The receiver must have at
  // least the size of the new object.  The object is reinitialized and
  // behaves as an object that has been freshly allocated.
  // Returns failure if an error occurred, otherwise object.
  MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object,
                                                      InstanceType type,
                                                      int size);

  // Reinitialize a JSGlobalProxy based on a constructor.  The object
  // must have the same size as objects allocated using the
  // constructor.  The object is reinitialized and behaves as an
  // object that has been freshly allocated using the constructor.
  MUST_USE_RESULT MaybeObject* ReinitializeJSGlobalProxy(
      JSFunction* constructor, JSGlobalProxy* global);

  // Allocates and initializes a new JavaScript object based on a map.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap(
      Map* map, PretenureFlag pretenure = NOT_TENURED);

  // Allocates a heap object based on the map.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space);

  // Allocates a JS Map in the heap.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateMap(
      InstanceType instance_type,
      int instance_size,
      ElementsKind elements_kind = FAST_ELEMENTS);

  // Allocates a partial map for bootstrapping.
  MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
                                                  int instance_size);

  // Allocate a map for the specified function.
  MUST_USE_RESULT MaybeObject* AllocateInitialMap(JSFunction* fun);

  // Allocates an empty code cache.
  MUST_USE_RESULT MaybeObject* AllocateCodeCache();

  // Allocates a serialized scope info.
  MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length);

  // Allocates an empty PolymorphicCodeCache.
  MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();

  // Allocates a pre-tenured empty AccessorPair.
  MUST_USE_RESULT MaybeObject* AllocateAccessorPair();

  // Allocates an empty TypeFeedbackInfo.
  MUST_USE_RESULT MaybeObject* AllocateTypeFeedbackInfo();

  // Allocates an AliasedArgumentsEntry.
  MUST_USE_RESULT MaybeObject* AllocateAliasedArgumentsEntry(int slot);

  // Clear the Instanceof cache (used when a prototype changes).
  inline void ClearInstanceofCache();

  // Allocates and fully initializes a String.  There are two String
  // encodings: ASCII and two byte.  One should choose between the three
  // string allocation functions based on the encoding of the string buffer
  // used to initialize the string.
  //   - ...FromAscii initializes the string from a buffer that is ASCII
  //     encoded (it does not check that the buffer is ASCII encoded) and
  //     the result will be ASCII encoded.
  //   - ...FromUTF8 initializes the string from a buffer that is UTF-8
  //     encoded.  If the characters are all single-byte characters, the
  //     result will be ASCII encoded, otherwise it will be converted to
  //     two byte.
  //   - ...FromTwoByte initializes the string from a buffer that is two-byte
  //     encoded.  If the characters are all single-byte characters, the
  //     result will be converted to ASCII, otherwise it will be left as
  //     two-byte.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateStringFromAscii(
      Vector<const char> str,
      PretenureFlag pretenure = NOT_TENURED);
  MUST_USE_RESULT inline MaybeObject* AllocateStringFromUtf8(
      Vector<const char> str,
      PretenureFlag pretenure = NOT_TENURED);
  MUST_USE_RESULT MaybeObject* AllocateStringFromUtf8Slow(
      Vector<const char> str,
      PretenureFlag pretenure = NOT_TENURED);
  MUST_USE_RESULT MaybeObject* AllocateStringFromTwoByte(
      Vector<const uc16> str,
      PretenureFlag pretenure = NOT_TENURED);
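
  // Illustrative only (not in the original header): choosing an allocator by
  // input encoding.  FromAscii trusts the caller; FromUtf8 decodes and widens
  // the result to two-byte only when needed:
  //
  //   MaybeObject* maybe_str =
  //       AllocateStringFromUtf8(CStrVector(utf8_data));  // may be two-byte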

  // Allocates a symbol in old space based on the character stream.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  MUST_USE_RESULT inline MaybeObject* AllocateSymbol(Vector<const char> str,
                                                     int chars,
                                                     uint32_t hash_field);

  MUST_USE_RESULT inline MaybeObject* AllocateAsciiSymbol(
      Vector<const char> str,
      uint32_t hash_field);

  MUST_USE_RESULT inline MaybeObject* AllocateTwoByteSymbol(
      Vector<const uc16> str,
      uint32_t hash_field);

  MUST_USE_RESULT MaybeObject* AllocateInternalSymbol(
      unibrow::CharacterStream* buffer, int chars, uint32_t hash_field);

  MUST_USE_RESULT MaybeObject* AllocateExternalSymbol(
      Vector<const char> str,
      int chars);

  // Allocates and partially initializes a String.  There are two String
  // encodings: ASCII and two byte.  These functions allocate a string of the
  // given length and set its map and length fields.  The characters of the
  // string are uninitialized.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateRawAsciiString(
      int length,
      PretenureFlag pretenure = NOT_TENURED);
  MUST_USE_RESULT MaybeObject* AllocateRawTwoByteString(
      int length,
      PretenureFlag pretenure = NOT_TENURED);

  // Computes a single character string where the character has the given
  // code.  A cache is used for ASCII codes.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.  Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* LookupSingleCharacterStringFromCode(
      uint16_t code);

  // Allocate a byte array of the specified length.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateByteArray(int length,
                                                 PretenureFlag pretenure);

  // Allocate a non-tenured byte array of the specified length.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateByteArray(int length);

  // Allocates an external array of the specified length and type.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateExternalArray(
      int length,
      ExternalArrayType array_type,
      void* external_pointer,
      PretenureFlag pretenure);

  // Allocate a tenured JS global property cell.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateJSGlobalPropertyCell(Object* value);

  // Allocates a fixed array initialized with undefined values.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length,
                                                  PretenureFlag pretenure);
  // Allocates a fixed array initialized with undefined values.
  MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length);

  // Allocates an uninitialized fixed array.  It must be filled by the caller.
  //
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedArray(int length);

  // Make a copy of src and return it. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  MUST_USE_RESULT inline MaybeObject* CopyFixedArray(FixedArray* src);

  // Make a copy of src, set the map, and return the copy. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  MUST_USE_RESULT MaybeObject* CopyFixedArrayWithMap(FixedArray* src, Map* map);

  // Make a copy of src and return it. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  MUST_USE_RESULT inline MaybeObject* CopyFixedDoubleArray(
      FixedDoubleArray* src);

  // Make a copy of src, set the map, and return the copy. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  MUST_USE_RESULT MaybeObject* CopyFixedDoubleArrayWithMap(
      FixedDoubleArray* src, Map* map);

  // Allocates a fixed array initialized with the hole values.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateFixedArrayWithHoles(
      int length,
      PretenureFlag pretenure = NOT_TENURED);

  MUST_USE_RESULT MaybeObject* AllocateRawFixedDoubleArray(
      int length,
      PretenureFlag pretenure);

  // Allocates a fixed double array with uninitialized values. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedDoubleArray(
      int length,
      PretenureFlag pretenure = NOT_TENURED);

  // Allocates a fixed double array with hole values. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateFixedDoubleArrayWithHoles(
      int length,
      PretenureFlag pretenure = NOT_TENURED);

  // AllocateHashTable is identical to AllocateFixedArray except
  // that the resulting object has hash_table_map as map.
  MUST_USE_RESULT MaybeObject* AllocateHashTable(
      int length, PretenureFlag pretenure = NOT_TENURED);

  // Allocate a global (but otherwise uninitialized) context.
  MUST_USE_RESULT MaybeObject* AllocateGlobalContext();

  // Allocate a function context.
  MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
                                                       JSFunction* function);

  // Allocate a catch context.
  MUST_USE_RESULT MaybeObject* AllocateCatchContext(JSFunction* function,
                                                    Context* previous,
                                                    String* name,
                                                    Object* thrown_object);
  // Allocate a 'with' context.
  MUST_USE_RESULT MaybeObject* AllocateWithContext(JSFunction* function,
                                                   Context* previous,
                                                   JSObject* extension);

  // Allocate a block context.
  MUST_USE_RESULT MaybeObject* AllocateBlockContext(JSFunction* function,
                                                    Context* previous,
                                                    ScopeInfo* info);

  // Allocates a new utility object in the old generation.
  MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type);

  // Allocates a function initialized with a shared part.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateFunction(
      Map* function_map,
      SharedFunctionInfo* shared,
      Object* prototype,
      PretenureFlag pretenure = TENURED);

  // Arguments object size.
  static const int kArgumentsObjectSize =
      JSObject::kHeaderSize + 2 * kPointerSize;
  // Strict mode arguments have no callee, so they are smaller.
  static const int kArgumentsObjectSizeStrict =
      JSObject::kHeaderSize + 1 * kPointerSize;
  // Indices for direct access into argument objects.
  static const int kArgumentsLengthIndex = 0;
  // callee is only valid in non-strict mode.
  static const int kArgumentsCalleeIndex = 1;

  // Allocates an arguments object - optionally with an elements array.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateArgumentsObject(
      Object* callee, int length);

  // Same as NewNumberFromDouble, but may return a preallocated/immutable
  // number object (e.g., minus_zero_value_, nan_value_).
  MUST_USE_RESULT MaybeObject* NumberFromDouble(
      double value, PretenureFlag pretenure = NOT_TENURED);

  // Allocates a HeapNumber from value.
  MUST_USE_RESULT MaybeObject* AllocateHeapNumber(
      double value,
      PretenureFlag pretenure);
  // Same as above, with pretenure = NOT_TENURED.
  MUST_USE_RESULT MaybeObject* AllocateHeapNumber(double value);

  // Converts an int into either a Smi or a HeapNumber object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT inline MaybeObject* NumberFromInt32(
      int32_t value, PretenureFlag pretenure = NOT_TENURED);

  // Converts an unsigned int into either a Smi or a HeapNumber object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT inline MaybeObject* NumberFromUint32(
      uint32_t value, PretenureFlag pretenure = NOT_TENURED);

  // Allocates a new foreign object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateForeign(
      Address address, PretenureFlag pretenure = NOT_TENURED);

  // Allocates a new SharedFunctionInfo object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateSharedFunctionInfo(Object* name);

  // Allocates a new JSMessageObject object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note that this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateJSMessageObject(
      String* type,
      JSArray* arguments,
      int start_position,
      int end_position,
      Object* script,
      Object* stack_trace,
      Object* stack_frames);

  // Allocates a new cons string object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateConsString(String* first,
                                                  String* second);

  // Allocates a new sub string object which is a substring of an underlying
  // string buffer stretching from the index start (inclusive) to the index
  // end (exclusive).
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateSubString(
      String* buffer,
      int start,
      int end,
      PretenureFlag pretenure = NOT_TENURED);

  // Allocate a new external string object, which is backed by a string
  // resource that resides outside the V8 heap.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
      const ExternalAsciiString::Resource* resource);
  MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
      const ExternalTwoByteString::Resource* resource);

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  inline void FinalizeExternalString(String* string);

  // Allocates an uninitialized object.  The memory is non-executable if the
  // hardware and OS allow.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes,
                                                  AllocationSpace space,
                                                  AllocationSpace retry_space);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when shortening objects.
  void CreateFillerObjectAt(Address addr, int size);

  // Makes a new native code object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.  On success, the pointer to the Code object is stored in the
  // self_reference.  This allows generated code to reference its own Code
  // object by containing this pointer.
  // Please note this function does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* CreateCode(const CodeDesc& desc,
                                          Code::Flags flags,
                                          Handle<Object> self_reference,
                                          bool immovable = false);

  MUST_USE_RESULT MaybeObject* CopyCode(Code* code);

  // Copy the code and scope info part of the code object, but insert
  // the provided data as the relocation information.
  MUST_USE_RESULT MaybeObject* CopyCode(Code* code, Vector<byte> reloc_info);

  // Finds the symbol for string in the symbol table.
  // If not found, a new symbol is added to the table and returned.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str);
  MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str);
  MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str);
  MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(const char* str) {
    return LookupSymbol(CStrVector(str));
  }
  MUST_USE_RESULT MaybeObject* LookupSymbol(String* str);
  MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Handle<SeqAsciiString> string,
                                                 int from,
                                                 int length);

  bool LookupSymbolIfExists(String* str, String** symbol);
  bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);

  // Compute the matching symbol map for a string if possible.
  // NULL is returned if string is in new space or not flattened.
  Map* SymbolMapForString(String* str);
Steve Blocka7e24c12009-10-30 11:49:00 +00001010
  // Tries to flatten a string before a compare operation.
  //
  // Returns a failure in case it was decided that flattening was
  // necessary and failed. Note that if flattening is not necessary,
  // the string might stay non-flat even when no failure is returned.
  //
  // Please note this function does not perform a garbage collection.
  MUST_USE_RESULT inline MaybeObject* PrepareForCompare(String* str);

  // Converts the given boolean condition to a JavaScript boolean value.
  inline Object* ToBoolean(bool condition);

  // Code that should be run before and after each GC. Includes some
  // reporting/verification activities when compiled with DEBUG set.
  void GarbageCollectionPrologue();
  void GarbageCollectionEpilogue();

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  bool CollectGarbage(AllocationSpace space,
                      GarbageCollector collector,
                      const char* gc_reason,
                      const char* collector_reason);

  // Performs a garbage collection operation.
  // Returns whether there is a chance that another major GC could
  // collect more garbage.
  inline bool CollectGarbage(AllocationSpace space,
                             const char* gc_reason = NULL);

  static const int kNoGCFlags = 0;
  static const int kSweepPreciselyMask = 1;
  static const int kReduceMemoryFootprintMask = 2;
  static const int kAbortIncrementalMarkingMask = 4;

  // Making the heap iterable requires us to sweep precisely and abort any
  // incremental marking as well.
  static const int kMakeHeapIterableMask =
      kSweepPreciselyMask | kAbortIncrementalMarkingMask;

  // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
  // non-zero, then the slower precise sweeper is used, which leaves the heap
  // in a state where we can iterate over the heap visiting all objects.
  void CollectAllGarbage(int flags, const char* gc_reason = NULL);
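  // Illustrative usage (a sketch): a caller that needs to walk every object
  // can request an iterable heap with
  //   CollectAllGarbage(kMakeHeapIterableMask, "make heap iterable");
  // while kNoGCFlags asks for an ordinary full collection.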

  // Last-resort GC; tries to squeeze out as much garbage as possible.
  void CollectAllAvailableGarbage(const char* gc_reason = NULL);

  // Check whether the heap is currently iterable.
  bool IsHeapIterable();

  // Ensure that we have swept all spaces in such a way that we can iterate
  // over all objects. May cause a GC.
  void EnsureHeapIsIterable();

  // Notify the heap that a context has been disposed.
  int NotifyContextDisposed() { return ++contexts_disposed_; }

  // Utility to invoke the scavenger. This is needed in test code to
  // ensure that weak global handle callbacks are fired correctly.
  void PerformScavenge();

  inline void increment_scan_on_scavenge_pages() {
    scan_on_scavenge_pages_++;
    if (FLAG_gc_verbose) {
      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
    }
  }

  inline void decrement_scan_on_scavenge_pages() {
    scan_on_scavenge_pages_--;
    if (FLAG_gc_verbose) {
      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
    }
  }

  PromotionQueue* promotion_queue() { return &promotion_queue_; }

#ifdef DEBUG
  // Utility used with the --gc-greedy flag.
  void GarbageCollectionGreedyCheck();
#endif

  void AddGCPrologueCallback(
      GCPrologueCallback callback, GCType gc_type_filter);
  void RemoveGCPrologueCallback(GCPrologueCallback callback);

  void AddGCEpilogueCallback(
      GCEpilogueCallback callback, GCType gc_type_filter);
  void RemoveGCEpilogueCallback(GCEpilogueCallback callback);

  void SetGlobalGCPrologueCallback(GCCallback callback) {
    ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL));
    global_gc_prologue_callback_ = callback;
  }
  void SetGlobalGCEpilogueCallback(GCCallback callback) {
    ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
    global_gc_epilogue_callback_ = callback;
  }
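  // Illustrative usage of the setters above: the XOR in the ASSERTs means a
  // global callback may only be installed when none is set and must be
  // cleared before being replaced, e.g.
  //   SetGlobalGCPrologueCallback(&MyPrologueHook);  // install
  //   SetGlobalGCPrologueCallback(NULL);             // uninstall
  // (MyPrologueHook is a hypothetical GCCallback.)
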
  // Heap root getters. We have versions with and without type::cast() here.
  // You can't use type::cast during GC because the assert fails.
  // TODO(1490): Try removing the unchecked accessors, now that GC marking does
  // not corrupt the map.
#define ROOT_ACCESSOR(type, name, camel_name) \
  type* name() { \
    return type::cast(roots_[k##camel_name##RootIndex]); \
  } \
  type* raw_unchecked_##name() { \
    return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
  }
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
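  // For illustration, the STRONG_ROOT_LIST entry
  // V(Map, byte_array_map, ByteArrayMap) expands through ROOT_ACCESSOR to:
  //   Map* byte_array_map() {
  //     return Map::cast(roots_[kByteArrayMapRootIndex]);
  //   }
  //   Map* raw_unchecked_byte_array_map() {
  //     return reinterpret_cast<Map*>(roots_[kByteArrayMapRootIndex]);
  //   }
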
// Utility type maps
#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
  Map* name##_map() { \
    return Map::cast(roots_[k##Name##MapRootIndex]); \
  }
  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define SYMBOL_ACCESSOR(name, str) String* name() { \
    return String::cast(roots_[k##name##RootIndex]); \
  }
  SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

  // The hidden_symbol is special because it is the empty string, but does
  // not match the empty string.
  String* hidden_symbol() { return hidden_symbol_; }

  void set_global_contexts_list(Object* object) {
    global_contexts_list_ = object;
  }
  Object* global_contexts_list() { return global_contexts_list_; }

  // Number of mark-sweeps.
  int ms_count() { return ms_count_; }

  // Iterates over all roots in the heap.
  void IterateRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all strong roots in the heap.
  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all the other roots in the heap.
  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);

  // Iterates over pointers into the from-semispace of new space found in
  // the memory interval from start to end, and marks them.
  void IterateAndMarkPointersToFromSpace(Address start,
                                         Address end,
                                         ObjectSlotCallback callback);

  // Returns whether the object resides in new space.
  inline bool InNewSpace(Object* object);
  inline bool InNewSpace(Address addr);
  inline bool InNewSpacePage(Address addr);
  inline bool InFromSpace(Object* object);
  inline bool InToSpace(Object* object);

  // Checks whether an address/object is in the heap (including the auxiliary
  // area and unused area).
  bool Contains(Address addr);
  bool Contains(HeapObject* value);

  // Checks whether an address/object is in a space.
  // Currently used by tests, serialization and heap verification only.
  bool InSpace(Address addr, AllocationSpace space);
  bool InSpace(HeapObject* value, AllocationSpace space);

  // Finds out which space an object should get promoted to based on its type.
  inline OldSpace* TargetSpace(HeapObject* object);
  inline AllocationSpace TargetSpaceId(InstanceType type);

  // Sets the stub_cache_ (only used when expanding the dictionary).
  void public_set_code_stubs(UnseededNumberDictionary* value) {
    roots_[kCodeStubsRootIndex] = value;
  }

  // Support for computing object sizes for old objects during GCs. Returns
  // a function that is guaranteed to be safe for computing object sizes in
  // the current GC phase.
  HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
    return gc_safe_size_of_old_object_;
  }

  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
  void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
    roots_[kNonMonomorphicCacheRootIndex] = value;
  }

  void public_set_empty_script(Script* script) {
    roots_[kEmptyScriptRootIndex] = script;
  }

  void public_set_store_buffer_top(Address* top) {
    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
  }

  // Update the next script id.
  inline void SetLastScriptId(Object* last_script_id);

  // Generated code can embed this address to get access to the roots.
  Object** roots_array_start() { return roots_; }

  Address* store_buffer_top_address() {
    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
  }

  // Get address of the global contexts list for serialization support.
  Object** global_contexts_list_address() {
    return &global_contexts_list_;
  }

#ifdef DEBUG
  void Print();
  void PrintHandles();

  // Verify the heap is in its normal state before or after a GC.
  void Verify();

  // Verify that AccessorPairs are not shared, i.e. make sure that they have
  // exactly one pointer to them.
  void VerifyNoAccessorPairSharing();

  void OldPointerSpaceCheckStoreBuffer();
  void MapSpaceCheckStoreBuffer();
  void LargeObjectSpaceCheckStoreBuffer();

  // Report heap statistics.
  void ReportHeapStatistics(const char* title);
  void ReportCodeStatistics(const char* title);

  // Fill in bogus values in from space.
  void ZapFromSpace();
#endif

  // Print short heap statistics.
  void PrintShortHeapStatistics();

  // Makes a new symbol object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  MUST_USE_RESULT MaybeObject* CreateSymbol(
      const char* str, int length, int hash);
  MUST_USE_RESULT MaybeObject* CreateSymbol(String* str);

  // Write barrier support for address[offset] = o.
  inline void RecordWrite(Address address, int offset);

  // Write barrier support for address[start : start + len[ = o, i.e. for
  // the half-open slot range [start, start + len).
  inline void RecordWrites(Address address, int start, int len);

  // Given an address occupied by a live code object, return that object.
  Object* FindCodeObject(Address a);

  // Invoke Shrink on shrinkable spaces.
  void Shrink();

  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
  inline HeapState gc_state() { return gc_state_; }

  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }

#ifdef DEBUG
  bool IsAllocationAllowed() { return allocation_allowed_; }
  inline bool allow_allocation(bool enable);

  bool disallow_allocation_failure() {
    return disallow_allocation_failure_;
  }

  void TracePathToObject(Object* target);
  void TracePathToGlobal();
#endif

  // Callback function passed to Heap::Iterate etc. Copies an object if
  // necessary; the object might be promoted to an old space. The caller must
  // ensure the precondition that the object is (a) a heap object and (b) in
  // the heap's from space.
  static inline void ScavengePointer(HeapObject** p);
  static inline void ScavengeObject(HeapObject** p, HeapObject* object);

  // Commits from space if it is uncommitted.
  void EnsureFromSpaceIsCommitted();

  // Support for partial snapshots. After calling this we can allocate a
  // certain number of bytes using only linear allocation (with a
  // LinearAllocationScope and an AlwaysAllocateScope) without using freelists
  // or causing a GC. It returns true if space was reserved or false if a GC is
  // needed. For paged spaces the space requested must include the space wasted
  // at the end of each page when allocating linearly.
  void ReserveSpace(
      int new_space_size,
      int pointer_space_size,
      int data_space_size,
      int code_space_size,
      int map_space_size,
      int cell_space_size,
      int large_object_size);

  //
  // Support for the API.
  //

  bool CreateApiObjects();

  // Attempt to find the number in a small cache. If we find it, return
  // the string representation of the number. Otherwise return undefined.
  Object* GetNumberStringCache(Object* number);

  // Update the cache with a new number-string pair.
  void SetNumberStringCache(Object* number, String* str);

  // Adjusts the amount of registered external memory.
  // Returns the adjusted value.
  inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
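  // Illustrative usage: an embedder that hands V8 a 1 MB external backing
  // store reports it so GC heuristics can account for it, e.g.
  //   AdjustAmountOfExternalAllocatedMemory(1024 * 1024);   // on allocation
  //   AdjustAmountOfExternalAllocatedMemory(-1024 * 1024);  // on release
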
  // Allocate uninitialized fixed array.
  MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length);
  MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
                                                     PretenureFlag pretenure);

  inline intptr_t PromotedTotalSize() {
    return PromotedSpaceSize() + PromotedExternalMemorySize();
  }

  // True if we have reached the allocation limit in the old generation that
  // should force the next (normally caused) GC to be a full one.
  inline bool OldGenerationPromotionLimitReached() {
    return PromotedTotalSize() > old_gen_promotion_limit_;
  }

  inline intptr_t OldGenerationSpaceAvailable() {
    return old_gen_allocation_limit_ - PromotedTotalSize();
  }

  inline intptr_t OldGenerationCapacityAvailable() {
    return max_old_generation_size_ - PromotedTotalSize();
  }

  static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
  static const intptr_t kMinimumAllocationLimit =
      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);

  // When we sweep lazily we initially guess that there is no garbage on the
  // heap and set the limits for the next GC accordingly. As we sweep we find
  // out that some of the pages contained garbage and we have to adjust
  // downwards the size of the heap. This means the limits that control the
  // timing of the next GC also need to be adjusted downwards.
  void LowerOldGenLimits(intptr_t adjustment) {
    size_of_old_gen_at_last_old_space_gc_ -= adjustment;
    old_gen_promotion_limit_ =
        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
    old_gen_allocation_limit_ =
        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
  }

  intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
    const int divisor = FLAG_stress_compaction ? 10 : 3;
    intptr_t limit =
        Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
    limit += new_space_.Capacity();
    limit *= old_gen_limit_factor_;
    intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
    return Min(limit, halfway_to_the_max);
  }

  intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
    const int divisor = FLAG_stress_compaction ? 8 : 2;
    intptr_t limit =
        Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
    limit += new_space_.Capacity();
    limit *= old_gen_limit_factor_;
    intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
    return Min(limit, halfway_to_the_max);
  }
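  // Worked example (illustrative numbers): for an old_gen_size of 96 MB
  // without --stress-compaction (divisor 3), a 16 MB new space capacity and
  // old_gen_limit_factor_ == 1, OldGenPromotionLimit computes
  //   limit = (96 + 96 / 3 + 16) * 1 = 144 MB,
  // capped at (96 + max_old_generation_size_) / 2 if that is smaller.
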
  // Implements the corresponding V8 API function.
  bool IdleNotification(int hint);

  // Declare all the root indices.
  enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
    STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP

#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
    SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

    kSymbolTableRootIndex,
    kStrongRootListLength = kSymbolTableRootIndex,
    kRootListLength
  };

  MUST_USE_RESULT MaybeObject* NumberToString(
      Object* number, bool check_number_string_cache = true);
  MUST_USE_RESULT MaybeObject* Uint32ToString(
      uint32_t value, bool check_number_string_cache = true);

  Map* MapForExternalArrayType(ExternalArrayType array_type);
  RootListIndex RootIndexForExternalArrayType(
      ExternalArrayType array_type);

  void RecordStats(HeapStats* stats, bool take_snapshot = false);

  // Copies a block of memory from src to dst. The block size must be
  // pointer-size aligned.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  // Optimized version of memmove for blocks with pointer-size aligned sizes
  // and pointer-size aligned addresses.
  static inline void MoveBlock(Address dst, Address src, int byte_size);

  // Checks the new space expansion criteria and expands the semispaces if
  // they are met.
  void CheckNewSpaceExpansionCriteria();

  inline void IncrementYoungSurvivorsCounter(int survived) {
    ASSERT(survived >= 0);
    young_survivors_after_last_gc_ = survived;
    survived_since_last_expansion_ += survived;
  }

  inline bool NextGCIsLikelyToBeFull() {
    if (FLAG_gc_global) return true;

    intptr_t total_promoted = PromotedTotalSize();

    intptr_t adjusted_promotion_limit =
        old_gen_promotion_limit_ - new_space_.Capacity();

    if (total_promoted >= adjusted_promotion_limit) return true;

    intptr_t adjusted_allocation_limit =
        old_gen_allocation_limit_ - new_space_.Capacity() / 5;

    if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;

    return false;
  }

  void UpdateNewSpaceReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void UpdateReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  void ProcessWeakReferences(WeakObjectRetainer* retainer);

  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);

  // Helper function that governs the promotion policy from new space to
  // old. If the object's old address lies below the new space's age
  // mark or if we've already filled the bottom 1/16th of the to space,
  // we try to promote this object.
  inline bool ShouldBePromoted(Address old_address, int object_size);

  int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }

  void ClearJSFunctionResultCaches();

  void ClearNormalizedMapCaches();

  // Clears the cache of ICs related to this map.
  void ClearCacheOnMap(Map* map) {
    if (FLAG_cleanup_code_caches_at_gc) {
      map->ClearCodeCache(this);
    }
  }

  GCTracer* tracer() { return tracer_; }

  // Returns the size of objects residing in non-new spaces.
  intptr_t PromotedSpaceSize();
  intptr_t PromotedSpaceSizeOfObjects();

  double total_regexp_code_generated() { return total_regexp_code_generated_; }
  void IncreaseTotalRegexpCodeGenerated(int size) {
    total_regexp_code_generated_ += size;
  }

  // Returns maximum GC pause.
  int get_max_gc_pause() { return max_gc_pause_; }

  // Returns maximum size of objects alive after GC.
  intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }

  // Returns minimal interval between two subsequent collections.
  int get_min_in_mutator() { return min_in_mutator_; }

  MarkCompactCollector* mark_compact_collector() {
    return &mark_compact_collector_;
  }

  StoreBuffer* store_buffer() {
    return &store_buffer_;
  }

  Marking* marking() {
    return &marking_;
  }

  IncrementalMarking* incremental_marking() {
    return &incremental_marking_;
  }

  bool IsSweepingComplete() {
    return old_data_space()->IsSweepingComplete() &&
           old_pointer_space()->IsSweepingComplete();
  }

  bool AdvanceSweepers(int step_size) {
    bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
    sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
    return sweeping_complete;
  }
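  // Illustrative driver (a sketch): idle-time code can advance lazy sweeping
  // in bounded steps until both old spaces report completion, e.g.
  //   while (!IsSweepingComplete()) {
  //     AdvanceSweepers(step_size);  // step_size chosen by the caller
  //   }
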
  ExternalStringTable* external_string_table() {
    return &external_string_table_;
  }

  // Returns the current sweep generation.
  int sweep_generation() {
    return sweep_generation_;
  }

  inline Isolate* isolate();

  inline void CallGlobalGCPrologueCallback() {
    if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
  }

  inline void CallGlobalGCEpilogueCallback() {
    if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
  }

  inline bool OldGenerationAllocationLimitReached();

  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
  }

  void QueueMemoryChunkForFree(MemoryChunk* chunk);
  void FreeQueuedChunks();

  // Completely clear the Instanceof cache (to stop it keeping objects alive
  // around a GC).
  inline void CompletelyClearInstanceofCache();

  // The roots that have an index less than this are always in old space.
  static const int kOldSpaceRoots = 0x20;

  uint32_t HashSeed() {
    uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
    ASSERT(FLAG_randomize_hashes || seed == 0);
    return seed;
  }

  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
    ASSERT(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
    set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  void SetConstructStubDeoptPCOffset(int pc_offset) {
    ASSERT(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
    set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
  }

  // For post-mortem debugging.
  void RememberUnmappedPage(Address page, bool compacted);

  // Global inline caching age: it is incremented on some GCs after context
  // disposal. We use it to flush inline caches.
  int global_ic_age() {
    return global_ic_age_;
  }

  void AgeInlineCaches() {
    ++global_ic_age_;
  }

 private:
  Heap();

  // This can be calculated directly from a pointer to the heap; however, it is
  // more expedient to get at the isolate directly from within Heap methods.
  Isolate* isolate_;

  intptr_t code_range_size_;
  int reserved_semispace_size_;
  int max_semispace_size_;
  int initial_semispace_size_;
  intptr_t max_old_generation_size_;
  intptr_t max_executable_size_;

  // For keeping track of how much data has survived
  // scavenge since the last new space expansion.
  int survived_since_last_expansion_;

  // For keeping track of when to flush RegExp code.
  int sweep_generation_;

  int always_allocate_scope_depth_;
  int linear_allocation_scope_depth_;

  // For keeping track of context disposals.
  int contexts_disposed_;

  int global_ic_age_;

  int scan_on_scavenge_pages_;

#if defined(V8_TARGET_ARCH_X64)
  static const int kMaxObjectSizeInNewSpace = 1024*KB;
#else
  static const int kMaxObjectSizeInNewSpace = 512*KB;
#endif

  NewSpace new_space_;
  OldSpace* old_pointer_space_;
  OldSpace* old_data_space_;
  OldSpace* code_space_;
  MapSpace* map_space_;
  CellSpace* cell_space_;
  LargeObjectSpace* lo_space_;
  HeapState gc_state_;
  int gc_post_processing_depth_;

  // Returns the amount of external memory registered since the last global GC.
  int PromotedExternalMemorySize();

  int ms_count_;  // how many mark-sweep collections happened
  unsigned int gc_count_;  // how many GCs happened

  // For post-mortem debugging.
  static const int kRememberedUnmappedPages = 128;
  int remembered_unmapped_pages_index_;
  Address remembered_unmapped_pages_[kRememberedUnmappedPages];

  // Total length of the strings we failed to flatten since the last GC.
  int unflattened_strings_length_;

#define ROOT_ACCESSOR(type, name, camel_name) \
  inline void set_##name(type* value) { \
    /* The deserializer makes use of the fact that these common roots are */ \
    /* never in new space and never on a page that is being compacted. */ \
    ASSERT(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
    roots_[k##camel_name##RootIndex] = value; \
  }
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

#ifdef DEBUG
  bool allocation_allowed_;

  // If the --gc-interval flag is set to a positive value, this
  // variable holds the number of allocations remaining until the
  // next failure and garbage collection.
  int allocation_timeout_;

  // Do we expect to be able to handle allocation failure at this
  // time?
  bool disallow_allocation_failure_;

  HeapDebugUtils* debug_utils_;
#endif  // DEBUG

  // Indicates that the new space should be kept small due to high promotion
  // rates caused by the mutator allocating a lot of long-lived objects.
  bool new_space_high_promotion_mode_active_;

  // Limit that triggers a global GC on the next (normally caused) GC. This
  // is checked when we have already decided to do a GC to help determine
  // which collector to invoke.
  intptr_t old_gen_promotion_limit_;

  // Limit that triggers a global GC as soon as is reasonable. This is
  // checked before expanding a paged space in the old generation and on
  // every allocation in large object space.
  intptr_t old_gen_allocation_limit_;

  // Sometimes the heuristics dictate that those limits are increased. This
  // variable records that fact.
  int old_gen_limit_factor_;

  // Used to adjust the limits that control the timing of the next GC.
  intptr_t size_of_old_gen_at_last_old_space_gc_;

  // Limit on the amount of externally allocated memory allowed
  // between global GCs. If reached, a global GC is forced.
  intptr_t external_allocation_limit_;

  // The amount of external memory registered through the API kept alive
  // by global handles.
  int amount_of_external_allocated_memory_;

  // Caches the amount of external memory registered at the last global GC.
  int amount_of_external_allocated_memory_at_last_global_gc_;

  // Indicates that an allocation has failed in the old generation since the
  // last GC.
  int old_gen_exhausted_;

  Object* roots_[kRootListLength];

  Object* global_contexts_list_;

  StoreBufferRebuilder store_buffer_rebuilder_;

  struct StringTypeTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  struct ConstantSymbolTable {
    const char* contents;
    RootListIndex index;
  };

  struct StructTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  static const StringTypeTable string_type_table[];
  static const ConstantSymbolTable constant_symbol_table[];
  static const StructTable struct_table[];

  // The special hidden symbol which is an empty string, but does not match
  // any string when looked up in properties.
  String* hidden_symbol_;

  // GC callback function, called before and after mark-compact GC.
  // Allocations in the callback function are disallowed.
  struct GCPrologueCallbackPair {
    GCPrologueCallbackPair(GCPrologueCallback callback, GCType gc_type)
        : callback(callback), gc_type(gc_type) {
    }
    bool operator==(const GCPrologueCallbackPair& pair) const {
      return pair.callback == callback;
    }
    GCPrologueCallback callback;
    GCType gc_type;
  };
  List<GCPrologueCallbackPair> gc_prologue_callbacks_;

  struct GCEpilogueCallbackPair {
    GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type)
        : callback(callback), gc_type(gc_type) {
    }
    bool operator==(const GCEpilogueCallbackPair& pair) const {
      return pair.callback == callback;
    }
    GCEpilogueCallback callback;
    GCType gc_type;
  };
  List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;

  GCCallback global_gc_prologue_callback_;
  GCCallback global_gc_epilogue_callback_;

  // Support for computing object sizes during GC.
  HeapObjectCallback gc_safe_size_of_old_object_;
  static int GcSafeSizeOfOldObject(HeapObject* object);

  // Update the GC state. Called from the mark-compact collector.
  void MarkMapPointersAsEncoded(bool encoded) {
    ASSERT(!encoded);
    gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
  }

  // Checks whether a global GC is necessary.
  GarbageCollector SelectGarbageCollector(AllocationSpace space,
                                          const char** reason);

  // Performs a garbage collection.
  // Returns whether there is a chance another major GC could
  // collect more garbage.
  bool PerformGarbageCollection(GarbageCollector collector,
                                GCTracer* tracer);

  inline void UpdateOldSpaceLimits();

  // Allocate an uninitialized object in map space. The behavior is identical
  // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
  // have to test the allocation space argument and (b) can reduce code size
  // (since both AllocateRaw and AllocateRawMap are inlined).
  MUST_USE_RESULT inline MaybeObject* AllocateRawMap();

  // Allocate an uninitialized object in the global property cell space.
  MUST_USE_RESULT inline MaybeObject* AllocateRawCell();

  // Initializes a JSObject based on its map.
  void InitializeJSObjectFromMap(JSObject* obj,
                                 FixedArray* properties,
                                 Map* map);

  bool CreateInitialMaps();
  bool CreateInitialObjects();

  // These two Create*EntryStub functions are here and forced to not be inlined
  // because of a gcc-4.4 bug that assigns wrong vtable entries.
  NO_INLINE(void CreateJSEntryStub());
  NO_INLINE(void CreateJSConstructEntryStub());

  void CreateFixedStubs();

  MaybeObject* CreateOddball(const char* to_string,
                             Object* to_number,
                             byte kind);

  // Allocate a JSArray with no elements.
  MUST_USE_RESULT MaybeObject* AllocateJSArray(
      ElementsKind elements_kind,
      PretenureFlag pretenure = NOT_TENURED);

  // Allocate empty fixed array.
  MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();

  // Allocate empty fixed double array.
  MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();

  // Performs a minor collection in the new generation.
  void Scavenge();

  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
      Heap* heap,
      Object** pointer);

  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
  static void ScavengeStoreBufferCallback(Heap* heap,
                                          MemoryChunk* page,
                                          StoreBufferEvent event);

  // Performs a major collection in the whole heap.
  void MarkCompact(GCTracer* tracer);

  // Code to be run before and after mark-compact.
  void MarkCompactPrologue();

  // Record statistics before and after garbage collection.
  void ReportStatisticsBeforeGC();
  void ReportStatisticsAfterGC();

  // Slow part of scavenge object.
  static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);

  // Initializes a function with a shared part and prototype.
  // Note: this code was factored out of AllocateFunction such that
  // other parts of the VM could use it. Specifically, a function that creates
  // instances of type JS_FUNCTION_TYPE benefits from the use of this function.
  // Please note this does not perform a garbage collection.
  inline void InitializeFunction(
      JSFunction* function,
      SharedFunctionInfo* shared,
      Object* prototype);

  // Total RegExp code ever generated.
  double total_regexp_code_generated_;

  GCTracer* tracer_;

  // Allocates a small number-to-string cache.
  MUST_USE_RESULT MaybeObject* AllocateInitialNumberStringCache();
  // Creates and installs the full-sized number-to-string cache.
  void AllocateFullSizeNumberStringCache();
  // Get the length of the number-to-string cache based on the max semispace
  // size.
  int FullSizeNumberStringCacheLength();
  // Flush the number-to-string cache.
  void FlushNumberStringCache();

  void UpdateSurvivalRateTrend(int start_new_space_size);

  enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };

  static const int kYoungSurvivalRateHighThreshold = 90;
  static const int kYoungSurvivalRateLowThreshold = 10;
  static const int kYoungSurvivalRateAllowedDeviation = 15;

  int young_survivors_after_last_gc_;
  int high_survival_rate_period_length_;
  int low_survival_rate_period_length_;
  double survival_rate_;
  SurvivalRateTrend previous_survival_rate_trend_;
  SurvivalRateTrend survival_rate_trend_;

  void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
    ASSERT(survival_rate_trend != FLUCTUATING);
    previous_survival_rate_trend_ = survival_rate_trend_;
    survival_rate_trend_ = survival_rate_trend;
  }

  SurvivalRateTrend survival_rate_trend() {
    if (survival_rate_trend_ == STABLE) {
      return STABLE;
    } else if (previous_survival_rate_trend_ == STABLE) {
      return survival_rate_trend_;
    } else if (survival_rate_trend_ != previous_survival_rate_trend_) {
      return FLUCTUATING;
    } else {
      return survival_rate_trend_;
    }
  }

  bool IsStableOrIncreasingSurvivalTrend() {
    switch (survival_rate_trend()) {
      case STABLE:
      case INCREASING:
        return true;
      default:
        return false;
    }
  }

  bool IsStableOrDecreasingSurvivalTrend() {
    switch (survival_rate_trend()) {
      case STABLE:
      case DECREASING:
        return true;
      default:
        return false;
    }
  }

  bool IsIncreasingSurvivalTrend() {
    return survival_rate_trend() == INCREASING;
  }

  bool IsHighSurvivalRate() {
    return high_survival_rate_period_length_ > 0;
  }

  bool IsLowSurvivalRate() {
    return low_survival_rate_period_length_ > 0;
  }

  void SelectScavengingVisitorsTable();

  void StartIdleRound() {
    mark_sweeps_since_idle_round_started_ = 0;
    ms_count_at_last_idle_notification_ = ms_count_;
  }

  void FinishIdleRound() {
    mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound;
    scavenges_since_last_idle_round_ = 0;
  }

  bool EnoughGarbageSinceLastIdleRound() {
    return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold);
  }

  bool WorthStartingGCWhenIdle() {
    if (contexts_disposed_ > 0) {
      return true;
    }
    return incremental_marking()->WorthActivating();
  }

  // Estimates how many milliseconds a Mark-Sweep would take to complete.
  // In the idle notification handler we assume that this function will return:
  // - a number less than 10 for small heaps, which are less than 8 MB.
  // - a number greater than 10 for large heaps, which are greater than 32 MB.
  int TimeMarkSweepWouldTakeInMs() {
    // Rough estimate of how many megabytes of heap can be processed in 1 ms.
    static const int kMbPerMs = 2;

    int heap_size_mb = static_cast<int>(SizeOfObjects() / MB);
    return heap_size_mb / kMbPerMs;
  }
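  // Worked example: with kMbPerMs == 2, a 6 MB heap estimates 6 / 2 = 3 ms
  // (below 10) and a 64 MB heap estimates 32 ms (above 10), matching the
  // bounds assumed by the idle notification handler.
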
  // Returns true if no more GC work is left.
  bool IdleGlobalGC();

  void AdvanceIdleIncrementalMarking(intptr_t step_size);

  static const int kInitialSymbolTableSize = 2048;
  static const int kInitialEvalCacheSize = 64;
  static const int kInitialNumberStringCacheSize = 256;

  // Maximum GC pause.
  int max_gc_pause_;

  // Maximum size of objects alive after GC.
  intptr_t max_alive_after_gc_;

  // Minimal interval between two subsequent collections.
  int min_in_mutator_;

  // Size of objects alive after last GC.
  intptr_t alive_after_last_gc_;

  double last_gc_end_timestamp_;

  MarkCompactCollector mark_compact_collector_;

  StoreBuffer store_buffer_;

  Marking marking_;

  IncrementalMarking incremental_marking_;

  int number_idle_notifications_;
  unsigned int last_idle_notification_gc_count_;
  bool last_idle_notification_gc_count_init_;

  int mark_sweeps_since_idle_round_started_;
  int ms_count_at_last_idle_notification_;
  unsigned int gc_count_at_last_idle_gc_;
  int scavenges_since_last_idle_round_;

  static const int kMaxMarkSweepsInIdleRound = 7;
  static const int kIdleScavengeThreshold = 5;

  // Shared state read by the scavenge collector and set by ScavengeObject.
  PromotionQueue promotion_queue_;

  // Flag is set when the heap has been configured. The heap can be repeatedly
  // configured through the API until it is set up.
  bool configured_;

  ExternalStringTable external_string_table_;

  VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;

  MemoryChunk* chunks_queued_for_free_;

  friend class Factory;
  friend class GCTracer;
  friend class DisallowAllocationFailure;
  friend class AlwaysAllocateScope;
  friend class LinearAllocationScope;
  friend class Page;
  friend class Isolate;
  friend class MarkCompactCollector;
  friend class StaticMarkingVisitor;
  friend class MapCompact;

  DISALLOW_COPY_AND_ASSIGN(Heap);
};


class HeapStats {
 public:
  static const int kStartMarker = 0xDECADE00;
  static const int kEndMarker = 0xDECADE01;

  int* start_marker;                    // 0
  int* new_space_size;                  // 1
  int* new_space_capacity;              // 2
  intptr_t* old_pointer_space_size;     // 3
  intptr_t* old_pointer_space_capacity; // 4
  intptr_t* old_data_space_size;        // 5
  intptr_t* old_data_space_capacity;    // 6
  intptr_t* code_space_size;            // 7
  intptr_t* code_space_capacity;        // 8
  intptr_t* map_space_size;             // 9
  intptr_t* map_space_capacity;         // 10
  intptr_t* cell_space_size;            // 11
  intptr_t* cell_space_capacity;        // 12
  intptr_t* lo_space_size;              // 13
  int* global_handle_count;             // 14
  int* weak_global_handle_count;        // 15
  int* pending_global_handle_count;     // 16
  int* near_death_global_handle_count;  // 17
  int* free_global_handle_count;        // 18
  intptr_t* memory_allocator_size;      // 19
  intptr_t* memory_allocator_capacity;  // 20
  int* objects_per_type;                // 21
  int* size_per_type;                   // 22
  int* os_error;                        // 23
  int* end_marker;                      // 24
};
2100
2101class AlwaysAllocateScope {
2102 public:
Ben Murdoch3ef787d2012-04-12 10:51:47 +01002103 inline AlwaysAllocateScope();
2104 inline ~AlwaysAllocateScope();
Steve Blocka7e24c12009-10-30 11:49:00 +00002105};


class LinearAllocationScope {
 public:
  inline LinearAllocationScope();
  inline ~LinearAllocationScope();
};


#ifdef DEBUG
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have
// to point into the heap to a location that has a map pointer at its first
// word. Caveat: Heap::Contains is an approximation because it can return
// true for objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor: public ObjectVisitor {
 public:
  inline void VisitPointers(Object** start, Object** end);
};
#endif


// Space iterator for iterating over all spaces of the heap.
// Returns each space in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
 public:
  Space* next();
  AllSpaces() { counter_ = FIRST_SPACE; }
 private:
  int counter_;
};


// Space iterator for iterating over all old spaces of the heap: Old pointer
// space, old data space and code space.
// Returns each space in turn, and null when it is done.
class OldSpaces BASE_EMBEDDED {
 public:
  OldSpace* next();
  OldSpaces() { counter_ = OLD_POINTER_SPACE; }
 private:
  int counter_;
};


// Space iterator for iterating over all the paged spaces of the heap:
// Map space, old pointer space, old data space, code space and cell space.
// Returns each space in turn, and null when it is done.
class PagedSpaces BASE_EMBEDDED {
 public:
  PagedSpace* next();
  PagedSpaces() { counter_ = OLD_POINTER_SPACE; }
 private:
  int counter_;
};
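
// All three space iterators share the same cursor protocol; a sketch
// (the loop body is illustrative):
//
// PagedSpaces spaces;
// for (PagedSpace* space = spaces.next();
//      space != NULL;
//      space = spaces.next()) {
//   ... visit one paged space ...
// }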


// Space iterator for iterating over all spaces of the heap.
// For each space an object iterator is provided. The deallocation of the
// returned object iterators is handled by the space iterator.
class SpaceIterator : public Malloced {
 public:
  SpaceIterator();
  explicit SpaceIterator(HeapObjectCallback size_func);
  virtual ~SpaceIterator();

  bool has_next();
  ObjectIterator* next();

 private:
  ObjectIterator* CreateIterator();

  int current_space_;  // from enum AllocationSpace.
  ObjectIterator* iterator_;  // object iterator for the current space.
  HeapObjectCallback size_func_;
};


// A HeapIterator provides iteration over the whole heap. It
// aggregates the specific iterators for the different spaces as
// these can only iterate over a single space.
//
// HeapIterator can skip free list nodes (that is, de-allocated heap
// objects that still remain in the heap). As the implementation of free
// node filtering uses GC marks, it can't be used during MS/MC GC
// phases. Also, it is forbidden to interrupt iteration in this mode,
// as this will leave heap objects marked (and thus, unusable).
class HeapObjectsFilter;

class HeapIterator BASE_EMBEDDED {
 public:
  enum HeapObjectsFiltering {
    kNoFiltering,
    kFilterUnreachable
  };

  HeapIterator();
  explicit HeapIterator(HeapObjectsFiltering filtering);
  ~HeapIterator();

  HeapObject* next();
  void reset();

 private:
  // Perform the initialization.
  void Init();
  // Perform all necessary shutdown (destruction) work.
  void Shutdown();
  HeapObject* NextObject();

  HeapObjectsFiltering filtering_;
  HeapObjectsFilter* filter_;
  // Space iterator for iterating all the spaces.
  SpaceIterator* space_iterator_;
  // Object iterator for the space currently being iterated.
  ObjectIterator* object_iterator_;
};
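
// Typical whole-heap walk using the interface above (loop body
// illustrative):
//
// HeapIterator iterator;  // or HeapIterator(HeapIterator::kFilterUnreachable)
// for (HeapObject* obj = iterator.next();
//      obj != NULL;
//      obj = iterator.next()) {
//   ... inspect obj; in filtering mode, do not interrupt the loop ...
// }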


// Cache for mapping (map, property name) into field offset.
// Cleared at startup and prior to mark sweep collection.
class KeyedLookupCache {
 public:
  // Lookup field offset for (map, name). If absent, -1 is returned.
  int Lookup(Map* map, String* name);

  // Update an element in the cache.
  void Update(Map* map, String* name, int field_offset);

  // Clear the cache.
  void Clear();

  static const int kLength = 256;
  static const int kCapacityMask = kLength - 1;
  static const int kMapHashShift = 5;
  static const int kHashMask = -4;  // Zero the last two bits.
  static const int kEntriesPerBucket = 4;
  static const int kNotFound = -1;

  // kEntriesPerBucket should be a power of 2.
  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);

 private:
  KeyedLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].map = NULL;
      keys_[i].name = NULL;
      field_offsets_[i] = kNotFound;
    }
  }

  static inline int Hash(Map* map, String* name);

  // Get the address of the keys and field_offsets arrays. Used in
  // generated code to perform cache lookups.
  Address keys_address() {
    return reinterpret_cast<Address>(&keys_);
  }

  Address field_offsets_address() {
    return reinterpret_cast<Address>(&field_offsets_);
  }

  struct Key {
    Map* map;
    String* name;
  };

  Key keys_[kLength];
  int field_offsets_[kLength];

  friend class ExternalReference;
  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
};
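
// Caller-side sketch: probe the cache before the slow property lookup
// (the isolate->keyed_lookup_cache() accessor is assumed here for
// illustration):
//
// int offset = isolate->keyed_lookup_cache()->Lookup(map, name);
// if (offset == KeyedLookupCache::kNotFound) {
//   offset = ...;  // slow lookup against the map's descriptors
//   isolate->keyed_lookup_cache()->Update(map, name, offset);
// }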


// Cache for mapping (array, property name) into descriptor index.
// The cache contains both positive and negative results.
// A descriptor index equal to kNotFound means the property is absent.
// Cleared at startup and prior to any gc.
class DescriptorLookupCache {
 public:
  // Lookup descriptor index for (array, name).
  // If absent, kAbsent is returned.
  int Lookup(DescriptorArray* array, String* name) {
    if (!StringShape(name).IsSymbol()) return kAbsent;
    int index = Hash(array, name);
    Key& key = keys_[index];
    if ((key.array == array) && (key.name == name)) return results_[index];
    return kAbsent;
  }

  // Update an element in the cache.
  void Update(DescriptorArray* array, String* name, int result) {
    ASSERT(result != kAbsent);
    if (StringShape(name).IsSymbol()) {
      int index = Hash(array, name);
      Key& key = keys_[index];
      key.array = array;
      key.name = name;
      results_[index] = result;
    }
  }

  // Clear the cache.
  void Clear();

  static const int kAbsent = -2;

 private:
  DescriptorLookupCache() {
    for (int i = 0; i < kLength; ++i) {
      keys_[i].array = NULL;
      keys_[i].name = NULL;
      results_[i] = kAbsent;
    }
  }

  static int Hash(DescriptorArray* array, String* name) {
    // Uses only lower 32 bits if pointers are larger.
    uint32_t array_hash =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array)) >> 2;
    uint32_t name_hash =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> 2;
    return (array_hash ^ name_hash) % kLength;
  }

  static const int kLength = 64;
  struct Key {
    DescriptorArray* array;
    String* name;
  };

  Key keys_[kLength];
  int results_[kLength];

  friend class Isolate;
  DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
};
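
// Caller-side sketch (isolate->descriptor_lookup_cache() and
// DescriptorArray::Search are assumed here for illustration):
//
// DescriptorLookupCache* cache = isolate->descriptor_lookup_cache();
// int number = cache->Lookup(descriptors, name);
// if (number == DescriptorLookupCache::kAbsent) {
//   number = descriptors->Search(name);
//   cache->Update(descriptors, name, number);
// }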


#ifdef DEBUG
class DisallowAllocationFailure {
 public:
  inline DisallowAllocationFailure();
  inline ~DisallowAllocationFailure();

 private:
  bool old_state_;
};
#endif


// A helper class to document/test C++ scopes where we do not
// expect a GC. Usage:
//
// /* Allocation not allowed: we cannot handle a GC in this scope. */
// { AssertNoAllocation nogc;
//   ...
// }
class AssertNoAllocation {
 public:
  inline AssertNoAllocation();
  inline ~AssertNoAllocation();

#ifdef DEBUG
 private:
  bool old_state_;
#endif
};


class DisableAssertNoAllocation {
 public:
  inline DisableAssertNoAllocation();
  inline ~DisableAssertNoAllocation();

#ifdef DEBUG
 private:
  bool old_state_;
#endif
};
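
// DisableAssertNoAllocation is the escape hatch for the scope above; a
// sketch of the intended nesting (semantics assumed from the names):
//
// { AssertNoAllocation nogc;
//   ...
//   { DisableAssertNoAllocation allocation_ok;
//     ... a known-safe allocation inside the no-GC region ...
//   }
// }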

// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.

class GCTracer BASE_EMBEDDED {
 public:
  class Scope BASE_EMBEDDED {
   public:
    enum ScopeId {
      EXTERNAL,
      MC_MARK,
      MC_SWEEP,
      MC_SWEEP_NEWSPACE,
      MC_EVACUATE_PAGES,
      MC_UPDATE_NEW_TO_NEW_POINTERS,
      MC_UPDATE_ROOT_TO_NEW_POINTERS,
      MC_UPDATE_OLD_TO_NEW_POINTERS,
      MC_UPDATE_POINTERS_TO_EVACUATED,
      MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
      MC_UPDATE_MISC_POINTERS,
      MC_FLUSH_CODE,
      kNumberOfScopes
    };

    Scope(GCTracer* tracer, ScopeId scope)
        : tracer_(tracer),
          scope_(scope) {
      start_time_ = OS::TimeCurrentMillis();
    }

    ~Scope() {
      ASSERT(scope_ < kNumberOfScopes);  // scope_ is unsigned.
      tracer_->scopes_[scope_] += OS::TimeCurrentMillis() - start_time_;
    }

   private:
    GCTracer* tracer_;
    ScopeId scope_;
    double start_time_;
  };

  GCTracer(Heap* heap,
           const char* gc_reason,
           const char* collector_reason);
  ~GCTracer();

  // Sets the collector.
  void set_collector(GarbageCollector collector) { collector_ = collector; }

  // Sets the GC count.
  void set_gc_count(unsigned int count) { gc_count_ = count; }

  // Sets the full GC count.
  void set_full_gc_count(int count) { full_gc_count_ = count; }

  void increment_promoted_objects_size(int object_size) {
    promoted_objects_size_ += object_size;
  }

 private:
  // Returns a string matching the collector.
  const char* CollectorString();

  // Returns size of objects in the heap (in MB).
  inline double SizeOfHeapObjects();

  // Timestamp set in the constructor.
  double start_time_;

  // Size of objects in the heap, set in the constructor.
  intptr_t start_object_size_;

  // Size of memory allocated from the OS, set in the constructor.
  intptr_t start_memory_size_;

  // Type of collector.
  GarbageCollector collector_;

  // A count (including this one, e.g. the first collection is 1) of the
  // number of garbage collections.
  unsigned int gc_count_;

  // A count (including this one) of the number of full garbage collections.
  int full_gc_count_;

  // Amounts of time spent in different scopes during GC.
  double scopes_[Scope::kNumberOfScopes];

  // Total amount of space either wasted or contained in one of the free
  // lists before the current GC.
  intptr_t in_free_list_or_wasted_before_gc_;

  // Difference between space used in the heap at the beginning of the current
  // collection and the end of the previous collection.
  intptr_t allocated_since_last_gc_;

  // Amount of time spent in the mutator: the time elapsed between the end of
  // the previous collection and the beginning of the current one.
  double spent_in_mutator_;

  // Size of objects promoted during the current collection.
  intptr_t promoted_objects_size_;

  // Incremental marking steps counters.
  int steps_count_;
  double steps_took_;
  double longest_step_;
  int steps_count_since_last_gc_;
  double steps_took_since_last_gc_;

  Heap* heap_;

  const char* gc_reason_;
  const char* collector_reason_;
};
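
// Tracing one collection, sketched from the interface above (the tracer is
// stack-allocated around a GC cycle; MarkLiveObjects is an illustrative
// placeholder for real collector work):
//
// GCTracer tracer(heap, "allocation failure", "promotion limit reached");
// tracer.set_collector(MARK_COMPACTOR);
// {
//   GCTracer::Scope scope(&tracer, GCTracer::Scope::MC_MARK);
//   MarkLiveObjects();  // time spent here lands in scopes_[MC_MARK]
// }
// // ~GCTracer prints the single --trace_gc line.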


class StringSplitCache {
 public:
  static Object* Lookup(FixedArray* cache, String* string, String* pattern);
  static void Enter(Heap* heap,
                    FixedArray* cache,
                    String* string,
                    String* pattern,
                    FixedArray* array);
  static void Clear(FixedArray* cache);
  static const int kStringSplitCacheSize = 0x100;

 private:
  static const int kArrayEntriesPerCacheEntry = 4;
  static const int kStringOffset = 0;
  static const int kPatternOffset = 1;
  static const int kArrayOffset = 2;

  static MaybeObject* WrapFixedArrayInJSArray(Object* fixed_array);
};
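
// Caller-side sketch for memoizing String.prototype.split results (the
// heap->string_split_cache() root accessor is assumed here for
// illustration, and the miss sentinel is checked loosely):
//
// FixedArray* cache = heap->string_split_cache();
// Object* cached = StringSplitCache::Lookup(cache, subject, pattern);
// if (!cached->IsFixedArray()) {
//   FixedArray* parts = ...;  // perform the actual split
//   StringSplitCache::Enter(heap, cache, subject, pattern, parts);
// }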


class TranscendentalCache {
 public:
  enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches};
  static const int kTranscendentalTypeBits = 3;
  STATIC_ASSERT((1 << kTranscendentalTypeBits) >= kNumberOfCaches);

  // Returns a heap number with f(input), where f is a math function specified
  // by the 'type' argument.
  MUST_USE_RESULT inline MaybeObject* Get(Type type, double input);

  // The cache contains raw Object pointers. This method disposes of
  // them before a garbage collection.
  void Clear();

 private:
  class SubCache {
    static const int kCacheSize = 512;

    explicit SubCache(Type t);

    MUST_USE_RESULT inline MaybeObject* Get(double input);

    inline double Calculate(double input);

    struct Element {
      uint32_t in[2];
      Object* output;
    };

    union Converter {
      double dbl;
      uint32_t integers[2];
    };

    inline static int Hash(const Converter& c) {
      uint32_t hash = (c.integers[0] ^ c.integers[1]);
      hash ^= static_cast<int32_t>(hash) >> 16;
      hash ^= static_cast<int32_t>(hash) >> 8;
      return (hash & (kCacheSize - 1));
    }

    Element elements_[kCacheSize];
    Type type_;
    Isolate* isolate_;

    // Allow access to the caches_ array as an ExternalReference.
    friend class ExternalReference;
    // Inline implementation of the cache.
    friend class TranscendentalCacheStub;
    // For evaluating value.
    friend class TranscendentalCache;

    DISALLOW_COPY_AND_ASSIGN(SubCache);
  };

  TranscendentalCache() {
    for (int i = 0; i < kNumberOfCaches; ++i) caches_[i] = NULL;
  }

  // Used to create an external reference.
  inline Address cache_array_address();

  // Instantiation.
  friend class Isolate;
  // Inline implementation of the caching.
  friend class TranscendentalCacheStub;
  // Allow access to the caches_ array as an ExternalReference.
  friend class ExternalReference;

  SubCache* caches_[kNumberOfCaches];
  DISALLOW_COPY_AND_ASSIGN(TranscendentalCache);
};
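
// The SubCache hash reinterprets the double input as two 32-bit words and
// folds them into the 512-entry table. A caller-side sketch (the
// isolate->transcendental_cache() accessor is assumed here for
// illustration); Get may need to allocate the heap number, so the result
// must be checked:
//
// MaybeObject* maybe =
//     isolate->transcendental_cache()->Get(TranscendentalCache::SIN, 0.5);
// Object* result;
// if (!maybe->ToObject(&result)) return maybe;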


// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
 public:
  virtual ~WeakObjectRetainer() {}

  // Return whether this object should be retained. If NULL is returned the
  // object has no references. Otherwise the address of the retained object
  // should be returned, as in some GC situations the object may have been
  // moved.
  virtual Object* RetainAs(Object* object) = 0;
};
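
// A minimal retainer sketch built only on the interface above: treat every
// weak referent as dead (real retainers consult the collector's liveness
// information and may return a moved object's new address):
//
// class ClearAllRetainer : public WeakObjectRetainer {
//  public:
//   virtual Object* RetainAs(Object* object) {
//     return NULL;  // drop every weak reference
//   }
// };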


// Intrusive object marking uses the least significant bit of a heap
// object's map word to mark objects. Normally all map words have the
// least significant bit set because they contain a tagged map pointer.
// If the bit is not set, the object is marked. All objects should be
// unmarked before resuming JavaScript execution.
class IntrusiveMarking {
 public:
  static bool IsMarked(HeapObject* object) {
    return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
  }

  static void ClearMark(HeapObject* object) {
    uintptr_t map_word = object->map_word().ToRawValue();
    object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
    ASSERT(!IsMarked(object));
  }

  static void SetMark(HeapObject* object) {
    uintptr_t map_word = object->map_word().ToRawValue();
    object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
    ASSERT(IsMarked(object));
  }

  static Map* MapOfMarkedObject(HeapObject* object) {
    uintptr_t map_word = object->map_word().ToRawValue();
    return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
  }

  static int SizeOfMarkedObject(HeapObject* object) {
    return object->SizeFromMap(MapOfMarkedObject(object));
  }

 private:
  static const uintptr_t kNotMarkedBit = 0x1;
  STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
};
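
// The protocol implied above, as a sketch: marking clears the tag bit, so
// the real map must be recovered through MapOfMarkedObject while marked,
// and every mark must be cleared before returning to JavaScript:
//
// IntrusiveMarking::SetMark(object);
// ASSERT(IntrusiveMarking::IsMarked(object));
// int size = IntrusiveMarking::SizeOfMarkedObject(object);
// IntrusiveMarking::ClearMark(object);  // restores the tagged map word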


#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
// object to the search target object.
class PathTracer : public ObjectVisitor {
 public:
  enum WhatToFind {
    FIND_ALL,   // Will find all matches.
    FIND_FIRST  // Will stop the search after first match.
  };

  // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
  // after the first match. If FIND_ALL is specified, then tracing will be
  // done for all matches.
  PathTracer(Object* search_target,
             WhatToFind what_to_find,
             VisitMode visit_mode)
      : search_target_(search_target),
        found_target_(false),
        found_target_in_trace_(false),
        what_to_find_(what_to_find),
        visit_mode_(visit_mode),
        object_stack_(20),
        no_alloc() {}

  virtual void VisitPointers(Object** start, Object** end);

  void Reset();
  void TracePathFrom(Object** root);

  bool found() const { return found_target_; }

  static Object* const kAnyGlobalObject;

 protected:
  class MarkVisitor;
  class UnmarkVisitor;

  void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
  void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
  virtual void ProcessResults();

  // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
  static const int kMarkTag = 2;

  Object* search_target_;
  bool found_target_;
  bool found_target_in_trace_;
  WhatToFind what_to_find_;
  VisitMode visit_mode_;
  List<Object*> object_stack_;

  AssertNoAllocation no_alloc;  // i.e. no gc allowed.

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
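
// Debug-only usage sketch (search_target and root are illustrative):
//
// PathTracer tracer(search_target, PathTracer::FIND_FIRST, VISIT_ALL);
// Object* root = ...;           // object to start the search from
// tracer.TracePathFrom(&root);
// if (tracer.found()) { ... a retaining path was traced ... }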
#endif  // DEBUG || LIVE_OBJECT_LIST

} }  // namespace v8::internal

#endif  // V8_HEAP_H_