// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_HEAP_H_
#define V8_HEAP_H_

#include <math.h>

#include "splay-tree-inl.h"
#include "v8-counters.h"

namespace v8 {
namespace internal {


// Defines all the roots in Heap.
#define UNCONDITIONAL_STRONG_ROOT_LIST(V) \
  /* Put the byte array map early. We need it to be in place by the time */ \
  /* the deserializer hits the next page, since it wants to put a byte */ \
  /* array in the unused space at the end of the page. */ \
  V(Map, byte_array_map, ByteArrayMap) \
  V(Map, one_pointer_filler_map, OnePointerFillerMap) \
  V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
  /* Cluster the most popular ones in a few cache lines here at the top. */ \
  V(Smi, stack_limit, StackLimit) \
  V(Object, undefined_value, UndefinedValue) \
  V(Object, the_hole_value, TheHoleValue) \
  V(Object, null_value, NullValue) \
  V(Object, true_value, TrueValue) \
  V(Object, false_value, FalseValue) \
  V(Map, heap_number_map, HeapNumberMap) \
  V(Map, global_context_map, GlobalContextMap) \
  V(Map, fixed_array_map, FixedArrayMap) \
  V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
  V(Map, meta_map, MetaMap) \
  V(Object, termination_exception, TerminationException) \
  V(Map, hash_table_map, HashTableMap) \
  V(FixedArray, empty_fixed_array, EmptyFixedArray) \
  V(Map, string_map, StringMap) \
  V(Map, ascii_string_map, AsciiStringMap) \
  V(Map, symbol_map, SymbolMap) \
  V(Map, ascii_symbol_map, AsciiSymbolMap) \
  V(Map, cons_symbol_map, ConsSymbolMap) \
  V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \
  V(Map, external_symbol_map, ExternalSymbolMap) \
  V(Map, external_symbol_with_ascii_data_map, ExternalSymbolWithAsciiDataMap) \
  V(Map, external_ascii_symbol_map, ExternalAsciiSymbolMap) \
  V(Map, cons_string_map, ConsStringMap) \
  V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
  V(Map, external_string_map, ExternalStringMap) \
  V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \
  V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
  V(Map, undetectable_string_map, UndetectableStringMap) \
  V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
  V(Map, pixel_array_map, PixelArrayMap) \
  V(Map, external_byte_array_map, ExternalByteArrayMap) \
  V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap) \
  V(Map, external_short_array_map, ExternalShortArrayMap) \
  V(Map, external_unsigned_short_array_map, ExternalUnsignedShortArrayMap) \
  V(Map, external_int_array_map, ExternalIntArrayMap) \
  V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap) \
  V(Map, external_float_array_map, ExternalFloatArrayMap) \
  V(Map, context_map, ContextMap) \
  V(Map, catch_context_map, CatchContextMap) \
  V(Map, code_map, CodeMap) \
  V(Map, oddball_map, OddballMap) \
  V(Map, global_property_cell_map, GlobalPropertyCellMap) \
  V(Map, shared_function_info_map, SharedFunctionInfoMap) \
  V(Map, proxy_map, ProxyMap) \
  V(Object, nan_value, NanValue) \
  V(Object, minus_zero_value, MinusZeroValue) \
  V(Object, instanceof_cache_function, InstanceofCacheFunction) \
  V(Object, instanceof_cache_map, InstanceofCacheMap) \
  V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
  V(String, empty_string, EmptyString) \
  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
  V(Map, neander_map, NeanderMap) \
  V(JSObject, message_listeners, MessageListeners) \
  V(Proxy, prototype_accessors, PrototypeAccessors) \
  V(NumberDictionary, code_stubs, CodeStubs) \
  V(NumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
  V(Code, js_entry_code, JsEntryCode) \
  V(Code, js_construct_entry_code, JsConstructEntryCode) \
  V(Code, c_entry_code, CEntryCode) \
  V(FixedArray, number_string_cache, NumberStringCache) \
  V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
  V(FixedArray, natives_source_cache, NativesSourceCache) \
  V(Object, last_script_id, LastScriptId) \
  V(Script, empty_script, EmptyScript) \
  V(Smi, real_stack_limit, RealStackLimit) \

#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#define STRONG_ROOT_LIST(V) \
  UNCONDITIONAL_STRONG_ROOT_LIST(V) \
  V(Code, re_c_entry_code, RegExpCEntryCode)
#else
#define STRONG_ROOT_LIST(V) UNCONDITIONAL_STRONG_ROOT_LIST(V)
#endif

#define ROOT_LIST(V) \
  STRONG_ROOT_LIST(V) \
  V(SymbolTable, symbol_table, SymbolTable)
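
// The lists above use the "X macro" pattern: each client defines its own
// V(...) macro and instantiates a list with it (see ROOT_ACCESSOR below for
// a real client). As an illustrative sketch, a hypothetical client (COUNT_ROOT
// is not part of this file) could count the roots like this:
//
//   #define COUNT_ROOT(type, name, camel_name) + 1
//   static const int kNumberOfStrongRoots = 0 STRONG_ROOT_LIST(COUNT_ROOT);
//   #undef COUNT_ROOT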

#define SYMBOL_LIST(V) \
  V(Array_symbol, "Array") \
  V(Object_symbol, "Object") \
  V(Proto_symbol, "__proto__") \
  V(StringImpl_symbol, "StringImpl") \
  V(arguments_symbol, "arguments") \
  V(Arguments_symbol, "Arguments") \
  V(arguments_shadow_symbol, ".arguments") \
  V(call_symbol, "call") \
  V(apply_symbol, "apply") \
  V(caller_symbol, "caller") \
  V(boolean_symbol, "boolean") \
  V(Boolean_symbol, "Boolean") \
  V(callee_symbol, "callee") \
  V(constructor_symbol, "constructor") \
  V(code_symbol, ".code") \
  V(result_symbol, ".result") \
  V(catch_var_symbol, ".catch-var") \
  V(empty_symbol, "") \
  V(eval_symbol, "eval") \
  V(function_symbol, "function") \
  V(length_symbol, "length") \
  V(name_symbol, "name") \
  V(number_symbol, "number") \
  V(Number_symbol, "Number") \
  V(RegExp_symbol, "RegExp") \
  V(source_symbol, "source") \
  V(global_symbol, "global") \
  V(ignore_case_symbol, "ignoreCase") \
  V(multiline_symbol, "multiline") \
  V(input_symbol, "input") \
  V(index_symbol, "index") \
  V(last_index_symbol, "lastIndex") \
  V(object_symbol, "object") \
  V(prototype_symbol, "prototype") \
  V(string_symbol, "string") \
  V(String_symbol, "String") \
  V(Date_symbol, "Date") \
  V(this_symbol, "this") \
  V(to_string_symbol, "toString") \
  V(char_at_symbol, "CharAt") \
  V(undefined_symbol, "undefined") \
  V(value_of_symbol, "valueOf") \
  V(InitializeVarGlobal_symbol, "InitializeVarGlobal") \
  V(InitializeConstGlobal_symbol, "InitializeConstGlobal") \
  V(stack_overflow_symbol, "kStackOverflowBoilerplate") \
  V(illegal_access_symbol, "illegal access") \
  V(out_of_memory_symbol, "out-of-memory") \
  V(illegal_execution_state_symbol, "illegal execution state") \
  V(get_symbol, "get") \
  V(set_symbol, "set") \
  V(function_class_symbol, "Function") \
  V(illegal_argument_symbol, "illegal argument") \
  V(MakeReferenceError_symbol, "MakeReferenceError") \
  V(MakeSyntaxError_symbol, "MakeSyntaxError") \
  V(MakeTypeError_symbol, "MakeTypeError") \
  V(invalid_lhs_in_assignment_symbol, "invalid_lhs_in_assignment") \
  V(invalid_lhs_in_for_in_symbol, "invalid_lhs_in_for_in") \
  V(invalid_lhs_in_postfix_op_symbol, "invalid_lhs_in_postfix_op") \
  V(invalid_lhs_in_prefix_op_symbol, "invalid_lhs_in_prefix_op") \
  V(illegal_return_symbol, "illegal_return") \
  V(illegal_break_symbol, "illegal_break") \
  V(illegal_continue_symbol, "illegal_continue") \
  V(unknown_label_symbol, "unknown_label") \
  V(redeclaration_symbol, "redeclaration") \
  V(failure_symbol, "<failure>") \
  V(space_symbol, " ") \
  V(exec_symbol, "exec") \
  V(zero_symbol, "0") \
  V(global_eval_symbol, "GlobalEval") \
  V(identity_hash_symbol, "v8::IdentityHash") \
  V(closure_symbol, "(closure)")


// Forward declaration of the GCTracer class.
class GCTracer;
class HeapStats;


typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);

typedef bool (*DirtyRegionCallback)(Address start,
                                    Address end,
                                    ObjectSlotCallback copy_object_func);


// The all-static Heap class captures the interface to the global object heap.
// All JavaScript contexts in this process share the same object heap.

class Heap : public AllStatic {
 public:
  // Configure heap size before setup. Return false if the heap has been
  // set up already.
  static bool ConfigureHeap(int max_semispace_size, int max_old_gen_size);
  static bool ConfigureHeapDefault();

  // Initializes the global object heap. If create_heap_objects is true,
  // also creates the basic non-mutable objects.
  // Returns whether it succeeded.
  static bool Setup(bool create_heap_objects);

  // Destroys all memory allocated by the heap.
  static void TearDown();

  // Set the stack limit in the roots_ array. Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  static void SetStackLimits();

  // Returns whether Setup has been called.
  static bool HasBeenSetup();

  // Returns the maximum amount of memory reserved for the heap. For
  // the young generation, we reserve 4 times the amount needed for a
  // semi space. The young generation consists of two semi spaces and
  // we reserve twice the amount needed for those in order to ensure
  // that new space can be aligned to its size.
  static int MaxReserved() {
    return 4 * reserved_semispace_size_ + max_old_generation_size_;
  }
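
  // Illustrative arithmetic for MaxReserved() (made-up numbers, not
  // configured defaults): with reserved_semispace_size_ == 8 MB and
  // max_old_generation_size_ == 512 MB, it returns 4 * 8 + 512 == 544 MB.
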
  static int MaxSemiSpaceSize() { return max_semispace_size_; }
  static int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
  static int InitialSemiSpaceSize() { return initial_semispace_size_; }
  static int MaxOldGenerationSize() { return max_old_generation_size_; }

  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
  // more spaces are needed until it reaches the limit.
  static int Capacity();

  // Returns the amount of memory currently committed for the heap.
  static int CommittedMemory();

  // Returns the available bytes in space w/o growing.
  // Heap doesn't guarantee that it can allocate an object that requires
  // all available bytes. Check MaxHeapObjectSize() instead.
  static int Available();

  // Returns the maximum object size in paged space.
  static inline int MaxObjectSizeInPagedSpace();

  // Returns the size of all objects residing in the heap.
  static int SizeOfObjects();

  // Return the starting address and a mask for the new space. And-masking an
  // address with the mask will result in the start address of the new space
  // for all addresses in either semispace.
  static Address NewSpaceStart() { return new_space_.start(); }
  static uintptr_t NewSpaceMask() { return new_space_.mask(); }
  static Address NewSpaceTop() { return new_space_.top(); }

  static NewSpace* new_space() { return &new_space_; }
  static OldSpace* old_pointer_space() { return old_pointer_space_; }
  static OldSpace* old_data_space() { return old_data_space_; }
  static OldSpace* code_space() { return code_space_; }
  static MapSpace* map_space() { return map_space_; }
  static CellSpace* cell_space() { return cell_space_; }
  static LargeObjectSpace* lo_space() { return lo_space_; }

  static bool always_allocate() { return always_allocate_scope_depth_ != 0; }
  static Address always_allocate_scope_depth_address() {
    return reinterpret_cast<Address>(&always_allocate_scope_depth_);
  }
  static bool linear_allocation() {
    return linear_allocation_scope_depth_ != 0;
  }

  static Address* NewSpaceAllocationTopAddress() {
    return new_space_.allocation_top_address();
  }
  static Address* NewSpaceAllocationLimitAddress() {
    return new_space_.allocation_limit_address();
  }

  // Uncommit unused semi space.
  static bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the heap by marking all spaces read-only/writable.
  static void Protect();
  static void Unprotect();
#endif

  // Allocates and initializes a new JavaScript object based on a
  // constructor.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateJSObject(JSFunction* constructor,
                                  PretenureFlag pretenure = NOT_TENURED);

  // Allocates and initializes a new global object based on a constructor.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateGlobalObject(JSFunction* constructor);

  // Returns a deep copy of the JavaScript object.
  // Properties and elements are copied too.
  // Returns failure if allocation failed.
  static Object* CopyJSObject(JSObject* source);

  // Allocates the function prototype.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateFunctionPrototype(JSFunction* function);

  // Reinitialize a JSGlobalProxy based on a constructor. The object
  // must have the same size as objects allocated using the
  // constructor. The object is reinitialized and behaves as an
  // object that has been freshly allocated using the constructor.
  static Object* ReinitializeJSGlobalProxy(JSFunction* constructor,
                                           JSGlobalProxy* global);

  // Allocates and initializes a new JavaScript object based on a map.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateJSObjectFromMap(Map* map,
                                         PretenureFlag pretenure = NOT_TENURED);

  // Allocates a heap object based on the map.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  static Object* Allocate(Map* map, AllocationSpace space);

  // Allocates a JS Map in the heap.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  static Object* AllocateMap(InstanceType instance_type, int instance_size);

  // Allocates a partial map for bootstrapping.
  static Object* AllocatePartialMap(InstanceType instance_type,
                                    int instance_size);

  // Allocates a map for the specified function.
  static Object* AllocateInitialMap(JSFunction* fun);

  // Allocates an empty code cache.
  static Object* AllocateCodeCache();

  // Clears the Instanceof cache (used when a prototype changes).
  static void ClearInstanceofCache() {
    set_instanceof_cache_function(the_hole_value());
  }

  // Allocates and fully initializes a String. There are two String
  // encodings: ASCII and two-byte. One should choose between the three
  // string allocation functions based on the encoding of the string buffer
  // used to initialize the string.
  // - ...FromAscii initializes the string from a buffer that is ASCII
  //   encoded (it does not check that the buffer is ASCII encoded) and the
  //   result will be ASCII encoded.
  // - ...FromUTF8 initializes the string from a buffer that is UTF-8
  //   encoded. If the characters are all single-byte characters, the
  //   result will be ASCII encoded, otherwise it will be converted to
  //   two-byte.
  // - ...FromTwoByte initializes the string from a buffer that is two-byte
  //   encoded. If the characters are all single-byte characters, the
  //   result will be converted to ASCII, otherwise it will be left as
  //   two-byte.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateStringFromAscii(
      Vector<const char> str,
      PretenureFlag pretenure = NOT_TENURED);
  static Object* AllocateStringFromUtf8(
      Vector<const char> str,
      PretenureFlag pretenure = NOT_TENURED);
  static Object* AllocateStringFromTwoByte(
      Vector<const uc16> str,
      PretenureFlag pretenure = NOT_TENURED);
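
  // Illustrative usage sketch (not from the original source): allocating a
  // string from a C literal via the UTF-8 path and propagating allocation
  // failure to a caller that can retry after GC.
  //
  //   Object* result = Heap::AllocateStringFromUtf8(CStrVector("example"));
  //   if (result->IsFailure()) return result;
  //   String* str = String::cast(result);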

  // Allocates a symbol in old space based on the character stream.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  static inline Object* AllocateSymbol(Vector<const char> str,
                                       int chars,
                                       uint32_t hash_field);

  static Object* AllocateInternalSymbol(unibrow::CharacterStream* buffer,
                                        int chars,
                                        uint32_t hash_field);

  static Object* AllocateExternalSymbol(Vector<const char> str,
                                        int chars);


  // Allocates and partially initializes a String. There are two String
  // encodings: ASCII and two-byte. These functions allocate a string of the
  // given length and set its map and length fields. The characters of the
  // string are uninitialized.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateRawAsciiString(
      int length,
      PretenureFlag pretenure = NOT_TENURED);
  static Object* AllocateRawTwoByteString(
      int length,
      PretenureFlag pretenure = NOT_TENURED);

  // Computes a single character string where the character has the given
  // code. A cache is used for ASCII codes.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed. Please note this does not perform a garbage collection.
  static Object* LookupSingleCharacterStringFromCode(uint16_t code);

  // Allocates a byte array of the specified length.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateByteArray(int length, PretenureFlag pretenure);

  // Allocates a non-tenured byte array of the specified length.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateByteArray(int length);

  // Allocates a pixel array of the specified length.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocatePixelArray(int length,
                                    uint8_t* external_pointer,
                                    PretenureFlag pretenure);

  // Allocates an external array of the specified length and type.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateExternalArray(int length,
                                       ExternalArrayType array_type,
                                       void* external_pointer,
                                       PretenureFlag pretenure);

  // Allocates a tenured JS global property cell.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateJSGlobalPropertyCell(Object* value);

  // Allocates a fixed array initialized with undefined values.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateFixedArray(int length, PretenureFlag pretenure);
  // Allocates a fixed array initialized with undefined values.
  static Object* AllocateFixedArray(int length);
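
  // Illustrative usage sketch (not from the original source): allocating a
  // fixed array and handling the failure-object protocol by hand.
  //
  //   Object* result = Heap::AllocateFixedArray(16);
  //   if (result->IsFailure()) return result;  // let the caller retry
  //   FixedArray* elements = FixedArray::cast(result);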

  // Allocates an uninitialized fixed array. It must be filled by the caller.
  //
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateUninitializedFixedArray(int length);

  // Make a copy of src and return it. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  static Object* CopyFixedArray(FixedArray* src);

  // Allocates a fixed array initialized with the hole values.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateFixedArrayWithHoles(
      int length,
      PretenureFlag pretenure = NOT_TENURED);

  // AllocateHashTable is identical to AllocateFixedArray except
  // that the resulting object has hash_table_map as map.
  static Object* AllocateHashTable(int length,
                                   PretenureFlag pretenure = NOT_TENURED);

  // Allocates a global (but otherwise uninitialized) context.
  static Object* AllocateGlobalContext();

  // Allocates a function context.
  static Object* AllocateFunctionContext(int length, JSFunction* closure);

  // Allocates a 'with' context.
  static Object* AllocateWithContext(Context* previous,
                                     JSObject* extension,
                                     bool is_catch_context);

  // Allocates a new utility object in the old generation.
  static Object* AllocateStruct(InstanceType type);

  // Allocates a function initialized with a shared part.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateFunction(Map* function_map,
                                  SharedFunctionInfo* shared,
                                  Object* prototype,
                                  PretenureFlag pretenure = TENURED);

  // Indices for direct access into argument objects.
  static const int kArgumentsObjectSize =
      JSObject::kHeaderSize + 2 * kPointerSize;
  static const int arguments_callee_index = 0;
  static const int arguments_length_index = 1;

  // Allocates an arguments object - optionally with an elements array.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateArgumentsObject(Object* callee, int length);

  // Same as NewNumberFromDouble, but may return a preallocated/immutable
  // number object (e.g., minus_zero_value_, nan_value_).
  static Object* NumberFromDouble(double value,
                                  PretenureFlag pretenure = NOT_TENURED);

  // Allocates a HeapNumber from value.
  static Object* AllocateHeapNumber(double value, PretenureFlag pretenure);
  static Object* AllocateHeapNumber(double value);  // pretenure = NOT_TENURED

  // Converts an int into either a Smi or a HeapNumber object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static inline Object* NumberFromInt32(int32_t value);

  // Converts an int into either a Smi or a HeapNumber object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static inline Object* NumberFromUint32(uint32_t value);
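
  // Illustrative sketch (not from the original source): values in the Smi
  // range are encoded in the pointer and never allocate, while values
  // outside it force a HeapNumber allocation that can fail.
  //
  //   Object* small = Heap::NumberFromInt32(42);            // Smi, no failure
  //   Object* large = Heap::NumberFromUint32(0xffffffffu);  // may allocate
  //   if (large->IsFailure()) return large;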

  // Allocates a new proxy object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateProxy(Address proxy,
                               PretenureFlag pretenure = NOT_TENURED);

  // Allocates a new SharedFunctionInfo object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateSharedFunctionInfo(Object* name);

  // Allocates a new cons string object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateConsString(String* first, String* second);

  // Allocates a new substring object which is a substring of an underlying
  // string buffer stretching from the index start (inclusive) to the index
  // end (exclusive).
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateSubString(String* buffer,
                                   int start,
                                   int end,
                                   PretenureFlag pretenure = NOT_TENURED);

  // Allocates a new external string object, which is backed by a string
  // resource that resides outside the V8 heap.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateExternalStringFromAscii(
      ExternalAsciiString::Resource* resource);
  static Object* AllocateExternalStringFromTwoByte(
      ExternalTwoByteString::Resource* resource);
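
  // Illustrative sketch (not from the original source): a minimal resource
  // wrapping externally owned ASCII data. The resource must stay alive as
  // long as the string; FinalizeExternalString() below deletes it when the
  // string dies. The data()/length() interface is assumed from
  // ExternalAsciiString::Resource.
  //
  //   class CStringResource : public ExternalAsciiString::Resource {
  //    public:
  //     explicit CStringResource(const char* data)
  //         : data_(data), length_(strlen(data)) {}
  //     const char* data() const { return data_; }
  //     size_t length() const { return length_; }
  //    private:
  //     const char* data_;
  //     size_t length_;
  //   };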

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  static inline void FinalizeExternalString(String* string);

  // Allocates an uninitialized object. The memory is non-executable if the
  // hardware and OS allow.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  static inline Object* AllocateRaw(int size_in_bytes,
                                    AllocationSpace space,
                                    AllocationSpace retry_space);

  // Initialize a filler object to keep the ability to iterate over the heap
  // when shortening objects.
  static void CreateFillerObjectAt(Address addr, int size);

  // Makes a new native code object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed. On success, the pointer to the Code object is stored in the
  // self_reference. This allows generated code to reference its own Code
  // object by containing this pointer.
  // Please note this function does not perform a garbage collection.
  static Object* CreateCode(const CodeDesc& desc,
                            Code::Flags flags,
                            Handle<Object> self_reference);

  static Object* CopyCode(Code* code);

  // Copy the code and scope info part of the code object, but insert
  // the provided data as the relocation information.
  static Object* CopyCode(Code* code, Vector<byte> reloc_info);

  // Finds the symbol for string in the symbol table.
  // If not found, a new symbol is added to the table and returned.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  static Object* LookupSymbol(Vector<const char> str);
  static Object* LookupAsciiSymbol(const char* str) {
    return LookupSymbol(CStrVector(str));
  }
  static Object* LookupSymbol(String* str);
  static bool LookupSymbolIfExists(String* str, String** symbol);
  static bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);

  // Compute the matching symbol map for a string if possible.
  // NULL is returned if string is in new space or not flattened.
  static Map* SymbolMapForString(String* str);

  // Tries to flatten a string before a compare operation.
  //
  // Returns a failure in case it was decided that flattening was
  // necessary and it failed. Note that even when no failure is returned,
  // the string might stay non-flat if flattening was not necessary.
  //
  // Please note this function does not perform a garbage collection.
  static inline Object* PrepareForCompare(String* str);

  // Converts the given boolean condition to JavaScript boolean value.
  static Object* ToBoolean(bool condition) {
    return condition ? true_value() : false_value();
  }

  // Code that should be run before and after each GC. Includes some
  // reporting/verification activities when compiled with DEBUG set.
  static void GarbageCollectionPrologue();
  static void GarbageCollectionEpilogue();

  // Performs a garbage collection operation.
  // Returns whether required_space bytes are available after the collection.
  static bool CollectGarbage(int required_space, AllocationSpace space);

  // Performs a full garbage collection. Force compaction if the
  // parameter is true.
  static void CollectAllGarbage(bool force_compaction);

  // Notify the heap that a context has been disposed.
  static int NotifyContextDisposed() { return ++contexts_disposed_; }

  // Utility to invoke the scavenger. This is needed in test code to
  // ensure correct callback for weak global handles.
  static void PerformScavenge();

#ifdef DEBUG
  // Utility used with flag gc-greedy.
  static bool GarbageCollectionGreedyCheck();
#endif

  static void AddGCPrologueCallback(
      GCPrologueCallback callback, GCType gc_type_filter);
  static void RemoveGCPrologueCallback(GCPrologueCallback callback);

  static void AddGCEpilogueCallback(
      GCEpilogueCallback callback, GCType gc_type_filter);
  static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);

  static void SetGlobalGCPrologueCallback(GCCallback callback) {
    ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL));
    global_gc_prologue_callback_ = callback;
  }
  static void SetGlobalGCEpilogueCallback(GCCallback callback) {
    ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
    global_gc_epilogue_callback_ = callback;
  }

  // Heap root getters. We have versions with and without type::cast() here.
  // You can't use type::cast during GC because the assert fails.
#define ROOT_ACCESSOR(type, name, camel_name) \
  static inline type* name() { \
    return type::cast(roots_[k##camel_name##RootIndex]); \
  } \
  static inline type* raw_unchecked_##name() { \
    return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
  }
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
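
  // For example, the ROOT_LIST entry V(Map, heap_number_map, HeapNumberMap)
  // expands through ROOT_ACCESSOR above to (sketch):
  //
  //   static inline Map* heap_number_map() {
  //     return Map::cast(roots_[kHeapNumberMapRootIndex]);
  //   }
  //   static inline Map* raw_unchecked_heap_number_map() {
  //     return reinterpret_cast<Map*>(roots_[kHeapNumberMapRootIndex]);
  //   }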

// Utility type maps
#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
  static inline Map* name##_map() { \
    return Map::cast(roots_[k##Name##MapRootIndex]); \
  }
  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define SYMBOL_ACCESSOR(name, str) static inline String* name() { \
    return String::cast(roots_[k##name##RootIndex]); \
  }
  SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

  // The hidden_symbol is special because it is the empty string, but does
  // not match the empty string.
  static String* hidden_symbol() { return hidden_symbol_; }

  // Iterates over all roots in the heap.
  static void IterateRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all strong roots in the heap.
  static void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all the other roots in the heap.
  static void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);

  enum ExpectedPageWatermarkState {
    WATERMARK_SHOULD_BE_VALID,
    WATERMARK_CAN_BE_INVALID
  };

  // For each dirty region on a page in use from an old space call the
  // visit_dirty_region callback.
  // If either visit_dirty_region or callback can cause an allocation
  // in old space and changes in the allocation watermark, then
  // can_preallocate_during_iteration should be set to true.
  // All pages will be marked as having an invalid watermark upon
  // iteration completion.
  static void IterateDirtyRegions(
      PagedSpace* space,
      DirtyRegionCallback visit_dirty_region,
      ObjectSlotCallback callback,
      ExpectedPageWatermarkState expected_page_watermark_state);

  // Interpret marks as a bitvector of dirty marks for regions of size
  // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
  // memory interval from start to top. For each dirty region call a
  // visit_dirty_region callback. Return updated bitvector of dirty marks.
  static uint32_t IterateDirtyRegions(uint32_t marks,
                                      Address start,
                                      Address end,
                                      DirtyRegionCallback visit_dirty_region,
                                      ObjectSlotCallback callback);

  // Iterate pointers to the from-space of new space found in the memory
  // interval from start to end.
  // Update dirty marks for the page containing the start address.
  static void IterateAndMarkPointersToFromSpace(Address start,
                                                Address end,
                                                ObjectSlotCallback callback);

  // Iterate pointers to new space found in memory interval from start to end.
  // Return true if pointers to new space were found.
  static bool IteratePointersInDirtyRegion(Address start,
                                           Address end,
                                           ObjectSlotCallback callback);


  // Iterate pointers to new space found in memory interval from start to end.
  // This interval is considered to belong to the map space.
  // Return true if pointers to new space were found.
  static bool IteratePointersInDirtyMapsRegion(Address start,
                                               Address end,
                                               ObjectSlotCallback callback);


  // Returns whether the object resides in new space.
  static inline bool InNewSpace(Object* object);
  static inline bool InFromSpace(Object* object);
  static inline bool InToSpace(Object* object);

  // Checks whether an address/object is in the heap (including auxiliary
  // area and unused area).
  static bool Contains(Address addr);
  static bool Contains(HeapObject* value);

  // Checks whether an address/object is in a space.
  // Currently used by tests, serialization and heap verification only.
  static bool InSpace(Address addr, AllocationSpace space);
  static bool InSpace(HeapObject* value, AllocationSpace space);

  // Finds out which space an object should get promoted to based on its type.
  static inline OldSpace* TargetSpace(HeapObject* object);
  static inline AllocationSpace TargetSpaceId(InstanceType type);

  // Sets the stub_cache_ (only used when expanding the dictionary).
  static void public_set_code_stubs(NumberDictionary* value) {
    roots_[kCodeStubsRootIndex] = value;
  }

  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
  static void public_set_non_monomorphic_cache(NumberDictionary* value) {
    roots_[kNonMonomorphicCacheRootIndex] = value;
  }

  static void public_set_empty_script(Script* script) {
    roots_[kEmptyScriptRootIndex] = script;
  }

  // Update the next script id.
  static inline void SetLastScriptId(Object* last_script_id);

  // Generated code can embed this address to get access to the roots.
  static Object** roots_address() { return roots_; }

#ifdef DEBUG
  static void Print();
  static void PrintHandles();

  // Verify the heap is in its normal state before or after a GC.
  static void Verify();

  // Report heap statistics.
  static void ReportHeapStatistics(const char* title);
  static void ReportCodeStatistics(const char* title);

  // Fill in bogus values in from space.
  static void ZapFromSpace();
#endif

#if defined(ENABLE_LOGGING_AND_PROFILING)
  // Print short heap statistics.
  static void PrintShortHeapStatistics();
#endif

  // Makes a new symbol object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  static Object* CreateSymbol(const char* str, int length, int hash);
  static Object* CreateSymbol(String* str);

  // Write barrier support for address[offset] = o.
  static inline void RecordWrite(Address address, int offset);

  // Write barrier support for address[start : start + len[ = o.
  static inline void RecordWrites(Address address, int start, int len);
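
  // Illustrative sketch (not from the original source): code that stores a
  // pointer into an old-space object directly, bypassing the usual setters,
  // must notify the write barrier so the scavenger can find new-space
  // pointers via dirty regions. FixedArray::OffsetOfElementAt is assumed
  // here for computing the slot offset.
  //
  //   Heap::RecordWrite(array->address(),
  //                     FixedArray::OffsetOfElementAt(index));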

  // Given an address occupied by a live code object, return that object.
  static Object* FindCodeObject(Address a);

  // Invoke Shrink on shrinkable spaces.
  static void Shrink();

  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
  static inline HeapState gc_state() { return gc_state_; }

#ifdef DEBUG
  static bool IsAllocationAllowed() { return allocation_allowed_; }
  static inline bool allow_allocation(bool enable);

  static bool disallow_allocation_failure() {
    return disallow_allocation_failure_;
  }

  static void TracePathToObject(Object* target);
  static void TracePathToGlobal();
#endif

  // Callback function passed to Heap::Iterate etc. Copies an object if
  // necessary; the object might be promoted to an old space. The caller must
  // ensure the precondition that the object is (a) a heap object and (b) in
  // the heap's from space.
  static void ScavengePointer(HeapObject** p);
  static inline void ScavengeObject(HeapObject** p, HeapObject* object);

  // Commits from space if it is uncommitted.
  static void EnsureFromSpaceIsCommitted();

  // Support for partial snapshots. After calling this we can allocate a
  // certain number of bytes using only linear allocation (with a
  // LinearAllocationScope and an AlwaysAllocateScope) without using
  // freelists or causing a GC. It returns true if space was reserved or
  // false if a GC is needed. For paged spaces the space requested must
  // include the space wasted at the end of each page when allocating
  // linearly.
  static void ReserveSpace(
      int new_space_size,
      int pointer_space_size,
      int data_space_size,
      int code_space_size,
      int map_space_size,
      int cell_space_size,
      int large_object_size);

  //
  // Support for the API.
  //

  static bool CreateApiObjects();

  // Attempt to find the number in a small cache. If we find it, return
  // the string representation of the number. Otherwise return undefined.
  static Object* GetNumberStringCache(Object* number);

  // Update the cache with a new number-string pair.
  static void SetNumberStringCache(Object* number, String* str);

  // Adjusts the amount of registered external memory.
  // Returns the adjusted value.
  static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
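
  // Illustrative sketch (not from the original source): an embedder keeping
  // the GC informed about memory it holds outside the V8 heap. Registering
  // growth pushes the heap toward the external allocation limit (and thus a
  // global GC); the matching negative adjustment on release keeps the count
  // balanced.
  //
  //   Heap::AdjustAmountOfExternalAllocatedMemory(buffer_size);
  //   // ... buffer is alive ...
  //   Heap::AdjustAmountOfExternalAllocatedMemory(-buffer_size);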

  // Allocate uninitialized fixed array.
  static Object* AllocateRawFixedArray(int length);
  static Object* AllocateRawFixedArray(int length,
                                       PretenureFlag pretenure);

  // True if we have reached the allocation limit in the old generation that
  // should force the next GC (caused normally) to be a full one.
  static bool OldGenerationPromotionLimitReached() {
    return (PromotedSpaceSize() + PromotedExternalMemorySize())
           > old_gen_promotion_limit_;
  }

  static intptr_t OldGenerationSpaceAvailable() {
    return old_gen_allocation_limit_ -
           (PromotedSpaceSize() + PromotedExternalMemorySize());
  }

  // True if we have reached the allocation limit in the old generation that
  // should artificially cause a GC right now.
  static bool OldGenerationAllocationLimitReached() {
    return OldGenerationSpaceAvailable() < 0;
  }

  // Can be called when the embedding application is idle.
  static bool IdleNotification();

  // Declare all the root indices.
  enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
    STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP

#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
    SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

    kSymbolTableRootIndex,
    kStrongRootListLength = kSymbolTableRootIndex,
    kRootListLength
  };

  static Object* NumberToString(Object* number,
                                bool check_number_string_cache = true);

  static Map* MapForExternalArrayType(ExternalArrayType array_type);
  static RootListIndex RootIndexForExternalArrayType(
      ExternalArrayType array_type);

  static void RecordStats(HeapStats* stats);

  static Scavenger GetScavenger(int instance_type, int instance_size);

  // Copy a block of memory from src to dst. The size of the block should be
  // aligned by pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  static inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
                                                             Address src,
                                                             int byte_size);

  // Optimized version of memmove for blocks with pointer-size-aligned sizes
  // and pointer-size-aligned addresses.
  static inline void MoveBlock(Address dst, Address src, int byte_size);

  static inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
                                                             Address src,
                                                             int byte_size);

  // Check the new space expansion criteria and expand semispaces if they
  // were hit.
  static void CheckNewSpaceExpansionCriteria();

  static inline void IncrementYoungSurvivorsCounter(int survived) {
    young_survivors_after_last_gc_ = survived;
    survived_since_last_expansion_ += survived;
  }

  static void UpdateNewSpaceReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  // Helper function that governs the promotion policy from new space to
  // old. If the object's old address lies below the new space's age
  // mark or if we've already filled the bottom 1/16th of the to space,
  // we try to promote this object.
  static inline bool ShouldBePromoted(Address old_address, int object_size);
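
  // Illustrative arithmetic for the 1/16th rule above (made-up size, not a
  // configured default): with an 8 MB to-space, objects scavenged after the
  // bottom 8 MB / 16 == 512 KB of to-space has been filled become candidates
  // for promotion.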
1020
1021 static int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
1022
Kristian Monsen25f61362010-05-21 11:50:48 +01001023 static void ClearJSFunctionResultCaches();
1024
Leon Clarkef7060e22010-06-03 12:02:55 +01001025 static GCTracer* tracer() { return tracer_; }
1026
Steve Blocka7e24c12009-10-30 11:49:00 +00001027 private:
Steve Block3ce2e202009-11-05 08:53:23 +00001028 static int reserved_semispace_size_;
1029 static int max_semispace_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001030 static int initial_semispace_size_;
Steve Block3ce2e202009-11-05 08:53:23 +00001031 static int max_old_generation_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001032 static size_t code_range_size_;
1033
1034 // For keeping track of how much data has survived
1035 // scavenge since last new space expansion.
1036 static int survived_since_last_expansion_;
1037
1038 static int always_allocate_scope_depth_;
Steve Blockd0582a62009-12-15 09:54:21 +00001039 static int linear_allocation_scope_depth_;
Steve Block6ded16b2010-05-10 14:33:55 +01001040
1041 // For keeping track of context disposals.
1042 static int contexts_disposed_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001043
Steve Blocka7e24c12009-10-30 11:49:00 +00001044#if defined(V8_TARGET_ARCH_X64)
1045 static const int kMaxObjectSizeInNewSpace = 512*KB;
1046#else
1047 static const int kMaxObjectSizeInNewSpace = 256*KB;
1048#endif
1049
1050 static NewSpace new_space_;
1051 static OldSpace* old_pointer_space_;
1052 static OldSpace* old_data_space_;
1053 static OldSpace* code_space_;
1054 static MapSpace* map_space_;
1055 static CellSpace* cell_space_;
1056 static LargeObjectSpace* lo_space_;
1057 static HeapState gc_state_;
1058
1059 // Returns the size of object residing in non new spaces.
1060 static int PromotedSpaceSize();
1061
1062 // Returns the amount of external memory registered since last global gc.
1063 static int PromotedExternalMemorySize();
1064
1065 static int mc_count_; // how many mark-compact collections happened
Leon Clarkef7060e22010-06-03 12:02:55 +01001066 static int ms_count_; // how many mark-sweep collections happened
Steve Blocka7e24c12009-10-30 11:49:00 +00001067 static int gc_count_; // how many gc happened
1068
Steve Block6ded16b2010-05-10 14:33:55 +01001069 // Total length of the strings we failed to flatten since the last GC.
1070 static int unflattened_strings_length_;
1071
Steve Blocka7e24c12009-10-30 11:49:00 +00001072#define ROOT_ACCESSOR(type, name, camel_name) \
1073 static inline void set_##name(type* value) { \
1074 roots_[k##camel_name##RootIndex] = value; \
1075 }
1076 ROOT_LIST(ROOT_ACCESSOR)
1077#undef ROOT_ACCESSOR
1078
1079#ifdef DEBUG
1080 static bool allocation_allowed_;
1081
1082 // If the --gc-interval flag is set to a positive value, this
1083 // variable holds the value indicating the number of allocations
1084 // remain until the next failure and garbage collection.
1085 static int allocation_timeout_;
1086
1087 // Do we expect to be able to handle allocation failure at this
1088 // time?
1089 static bool disallow_allocation_failure_;
1090#endif // DEBUG
1091
1092 // Limit that triggers a global GC on the next (normally caused) GC. This
1093 // is checked when we have already decided to do a GC to help determine
1094 // which collector to invoke.
1095 static int old_gen_promotion_limit_;
1096
1097 // Limit that triggers a global GC as soon as is reasonable. This is
1098 // checked before expanding a paged space in the old generation and on
1099 // every allocation in large object space.
1100 static int old_gen_allocation_limit_;
1101
1102 // Limit on the amount of externally allocated memory allowed
1103 // between global GCs. If reached a global GC is forced.
1104 static int external_allocation_limit_;
1105
1106 // The amount of external memory registered through the API kept alive
1107 // by global handles
1108 static int amount_of_external_allocated_memory_;
1109
1110 // Caches the amount of external memory registered at the last global gc.
1111 static int amount_of_external_allocated_memory_at_last_global_gc_;
1112
1113 // Indicates that an allocation has failed in the old generation since the
1114 // last GC.
1115 static int old_gen_exhausted_;
1116
1117 static Object* roots_[kRootListLength];
1118
1119 struct StringTypeTable {
1120 InstanceType type;
1121 int size;
1122 RootListIndex index;
1123 };
1124
1125 struct ConstantSymbolTable {
1126 const char* contents;
1127 RootListIndex index;
1128 };
1129
1130 struct StructTable {
1131 InstanceType type;
1132 int size;
1133 RootListIndex index;
1134 };
1135
1136 static const StringTypeTable string_type_table[];
1137 static const ConstantSymbolTable constant_symbol_table[];
1138 static const StructTable struct_table[];
1139
1140 // The special hidden symbol which is an empty string, but does not match
1141 // any string when looked up in properties.
1142 static String* hidden_symbol_;
1143
1144 // GC callback function, called before and after mark-compact GC.
1145 // Allocations in the callback function are disallowed.
Steve Block6ded16b2010-05-10 14:33:55 +01001146 struct GCPrologueCallbackPair {
1147 GCPrologueCallbackPair(GCPrologueCallback callback, GCType gc_type)
1148 : callback(callback), gc_type(gc_type) {
1149 }
1150 bool operator==(const GCPrologueCallbackPair& pair) const {
1151 return pair.callback == callback;
1152 }
1153 GCPrologueCallback callback;
1154 GCType gc_type;
1155 };
1156 static List<GCPrologueCallbackPair> gc_prologue_callbacks_;
1157
1158 struct GCEpilogueCallbackPair {
1159 GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type)
1160 : callback(callback), gc_type(gc_type) {
1161 }
1162 bool operator==(const GCEpilogueCallbackPair& pair) const {
1163 return pair.callback == callback;
1164 }
1165 GCEpilogueCallback callback;
1166 GCType gc_type;
1167 };
1168 static List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
1169
Steve Blocka7e24c12009-10-30 11:49:00 +00001170 static GCCallback global_gc_prologue_callback_;
1171 static GCCallback global_gc_epilogue_callback_;
1172
1173 // Checks whether a global GC is necessary
1174 static GarbageCollector SelectGarbageCollector(AllocationSpace space);
1175
1176 // Performs garbage collection
1177 static void PerformGarbageCollection(AllocationSpace space,
1178 GarbageCollector collector,
1179 GCTracer* tracer);
1180
  // Allocate an uninitialized object in map space. The behavior is identical
  // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it
  // doesn't have to test the allocation space argument and (b) it can reduce
  // code size (since both AllocateRaw and AllocateRawMap are inlined).
  static inline Object* AllocateRawMap();

  // Allocate an uninitialized object in the global property cell space.
  static inline Object* AllocateRawCell();

  // Initializes a JSObject based on its map.
  static void InitializeJSObjectFromMap(JSObject* obj,
                                        FixedArray* properties,
                                        Map* map);

  static bool CreateInitialMaps();
  static bool CreateInitialObjects();

  // These four Create*EntryStub functions are here because of a gcc-4.4 bug
  // that assigns wrong vtable entries.
  static void CreateCEntryStub();
  static void CreateJSEntryStub();
  static void CreateJSConstructEntryStub();
  static void CreateRegExpCEntryStub();

  static void CreateFixedStubs();

  static Object* CreateOddball(const char* to_string, Object* to_number);

  // Allocate an empty fixed array.
  static Object* AllocateEmptyFixedArray();

  // Performs a minor collection in new generation.
  static void Scavenge();

  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
      Object** pointer);

  static Address DoScavenge(ObjectVisitor* scavenge_visitor,
                            Address new_space_front);

  // Performs a major collection in the whole heap.
  static void MarkCompact(GCTracer* tracer);

  // Code to be run before and after mark-compact.
  static void MarkCompactPrologue(bool is_compacting);
  static void MarkCompactEpilogue(bool is_compacting);

  // Completely clear the Instanceof cache (to stop it from keeping objects
  // alive across a GC).
  static void CompletelyClearInstanceofCache() {
    set_instanceof_cache_map(the_hole_value());
    set_instanceof_cache_function(the_hole_value());
  }

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  // Record statistics before and after garbage collection.
  static void ReportStatisticsBeforeGC();
  static void ReportStatisticsAfterGC();
#endif

  // Slow part of scavenge object.
  static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);

  // Initializes a function with a shared part and prototype.
  // Returns the function.
  // Note: this code was factored out of AllocateFunction such that
  // other parts of the VM could use it. Specifically, a function that creates
  // instances of type JS_FUNCTION_TYPE benefits from the use of this function.
  // Please note this does not perform a garbage collection.
  static inline Object* InitializeFunction(JSFunction* function,
                                           SharedFunctionInfo* shared,
                                           Object* prototype);

  static GCTracer* tracer_;

  // Initializes the number to string cache based on the max semispace size.
  static Object* InitializeNumberStringCache();
  // Flush the number to string cache.
  static void FlushNumberStringCache();

  // Flush code from functions we do not expect to use again. The code will
  // be replaced with a lazily compilable version.
  static void FlushCode();

  static void UpdateSurvivalRateTrend(int start_new_space_size);

  enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };

  static const int kYoungSurvivalRateThreshold = 90;
  static const int kYoungSurvivalRateAllowedDeviation = 15;

  static int young_survivors_after_last_gc_;
  static int high_survival_rate_period_length_;
  static double survival_rate_;
  static SurvivalRateTrend previous_survival_rate_trend_;
  static SurvivalRateTrend survival_rate_trend_;

  static void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
    ASSERT(survival_rate_trend != FLUCTUATING);
    previous_survival_rate_trend_ = survival_rate_trend_;
    survival_rate_trend_ = survival_rate_trend;
  }

  static SurvivalRateTrend survival_rate_trend() {
    if (survival_rate_trend_ == STABLE) {
      return STABLE;
    } else if (previous_survival_rate_trend_ == STABLE) {
      return survival_rate_trend_;
    } else if (survival_rate_trend_ != previous_survival_rate_trend_) {
      return FLUCTUATING;
    } else {
      return survival_rate_trend_;
    }
  }
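
  // Worked examples of the rule above (derived from the branches; not new
  // behavior): a STABLE reading followed by INCREASING reports INCREASING,
  // because a stable previous period lets us trust the new reading, while
  // INCREASING followed by DECREASING reports FLUCTUATING, because two
  // conflicting non-stable readings in a row give no usable trend.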

  static bool IsStableOrIncreasingSurvivalTrend() {
    switch (survival_rate_trend()) {
      case STABLE:
      case INCREASING:
        return true;
      default:
        return false;
    }
  }

  static bool IsIncreasingSurvivalTrend() {
    return survival_rate_trend() == INCREASING;
  }

  static bool IsHighSurvivalRate() {
    return high_survival_rate_period_length_ > 0;
  }

  static const int kInitialSymbolTableSize = 2048;
  static const int kInitialEvalCacheSize = 64;

  friend class Factory;
  friend class DisallowAllocationFailure;
  friend class AlwaysAllocateScope;
  friend class LinearAllocationScope;
};


class HeapStats {
 public:
  int* start_marker;
  int* new_space_size;
  int* new_space_capacity;
  int* old_pointer_space_size;
  int* old_pointer_space_capacity;
  int* old_data_space_size;
  int* old_data_space_capacity;
  int* code_space_size;
  int* code_space_capacity;
  int* map_space_size;
  int* map_space_capacity;
  int* cell_space_size;
  int* cell_space_capacity;
  int* lo_space_size;
  int* global_handle_count;
  int* weak_global_handle_count;
  int* pending_global_handle_count;
  int* near_death_global_handle_count;
  int* destroyed_global_handle_count;
  int* end_marker;
};


class AlwaysAllocateScope {
 public:
  AlwaysAllocateScope() {
    // We shouldn't hit any nested scopes, because that requires
    // non-handle code to call handle code. The code still works, but
    // performance will degrade, so we want to catch this situation
    // in debug mode.
    ASSERT(Heap::always_allocate_scope_depth_ == 0);
    Heap::always_allocate_scope_depth_++;
  }

  ~AlwaysAllocateScope() {
    Heap::always_allocate_scope_depth_--;
    ASSERT(Heap::always_allocate_scope_depth_ == 0);
  }
};
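
// A minimal usage sketch (hypothetical caller; the allocator entry point and
// its failure handling are assumptions, not part of this class):
//
// { AlwaysAllocateScope always_allocate;
//   // While the scope is active, Heap::always_allocate() is true and
//   // allocation is expected to dip into reserves rather than return a
//   // retry-after-GC failure.
//   Object* result = Heap::AllocateFixedArray(16);
//   ...
// }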


class LinearAllocationScope {
 public:
  LinearAllocationScope() {
    Heap::linear_allocation_scope_depth_++;
  }

  ~LinearAllocationScope() {
    Heap::linear_allocation_scope_depth_--;
    ASSERT(Heap::linear_allocation_scope_depth_ >= 0);
  }
};


#ifdef DEBUG
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have
// to point into the heap to a location that has a map pointer at its first
// word. Caveat: Heap::Contains is an approximation because it can return
// true for objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        ASSERT(Heap::Contains(object));
        ASSERT(object->map()->IsMap());
      }
    }
  }
};
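
// A debug-mode sketch of how such a visitor might be driven (the root
// iteration entry point and the VisitMode value are assumptions from
// elsewhere in V8, not declared in this header):
//
//   VerifyPointersVisitor visitor;
//   Heap::IterateRoots(&visitor, VISIT_ONLY_STRONG);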


// Visitor class to verify interior pointers in spaces that use region marks
// to keep track of intergenerational references.
// As VerifyPointersVisitor, but also checks that dirty marks are set
// for regions covering intergenerational references.
class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        ASSERT(Heap::Contains(object));
        ASSERT(object->map()->IsMap());
        if (Heap::InNewSpace(object)) {
          ASSERT(Heap::InToSpace(object));
          Address addr = reinterpret_cast<Address>(current);
          ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
        }
      }
    }
  }
};
#endif


// Space iterator for iterating over all spaces of the heap.
// Returns each space in turn, and NULL when it is done.
class AllSpaces BASE_EMBEDDED {
 public:
  Space* next();
  AllSpaces() { counter_ = FIRST_SPACE; }

 private:
  int counter_;
};
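
// A minimal usage sketch (hypothetical caller); the iterators below all
// follow the same next()-until-NULL protocol:
//
//   AllSpaces spaces;
//   for (Space* space = spaces.next();
//        space != NULL;
//        space = spaces.next()) {
//     // ... inspect one space per iteration ...
//   }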


// Space iterator for iterating over all old spaces of the heap: Old pointer
// space, old data space and code space.
// Returns each space in turn, and NULL when it is done.
class OldSpaces BASE_EMBEDDED {
 public:
  OldSpace* next();
  OldSpaces() { counter_ = OLD_POINTER_SPACE; }

 private:
  int counter_;
};


// Space iterator for iterating over all the paged spaces of the heap:
// Map space, old pointer space, old data space, code space and cell space.
// Returns each space in turn, and NULL when it is done.
class PagedSpaces BASE_EMBEDDED {
 public:
  PagedSpace* next();
  PagedSpaces() { counter_ = OLD_POINTER_SPACE; }

 private:
  int counter_;
};


// Space iterator for iterating over all spaces of the heap.
// For each space an object iterator is provided. The deallocation of the
// returned object iterators is handled by the space iterator.
class SpaceIterator : public Malloced {
 public:
  SpaceIterator();
  virtual ~SpaceIterator();

  bool has_next();
  ObjectIterator* next();

 private:
  ObjectIterator* CreateIterator();

  int current_space_;  // from enum AllocationSpace.
  ObjectIterator* iterator_;  // object iterator for the current space.
};


// A HeapIterator provides iteration over the whole heap. It aggregates the
// specific iterators for the different spaces, as each of these can iterate
// over only one space.
class HeapIterator BASE_EMBEDDED {
 public:
  HeapIterator();
  virtual ~HeapIterator();

  HeapObject* next();
  void reset();

 private:
  // Perform the initialization.
  void Init();

  // Perform all necessary shutdown (destruction) work.
  void Shutdown();

  // Space iterator for iterating all the spaces.
  SpaceIterator* space_iterator_;
  // Object iterator for the space currently being iterated.
  ObjectIterator* object_iterator_;
};
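
// A minimal usage sketch (hypothetical caller; next() returning NULL at the
// end is assumed to mirror the space iterators above):
//
//   HeapIterator iterator;
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // ... visit every live object in every space ...
//   }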


// Cache for mapping (map, property name) into field offset.
// Cleared at startup and prior to mark sweep collection.
class KeyedLookupCache {
 public:
  // Lookup field offset for (map, name). If absent, -1 is returned.
  static int Lookup(Map* map, String* name);

  // Update an element in the cache.
  static void Update(Map* map, String* name, int field_offset);

  // Clear the cache.
  static void Clear();

  static const int kLength = 64;
  static const int kCapacityMask = kLength - 1;
  static const int kMapHashShift = 2;

 private:
  static inline int Hash(Map* map, String* name);

  // Get the address of the keys and field_offsets arrays. Used in
  // generated code to perform cache lookups.
  static Address keys_address() {
    return reinterpret_cast<Address>(&keys_);
  }

  static Address field_offsets_address() {
    return reinterpret_cast<Address>(&field_offsets_);
  }

  struct Key {
    Map* map;
    String* name;
  };
  static Key keys_[kLength];
  static int field_offsets_[kLength];

  friend class ExternalReference;
};
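
// The intended lookup-then-fill pattern, as a sketch (the slow-path helper
// is hypothetical; only Lookup and Update above are real):
//
//   int offset = KeyedLookupCache::Lookup(map, name);
//   if (offset == -1) {
//     offset = SlowFieldOffsetLookup(map, name);  // hypothetical slow path
//     KeyedLookupCache::Update(map, name, offset);
//   }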


// Cache for mapping (array, property name) into descriptor index.
// The cache contains both positive and negative results.
// A descriptor index equal to kNotFound means the property is absent.
// Cleared at startup and prior to any GC.
class DescriptorLookupCache {
 public:
  // Lookup descriptor index for (array, name).
  // If absent from the cache, kAbsent is returned.
  static int Lookup(DescriptorArray* array, String* name) {
    if (!StringShape(name).IsSymbol()) return kAbsent;
    int index = Hash(array, name);
    Key& key = keys_[index];
    if ((key.array == array) && (key.name == name)) return results_[index];
    return kAbsent;
  }

  // Update an element in the cache.
  static void Update(DescriptorArray* array, String* name, int result) {
    ASSERT(result != kAbsent);
    if (StringShape(name).IsSymbol()) {
      int index = Hash(array, name);
      Key& key = keys_[index];
      key.array = array;
      key.name = name;
      results_[index] = result;
    }
  }

  // Clear the cache.
  static void Clear();

  static const int kAbsent = -2;

 private:
  static int Hash(DescriptorArray* array, String* name) {
    // Uses only lower 32 bits if pointers are larger.
    uint32_t array_hash =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array)) >> 2;
    uint32_t name_hash =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> 2;
    return (array_hash ^ name_hash) % kLength;
  }

  static const int kLength = 64;
  struct Key {
    DescriptorArray* array;
    String* name;
  };

  static Key keys_[kLength];
  static int results_[kLength];
};


// ----------------------------------------------------------------------------
// Marking stack for tracing live objects.

class MarkingStack {
 public:
  void Initialize(Address low, Address high) {
    top_ = low_ = reinterpret_cast<HeapObject**>(low);
    high_ = reinterpret_cast<HeapObject**>(high);
    overflowed_ = false;
  }

  bool is_full() { return top_ >= high_; }

  bool is_empty() { return top_ <= low_; }

  bool overflowed() { return overflowed_; }

  void clear_overflowed() { overflowed_ = false; }

  // Push the (marked) object on the marking stack if there is room,
  // otherwise mark the object as overflowed and wait for a rescan of the
  // heap.
  void Push(HeapObject* object) {
    CHECK(object->IsHeapObject());
    if (is_full()) {
      object->SetOverflow();
      overflowed_ = true;
    } else {
      *(top_++) = object;
    }
  }

  HeapObject* Pop() {
    ASSERT(!is_empty());
    HeapObject* object = *(--top_);
    CHECK(object->IsHeapObject());
    return object;
  }

 private:
  HeapObject** low_;
  HeapObject** top_;
  HeapObject** high_;
  bool overflowed_;
};
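
// A sketch of the push/drain/overflow protocol (the [low, high) buffer and
// the child-marking step are assumptions supplied by the collector, not by
// this class):
//
//   MarkingStack stack;
//   stack.Initialize(low, high);  // caller-provided backing memory
//   stack.Push(root_object);      // sets the overflow flag when full
//   while (!stack.is_empty()) {
//     HeapObject* object = stack.Pop();
//     // ... mark and push the object's unmarked children ...
//   }
//   if (stack.overflowed()) {
//     // Some objects carry the overflow bit instead of a stack slot:
//     // rescan the heap for them, clear_overflowed(), and drain again.
//   }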


// A helper class to document/test C++ scopes where we do not
// expect a GC. Usage:
//
// /* Allocation not allowed: we cannot handle a GC in this scope. */
// { AssertNoAllocation nogc;
//   ...
// }

#ifdef DEBUG

class DisallowAllocationFailure {
 public:
  DisallowAllocationFailure() {
    old_state_ = Heap::disallow_allocation_failure_;
    Heap::disallow_allocation_failure_ = true;
  }
  ~DisallowAllocationFailure() {
    Heap::disallow_allocation_failure_ = old_state_;
  }

 private:
  bool old_state_;
};

class AssertNoAllocation {
 public:
  AssertNoAllocation() {
    old_state_ = Heap::allow_allocation(false);
  }

  ~AssertNoAllocation() {
    Heap::allow_allocation(old_state_);
  }

 private:
  bool old_state_;
};

class DisableAssertNoAllocation {
 public:
  DisableAssertNoAllocation() {
    old_state_ = Heap::allow_allocation(true);
  }

  ~DisableAssertNoAllocation() {
    Heap::allow_allocation(old_state_);
  }

 private:
  bool old_state_;
};

#else  // ndef DEBUG

class AssertNoAllocation {
 public:
  AssertNoAllocation() { }
  ~AssertNoAllocation() { }
};

class DisableAssertNoAllocation {
 public:
  DisableAssertNoAllocation() { }
  ~DisableAssertNoAllocation() { }
};

#endif

// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.

class GCTracer BASE_EMBEDDED {
 public:
  class Scope BASE_EMBEDDED {
   public:
    enum ScopeId {
      EXTERNAL,
      MC_MARK,
      MC_SWEEP,
      MC_COMPACT,
      kNumberOfScopes
    };

    Scope(GCTracer* tracer, ScopeId scope)
        : tracer_(tracer),
          scope_(scope) {
      start_time_ = OS::TimeCurrentMillis();
    }

    ~Scope() {
      ASSERT((0 <= scope_) && (scope_ < kNumberOfScopes));
      tracer_->scopes_[scope_] += OS::TimeCurrentMillis() - start_time_;
    }

   private:
    GCTracer* tracer_;
    ScopeId scope_;
    double start_time_;
  };
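
  // The RAII timing pattern this enables, as a sketch (the marking-phase
  // body is a placeholder):
  //
  //   {
  //     GCTracer::Scope scope(tracer, GCTracer::Scope::MC_MARK);
  //     // ... marking work ...
  //   }  // destructor charges the elapsed milliseconds to scopes_[MC_MARK]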

  GCTracer();
  ~GCTracer();

  // Sets the collector.
  void set_collector(GarbageCollector collector) { collector_ = collector; }

  // Sets the GC count.
  void set_gc_count(int count) { gc_count_ = count; }

  // Sets the full GC count.
  void set_full_gc_count(int count) { full_gc_count_ = count; }

  // Sets the flag that this is a compacting full GC.
  void set_is_compacting() { is_compacting_ = true; }
  bool is_compacting() const { return is_compacting_; }

  // Increment and decrement the count of marked objects.
  void increment_marked_count() { ++marked_count_; }
  void decrement_marked_count() { --marked_count_; }

  int marked_count() { return marked_count_; }

  void increment_promoted_objects_size(int object_size) {
    promoted_objects_size_ += object_size;
  }

  // Returns maximum GC pause.
  static int get_max_gc_pause() { return max_gc_pause_; }

  // Returns maximum size of objects alive after GC.
  static int get_max_alive_after_gc() { return max_alive_after_gc_; }

  // Returns minimal interval between two subsequent collections.
  static int get_min_in_mutator() { return min_in_mutator_; }

 private:
  // Returns a string matching the collector.
  const char* CollectorString();

  // Returns the size of objects in the heap (in MB).
  double SizeOfHeapObjects() {
    return (static_cast<double>(Heap::SizeOfObjects())) / MB;
  }

  double start_time_;  // Timestamp set in the constructor.
  int start_size_;  // Size of objects in the heap, set in the constructor.
  GarbageCollector collector_;  // Type of collector.

  // A count (including this one, e.g., the first collection is 1) of the
  // number of garbage collections.
  int gc_count_;

  // A count (including this one) of the number of full garbage collections.
  int full_gc_count_;

  // True if the current GC is a compacting full collection, false
  // otherwise.
  bool is_compacting_;

  // True if the *previous* full GC was a compacting collection (will be
  // false if there has not been a previous full GC).
  bool previous_has_compacted_;

  // On a full GC, a count of the number of marked objects. Incremented
  // when an object is marked and decremented when an object's mark bit is
  // cleared. Will be zero on a scavenge collection.
  int marked_count_;

  // The count from the end of the previous full GC. Will be zero if there
  // was no previous full GC.
  int previous_marked_count_;

  // Amounts of time spent in different scopes during GC.
  double scopes_[Scope::kNumberOfScopes];

  // Total amount of space either wasted or contained in one of free lists
  // before the current GC.
  int in_free_list_or_wasted_before_gc_;

  // Difference between space used in the heap at the beginning of the current
  // collection and the end of the previous collection.
  int allocated_since_last_gc_;

  // Amount of time spent in the mutator, i.e. the time elapsed between the
  // end of the previous collection and the beginning of the current one.
  double spent_in_mutator_;

  // Size of objects promoted during the current collection.
  int promoted_objects_size_;

  // Maximum GC pause.
  static int max_gc_pause_;

  // Maximum size of objects alive after GC.
  static int max_alive_after_gc_;

  // Minimal interval between two subsequent collections.
  static int min_in_mutator_;

  // Size of objects alive after last GC.
  static int alive_after_last_gc_;

  static double last_gc_end_timestamp_;
};


class TranscendentalCache {
 public:
  enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches};

  explicit TranscendentalCache(Type t);

  // Returns a heap number with f(input), where f is a math function specified
  // by the 'type' argument.
  static inline Object* Get(Type type, double input) {
    TranscendentalCache* cache = caches_[type];
    if (cache == NULL) {
      caches_[type] = cache = new TranscendentalCache(type);
    }
    return cache->Get(input);
  }
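
  // A minimal usage sketch (hypothetical caller; note that Get() allocates a
  // heap number on a cache miss, so the result can be a Failure):
  //
  //   Object* result =
  //       TranscendentalCache::Get(TranscendentalCache::SIN, 0.5);
  //   if (result->IsFailure()) {
  //     // ... retry after GC, as for any failed allocation ...
  //   }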

  // The cache contains raw Object pointers. This method disposes of
  // them before a garbage collection.
  static void Clear();

 private:
  inline Object* Get(double input) {
    Converter c;
    c.dbl = input;
    int hash = Hash(c);
    Element e = elements_[hash];
    if (e.in[0] == c.integers[0] &&
        e.in[1] == c.integers[1]) {
      ASSERT(e.output != NULL);
      Counters::transcendental_cache_hit.Increment();
      return e.output;
    }
    double answer = Calculate(input);
    Object* heap_number = Heap::AllocateHeapNumber(answer);
    if (!heap_number->IsFailure()) {
      elements_[hash].in[0] = c.integers[0];
      elements_[hash].in[1] = c.integers[1];
      elements_[hash].output = heap_number;
    }
    Counters::transcendental_cache_miss.Increment();
    return heap_number;
  }

  inline double Calculate(double input) {
    switch (type_) {
      case ACOS:
        return acos(input);
      case ASIN:
        return asin(input);
      case ATAN:
        return atan(input);
      case COS:
        return cos(input);
      case EXP:
        return exp(input);
      case LOG:
        return log(input);
      case SIN:
        return sin(input);
      case TAN:
        return tan(input);
      default:
        return 0.0;  // Never happens.
    }
  }

  static const int kCacheSize = 512;
  struct Element {
    uint32_t in[2];
    Object* output;
  };
  union Converter {
    double dbl;
    uint32_t integers[2];
  };
  inline static int Hash(const Converter& c) {
    uint32_t hash = (c.integers[0] ^ c.integers[1]);
    hash ^= static_cast<int32_t>(hash) >> 16;
    hash ^= static_cast<int32_t>(hash) >> 8;
    return (hash & (kCacheSize - 1));
  }
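
  // A worked example of the key encoding (assuming a little-endian target;
  // the exact slot index is irrelevant, only that equal doubles collide):
  //
  //   Converter c;
  //   c.dbl = 1.0;  // IEEE-754 bit pattern 0x3FF0000000000000
  //   // c.integers[0] == 0x00000000, c.integers[1] == 0x3FF00000
  //
  // Keys compare by bit pattern, so 0.0 and -0.0 are distinct keys.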

  static Address cache_array_address() {
    // Used to create an external reference.
    return reinterpret_cast<Address>(caches_);
  }

  // Allow access to the caches_ array as an ExternalReference.
  friend class ExternalReference;
  // Inline implementation of the caching.
  friend class TranscendentalCacheStub;

  static TranscendentalCache* caches_[kNumberOfCaches];
  Element elements_[kCacheSize];
  Type type_;
};


// The external strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
// finalize them.
class ExternalStringTable : public AllStatic {
 public:
  // Registers an external string.
  inline static void AddString(String* string);

  inline static void Iterate(ObjectVisitor* v);

  // Restores the internal invariant and gets rid of collected strings.
  // Must be called after each Iterate() that modified the strings.
  static void CleanUp();

  // Destroys all allocated memory.
  static void TearDown();

 private:
  friend class Heap;

  inline static void Verify();

  inline static void AddOldString(String* string);

  // Notifies the table that only a prefix of the new list is valid.
  inline static void ShrinkNewStrings(int position);

  // To speed up scavenge collections, new space strings are kept
  // separate from old space strings.
  static List<Object*> new_space_strings_;
  static List<Object*> old_space_strings_;
};
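
// A sketch of the GC-side protocol implied by the comments above (the
// visitor and the exact call site are assumptions; only Iterate and CleanUp
// are declared here):
//
//   // While scavenging, update or clear each registered string pointer:
//   ExternalStringTable::Iterate(&visitor);
//   // Then drop the entries whose strings did not survive:
//   ExternalStringTable::CleanUp();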

} } // namespace v8::internal

#endif  // V8_HEAP_H_