// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_HEAP_H_
#define V8_HEAP_H_

#include <math.h>

#include "splay-tree-inl.h"
#include "v8-counters.h"

namespace v8 {
namespace internal {

// Forward declarations.
class ZoneScopeInfo;

// Defines all the roots in Heap.
#define UNCONDITIONAL_STRONG_ROOT_LIST(V) \
  /* Put the byte array map early. We need it to be in place by the time */ \
  /* the deserializer hits the next page, since it wants to put a byte */ \
  /* array in the unused space at the end of the page. */ \
  V(Map, byte_array_map, ByteArrayMap) \
  V(Map, one_pointer_filler_map, OnePointerFillerMap) \
  V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
  /* Cluster the most popular ones in a few cache lines here at the top. */ \
  V(Smi, stack_limit, StackLimit) \
  V(Object, undefined_value, UndefinedValue) \
  V(Object, the_hole_value, TheHoleValue) \
  V(Object, null_value, NullValue) \
  V(Object, true_value, TrueValue) \
  V(Object, false_value, FalseValue) \
  V(Map, heap_number_map, HeapNumberMap) \
  V(Map, global_context_map, GlobalContextMap) \
  V(Map, fixed_array_map, FixedArrayMap) \
  V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
  V(Map, meta_map, MetaMap) \
  V(Object, termination_exception, TerminationException) \
  V(Map, hash_table_map, HashTableMap) \
  V(FixedArray, empty_fixed_array, EmptyFixedArray) \
  V(Map, string_map, StringMap) \
  V(Map, ascii_string_map, AsciiStringMap) \
  V(Map, symbol_map, SymbolMap) \
  V(Map, ascii_symbol_map, AsciiSymbolMap) \
  V(Map, cons_symbol_map, ConsSymbolMap) \
  V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \
  V(Map, external_symbol_map, ExternalSymbolMap) \
  V(Map, external_symbol_with_ascii_data_map, ExternalSymbolWithAsciiDataMap) \
  V(Map, external_ascii_symbol_map, ExternalAsciiSymbolMap) \
  V(Map, cons_string_map, ConsStringMap) \
  V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
  V(Map, external_string_map, ExternalStringMap) \
  V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \
  V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
  V(Map, undetectable_string_map, UndetectableStringMap) \
  V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
  V(Map, pixel_array_map, PixelArrayMap) \
  V(Map, external_byte_array_map, ExternalByteArrayMap) \
  V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap) \
  V(Map, external_short_array_map, ExternalShortArrayMap) \
  V(Map, external_unsigned_short_array_map, ExternalUnsignedShortArrayMap) \
  V(Map, external_int_array_map, ExternalIntArrayMap) \
  V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap) \
  V(Map, external_float_array_map, ExternalFloatArrayMap) \
  V(Map, context_map, ContextMap) \
  V(Map, catch_context_map, CatchContextMap) \
  V(Map, code_map, CodeMap) \
  V(Map, oddball_map, OddballMap) \
  V(Map, global_property_cell_map, GlobalPropertyCellMap) \
  V(Map, shared_function_info_map, SharedFunctionInfoMap) \
  V(Map, proxy_map, ProxyMap) \
  V(Object, nan_value, NanValue) \
  V(Object, minus_zero_value, MinusZeroValue) \
  V(Object, instanceof_cache_function, InstanceofCacheFunction) \
  V(Object, instanceof_cache_map, InstanceofCacheMap) \
  V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
  V(String, empty_string, EmptyString) \
  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
  V(Map, neander_map, NeanderMap) \
  V(JSObject, message_listeners, MessageListeners) \
  V(Proxy, prototype_accessors, PrototypeAccessors) \
  V(NumberDictionary, code_stubs, CodeStubs) \
  V(NumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
  V(Code, js_entry_code, JsEntryCode) \
  V(Code, js_construct_entry_code, JsConstructEntryCode) \
  V(Code, c_entry_code, CEntryCode) \
  V(FixedArray, number_string_cache, NumberStringCache) \
  V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
  V(FixedArray, natives_source_cache, NativesSourceCache) \
  V(Object, last_script_id, LastScriptId) \
  V(Script, empty_script, EmptyScript) \
  V(Smi, real_stack_limit, RealStackLimit) \

#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#define STRONG_ROOT_LIST(V) \
  UNCONDITIONAL_STRONG_ROOT_LIST(V) \
  V(Code, re_c_entry_code, RegExpCEntryCode)
#else
#define STRONG_ROOT_LIST(V) UNCONDITIONAL_STRONG_ROOT_LIST(V)
#endif

#define ROOT_LIST(V) \
  STRONG_ROOT_LIST(V) \
  V(SymbolTable, symbol_table, SymbolTable)

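// The lists above use the "X macro" pattern: a client defines what V(...)
// expands to and instantiates the list with it, so a single table can drive
// accessors, enum indices, and so on. As an illustrative sketch (COUNT_ROOT
// and kUnconditionalStrongRootCount are hypothetical names, not part of this
// header), a client could count the unconditional strong roots like this:
//
//   #define COUNT_ROOT(type, name, camel_name) +1
//   static const int kUnconditionalStrongRootCount =
//       0 UNCONDITIONAL_STRONG_ROOT_LIST(COUNT_ROOT);
//   #undef COUNT_ROOT
//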
#define SYMBOL_LIST(V) \
  V(Array_symbol, "Array") \
  V(Object_symbol, "Object") \
  V(Proto_symbol, "__proto__") \
  V(StringImpl_symbol, "StringImpl") \
  V(arguments_symbol, "arguments") \
  V(Arguments_symbol, "Arguments") \
  V(arguments_shadow_symbol, ".arguments") \
  V(call_symbol, "call") \
  V(apply_symbol, "apply") \
  V(caller_symbol, "caller") \
  V(boolean_symbol, "boolean") \
  V(Boolean_symbol, "Boolean") \
  V(callee_symbol, "callee") \
  V(constructor_symbol, "constructor") \
  V(code_symbol, ".code") \
  V(result_symbol, ".result") \
  V(catch_var_symbol, ".catch-var") \
  V(empty_symbol, "") \
  V(eval_symbol, "eval") \
  V(function_symbol, "function") \
  V(length_symbol, "length") \
  V(name_symbol, "name") \
  V(number_symbol, "number") \
  V(Number_symbol, "Number") \
  V(RegExp_symbol, "RegExp") \
  V(source_symbol, "source") \
  V(global_symbol, "global") \
  V(ignore_case_symbol, "ignoreCase") \
  V(multiline_symbol, "multiline") \
  V(input_symbol, "input") \
  V(index_symbol, "index") \
  V(last_index_symbol, "lastIndex") \
  V(object_symbol, "object") \
  V(prototype_symbol, "prototype") \
  V(string_symbol, "string") \
  V(String_symbol, "String") \
  V(Date_symbol, "Date") \
  V(this_symbol, "this") \
  V(to_string_symbol, "toString") \
  V(char_at_symbol, "CharAt") \
  V(undefined_symbol, "undefined") \
  V(value_of_symbol, "valueOf") \
  V(InitializeVarGlobal_symbol, "InitializeVarGlobal") \
  V(InitializeConstGlobal_symbol, "InitializeConstGlobal") \
  V(stack_overflow_symbol, "kStackOverflowBoilerplate") \
  V(illegal_access_symbol, "illegal access") \
  V(out_of_memory_symbol, "out-of-memory") \
  V(illegal_execution_state_symbol, "illegal execution state") \
  V(get_symbol, "get") \
  V(set_symbol, "set") \
  V(function_class_symbol, "Function") \
  V(illegal_argument_symbol, "illegal argument") \
  V(MakeReferenceError_symbol, "MakeReferenceError") \
  V(MakeSyntaxError_symbol, "MakeSyntaxError") \
  V(MakeTypeError_symbol, "MakeTypeError") \
  V(invalid_lhs_in_assignment_symbol, "invalid_lhs_in_assignment") \
  V(invalid_lhs_in_for_in_symbol, "invalid_lhs_in_for_in") \
  V(invalid_lhs_in_postfix_op_symbol, "invalid_lhs_in_postfix_op") \
  V(invalid_lhs_in_prefix_op_symbol, "invalid_lhs_in_prefix_op") \
  V(illegal_return_symbol, "illegal_return") \
  V(illegal_break_symbol, "illegal_break") \
  V(illegal_continue_symbol, "illegal_continue") \
  V(unknown_label_symbol, "unknown_label") \
  V(redeclaration_symbol, "redeclaration") \
  V(failure_symbol, "<failure>") \
  V(space_symbol, " ") \
  V(exec_symbol, "exec") \
  V(zero_symbol, "0") \
  V(global_eval_symbol, "GlobalEval") \
  V(identity_hash_symbol, "v8::IdentityHash") \
  V(closure_symbol, "(closure)")


// Forward declarations of the GCTracer and HeapStats classes.
class GCTracer;
class HeapStats;


typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);

typedef bool (*DirtyRegionCallback)(Address start,
                                    Address end,
                                    ObjectSlotCallback copy_object_func);


// The all-static Heap class captures the interface to the global object heap.
// All JavaScript contexts created by this process share the same object heap.

class Heap : public AllStatic {
 public:
  // Configures the heap size before setup. Returns false if the heap has
  // already been set up.
  static bool ConfigureHeap(int max_semispace_size, int max_old_gen_size);
  static bool ConfigureHeapDefault();

  // Initializes the global object heap. If create_heap_objects is true,
  // also creates the basic non-mutable objects.
  // Returns whether it succeeded.
  static bool Setup(bool create_heap_objects);

  // Destroys all memory allocated by the heap.
  static void TearDown();

  // Sets the stack limit in the roots_ array. Some architectures generate
  // code that looks here, because it is faster than loading from the static
  // jslimit_/real_jslimit_ variable in the StackGuard.
  static void SetStackLimits();

  // Returns whether Setup has been called.
  static bool HasBeenSetup();

  // Returns the maximum amount of memory reserved for the heap. For
  // the young generation, we reserve 4 times the amount needed for a
  // semi space. The young generation consists of two semi spaces and
  // we reserve twice the amount needed for those in order to ensure
  // that new space can be aligned to its size.
  static int MaxReserved() {
    return 4 * reserved_semispace_size_ + max_old_generation_size_;
  }
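  // Illustrative arithmetic sketch (hypothetical sizes, not defaults taken
  // from this file): with reserved_semispace_size_ == 8 MB and
  // max_old_generation_size_ == 512 MB, MaxReserved() == 4 * 8 MB + 512 MB
  // == 544 MB: the two semispaces, doubled for the alignment reservation
  // described above, plus the old generation budget.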
  static int MaxSemiSpaceSize() { return max_semispace_size_; }
  static int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
  static int InitialSemiSpaceSize() { return initial_semispace_size_; }
  static int MaxOldGenerationSize() { return max_old_generation_size_; }

  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
  // more spaces are needed until it reaches the limit.
  static int Capacity();

  // Returns the amount of memory currently committed for the heap.
  static int CommittedMemory();

  // Returns the available bytes in space w/o growing.
  // Heap doesn't guarantee that it can allocate an object that requires
  // all available bytes. Check MaxHeapObjectSize() instead.
  static int Available();

  // Returns the maximum object size in paged space.
  static inline int MaxObjectSizeInPagedSpace();

  // Returns the size of all objects residing in the heap.
  static int SizeOfObjects();

  // Return the starting address and a mask for the new space. And-masking an
  // address with the mask will result in the start address of the new space
  // for all addresses in either semispace.
  static Address NewSpaceStart() { return new_space_.start(); }
  static uintptr_t NewSpaceMask() { return new_space_.mask(); }
  static Address NewSpaceTop() { return new_space_.top(); }

  static NewSpace* new_space() { return &new_space_; }
  static OldSpace* old_pointer_space() { return old_pointer_space_; }
  static OldSpace* old_data_space() { return old_data_space_; }
  static OldSpace* code_space() { return code_space_; }
  static MapSpace* map_space() { return map_space_; }
  static CellSpace* cell_space() { return cell_space_; }
  static LargeObjectSpace* lo_space() { return lo_space_; }

  static bool always_allocate() { return always_allocate_scope_depth_ != 0; }
  static Address always_allocate_scope_depth_address() {
    return reinterpret_cast<Address>(&always_allocate_scope_depth_);
  }
  static bool linear_allocation() {
    return linear_allocation_scope_depth_ != 0;
  }

  static Address* NewSpaceAllocationTopAddress() {
    return new_space_.allocation_top_address();
  }
  static Address* NewSpaceAllocationLimitAddress() {
    return new_space_.allocation_limit_address();
  }

  // Uncommit unused semi space.
  static bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the heap by marking all spaces read-only/writable.
  static void Protect();
  static void Unprotect();
#endif

  // Allocates and initializes a new JavaScript object based on a
  // constructor.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateJSObject(JSFunction* constructor,
                                  PretenureFlag pretenure = NOT_TENURED);

  // Allocates and initializes a new global object based on a constructor.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateGlobalObject(JSFunction* constructor);

  // Returns a deep copy of the JavaScript object.
  // Properties and elements are copied too.
  // Returns failure if allocation failed.
  static Object* CopyJSObject(JSObject* source);

  // Allocates the function prototype.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateFunctionPrototype(JSFunction* function);

  // Reinitializes a JSGlobalProxy based on a constructor. The object
  // must have the same size as objects allocated using the
  // constructor. The object is reinitialized and behaves as an
  // object that has been freshly allocated using the constructor.
  static Object* ReinitializeJSGlobalProxy(JSFunction* constructor,
                                           JSGlobalProxy* global);

  // Allocates and initializes a new JavaScript object based on a map.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateJSObjectFromMap(Map* map,
                                         PretenureFlag pretenure = NOT_TENURED);

  // Allocates a heap object based on the map.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  static Object* Allocate(Map* map, AllocationSpace space);

  // Allocates a JS Map in the heap.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  static Object* AllocateMap(InstanceType instance_type, int instance_size);

  // Allocates a partial map for bootstrapping.
  static Object* AllocatePartialMap(InstanceType instance_type,
                                    int instance_size);

  // Allocates a map for the specified function.
  static Object* AllocateInitialMap(JSFunction* fun);

  // Allocates an empty code cache.
  static Object* AllocateCodeCache();

  // Clears the Instanceof cache (used when a prototype changes).
  static void ClearInstanceofCache() {
    set_instanceof_cache_function(the_hole_value());
  }

  // Allocates and fully initializes a String. There are two String
  // encodings: ASCII and two byte. One should choose between the three string
  // allocation functions based on the encoding of the string buffer used to
  // initialize the string.
  // - ...FromAscii initializes the string from a buffer that is ASCII
  //   encoded (it does not check that the buffer is ASCII encoded) and the
  //   result will be ASCII encoded.
  // - ...FromUTF8 initializes the string from a buffer that is UTF-8
  //   encoded. If the characters are all single-byte characters, the
  //   result will be ASCII encoded, otherwise it will be converted to two
  //   byte.
  // - ...FromTwoByte initializes the string from a buffer that is two-byte
  //   encoded. If the characters are all single-byte characters, the
  //   result will be converted to ASCII, otherwise it will be left as
  //   two-byte.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateStringFromAscii(
      Vector<const char> str,
      PretenureFlag pretenure = NOT_TENURED);
  static Object* AllocateStringFromUtf8(
      Vector<const char> str,
      PretenureFlag pretenure = NOT_TENURED);
  static Object* AllocateStringFromTwoByte(
      Vector<const uc16> str,
      PretenureFlag pretenure = NOT_TENURED);

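  // Illustrative usage sketch (not a declaration from this header): callers
  // are expected to check for allocation failure before using the result,
  // e.g.
  //
  //   Object* result = Heap::AllocateStringFromAscii(CStrVector("example"));
  //   if (result->IsFailure()) return result;  // caller retries after GC
  //   String* str = String::cast(result);
  //
  // The failure-check protocol follows the comments above; CStrVector is
  // assumed from the surrounding codebase (see LookupAsciiSymbol below).
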
  // Allocates a symbol in old space based on the character stream.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  static inline Object* AllocateSymbol(Vector<const char> str,
                                       int chars,
                                       uint32_t hash_field);

  static Object* AllocateInternalSymbol(unibrow::CharacterStream* buffer,
                                        int chars,
                                        uint32_t hash_field);

  static Object* AllocateExternalSymbol(Vector<const char> str,
                                        int chars);


  // Allocates and partially initializes a String. There are two String
  // encodings: ASCII and two byte. These functions allocate a string of the
  // given length and set its map and length fields. The characters of the
  // string are uninitialized.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateRawAsciiString(
      int length,
      PretenureFlag pretenure = NOT_TENURED);
  static Object* AllocateRawTwoByteString(
      int length,
      PretenureFlag pretenure = NOT_TENURED);

  // Computes a single character string where the character has the given
  // code. A cache is used for ASCII codes.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed. Please note this does not perform a garbage collection.
  static Object* LookupSingleCharacterStringFromCode(uint16_t code);

  // Allocates a byte array of the specified length.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateByteArray(int length, PretenureFlag pretenure);

  // Allocates a non-tenured byte array of the specified length.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateByteArray(int length);

  // Allocates a pixel array of the specified length.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocatePixelArray(int length,
                                    uint8_t* external_pointer,
                                    PretenureFlag pretenure);

  // Allocates an external array of the specified length and type.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateExternalArray(int length,
                                       ExternalArrayType array_type,
                                       void* external_pointer,
                                       PretenureFlag pretenure);

  // Allocates a tenured JS global property cell.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateJSGlobalPropertyCell(Object* value);

  // Allocates a fixed array initialized with undefined values.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateFixedArray(int length, PretenureFlag pretenure);
  // Allocates a fixed array initialized with undefined values.
  static Object* AllocateFixedArray(int length);

  // Allocates an uninitialized fixed array. It must be filled by the caller.
  //
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateUninitializedFixedArray(int length);

  // Makes a copy of src and returns it. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
  static Object* CopyFixedArray(FixedArray* src);

  // Allocates a fixed array initialized with the hole values.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateFixedArrayWithHoles(
      int length,
      PretenureFlag pretenure = NOT_TENURED);

  // AllocateHashTable is identical to AllocateFixedArray except
  // that the resulting object has hash_table_map as map.
  static Object* AllocateHashTable(int length,
                                   PretenureFlag pretenure = NOT_TENURED);

  // Allocates a global (but otherwise uninitialized) context.
  static Object* AllocateGlobalContext();

  // Allocates a function context.
  static Object* AllocateFunctionContext(int length, JSFunction* closure);

  // Allocates a 'with' context.
  static Object* AllocateWithContext(Context* previous,
                                     JSObject* extension,
                                     bool is_catch_context);

  // Allocates a new utility object in the old generation.
  static Object* AllocateStruct(InstanceType type);

  // Allocates a function initialized with a shared part.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateFunction(Map* function_map,
                                  SharedFunctionInfo* shared,
                                  Object* prototype,
                                  PretenureFlag pretenure = TENURED);

  // Indices for direct access into argument objects.
  static const int kArgumentsObjectSize =
      JSObject::kHeaderSize + 2 * kPointerSize;
  static const int arguments_callee_index = 0;
  static const int arguments_length_index = 1;

  // Allocates an arguments object - optionally with an elements array.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateArgumentsObject(Object* callee, int length);

  // Same as NewNumberFromDouble, but may return a preallocated/immutable
  // number object (e.g., minus_zero_value_, nan_value_).
  static Object* NumberFromDouble(double value,
                                  PretenureFlag pretenure = NOT_TENURED);

  // Allocates a HeapNumber from value.
  static Object* AllocateHeapNumber(double value, PretenureFlag pretenure);
  static Object* AllocateHeapNumber(double value);  // pretenure = NOT_TENURED

  // Converts an int into either a Smi or a HeapNumber object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static inline Object* NumberFromInt32(int32_t value);

  // Converts an unsigned int into either a Smi or a HeapNumber object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static inline Object* NumberFromUint32(uint32_t value);

  // Allocates a new proxy object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateProxy(Address proxy,
                               PretenureFlag pretenure = NOT_TENURED);

  // Allocates a new SharedFunctionInfo object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateSharedFunctionInfo(Object* name);

  // Allocates a new cons string object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateConsString(String* first, String* second);

  // Allocates a new substring object which is a substring of an underlying
  // string buffer stretching from the index start (inclusive) to the index
  // end (exclusive).
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateSubString(String* buffer,
                                   int start,
                                   int end,
                                   PretenureFlag pretenure = NOT_TENURED);

  // Allocates a new external string object, which is backed by a string
  // resource that resides outside the V8 heap.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
  static Object* AllocateExternalStringFromAscii(
      ExternalAsciiString::Resource* resource);
  static Object* AllocateExternalStringFromTwoByte(
      ExternalTwoByteString::Resource* resource);

  // Finalizes an external string by deleting the associated external
  // data and clearing the resource pointer.
  static inline void FinalizeExternalString(String* string);

  // Allocates an uninitialized object. The memory is non-executable if the
  // hardware and OS allow.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  static inline Object* AllocateRaw(int size_in_bytes,
                                    AllocationSpace space,
                                    AllocationSpace retry_space);

  // Initializes a filler object to keep the ability to iterate over the heap
  // when shortening objects.
  static void CreateFillerObjectAt(Address addr, int size);

  // Makes a new native code object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed. On success, the pointer to the Code object is stored in the
  // self_reference. This allows generated code to reference its own Code
  // object by containing this pointer.
  // Please note this function does not perform a garbage collection.
  static Object* CreateCode(const CodeDesc& desc,
                            ZoneScopeInfo* sinfo,
                            Code::Flags flags,
                            Handle<Object> self_reference);

  static Object* CopyCode(Code* code);

  // Copies the code and scope info part of the code object, but inserts
  // the provided data as the relocation information.
  static Object* CopyCode(Code* code, Vector<byte> reloc_info);

  // Finds the symbol for string in the symbol table.
  // If not found, a new symbol is added to the table and returned.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  static Object* LookupSymbol(Vector<const char> str);
  static Object* LookupAsciiSymbol(const char* str) {
    return LookupSymbol(CStrVector(str));
  }
  static Object* LookupSymbol(String* str);
  static bool LookupSymbolIfExists(String* str, String** symbol);
  static bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);

  // Computes the matching symbol map for a string if possible.
  // NULL is returned if string is in new space or not flattened.
  static Map* SymbolMapForString(String* str);

  // Tries to flatten a string before a compare operation.
  //
  // Returns a failure if flattening was decided to be necessary but failed.
  // Note that even when no failure is returned, the string might stay
  // non-flat if flattening was not necessary.
  //
  // Please note this function does not perform a garbage collection.
  static inline Object* PrepareForCompare(String* str);

  // Converts the given boolean condition to a JavaScript boolean value.
  static Object* ToBoolean(bool condition) {
    return condition ? true_value() : false_value();
  }

  // Code that should be run before and after each GC. Includes some
  // reporting/verification activities when compiled with DEBUG set.
  static void GarbageCollectionPrologue();
  static void GarbageCollectionEpilogue();

  // Performs garbage collection operation.
  // Returns whether required_space bytes are available after the collection.
  static bool CollectGarbage(int required_space, AllocationSpace space);

  // Performs a full garbage collection. Forces compaction if the
  // parameter is true.
  static void CollectAllGarbage(bool force_compaction);

  // Notifies the heap that a context has been disposed.
  static int NotifyContextDisposed() { return ++contexts_disposed_; }

  // Utility to invoke the scavenger. This is needed in test code to
  // ensure correct callback for weak global handles.
  static void PerformScavenge();

#ifdef DEBUG
  // Utility used with flag gc-greedy.
  static bool GarbageCollectionGreedyCheck();
#endif

  static void AddGCPrologueCallback(
      GCPrologueCallback callback, GCType gc_type_filter);
  static void RemoveGCPrologueCallback(GCPrologueCallback callback);

  static void AddGCEpilogueCallback(
      GCEpilogueCallback callback, GCType gc_type_filter);
  static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);

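  // Illustrative registration sketch (OnFullGC is a hypothetical callback;
  // GCType, GCCallbackFlags and kGCTypeMarkSweepCompact are assumed from the
  // public V8 API headers):
  //
  //   static void OnFullGC(GCType type, GCCallbackFlags flags) { /* ... */ }
  //
  //   Heap::AddGCPrologueCallback(OnFullGC, kGCTypeMarkSweepCompact);
  //   // ... later, when no longer interested:
  //   Heap::RemoveGCPrologueCallback(OnFullGC);
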
  static void SetGlobalGCPrologueCallback(GCCallback callback) {
    ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL));
    global_gc_prologue_callback_ = callback;
  }
  static void SetGlobalGCEpilogueCallback(GCCallback callback) {
    ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
    global_gc_epilogue_callback_ = callback;
  }

  // Heap root getters. We have versions with and without type::cast() here.
  // You can't use type::cast during GC because the assert fails.
#define ROOT_ACCESSOR(type, name, camel_name) \
  static inline type* name() { \
    return type::cast(roots_[k##camel_name##RootIndex]); \
  } \
  static inline type* raw_unchecked_##name() { \
    return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
  }
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

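  // As an illustration, ROOT_LIST(ROOT_ACCESSOR) expands the entry
  // V(Object, undefined_value, UndefinedValue) into:
  //
  //   static inline Object* undefined_value() {
  //     return Object::cast(roots_[kUndefinedValueRootIndex]);
  //   }
  //   static inline Object* raw_unchecked_undefined_value() {
  //     return reinterpret_cast<Object*>(roots_[kUndefinedValueRootIndex]);
  //   }
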
// Utility type maps
#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
  static inline Map* name##_map() { \
    return Map::cast(roots_[k##Name##MapRootIndex]); \
  }
  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define SYMBOL_ACCESSOR(name, str) static inline String* name() { \
    return String::cast(roots_[k##name##RootIndex]); \
  }
  SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

  // The hidden_symbol is special because it is the empty string, but does
  // not match the empty string.
  static String* hidden_symbol() { return hidden_symbol_; }

  // Iterates over all roots in the heap.
  static void IterateRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all strong roots in the heap.
  static void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
  // Iterates over all the other roots in the heap.
  static void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);

  enum ExpectedPageWatermarkState {
    WATERMARK_SHOULD_BE_VALID,
    WATERMARK_CAN_BE_INVALID
  };

  // For each dirty region on a page in use from an old space, calls the
  // visit_dirty_region callback.
  // If either visit_dirty_region or callback can cause an allocation
  // in old space and changes in the allocation watermark, then
  // can_preallocate_during_iteration should be set to true.
  // All pages will be marked as having an invalid watermark upon
  // iteration completion.
  static void IterateDirtyRegions(
      PagedSpace* space,
      DirtyRegionCallback visit_dirty_region,
      ObjectSlotCallback callback,
      ExpectedPageWatermarkState expected_page_watermark_state);

  // Interprets marks as a bitvector of dirty marks for regions of size
  // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
  // the memory interval from start to top. For each dirty region calls the
  // visit_dirty_region callback. Returns the updated bitvector of dirty
  // marks.
  static uint32_t IterateDirtyRegions(uint32_t marks,
                                      Address start,
                                      Address end,
                                      DirtyRegionCallback visit_dirty_region,
                                      ObjectSlotCallback callback);

  // Iterates pointers to new space found in the memory interval from start
  // to end. Updates dirty marks for the page containing the start address.
  static void IterateAndMarkPointersToNewSpace(Address start,
                                               Address end,
                                               ObjectSlotCallback callback);

  // Iterates pointers to new space found in the memory interval from start
  // to end. Returns true if pointers to new space were found.
  static bool IteratePointersInDirtyRegion(Address start,
                                           Address end,
                                           ObjectSlotCallback callback);


  // Iterates pointers to new space found in the memory interval from start
  // to end. This interval is considered to belong to the map space.
  // Returns true if pointers to new space were found.
  static bool IteratePointersInDirtyMapsRegion(Address start,
                                               Address end,
                                               ObjectSlotCallback callback);


  // Returns whether the object resides in new space.
  static inline bool InNewSpace(Object* object);
  static inline bool InFromSpace(Object* object);
  static inline bool InToSpace(Object* object);

  // Checks whether an address/object is in the heap (including the auxiliary
  // area and unused area).
  static bool Contains(Address addr);
  static bool Contains(HeapObject* value);

  // Checks whether an address/object is in a space.
  // Currently used by tests, serialization and heap verification only.
  static bool InSpace(Address addr, AllocationSpace space);
  static bool InSpace(HeapObject* value, AllocationSpace space);

  // Finds out which space an object should get promoted to based on its type.
  static inline OldSpace* TargetSpace(HeapObject* object);
  static inline AllocationSpace TargetSpaceId(InstanceType type);

  // Sets the stub_cache_ (only used when expanding the dictionary).
  static void public_set_code_stubs(NumberDictionary* value) {
    roots_[kCodeStubsRootIndex] = value;
  }

  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
  static void public_set_non_monomorphic_cache(NumberDictionary* value) {
    roots_[kNonMonomorphicCacheRootIndex] = value;
  }

  static void public_set_empty_script(Script* script) {
    roots_[kEmptyScriptRootIndex] = script;
  }

  // Updates the next script id.
  static inline void SetLastScriptId(Object* last_script_id);

  // Generated code can embed this address to get access to the roots.
  static Object** roots_address() { return roots_; }

#ifdef DEBUG
  static void Print();
  static void PrintHandles();

  // Verify the heap is in its normal state before or after a GC.
  static void Verify();

  // Report heap statistics.
  static void ReportHeapStatistics(const char* title);
  static void ReportCodeStatistics(const char* title);

  // Fill in bogus values in from space.
  static void ZapFromSpace();
#endif

#if defined(ENABLE_LOGGING_AND_PROFILING)
  // Print short heap statistics.
  static void PrintShortHeapStatistics();
#endif

  // Makes a new symbol object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
  static Object* CreateSymbol(const char* str, int length, int hash);
  static Object* CreateSymbol(String* str);

  // Write barrier support for address[offset] = o.
  static inline void RecordWrite(Address address, int offset);

  // Write barrier support for address[start : start + len) = o.
  static inline void RecordWrites(Address address, int start, int len);

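  // Illustrative usage sketch (WriteField and kSomeFieldOffset are
  // hypothetical, not code from this header): after storing a potentially
  // new-space object into an old-space object, the write must be recorded
  // so the GC can find the old-to-new pointer:
  //
  //   obj->WriteField(kSomeFieldOffset, new_space_value);  // hypothetical
  //   Heap::RecordWrite(obj->address(), kSomeFieldOffset);
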
  // Given an address occupied by a live code object, return that object.
  static Object* FindCodeObject(Address a);

  // Invoke Shrink on shrinkable spaces.
  static void Shrink();

  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
  static inline HeapState gc_state() { return gc_state_; }

#ifdef DEBUG
  static bool IsAllocationAllowed() { return allocation_allowed_; }
  static inline bool allow_allocation(bool enable);

  static bool disallow_allocation_failure() {
    return disallow_allocation_failure_;
  }

  static void TracePathToObject(Object* target);
  static void TracePathToGlobal();
#endif

  // Callback function passed to Heap::Iterate etc. Copies an object if
  // necessary; the object might be promoted to an old space. The caller must
  // ensure the precondition that the object is (a) a heap object and (b) in
  // the heap's from space.
  static void ScavengePointer(HeapObject** p);
  static inline void ScavengeObject(HeapObject** p, HeapObject* object);

  // Commits from space if it is uncommitted.
  static void EnsureFromSpaceIsCommitted();

  // Support for partial snapshots. After calling this we can allocate a
  // certain number of bytes using only linear allocation (with a
  // LinearAllocationScope and an AlwaysAllocateScope) without using freelists
  // or causing a GC. It returns true if space was reserved or false if a GC
  // is needed. For paged spaces the space requested must include the space
  // wasted at the end of each page when allocating linearly.
  static void ReserveSpace(
      int new_space_size,
      int pointer_space_size,
      int data_space_size,
      int code_space_size,
      int map_space_size,
      int cell_space_size,
      int large_object_size);

  //
  // Support for the API.
  //

  static bool CreateApiObjects();

  // Attempts to find the number in a small cache. If we find it, return
  // the string representation of the number. Otherwise return undefined.
  static Object* GetNumberStringCache(Object* number);

  // Updates the cache with a new number-string pair.
  static void SetNumberStringCache(Object* number, String* str);

  // Adjusts the amount of registered external memory.
  // Returns the adjusted value.
  static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);

  // Allocates an uninitialized fixed array.
  static Object* AllocateRawFixedArray(int length);
  static Object* AllocateRawFixedArray(int length,
                                       PretenureFlag pretenure);

  // True if we have reached the allocation limit in the old generation that
  // should force the next GC (caused normally) to be a full one.
  static bool OldGenerationPromotionLimitReached() {
    return (PromotedSpaceSize() + PromotedExternalMemorySize())
           > old_gen_promotion_limit_;
  }

  static intptr_t OldGenerationSpaceAvailable() {
    return old_gen_allocation_limit_ -
           (PromotedSpaceSize() + PromotedExternalMemorySize());
  }

  // True if we have reached the allocation limit in the old generation that
  // should artificially cause a GC right now.
  static bool OldGenerationAllocationLimitReached() {
    return OldGenerationSpaceAvailable() < 0;
  }

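  // Illustrative arithmetic sketch (hypothetical numbers): if
  // old_gen_allocation_limit_ is 64 MB and PromotedSpaceSize() +
  // PromotedExternalMemorySize() adds up to 66 MB, then
  // OldGenerationSpaceAvailable() is -2 MB, so
  // OldGenerationAllocationLimitReached() returns true and a global GC
  // should be triggered as soon as is reasonable.
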
  // Can be called when the embedding application is idle.
  static bool IdleNotification();

  // Declare all the root indices.
  enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION

// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
    STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP

#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
    SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_INDEX_DECLARATION

    kSymbolTableRootIndex,
    kStrongRootListLength = kSymbolTableRootIndex,
    kRootListLength
  };

  static Object* NumberToString(Object* number,
                                bool check_number_string_cache = true);

  static Map* MapForExternalArrayType(ExternalArrayType array_type);
  static RootListIndex RootIndexForExternalArrayType(
      ExternalArrayType array_type);

  static void RecordStats(HeapStats* stats);

  // Copies a block of memory from src to dst. The size of the block should
  // be aligned by pointer size.
  static inline void CopyBlock(Address dst, Address src, int byte_size);

  static inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
                                                             Address src,
                                                             int byte_size);

  // Optimized version of memmove for blocks with pointer size aligned sizes
  // and pointer size aligned addresses.
  static inline void MoveBlock(Address dst, Address src, int byte_size);

  static inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
                                                             Address src,
                                                             int byte_size);

  // Checks the new space expansion criteria and expands semispaces if they
  // were hit.
  static void CheckNewSpaceExpansionCriteria();

  static inline void IncrementYoungSurvivorsCounter(int survived) {
    young_survivors_after_last_gc_ = survived;
    survived_since_last_expansion_ += survived;
  }

  static void UpdateNewSpaceReferencesInExternalStringTable(
      ExternalStringTableUpdaterCallback updater_func);

  // Helper function that governs the promotion policy from new space to
  // old. If the object's old address lies below the new space's age
  // mark or if we've already filled the bottom 1/16th of the to space,
  // we try to promote this object.
  static inline bool ShouldBePromoted(Address old_address, int object_size);

  static int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }

  static void ClearJSFunctionResultCaches();

  static GCTracer* tracer() { return tracer_; }

 private:
  static int reserved_semispace_size_;
  static int max_semispace_size_;
  static int initial_semispace_size_;
  static int max_old_generation_size_;
  static size_t code_range_size_;

  // For keeping track of how much data has survived
  // scavenge since the last new space expansion.
  static int survived_since_last_expansion_;

  static int always_allocate_scope_depth_;
  static int linear_allocation_scope_depth_;

  // For keeping track of context disposals.
  static int contexts_disposed_;

#if defined(V8_TARGET_ARCH_X64)
  static const int kMaxObjectSizeInNewSpace = 512*KB;
#else
  static const int kMaxObjectSizeInNewSpace = 256*KB;
#endif

  static NewSpace new_space_;
  static OldSpace* old_pointer_space_;
  static OldSpace* old_data_space_;
  static OldSpace* code_space_;
  static MapSpace* map_space_;
  static CellSpace* cell_space_;
  static LargeObjectSpace* lo_space_;
  static HeapState gc_state_;

  // Returns the size of objects residing in non-new spaces.
  static int PromotedSpaceSize();

  // Returns the amount of external memory registered since the last global
  // GC.
  static int PromotedExternalMemorySize();

  static int mc_count_;  // how many mark-compact collections happened
  static int ms_count_;  // how many mark-sweep collections happened
  static int gc_count_;  // how many gc happened

  // Total length of the strings we failed to flatten since the last GC.
  static int unflattened_strings_length_;

#define ROOT_ACCESSOR(type, name, camel_name) \
  static inline void set_##name(type* value) { \
    roots_[k##camel_name##RootIndex] = value; \
  }
  ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

#ifdef DEBUG
  static bool allocation_allowed_;

  // If the --gc-interval flag is set to a positive value, this
  // variable holds the value indicating the number of allocations
  // remaining until the next failure and garbage collection.
  static int allocation_timeout_;

  // Do we expect to be able to handle allocation failure at this
  // time?
  static bool disallow_allocation_failure_;
#endif  // DEBUG

  // Limit that triggers a global GC on the next (normally caused) GC. This
  // is checked when we have already decided to do a GC to help determine
  // which collector to invoke.
  static int old_gen_promotion_limit_;

  // Limit that triggers a global GC as soon as is reasonable. This is
  // checked before expanding a paged space in the old generation and on
  // every allocation in large object space.
  static int old_gen_allocation_limit_;

  // Limit on the amount of externally allocated memory allowed
  // between global GCs. If reached, a global GC is forced.
  static int external_allocation_limit_;

  // The amount of external memory registered through the API kept alive
  // by global handles.
  static int amount_of_external_allocated_memory_;

  // Caches the amount of external memory registered at the last global gc.
  static int amount_of_external_allocated_memory_at_last_global_gc_;

  // Indicates that an allocation has failed in the old generation since the
  // last GC.
  static int old_gen_exhausted_;

  static Object* roots_[kRootListLength];

  struct StringTypeTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  struct ConstantSymbolTable {
    const char* contents;
    RootListIndex index;
  };

  struct StructTable {
    InstanceType type;
    int size;
    RootListIndex index;
  };

  static const StringTypeTable string_type_table[];
  static const ConstantSymbolTable constant_symbol_table[];
  static const StructTable struct_table[];

  // The special hidden symbol which is an empty string, but does not match
  // any string when looked up in properties.
  static String* hidden_symbol_;

  // GC callback function, called before and after mark-compact GC.
  // Allocations in the callback function are disallowed.
  struct GCPrologueCallbackPair {
    GCPrologueCallbackPair(GCPrologueCallback callback, GCType gc_type)
        : callback(callback), gc_type(gc_type) {
    }
    bool operator==(const GCPrologueCallbackPair& pair) const {
      return pair.callback == callback;
    }
    GCPrologueCallback callback;
    GCType gc_type;
  };
  static List<GCPrologueCallbackPair> gc_prologue_callbacks_;

  struct GCEpilogueCallbackPair {
    GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type)
        : callback(callback), gc_type(gc_type) {
    }
    bool operator==(const GCEpilogueCallbackPair& pair) const {
      return pair.callback == callback;
    }
    GCEpilogueCallback callback;
    GCType gc_type;
  };
  static List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;

  static GCCallback global_gc_prologue_callback_;
  static GCCallback global_gc_epilogue_callback_;

  // Checks whether a global GC is necessary.
  static GarbageCollector SelectGarbageCollector(AllocationSpace space);

  // Performs garbage collection.
  static void PerformGarbageCollection(AllocationSpace space,
                                       GarbageCollector collector,
                                       GCTracer* tracer);

  // Allocate an uninitialized object in map space. The behavior is identical
  // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
  // have to test the allocation space argument and (b) can reduce code size
  // (since both AllocateRaw and AllocateRawMap are inlined).
  static inline Object* AllocateRawMap();

  // Allocate an uninitialized object in the global property cell space.
  static inline Object* AllocateRawCell();

  // Initializes a JSObject based on its map.
  static void InitializeJSObjectFromMap(JSObject* obj,
                                        FixedArray* properties,
                                        Map* map);

  static bool CreateInitialMaps();
  static bool CreateInitialObjects();

  // These four Create*EntryStub functions are here because of a gcc-4.4 bug
  // that assigns wrong vtable entries.
  static void CreateCEntryStub();
  static void CreateJSEntryStub();
  static void CreateJSConstructEntryStub();
  static void CreateRegExpCEntryStub();
1204
1205 static void CreateFixedStubs();
1206
Steve Block6ded16b2010-05-10 14:33:55 +01001207 static Object* CreateOddball(const char* to_string, Object* to_number);
Steve Blocka7e24c12009-10-30 11:49:00 +00001208
1209 // Allocate empty fixed array.
1210 static Object* AllocateEmptyFixedArray();
1211
1212 // Performs a minor collection in new generation.
1213 static void Scavenge();
Steve Block6ded16b2010-05-10 14:33:55 +01001214
1215 static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
1216 Object** pointer);
1217
Leon Clarkee46be812010-01-19 14:06:41 +00001218 static Address DoScavenge(ObjectVisitor* scavenge_visitor,
1219 Address new_space_front);
Steve Blocka7e24c12009-10-30 11:49:00 +00001220
1221 // Performs a major collection in the whole heap.
1222 static void MarkCompact(GCTracer* tracer);
1223
1224 // Code to be run before and after mark-compact.
1225 static void MarkCompactPrologue(bool is_compacting);
1226 static void MarkCompactEpilogue(bool is_compacting);
1227
Kristian Monsen25f61362010-05-21 11:50:48 +01001228 // Completely clear the Instanceof cache (to stop it keeping objects alive
1229 // around a GC).
1230 static void CompletelyClearInstanceofCache() {
1231 set_instanceof_cache_map(the_hole_value());
1232 set_instanceof_cache_function(the_hole_value());
1233 }
1234
Steve Blocka7e24c12009-10-30 11:49:00 +00001235 // Helper function used by CopyObject to copy a source object to an
1236 // allocated target object and update the forwarding pointer in the source
1237 // object. Returns the target object.
Leon Clarkee46be812010-01-19 14:06:41 +00001238 static inline HeapObject* MigrateObject(HeapObject* source,
1239 HeapObject* target,
1240 int size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001241
Steve Blocka7e24c12009-10-30 11:49:00 +00001242#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1243 // Record the copy of an object in the NewSpace's statistics.
1244 static void RecordCopiedObject(HeapObject* obj);
1245
1246 // Record statistics before and after garbage collection.
1247 static void ReportStatisticsBeforeGC();
1248 static void ReportStatisticsAfterGC();
1249#endif
1250
Steve Blocka7e24c12009-10-30 11:49:00 +00001251 // Slow part of scavenge object.
1252 static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
1253
  // Initializes a function with a shared part and prototype.
  // Returns the function.
  // Note: this code was factored out of AllocateFunction so that other parts
  // of the VM can use it; specifically, functions that create instances of
  // type JS_FUNCTION_TYPE benefit from it.
  // Please note this does not perform a garbage collection.
  static inline Object* InitializeFunction(JSFunction* function,
                                           SharedFunctionInfo* shared,
                                           Object* prototype);

  static GCTracer* tracer_;


  // Initializes the number to string cache based on the max semispace size.
  static Object* InitializeNumberStringCache();
  // Flush the number to string cache.
  static void FlushNumberStringCache();

  // Flush code from functions we do not expect to use again. The code will
  // be replaced with a lazily compilable version.
  static void FlushCode();

  static void UpdateSurvivalRateTrend(int start_new_space_size);

  enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };

  static const int kYoungSurvivalRateThreshold = 90;
  static const int kYoungSurvivalRateAllowedDeviation = 15;

  static int young_survivors_after_last_gc_;
  static int high_survival_rate_period_length_;
  static double survival_rate_;
  static SurvivalRateTrend previous_survival_rate_trend_;
  static SurvivalRateTrend survival_rate_trend_;

  static void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
    ASSERT(survival_rate_trend != FLUCTUATING);
    previous_survival_rate_trend_ = survival_rate_trend_;
    survival_rate_trend_ = survival_rate_trend;
  }

  static SurvivalRateTrend survival_rate_trend() {
    if (survival_rate_trend_ == STABLE) {
      return STABLE;
    } else if (previous_survival_rate_trend_ == STABLE) {
      return survival_rate_trend_;
    } else if (survival_rate_trend_ != previous_survival_rate_trend_) {
      return FLUCTUATING;
    } else {
      return survival_rate_trend_;
    }
  }
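
  // A worked example of the trend logic above: STABLE now always wins; a
  // previous STABLE means the current trend is trusted as-is; two differing
  // non-stable trends are reported as FLUCTUATING.
  //
  //   previous trend   current trend   survival_rate_trend()
  //   INCREASING       INCREASING      INCREASING
  //   INCREASING       DECREASING      FLUCTUATING
  //   STABLE           DECREASING      DECREASING
  //   (any)            STABLE          STABLE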

  static bool IsStableOrIncreasingSurvivalTrend() {
    switch (survival_rate_trend()) {
      case STABLE:
      case INCREASING:
        return true;
      default:
        return false;
    }
  }

  static bool IsIncreasingSurvivalTrend() {
    return survival_rate_trend() == INCREASING;
  }

  static bool IsHighSurvivalRate() {
    return high_survival_rate_period_length_ > 0;
  }

  static const int kInitialSymbolTableSize = 2048;
  static const int kInitialEvalCacheSize = 64;

  friend class Factory;
  friend class DisallowAllocationFailure;
  friend class AlwaysAllocateScope;
  friend class LinearAllocationScope;
};


class HeapStats {
 public:
  int* start_marker;
  int* new_space_size;
  int* new_space_capacity;
  int* old_pointer_space_size;
  int* old_pointer_space_capacity;
  int* old_data_space_size;
  int* old_data_space_capacity;
  int* code_space_size;
  int* code_space_capacity;
  int* map_space_size;
  int* map_space_capacity;
  int* cell_space_size;
  int* cell_space_capacity;
  int* lo_space_size;
  int* global_handle_count;
  int* weak_global_handle_count;
  int* pending_global_handle_count;
  int* near_death_global_handle_count;
  int* destroyed_global_handle_count;
  int* end_marker;
};


class AlwaysAllocateScope {
 public:
  AlwaysAllocateScope() {
    // We shouldn't hit any nested scopes, because that requires
    // non-handle code to call handle code. The code still works but
    // performance will degrade, so we want to catch this situation
    // in debug mode.
    ASSERT(Heap::always_allocate_scope_depth_ == 0);
    Heap::always_allocate_scope_depth_++;
  }

  ~AlwaysAllocateScope() {
    Heap::always_allocate_scope_depth_--;
    ASSERT(Heap::always_allocate_scope_depth_ == 0);
  }
};
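
// Usage sketch for AlwaysAllocateScope (illustrative; the allocator entry
// point below is hypothetical): within the scope, allocations are not
// rejected with a retry-after-GC failure, which matters in code that cannot
// handle a GC, e.g. inside the collector itself:
//
// { AlwaysAllocateScope always_allocate;
//   Object* result = Heap::AllocateFixedArray(length);
//   ...
// }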


class LinearAllocationScope {
 public:
  LinearAllocationScope() {
    Heap::linear_allocation_scope_depth_++;
  }

  ~LinearAllocationScope() {
    Heap::linear_allocation_scope_depth_--;
    ASSERT(Heap::linear_allocation_scope_depth_ >= 0);
  }
};


#ifdef DEBUG
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        ASSERT(Heap::Contains(object));
        ASSERT(object->map()->IsMap());
      }
    }
  }
};
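
// Usage sketch for VerifyPointersVisitor (illustrative): the visitor is
// applied to a set of object pointers, e.g. the strong roots, assuming the
// Heap::IterateRoots entry point:
//
//   VerifyPointersVisitor visitor;
//   Heap::IterateRoots(&visitor, VISIT_ONLY_STRONG);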


// Visitor class to verify interior pointers in spaces that use region marks
// to keep track of intergenerational references.
// As VerifyPointersVisitor but also checks that dirty marks are set
// for regions covering intergenerational references.
class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        ASSERT(Heap::Contains(object));
        ASSERT(object->map()->IsMap());
        if (Heap::InNewSpace(object)) {
          ASSERT(Heap::InToSpace(object));
          Address addr = reinterpret_cast<Address>(current);
          ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
        }
      }
    }
  }
};
#endif


// Space iterator for iterating over all spaces of the heap.
// Returns each space in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
 public:
  Space* next();
  AllSpaces() { counter_ = FIRST_SPACE; }
 private:
  int counter_;
};
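
// Usage sketch for the space iterators (illustrative; Space::Size() is an
// assumption here): call next() until it returns NULL, e.g. to sum the size
// of every space:
//
//   int total = 0;
//   AllSpaces spaces;
//   for (Space* space = spaces.next(); space != NULL; space = spaces.next())
//     total += space->Size();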


// Space iterator for iterating over all old spaces of the heap: Old pointer
// space, old data space and code space.
// Returns each space in turn, and null when it is done.
class OldSpaces BASE_EMBEDDED {
 public:
  OldSpace* next();
  OldSpaces() { counter_ = OLD_POINTER_SPACE; }
 private:
  int counter_;
};


// Space iterator for iterating over all the paged spaces of the heap:
// Map space, old pointer space, old data space, code space and cell space.
// Returns each space in turn, and null when it is done.
class PagedSpaces BASE_EMBEDDED {
 public:
  PagedSpace* next();
  PagedSpaces() { counter_ = OLD_POINTER_SPACE; }
 private:
  int counter_;
};


// Space iterator for iterating over all spaces of the heap.
// For each space an object iterator is provided. The deallocation of the
// returned object iterators is handled by the space iterator.
class SpaceIterator : public Malloced {
 public:
  SpaceIterator();
  virtual ~SpaceIterator();

  bool has_next();
  ObjectIterator* next();

 private:
  ObjectIterator* CreateIterator();

  int current_space_;  // from enum AllocationSpace.
  ObjectIterator* iterator_;  // object iterator for the current space.
};


// A HeapIterator provides iteration over the whole heap. It aggregates the
// specific iterators for the different spaces, as each of those can iterate
// over only one space.

class HeapIterator BASE_EMBEDDED {
 public:
  HeapIterator();
  virtual ~HeapIterator();

  HeapObject* next();
  void reset();

 private:
  // Perform the initialization.
  void Init();

  // Perform all necessary shutdown (destruction) work.
  void Shutdown();

  // Space iterator for iterating all the spaces.
  SpaceIterator* space_iterator_;
  // Object iterator for the space currently being iterated.
  ObjectIterator* object_iterator_;
};
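
// Usage sketch for HeapIterator (illustrative, assuming next() returns NULL
// when iteration is done, matching the space iterators above):
//
//   HeapIterator iterator;
//   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
//     // Inspect obj; no allocation should happen while iterating.
//   }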


// Cache for mapping (map, property name) into field offset.
// Cleared at startup and prior to mark sweep collection.
class KeyedLookupCache {
 public:
  // Lookup field offset for (map, name). If absent, -1 is returned.
  static int Lookup(Map* map, String* name);

  // Update an element in the cache.
  static void Update(Map* map, String* name, int field_offset);

  // Clear the cache.
  static void Clear();

  static const int kLength = 64;
  static const int kCapacityMask = kLength - 1;
  static const int kMapHashShift = 2;

 private:
  static inline int Hash(Map* map, String* name);

  // Get the address of the keys and field_offsets arrays. Used in
  // generated code to perform cache lookups.
  static Address keys_address() {
    return reinterpret_cast<Address>(&keys_);
  }

  static Address field_offsets_address() {
    return reinterpret_cast<Address>(&field_offsets_);
  }

  struct Key {
    Map* map;
    String* name;
  };
  static Key keys_[kLength];
  static int field_offsets_[kLength];

  friend class ExternalReference;
};
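
// Usage sketch for KeyedLookupCache (illustrative): callers probe the cache
// before doing the full map search and fill it on a miss; the slow path
// below is hypothetical:
//
//   int offset = KeyedLookupCache::Lookup(map, name);
//   if (offset == -1) {
//     offset = LookupFieldOffsetSlow(map, name);  // Hypothetical slow path.
//     KeyedLookupCache::Update(map, name, offset);
//   }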


// Cache for mapping (array, property name) into descriptor index.
// The cache contains both positive and negative results.
// A descriptor index of kAbsent means the property is absent.
// Cleared at startup and prior to any GC.
class DescriptorLookupCache {
 public:
  // Lookup descriptor index for (array, name).
  // If absent, kAbsent is returned.
  static int Lookup(DescriptorArray* array, String* name) {
    if (!StringShape(name).IsSymbol()) return kAbsent;
    int index = Hash(array, name);
    Key& key = keys_[index];
    if ((key.array == array) && (key.name == name)) return results_[index];
    return kAbsent;
  }

  // Update an element in the cache.
  static void Update(DescriptorArray* array, String* name, int result) {
    ASSERT(result != kAbsent);
    if (StringShape(name).IsSymbol()) {
      int index = Hash(array, name);
      Key& key = keys_[index];
      key.array = array;
      key.name = name;
      results_[index] = result;
    }
  }

  // Clear the cache.
  static void Clear();

  static const int kAbsent = -2;
 private:
  static int Hash(DescriptorArray* array, String* name) {
    // Uses only lower 32 bits if pointers are larger.
    uint32_t array_hash =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array)) >> 2;
    uint32_t name_hash =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> 2;
    return (array_hash ^ name_hash) % kLength;
  }

  static const int kLength = 64;
  struct Key {
    DescriptorArray* array;
    String* name;
  };

  static Key keys_[kLength];
  static int results_[kLength];
};
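
// Usage sketch for DescriptorLookupCache (illustrative): the same
// probe-then-fill pattern as KeyedLookupCache, keyed on the descriptor
// array; the slow path below is hypothetical:
//
//   int number = DescriptorLookupCache::Lookup(descriptors, name);
//   if (number == DescriptorLookupCache::kAbsent) {
//     number = SearchDescriptorsSlow(descriptors, name);  // Hypothetical.
//     DescriptorLookupCache::Update(descriptors, name, number);
//   }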


// ----------------------------------------------------------------------------
// Marking stack for tracing live objects.

class MarkingStack {
 public:
  void Initialize(Address low, Address high) {
    top_ = low_ = reinterpret_cast<HeapObject**>(low);
    high_ = reinterpret_cast<HeapObject**>(high);
    overflowed_ = false;
  }

  bool is_full() { return top_ >= high_; }

  bool is_empty() { return top_ <= low_; }

  bool overflowed() { return overflowed_; }

  void clear_overflowed() { overflowed_ = false; }

  // Push the (marked) object on the marking stack if there is room,
  // otherwise mark the object as overflowed and wait for a rescan of the
  // heap.
  void Push(HeapObject* object) {
    CHECK(object->IsHeapObject());
    if (is_full()) {
      object->SetOverflow();
      overflowed_ = true;
    } else {
      *(top_++) = object;
    }
  }

  HeapObject* Pop() {
    ASSERT(!is_empty());
    HeapObject* object = *(--top_);
    CHECK(object->IsHeapObject());
    return object;
  }

 private:
  HeapObject** low_;
  HeapObject** top_;
  HeapObject** high_;
  bool overflowed_;
};
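
// Usage sketch for MarkingStack (illustrative): the marker drains the stack
// until it is empty; if Push() ever overflowed, objects carrying the
// overflow bit must be rescanned from the heap afterwards:
//
//   while (!marking_stack.is_empty()) {
//     HeapObject* object = marking_stack.Pop();
//     // Mark object's unmarked children and Push() them.
//   }
//   if (marking_stack.overflowed()) {
//     // Rescan the heap for overflowed objects, then:
//     marking_stack.clear_overflowed();
//   }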


// A helper class to document/test C++ scopes where we do not
// expect a GC. Usage:
//
// /* Allocation not allowed: we cannot handle a GC in this scope. */
// { AssertNoAllocation nogc;
//   ...
// }

#ifdef DEBUG

class DisallowAllocationFailure {
 public:
  DisallowAllocationFailure() {
    old_state_ = Heap::disallow_allocation_failure_;
    Heap::disallow_allocation_failure_ = true;
  }
  ~DisallowAllocationFailure() {
    Heap::disallow_allocation_failure_ = old_state_;
  }
 private:
  bool old_state_;
};

class AssertNoAllocation {
 public:
  AssertNoAllocation() {
    old_state_ = Heap::allow_allocation(false);
  }

  ~AssertNoAllocation() {
    Heap::allow_allocation(old_state_);
  }

 private:
  bool old_state_;
};

class DisableAssertNoAllocation {
 public:
  DisableAssertNoAllocation() {
    old_state_ = Heap::allow_allocation(true);
  }

  ~DisableAssertNoAllocation() {
    Heap::allow_allocation(old_state_);
  }

 private:
  bool old_state_;
};

#else  // ndef DEBUG

class AssertNoAllocation {
 public:
  AssertNoAllocation() { }
  ~AssertNoAllocation() { }
};

class DisableAssertNoAllocation {
 public:
  DisableAssertNoAllocation() { }
  ~DisableAssertNoAllocation() { }
};

#endif

// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.

class GCTracer BASE_EMBEDDED {
 public:
  class Scope BASE_EMBEDDED {
   public:
    enum ScopeId {
      EXTERNAL,
      MC_MARK,
      MC_SWEEP,
      MC_COMPACT,
      kNumberOfScopes
    };

    Scope(GCTracer* tracer, ScopeId scope)
        : tracer_(tracer),
          scope_(scope) {
      start_time_ = OS::TimeCurrentMillis();
    }

    ~Scope() {
      ASSERT((0 <= scope_) && (scope_ < kNumberOfScopes));
      tracer_->scopes_[scope_] += OS::TimeCurrentMillis() - start_time_;
    }

   private:
    GCTracer* tracer_;
    ScopeId scope_;
    double start_time_;
  };
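
  // Usage sketch (illustrative): a Scope is placed around each major GC
  // phase so that its wall-clock time is accumulated into scopes_[scope]:
  //
  //   {
  //     GCTracer::Scope gc_scope(tracer, GCTracer::Scope::MC_MARK);
  //     MarkLiveObjects();  // Hypothetical marking phase.
  //   }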

  GCTracer();
  ~GCTracer();

  // Sets the collector.
  void set_collector(GarbageCollector collector) { collector_ = collector; }

  // Sets the GC count.
  void set_gc_count(int count) { gc_count_ = count; }

  // Sets the full GC count.
  void set_full_gc_count(int count) { full_gc_count_ = count; }

  // Sets the flag that this is a compacting full GC.
  void set_is_compacting() { is_compacting_ = true; }
  bool is_compacting() const { return is_compacting_; }

  // Increment and decrement the count of marked objects.
  void increment_marked_count() { ++marked_count_; }
  void decrement_marked_count() { --marked_count_; }

  int marked_count() { return marked_count_; }

  void increment_promoted_objects_size(int object_size) {
    promoted_objects_size_ += object_size;
  }

  // Returns maximum GC pause.
  static int get_max_gc_pause() { return max_gc_pause_; }

  // Returns maximum size of objects alive after GC.
  static int get_max_alive_after_gc() { return max_alive_after_gc_; }

  // Returns minimal interval between two subsequent collections.
  static int get_min_in_mutator() { return min_in_mutator_; }

 private:
  // Returns a string matching the collector.
  const char* CollectorString();

  // Returns the size of objects in the heap (in MB).
  double SizeOfHeapObjects() {
    return (static_cast<double>(Heap::SizeOfObjects())) / MB;
  }

  double start_time_;  // Timestamp set in the constructor.
  int start_size_;  // Size of objects in heap set in constructor.
  GarbageCollector collector_;  // Type of collector.

  // A count (including this one, e.g. the first collection is 1) of the
  // number of garbage collections.
  int gc_count_;

  // A count (including this one) of the number of full garbage collections.
  int full_gc_count_;

  // True if the current GC is a compacting full collection, false
  // otherwise.
  bool is_compacting_;

  // True if the *previous* full GC was a compacting collection (will be
  // false if there has not been a previous full GC).
  bool previous_has_compacted_;

  // On a full GC, a count of the number of marked objects. Incremented
  // when an object is marked and decremented when an object's mark bit is
  // cleared. Will be zero on a scavenge collection.
  int marked_count_;

  // The count from the end of the previous full GC. Will be zero if there
  // was no previous full GC.
  int previous_marked_count_;

  // Amounts of time spent in different scopes during GC.
  double scopes_[Scope::kNumberOfScopes];

  // Total amount of space either wasted or contained in one of free lists
  // before the current GC.
  int in_free_list_or_wasted_before_gc_;

  // Difference between space used in the heap at the beginning of the current
  // collection and the end of the previous collection.
  int allocated_since_last_gc_;

  // Amount of time spent in the mutator, i.e. the time elapsed between the
  // end of the previous collection and the beginning of the current one.
  double spent_in_mutator_;

  // Size of objects promoted during the current collection.
  int promoted_objects_size_;

  // Maximum GC pause.
  static int max_gc_pause_;

  // Maximum size of objects alive after GC.
  static int max_alive_after_gc_;

  // Minimal interval between two subsequent collections.
  static int min_in_mutator_;

  // Size of objects alive after last GC.
  static int alive_after_last_gc_;

  static double last_gc_end_timestamp_;
};


class TranscendentalCache {
 public:
  enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches};

  explicit TranscendentalCache(Type t);

  // Returns a heap number with f(input), where f is a math function specified
  // by the 'type' argument.
  static inline Object* Get(Type type, double input) {
    TranscendentalCache* cache = caches_[type];
    if (cache == NULL) {
      caches_[type] = cache = new TranscendentalCache(type);
    }
    return cache->Get(input);
  }

  // The cache contains raw Object pointers. This method disposes of
  // them before a garbage collection.
  static void Clear();

 private:
  inline Object* Get(double input) {
    Converter c;
    c.dbl = input;
    int hash = Hash(c);
    Element e = elements_[hash];
    if (e.in[0] == c.integers[0] &&
        e.in[1] == c.integers[1]) {
      ASSERT(e.output != NULL);
      Counters::transcendental_cache_hit.Increment();
      return e.output;
    }
    double answer = Calculate(input);
    Object* heap_number = Heap::AllocateHeapNumber(answer);
    if (!heap_number->IsFailure()) {
      elements_[hash].in[0] = c.integers[0];
      elements_[hash].in[1] = c.integers[1];
      elements_[hash].output = heap_number;
    }
    Counters::transcendental_cache_miss.Increment();
    return heap_number;
  }

  inline double Calculate(double input) {
    switch (type_) {
      case ACOS:
        return acos(input);
      case ASIN:
        return asin(input);
      case ATAN:
        return atan(input);
      case COS:
        return cos(input);
      case EXP:
        return exp(input);
      case LOG:
        return log(input);
      case SIN:
        return sin(input);
      case TAN:
        return tan(input);
      default:
        return 0.0;  // Never happens.
    }
  }
  static const int kCacheSize = 512;
  struct Element {
    uint32_t in[2];
    Object* output;
  };
  union Converter {
    double dbl;
    uint32_t integers[2];
  };
  inline static int Hash(const Converter& c) {
    uint32_t hash = (c.integers[0] ^ c.integers[1]);
    hash ^= static_cast<int32_t>(hash) >> 16;
    hash ^= static_cast<int32_t>(hash) >> 8;
    return (hash & (kCacheSize - 1));
  }
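
  // Worked example for Hash() (illustrative, little-endian): the Converter
  // punning of 2.0 gives the IEEE-754 bits 0x4000000000000000, so
  // integers[] == {0x00000000, 0x40000000} and
  //   hash = 0x40000000 ^ (0x40000000 >> 16) = 0x40004000
  //   hash = 0x40004000 ^ (0x40004000 >> 8)  = 0x40404040
  //   0x40404040 & (kCacheSize - 1)          == 64.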

  static Address cache_array_address() {
    // Used to create an external reference.
    return reinterpret_cast<Address>(caches_);
  }

  // Allow access to the caches_ array as an ExternalReference.
  friend class ExternalReference;
  // Inline implementation of the caching.
  friend class TranscendentalCacheStub;

  static TranscendentalCache* caches_[kNumberOfCaches];
  Element elements_[kCacheSize];
  Type type_;
};


// The external string table is where all external strings are registered.
// We need to keep track of such strings to finalize them properly.
class ExternalStringTable : public AllStatic {
 public:
  // Registers an external string.
  inline static void AddString(String* string);

  inline static void Iterate(ObjectVisitor* v);

  // Restores internal invariant and gets rid of collected strings.
  // Must be called after each Iterate() that modified the strings.
  static void CleanUp();

  // Destroys all allocated memory.
  static void TearDown();

 private:
  friend class Heap;

  inline static void Verify();

  inline static void AddOldString(String* string);

  // Notifies the table that only a prefix of the new list is valid.
  inline static void ShrinkNewStrings(int position);

  // To speed up scavenge collections, new space strings are kept separate
  // from old space strings.
  static List<Object*> new_space_strings_;
  static List<Object*> old_space_strings_;
};
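
// Usage sketch for ExternalStringTable (illustrative): during a collection
// the table is visited so that surviving strings can be updated, and the
// internal invariant is then restored:
//
//   ExternalStringTable::Iterate(&visitor);  // May modify the string lists.
//   ExternalStringTable::CleanUp();          // Required after a modifying
//                                            // Iterate().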

} }  // namespace v8::internal

#endif  // V8_HEAP_H_