// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_GLOBALS_H_
#define V8_GLOBALS_H_

namespace v8 {
namespace internal {

// Processor architecture detection. For more info on what's defined, see:
//   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
//   http://www.agner.org/optimize/calling_conventions.pdf
// or with gcc, run: "echo | gcc -E -dM -"
#if defined(_M_X64) || defined(__x86_64__)
#define V8_HOST_ARCH_X64 1
#define V8_HOST_ARCH_64_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
// Some CPU-OS combinations allow unaligned access on ARM. We assume
// that unaligned accesses are not allowed unless the build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
#if CAN_USE_UNALIGNED_ACCESSES
#define V8_HOST_CAN_READ_UNALIGNED 1
#endif
#elif defined(_MIPS_ARCH_MIPS32R2)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else
#error Host architecture was not detected as supported by v8
#endif

// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_IA32) && \
    !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_TARGET_ARCH_IA32 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
#elif defined(_MIPS_ARCH_MIPS32R2)
#define V8_TARGET_ARCH_MIPS 1
#else
#error Target architecture was not detected as supported by v8
#endif
#endif

// Check for supported combinations of host and target architectures.
#if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32)
#error Target architecture ia32 is only supported on ia32 host
#endif
#if defined(V8_TARGET_ARCH_X64) && !defined(V8_HOST_ARCH_X64)
#error Target architecture x64 is only supported on x64 host
#endif
#if (defined(V8_TARGET_ARCH_ARM) && \
    !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_ARM)))
#error Target architecture arm is only supported on arm and ia32 host
#endif
#if (defined(V8_TARGET_ARCH_MIPS) && \
    !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_MIPS)))
#error Target architecture mips is only supported on mips and ia32 host
#endif

// Determine whether we are running in a simulated environment.
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
#if !defined(USE_SIMULATOR)
#if (defined(V8_TARGET_ARCH_ARM) && !defined(V8_HOST_ARCH_ARM))
#define USE_SIMULATOR 1
#endif
#if (defined(V8_TARGET_ARCH_MIPS) && !defined(V8_HOST_ARCH_MIPS))
#define USE_SIMULATOR 1
#endif
#endif
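
// Illustrative example (added comment, not part of the original header):
// when cross-compiling for ARM on an ia32 host, a build might define only
// the target, e.g. by passing -DV8_TARGET_ARCH_ARM to the compiler. Host
// detection above then yields V8_HOST_ARCH_IA32, the host/target check
// permits that combination, and USE_SIMULATOR is defined here so generated
// ARM code runs on the ARM simulator instead of real hardware.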

// Define unaligned read for the target architectures supporting it.
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
#define V8_TARGET_CAN_READ_UNALIGNED 1
#elif V8_TARGET_ARCH_ARM
// Some CPU-OS combinations allow unaligned access on ARM. We assume
// that unaligned accesses are not allowed unless the build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
#if CAN_USE_UNALIGNED_ACCESSES
#define V8_TARGET_CAN_READ_UNALIGNED 1
#endif
#elif V8_TARGET_ARCH_MIPS
#else
#error Target architecture is not supported by v8
#endif

// Support for an alternative bool type. This is only enabled if the code is
// compiled with USE_MYBOOL defined. It catches some nasty type bugs.
// For instance, 'bool b = "false";' results in b == true! This is a hidden
// source of bugs.
// However, redefining the bool type does have some negative impact on some
// platforms. It gives rise to compiler warnings (e.g. with MSVC) in the API
// header files when mixing code that uses the standard bool with code that
// uses the redefined version.
// This does not actually belong in the platform code, but needs to be
// defined here because the platform code uses bool, and platform.h is
// included very early in the main include file.

#ifdef USE_MYBOOL
typedef unsigned int __my_bool__;
#define bool __my_bool__  // use 'indirection' to avoid name clashes
#endif

typedef uint8_t byte;
typedef byte* Address;

// Define our own macros for writing 64-bit constants. This is less fragile
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
#if V8_HOST_ARCH_64_BIT
#ifdef _MSC_VER
#define V8_UINT64_C(x) (x ## UI64)
#define V8_INT64_C(x)  (x ## I64)
#define V8_PTR_PREFIX "ll"
#else  // _MSC_VER
#define V8_UINT64_C(x) (x ## UL)
#define V8_INT64_C(x)  (x ## L)
#define V8_PTR_PREFIX "l"
#endif  // _MSC_VER
#else  // V8_HOST_ARCH_64_BIT
#define V8_PTR_PREFIX ""
#endif  // V8_HOST_ARCH_64_BIT
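
// Illustrative expansion (added comment, not part of the original header):
// on a 64-bit gcc host, V8_UINT64_C(0xFFFFFFFFFFFFFFFF) expands to
// 0xFFFFFFFFFFFFFFFFUL, and on MSVC to 0xFFFFFFFFFFFFFFFFUI64; the
// kUintptrAllBitsSet constant below is written exactly this way.
// V8_PTR_PREFIX feeds the printf-style format macros defined next.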

// The following macro works on both 32 and 64-bit platforms.
// Usage: instead of writing 0x1234567890123456
//        write V8_2PART_UINT64_C(0x12345678,90123456);
#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))

#define V8PRIxPTR V8_PTR_PREFIX "x"
#define V8PRIdPTR V8_PTR_PREFIX "d"

// Fix for Mac OS X defining uintptr_t as "unsigned long":
#if defined(__APPLE__) && defined(__MACH__)
#undef V8PRIxPTR
#define V8PRIxPTR "lx"
#endif
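
// Illustrative usage (added comment, not part of the original header): the
// V8PRI* macros are meant to be pasted into printf-style format strings so
// pointer-sized integers print portably, e.g.
//   PrintF("fp = 0x%" V8PRIxPTR "\n", frame_pointer);
// where frame_pointer is a hypothetical intptr_t-sized value.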

#if (defined(__APPLE__) && defined(__MACH__)) || \
    defined(__FreeBSD__) || defined(__OpenBSD__)
#define USING_BSD_ABI
#endif

// Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16;
typedef int32_t uc32;

// -----------------------------------------------------------------------------
// Constants

const int KB = 1024;
const int MB = KB * KB;
const int GB = KB * KB * KB;
const int kMaxInt = 0x7FFFFFFF;
const int kMinInt = -kMaxInt - 1;

const uint32_t kMaxUInt32 = 0xFFFFFFFFu;

const int kCharSize     = sizeof(char);      // NOLINT
const int kShortSize    = sizeof(short);     // NOLINT
const int kIntSize      = sizeof(int);       // NOLINT
const int kDoubleSize   = sizeof(double);    // NOLINT
const int kPointerSize  = sizeof(void*);     // NOLINT
const int kIntptrSize   = sizeof(intptr_t);  // NOLINT

#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF);
#else
const int kPointerSizeLog2 = 2;
const intptr_t kIntptrSignBit = 0x80000000;
const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
#endif
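
// Worked example (added comment, not part of the original header): on a
// 32-bit host kPointerSize is 4, so 1 << kPointerSizeLog2 == kPointerSize,
// kIntptrSignBit is 0x80000000, and kUintptrAllBitsSet is 0xFFFFFFFF; the
// 64-bit branch above keeps the same relationships for 8-byte pointers.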

// Mask for the sign bit in a smi.
const intptr_t kSmiSignMask = kIntptrSignBit;

const int kObjectAlignmentBits = kPointerSizeLog2;
const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;

// Desired alignment for pointers.
const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;

// Desired alignment for maps.
#if V8_HOST_ARCH_64_BIT
const intptr_t kMapAlignmentBits = kObjectAlignmentBits;
#else
const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3;
#endif
const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
const intptr_t kMapAlignmentMask = kMapAlignment - 1;

// Desired alignment for generated code is 32 bytes (to improve cache line
// utilization).
const int kCodeAlignmentBits = 5;
const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;

// Tag information for Failure.
const int kFailureTag = 3;
const int kFailureTagSize = 2;
const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;


const int kBitsPerByte = 8;
const int kBitsPerByteLog2 = 3;
const int kBitsPerPointer = kPointerSize * kBitsPerByte;
const int kBitsPerInt = kIntSize * kBitsPerByte;

// IEEE 754 single precision floating point number bit layout.
const uint32_t kBinary32SignMask = 0x80000000u;
const uint32_t kBinary32ExponentMask = 0x7f800000u;
const uint32_t kBinary32MantissaMask = 0x007fffffu;
const int kBinary32ExponentBias = 127;
const int kBinary32MaxExponent = 0xFE;
const int kBinary32MinExponent = 0x01;
const int kBinary32MantissaBits = 23;
const int kBinary32ExponentShift = 23;
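
// Worked example (added comment, not part of the original header): the
// single-precision value 1.0f has the bit pattern 0x3F800000. Its sign bit
// (kBinary32SignMask) is 0, its biased exponent is
// (0x3F800000 & kBinary32ExponentMask) >> kBinary32ExponentShift == 127,
// i.e. an unbiased exponent of 127 - kBinary32ExponentBias == 0, and its
// mantissa bits (kBinary32MantissaMask) are all zero.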

// Zap-value: The value used for zapping dead objects.
// Should be a recognizable hex value tagged as a heap object pointer.
#ifdef V8_HOST_ARCH_64_BIT
const Address kZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeed));
const Address kHandleZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
const Address kFromSpaceZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
const uint64_t kDebugZapValue = 0xbadbaddbbadbaddb;
#else
const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
const uint32_t kDebugZapValue = 0xbadbaddb;
#endif


// Number of bits used to represent the page size for paged spaces. The value
// of 13 gives 8K bytes per page.
const int kPageSizeBits = 13;

// On Intel architectures the cache line size is 64 bytes.
// On ARM it may be less (32 bytes), but as far as this constant is
// used for aligning data, it doesn't hurt to align to a greater value.
const int kProcessorCacheLineSize = 64;

// Constants relevant to double precision floating point numbers.

// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
// other bits set.
const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
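
// Worked example (added comment, not part of the original header): the
// canonical quiet NaN bit pattern 0x7FF8000000000000 has its exponent bits
// (52..62) and the quiet bit (51) set, so
//   (0x7FF8000000000000 & kQuietNaNMask) == kQuietNaNMask
// holds; the same check on just the high 32 bits uses kQuietNaNHighBitsMask.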


// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
// (sorted alphabetically)

class AccessorInfo;
class Allocation;
class Arguments;
class Assembler;
class AssertNoAllocation;
class BreakableStatement;
class Code;
class CodeGenerator;
class CodeStub;
class Context;
class Debug;
class Debugger;
class DebugInfo;
class Descriptor;
class DescriptorArray;
class Expression;
class ExternalReference;
class FixedArray;
class FunctionEntry;
class FunctionLiteral;
class FunctionTemplateInfo;
class NumberDictionary;
class StringDictionary;
class FreeStoreAllocationPolicy;
template <typename T> class Handle;
class Heap;
class HeapObject;
class IC;
class InterceptorInfo;
class IterationStatement;
class JSArray;
class JSFunction;
class JSObject;
class LargeObjectSpace;
template <typename T, class P = FreeStoreAllocationPolicy> class List;
class LookupResult;
class MacroAssembler;
class Map;
class MapSpace;
class MarkCompactCollector;
class NewSpace;
class NodeVisitor;
class Object;
class MaybeObject;
class OldSpace;
class Property;
class Proxy;
class RegExpNode;
struct RegExpCompileData;
class RegExpTree;
class RegExpCompiler;
class RegExpVisitor;
class Scope;
template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
class SerializedScopeInfo;
class Script;
class Slot;
class Smi;
template <typename Config, class Allocator = FreeStoreAllocationPolicy>
    class SplayTree;
class Statement;
class String;
class Struct;
class SwitchStatement;
class AstVisitor;
class Variable;
class VariableProxy;
class RelocInfo;
class Deserializer;
class MessageLocation;
class ObjectGroup;
class TickSample;
class VirtualMemory;
class Mutex;

373typedef bool (*WeakSlotCallback)(Object** pointer);
374
375// -----------------------------------------------------------------------------
376// Miscellaneous
377
378// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
379// consecutive.
380enum AllocationSpace {
381 NEW_SPACE, // Semispaces collected with copying collector.
382 OLD_POINTER_SPACE, // May contain pointers to new space.
383 OLD_DATA_SPACE, // Must not have pointers to new space.
384 CODE_SPACE, // No pointers to new space, marked executable.
385 MAP_SPACE, // Only and all map objects.
386 CELL_SPACE, // Only and all cell objects.
387 LO_SPACE, // Promoted large objects.
388
389 FIRST_SPACE = NEW_SPACE,
Steve Blockd0582a62009-12-15 09:54:21 +0000390 LAST_SPACE = LO_SPACE,
391 FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
392 LAST_PAGED_SPACE = CELL_SPACE
Steve Blocka7e24c12009-10-30 11:49:00 +0000393};
394const int kSpaceTagSize = 3;
395const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
396

// A flag that indicates whether objects should be pretenured when
// allocated (allocated directly into the old generation) or not
// (allocated in the young generation if the object size and type
// allow).
enum PretenureFlag { NOT_TENURED, TENURED };

enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };

enum Executability { NOT_EXECUTABLE, EXECUTABLE };

enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };

// Flag that indicates whether code is built into the VM (one of the natives
// files).
enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };


// A CodeDesc describes a buffer holding instructions and relocation
// information. The instructions start at the beginning of the buffer
// and grow forward, the relocation information starts at the end of
// the buffer and grows backward.
//
//  |<--------------- buffer_size ---------------->|
//  |<-- instr_size -->|        |<-- reloc_size -->|
//  +==================+========+==================+
//  |   instructions   |  free  |    reloc info    |
//  +==================+========+==================+
//  ^
//  |
//  buffer

struct CodeDesc {
  byte* buffer;
  int buffer_size;
  int instr_size;
  int reloc_size;
  Assembler* origin;
};


// Callback function on object slots, used for iterating heap object slots in
// HeapObjects, global pointers to heap objects, etc. The callback is allowed
// to change the value of the slot.
typedef void (*ObjectSlotCallback)(HeapObject** pointer);


// Callback function used for iterating objects in heap spaces,
// for example, scanning heap objects.
typedef int (*HeapObjectCallback)(HeapObject* obj);


// Callback function used for checking constraints when copying/relocating
// objects. Returns true if an object can be copied/relocated from its
// old_addr to a new_addr.
typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);


// Callback function on inline caches, used for iterating over inline caches
// in compiled code.
typedef void (*InlineCacheCallback)(Code* code, Address ic);


// State for inline cache call sites. Aliased as IC::State.
enum InlineCacheState {
  // Has never been executed.
  UNINITIALIZED,
  // Has been executed but monomorphic state has been delayed.
  PREMONOMORPHIC,
  // Has been executed and only one receiver type has been seen.
  MONOMORPHIC,
  // Like MONOMORPHIC but check failed due to prototype.
  MONOMORPHIC_PROTOTYPE_FAILURE,
  // Multiple receiver types have been seen.
  MEGAMORPHIC,
  // Special states for debug break or step in prepare stubs.
  DEBUG_BREAK,
  DEBUG_PREPARE_STEP_IN
};


enum InLoopFlag {
  NOT_IN_LOOP,
  IN_LOOP
};


enum CallFunctionFlags {
  NO_CALL_FUNCTION_FLAGS = 0,
  RECEIVER_MIGHT_BE_VALUE = 1 << 0  // Receiver might not be a JSObject.
};


enum InlineCacheHolderFlag {
  OWN_MAP,       // For fast properties objects.
  PROTOTYPE_MAP  // For slow properties objects (except GlobalObjects).
};


// Type of properties.
// Order of properties is significant.
// Must fit in the BitField PropertyDetails::TypeField.
// A copy of this is in mirror-debugger.js.
enum PropertyType {
  NORMAL              = 0,  // only in slow mode
  FIELD               = 1,  // only in fast mode
  CONSTANT_FUNCTION   = 2,  // only in fast mode
  CALLBACKS           = 3,
  INTERCEPTOR         = 4,  // only in lookup results, not in descriptors.
  MAP_TRANSITION      = 5,  // only in fast mode
  CONSTANT_TRANSITION = 6,  // only in fast mode
  NULL_DESCRIPTOR     = 7,  // only in fast mode
  // All properties before MAP_TRANSITION are real.
  FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION,
  // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
  // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
  // nonexistent properties.
  NONEXISTENT = NULL_DESCRIPTOR
};


// Whether to remove map transitions and constant transitions from a
// DescriptorArray.
enum TransitionFlag {
  REMOVE_TRANSITIONS,
  KEEP_TRANSITIONS
};


// Union used for fast testing of specific double values.
union DoubleRepresentation {
  double value;
  int64_t bits;
  DoubleRepresentation(double x) { value = x; }
};
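
// Illustrative usage (added comment, not part of the original header):
//   DoubleRepresentation rep(-0.0);
//   // rep.bits now holds the raw IEEE 754 encoding 0x8000000000000000,
//   // which distinguishes -0.0 from +0.0 even though -0.0 == 0.0.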


// Unions used for customized checking of the IEEE double types,
// inlined within the V8 runtime rather than going through the underlying
// platform headers and libraries.
union IeeeDoubleLittleEndianArchType {
  double d;
  struct {
    unsigned int man_low  :32;
    unsigned int man_high :20;
    unsigned int exp      :11;
    unsigned int sign     :1;
  } bits;
};


union IeeeDoubleBigEndianArchType {
  double d;
  struct {
    unsigned int sign     :1;
    unsigned int exp      :11;
    unsigned int man_high :20;
    unsigned int man_low  :32;
  } bits;
};


// AccessorCallback
struct AccessorDescriptor {
  MaybeObject* (*getter)(Object* object, void* data);
  MaybeObject* (*setter)(JSObject* object, Object* value, void* data);
  void* data;
};


// Logging and profiling.
// A StateTag represents a possible state of the VM. When compiled with
// ENABLE_VMSTATE_TRACKING, the logger maintains a stack of these.
// Creating a VMState object enters a state by pushing on the stack, and
// destroying a VMState object leaves a state by popping the current state
// from the stack.

#define STATE_TAG_LIST(V) \
  V(JS)                   \
  V(GC)                   \
  V(COMPILER)             \
  V(OTHER)                \
  V(EXTERNAL)

enum StateTag {
#define DEF_STATE_TAG(name) name,
  STATE_TAG_LIST(DEF_STATE_TAG)
#undef DEF_STATE_TAG
  // Pseudo-types.
  state_tag_count
};


// -----------------------------------------------------------------------------
// Macros

// Testers for tagged values.

#define HAS_SMI_TAG(value) \
  ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)

#define HAS_FAILURE_TAG(value) \
  ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)

// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer.
#define OBJECT_POINTER_ALIGN(value) \
  (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)

// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
#define POINTER_SIZE_ALIGN(value) \
  (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)

// MAP_POINTER_ALIGN returns the value aligned as a map pointer.
#define MAP_POINTER_ALIGN(value) \
  (((value) + kMapAlignmentMask) & ~kMapAlignmentMask)

// CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
#define CODE_POINTER_ALIGN(value) \
  (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
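
// Worked example (added comment, not part of the original header): on a
// 32-bit host kObjectAlignment is 4, so OBJECT_POINTER_ALIGN(13) expands to
// ((13 + 3) & ~3) == 16, i.e. the value is rounded up to the next multiple
// of the object alignment; the other *_ALIGN macros round up the same way
// using their respective masks.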

// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
// corresponds to 'offsetof' (in stddef.h), except that it doesn't
// use 0 or NULL, which causes a problem with the compiler warnings
// we have enabled (which is also why 'offsetof' doesn't seem to work).
// Here we simply use the non-zero value 4, which seems to work.
#define OFFSET_OF(type, field) \
  (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
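
// Illustrative usage (added comment, not part of the original header):
//   OFFSET_OF(CodeDesc, instr_size)
// evaluates to the byte offset of the instr_size field inside CodeDesc
// (typically kPointerSize + kIntSize with the layout declared above, modulo
// padding), without the 0/NULL use that makes offsetof trip our warnings.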


// The expression ARRAY_SIZE(a) is a compile-time constant of type
// size_t which represents the number of elements of the given
// array. You should only use ARRAY_SIZE on statically allocated
// arrays.
#define ARRAY_SIZE(a)                                   \
  ((sizeof(a) / sizeof(*(a))) /                         \
  static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
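
// Illustrative usage (added comment, not part of the original header):
//   static const int kSamplePrimes[] = { 2, 3, 5, 7 };  // hypothetical array
//   ARRAY_SIZE(kSamplePrimes)  // == 4
// The second division in the macro becomes a compile-time division by zero
// whenever sizeof(a) is not a multiple of sizeof(*(a)), which catches many
// accidental uses of ARRAY_SIZE on a pointer instead of an array.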


// The USE(x) template is used to silence C++ compiler warnings
// issued for (yet) unused variables (typically parameters).
template <typename T>
static inline void USE(T) { }


// FUNCTION_ADDR(f) gets the address of a C function f.
#define FUNCTION_ADDR(f) \
  (reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))


// FUNCTION_CAST<F>(addr) casts an address into a function
// of type F. Used to invoke generated code from within C.
template <typename F>
F FUNCTION_CAST(Address addr) {
  return reinterpret_cast<F>(reinterpret_cast<intptr_t>(addr));
}
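
// Illustrative usage (added comment, not part of the original header):
//   typedef int (*SampleGeneratedCode)(int x, int y);  // hypothetical type
//   SampleGeneratedCode f = FUNCTION_CAST<SampleGeneratedCode>(entry);
//   int result = f(1, 2);
// where 'entry' is an Address pointing at the start of an executable,
// generated code object.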


// A macro to disallow the evil copy constructor and operator= functions.
// This should be used in the private: declarations for a class.
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
  TypeName(const TypeName&);               \
  void operator=(const TypeName&)


// A macro to disallow all the implicit constructors, namely the
// default constructor, copy constructor and operator= functions.
//
// This should be used in the private: declarations for a class
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
  TypeName();                                    \
  DISALLOW_COPY_AND_ASSIGN(TypeName)
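
// Illustrative usage (added comment, not part of the original header):
//   class SampleRegistry {  // hypothetical all-static class
//    public:
//     static void Register(Object* obj);
//    private:
//     DISALLOW_IMPLICIT_CONSTRUCTORS(SampleRegistry);
//   };
// The macro hides the default constructor, copy constructor and assignment
// operator, so SampleRegistry can never be instantiated or copied.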


// Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk")
// inside a C++ class and its new and delete operators will be overloaded so
// that allocations and deallocations are logged.
// This file (globals.h) is included before log.h, so we use direct calls to
// the Logger rather than the LOG macro.
#ifdef DEBUG
#define TRACK_MEMORY(name) \
  void* operator new(size_t size) { \
    void* result = ::operator new(size); \
    Logger::NewEvent(name, result, size); \
    return result; \
  } \
  void operator delete(void* object) { \
    Logger::DeleteEvent(name, object); \
    ::operator delete(object); \
  }
#else
#define TRACK_MEMORY(name)
#endif
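
// Illustrative usage (added comment, not part of the original header):
//   class SampleTrackedThing {  // hypothetical class
//    public:
//     TRACK_MEMORY("SampleTrackedThing")
//   };
// In DEBUG builds every 'new SampleTrackedThing' and matching 'delete' is
// reported to the Logger under the given name; in release builds the macro
// expands to nothing.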

// Defines used to help GCC make better inlining decisions. Not used for debug
// builds: on GCC 3.4.5, __attribute__((always_inline)) causes compilation
// errors in debug builds.
#if defined(__GNUC__) && !defined(DEBUG)
#if (__GNUC__ >= 4)
#define INLINE(header) inline header __attribute__((always_inline))
#define NO_INLINE(header) header __attribute__((noinline))
#else
#define INLINE(header) inline __attribute__((always_inline)) header
#define NO_INLINE(header) __attribute__((noinline)) header
#endif
#else
#define INLINE(header) inline header
#define NO_INLINE(header) header
#endif


#if defined(__GNUC__) && __GNUC__ >= 4
#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
#else
#define MUST_USE_RESULT
#endif


// Feature flag bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
// On X86/X64, values below 32 are bits in EDX; values of 32 and above are
// bits in ECX (i.e. the ECX bit is the value minus 32).
enum CpuFeature { SSE4_1 = 32 + 19,  // x86
                  SSE3 = 32 + 0,     // x86
                  SSE2 = 26,         // x86
                  CMOV = 15,         // x86
                  RDTSC = 4,         // x86
                  CPUID = 10,        // x86
                  VFP3 = 1,          // ARM
                  ARMv7 = 2,         // ARM
                  SAHF = 0};         // x86

} }  // namespace v8::internal

#endif  // V8_GLOBALS_H_