// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_GLOBALS_H_
#define V8_GLOBALS_H_

namespace v8 {
namespace internal {

// Processor architecture detection. For more info on what's defined, see:
//   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
//   http://www.agner.org/optimize/calling_conventions.pdf
// or with gcc, run: "echo | gcc -E -dM -"
#if defined(_M_X64) || defined(__x86_64__)
#define V8_HOST_ARCH_X64 1
#define V8_HOST_ARCH_64_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
// Some CPU-OS combinations allow unaligned access on ARM. We assume
// that unaligned accesses are not allowed unless the build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
#if CAN_USE_UNALIGNED_ACCESSES
#define V8_HOST_CAN_READ_UNALIGNED 1
#endif
#elif defined(_MIPS_ARCH_MIPS32R2)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else
#error Host architecture was not detected as supported by v8
#endif

// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_IA32) && \
    !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_TARGET_ARCH_IA32 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
#elif defined(_MIPS_ARCH_MIPS32R2)
#define V8_TARGET_ARCH_MIPS 1
#else
#error Target architecture was not detected as supported by v8
#endif
#endif
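
// Illustrative (hypothetical) cross-compile invocation that sets the target
// externally, skipping the detection above:
//
//   g++ -DV8_TARGET_ARCH_ARM -DCAN_USE_UNALIGNED_ACCESSES=0 ...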

// Check for supported combinations of host and target architectures.
#if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32)
#error Target architecture ia32 is only supported on ia32 host
#endif
#if defined(V8_TARGET_ARCH_X64) && !defined(V8_HOST_ARCH_X64)
#error Target architecture x64 is only supported on x64 host
#endif
#if (defined(V8_TARGET_ARCH_ARM) && \
    !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_ARM)))
#error Target architecture arm is only supported on arm and ia32 host
#endif
#if (defined(V8_TARGET_ARCH_MIPS) && \
    !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_MIPS)))
#error Target architecture mips is only supported on mips and ia32 host
#endif

// Define unaligned read for the target architectures supporting it.
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
#define V8_TARGET_CAN_READ_UNALIGNED 1
#elif V8_TARGET_ARCH_ARM
// Some CPU-OS combinations allow unaligned access on ARM. We assume
// that unaligned accesses are not allowed unless the build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
#if CAN_USE_UNALIGNED_ACCESSES
#define V8_TARGET_CAN_READ_UNALIGNED 1
#endif
#elif V8_TARGET_ARCH_MIPS
#else
#error Target architecture is not supported by v8
#endif

// Support for alternative bool type. This is only enabled if the code is
// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
// For instance, 'bool b = "false";' results in b == true! This is a hidden
// source of bugs.
// However, redefining the bool type does have some negative impact on some
// platforms. It gives rise to compiler warnings (e.g. with
// MSVC) in the API header files when mixing code that uses the standard
// bool with code that uses the redefined version.
// This does not actually belong in the platform code, but needs to be
// defined here because the platform code uses bool, and platform.h is
// included very early in the main include file.

#ifdef USE_MYBOOL
typedef unsigned int __my_bool__;
#define bool __my_bool__  // use 'indirection' to avoid name clashes
#endif

typedef uint8_t byte;
typedef byte* Address;

// Define our own macros for writing 64-bit constants. This is less fragile
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
#if V8_HOST_ARCH_64_BIT
#ifdef _MSC_VER
#define V8_UINT64_C(x)  (x ## UI64)
#define V8_INT64_C(x)   (x ## I64)
#define V8_PTR_PREFIX "ll"
#else  // _MSC_VER
#define V8_UINT64_C(x)  (x ## UL)
#define V8_INT64_C(x)   (x ## L)
#define V8_PTR_PREFIX "l"
#endif  // _MSC_VER
#else  // V8_HOST_ARCH_64_BIT
#define V8_PTR_PREFIX ""
#endif  // V8_HOST_ARCH_64_BIT

// The following macro works on both 32 and 64-bit platforms.
// Usage: instead of writing 0x1234567890123456
//        write V8_2PART_UINT64_C(0x12345678,90123456);
#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
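
// Illustrative expansion: V8_2PART_UINT64_C(0x12345678, 90123456) becomes
// ((static_cast<uint64_t>(0x12345678) << 32) + 0x90123456u), i.e. the 64-bit
// constant 0x1234567890123456, without needing a 64-bit literal suffix.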

#define V8PRIxPTR V8_PTR_PREFIX "x"
#define V8PRIdPTR V8_PTR_PREFIX "d"

// Fix for Mac OS X defining uintptr_t as "unsigned long":
#if defined(__APPLE__) && defined(__MACH__)
#undef V8PRIxPTR
#define V8PRIxPTR "lx"
#endif
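
// Illustrative use with the standard printf family (assuming <stdio.h> is
// available in the including translation unit):
//
//   intptr_t p = reinterpret_cast<intptr_t>(some_pointer);  // hypothetical
//   printf("address: 0x%" V8PRIxPTR "\n", p);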

#if (defined(__APPLE__) && defined(__MACH__)) || \
    defined(__FreeBSD__) || defined(__OpenBSD__)
#define USING_BSD_ABI
#endif

// Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16;
typedef int32_t uc32;

// -----------------------------------------------------------------------------
// Constants

const int KB = 1024;
const int MB = KB * KB;
const int GB = KB * KB * KB;
const int kMaxInt = 0x7FFFFFFF;
const int kMinInt = -kMaxInt - 1;

const uint32_t kMaxUInt32 = 0xFFFFFFFFu;

const int kCharSize    = sizeof(char);      // NOLINT
const int kShortSize   = sizeof(short);     // NOLINT
const int kIntSize     = sizeof(int);       // NOLINT
const int kDoubleSize  = sizeof(double);    // NOLINT
const int kPointerSize = sizeof(void*);     // NOLINT
const int kIntptrSize  = sizeof(intptr_t);  // NOLINT

#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
#else
const int kPointerSizeLog2 = 2;
const intptr_t kIntptrSignBit = 0x80000000;
#endif

// Mask for the sign bit in a smi.
const intptr_t kSmiSignMask = kIntptrSignBit;

const int kObjectAlignmentBits = kPointerSizeLog2;
const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;

// Desired alignment for pointers.
const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;

// Desired alignment for maps.
#if V8_HOST_ARCH_64_BIT
const intptr_t kMapAlignmentBits = kObjectAlignmentBits;
#else
const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3;
#endif
const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
const intptr_t kMapAlignmentMask = kMapAlignment - 1;

// Tag information for Failure.
const int kFailureTag = 3;
const int kFailureTagSize = 2;
const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;


const int kBitsPerByte = 8;
const int kBitsPerByteLog2 = 3;
const int kBitsPerPointer = kPointerSize * kBitsPerByte;
const int kBitsPerInt = kIntSize * kBitsPerByte;

// IEEE 754 single precision floating point number bit layout.
const uint32_t kBinary32SignMask = 0x80000000u;
const uint32_t kBinary32ExponentMask = 0x7f800000u;
const uint32_t kBinary32MantissaMask = 0x007fffffu;
const int kBinary32ExponentBias = 127;
const int kBinary32MaxExponent  = 0xFE;
const int kBinary32MinExponent  = 0x01;
const int kBinary32MantissaBits = 23;
const int kBinary32ExponentShift = 23;
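
// Worked example (illustrative): 1.0f has the bit pattern 0x3f800000, so the
// sign bit is 0, (0x3f800000 & kBinary32ExponentMask) >> kBinary32ExponentShift
// gives the biased exponent 127 (unbiased 0 after subtracting
// kBinary32ExponentBias), and the mantissa bits are all 0.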

// Zap-value: The value used for zapping dead objects.
// Should be a recognizable hex value tagged as a heap object pointer.
#ifdef V8_HOST_ARCH_64_BIT
const Address kZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeed));
const Address kHandleZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
const Address kFromSpaceZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
const uint64_t kDebugZapValue = 0xbadbaddbbadbaddb;
#else
const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
const uint32_t kDebugZapValue = 0xbadbaddb;
#endif


// Number of bits to represent the page size for paged spaces. The value of 13
// gives 8K bytes per page.
const int kPageSizeBits = 13;

// On Intel architecture, cache line size is 64 bytes.
// On ARM it may be less (32 bytes), but as far as this constant is
// used for aligning data, it doesn't hurt to align on a greater value.
const int kProcessorCacheLineSize = 64;

// Constants relevant to double precision floating point numbers.

// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
// other bits set.
const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
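
// Illustrative check: a double whose bit pattern x satisfies
// (x & kQuietNaNMask) == kQuietNaNMask is a quiet NaN; the canonical quiet
// NaN bit pattern 0x7ff8000000000000 is the smallest such value.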


// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
// (sorted alphabetically)

class AccessorInfo;
class Allocation;
class Arguments;
class Assembler;
class AssertNoAllocation;
class BreakableStatement;
class Code;
class CodeGenerator;
class CodeStub;
class Context;
class Debug;
class Debugger;
class DebugInfo;
class Descriptor;
class DescriptorArray;
class Expression;
class ExternalReference;
class FixedArray;
class FunctionEntry;
class FunctionLiteral;
class FunctionTemplateInfo;
class NumberDictionary;
class StringDictionary;
class FreeStoreAllocationPolicy;
template <typename T> class Handle;
class Heap;
class HeapObject;
class IC;
class InterceptorInfo;
class IterationStatement;
class JSArray;
class JSFunction;
class JSObject;
class LargeObjectSpace;
template <typename T, class P = FreeStoreAllocationPolicy> class List;
class LookupResult;
class MacroAssembler;
class Map;
class MapSpace;
class MarkCompactCollector;
class NewSpace;
class NodeVisitor;
class Object;
class OldSpace;
class Property;
class Proxy;
class RegExpNode;
struct RegExpCompileData;
class RegExpTree;
class RegExpCompiler;
class RegExpVisitor;
class Scope;
template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
class SerializedScopeInfo;
class Script;
class Slot;
class Smi;
template <typename Config, class Allocator = FreeStoreAllocationPolicy>
    class SplayTree;
class Statement;
class String;
class Struct;
class SwitchStatement;
class AstVisitor;
class Variable;
class VariableProxy;
class RelocInfo;
class Deserializer;
class MessageLocation;
class ObjectGroup;
class TickSample;
class VirtualMemory;
class Mutex;

typedef bool (*WeakSlotCallback)(Object** pointer);

// -----------------------------------------------------------------------------
// Miscellaneous

// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
// consecutive.
enum AllocationSpace {
  NEW_SPACE,          // Semispaces collected with copying collector.
  OLD_POINTER_SPACE,  // May contain pointers to new space.
  OLD_DATA_SPACE,     // Must not have pointers to new space.
  CODE_SPACE,         // No pointers to new space, marked executable.
  MAP_SPACE,          // Only and all map objects.
  CELL_SPACE,         // Only and all cell objects.
  LO_SPACE,           // Promoted large objects.

  FIRST_SPACE = NEW_SPACE,
  LAST_SPACE = LO_SPACE,
  FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
  LAST_PAGED_SPACE = CELL_SPACE
};
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;


// A flag that indicates whether objects should be pretenured when
// allocated (allocated directly into the old generation) or not
// (allocated in the young generation if the object size and type
// allows).
enum PretenureFlag { NOT_TENURED, TENURED };

enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };

enum Executability { NOT_EXECUTABLE, EXECUTABLE };

enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };

// Flag indicating whether code is built into the VM (one of the natives files).
enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };


// A CodeDesc describes a buffer holding instructions and relocation
// information. The instructions start at the beginning of the buffer
// and grow forward, the relocation information starts at the end of
// the buffer and grows backward.
//
//  |<--------------- buffer_size ---------------->|
//  |<-- instr_size -->|        |<-- reloc_size -->|
//  +==================+========+==================+
//  |   instructions   |  free  |    reloc info    |
//  +==================+========+==================+
//  ^
//  |
//  buffer

struct CodeDesc {
  byte* buffer;
  int buffer_size;
  int instr_size;
  int reloc_size;
  Assembler* origin;
};
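
// Illustrative invariant implied by the layout above: for a filled-in
// CodeDesc,
//   desc.instr_size + desc.reloc_size <= desc.buffer_size,
// and the free gap between the two regions is
//   desc.buffer_size - desc.instr_size - desc.reloc_size bytes.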


// Callback function on object slots, used for iterating heap object slots in
// HeapObjects, global pointers to heap objects, etc. The callback function
// may change the value of the slot.
typedef void (*ObjectSlotCallback)(HeapObject** pointer);


// Callback function used for iterating objects in heap spaces,
// for example, scanning heap objects.
typedef int (*HeapObjectCallback)(HeapObject* obj);


// Callback function used for checking constraints when copying/relocating
// objects. Returns true if an object can be copied/relocated from its
// old_addr to a new_addr.
typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);


// Callback function on inline caches, used for iterating over inline caches
// in compiled code.
typedef void (*InlineCacheCallback)(Code* code, Address ic);


// State for inline cache call sites. Aliased as IC::State.
enum InlineCacheState {
  // Has never been executed.
  UNINITIALIZED,
  // Has been executed but monomorphic state has been delayed.
  PREMONOMORPHIC,
  // Has been executed and only one receiver type has been seen.
  MONOMORPHIC,
  // Like MONOMORPHIC but check failed due to prototype.
  MONOMORPHIC_PROTOTYPE_FAILURE,
  // Multiple receiver types have been seen.
  MEGAMORPHIC,
  // Special states for debug break or step in prepare stubs.
  DEBUG_BREAK,
  DEBUG_PREPARE_STEP_IN
};


enum InLoopFlag {
  NOT_IN_LOOP,
  IN_LOOP
};


enum CallFunctionFlags {
  NO_CALL_FUNCTION_FLAGS = 0,
  RECEIVER_MIGHT_BE_VALUE = 1 << 0  // Receiver might not be a JSObject.
};


enum InlineCacheHolderFlag {
  OWN_MAP,       // For fast properties objects.
  PROTOTYPE_MAP  // For slow properties objects (except GlobalObjects).
};


// Type of properties.
// Order of properties is significant.
// Must fit in the BitField PropertyDetails::TypeField.
// A copy of this is in mirror-debugger.js.
enum PropertyType {
  NORMAL              = 0,  // only in slow mode
  FIELD               = 1,  // only in fast mode
  CONSTANT_FUNCTION   = 2,  // only in fast mode
  CALLBACKS           = 3,
  INTERCEPTOR         = 4,  // only in lookup results, not in descriptors.
  MAP_TRANSITION      = 5,  // only in fast mode
  CONSTANT_TRANSITION = 6,  // only in fast mode
  NULL_DESCRIPTOR     = 7,  // only in fast mode
  // All properties before MAP_TRANSITION are real.
  FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION,
  // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
  // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
  // nonexistent properties.
  NONEXISTENT = NULL_DESCRIPTOR
};


// Whether to remove map transitions and constant transitions from a
// DescriptorArray.
enum TransitionFlag {
  REMOVE_TRANSITIONS,
  KEEP_TRANSITIONS
};


// Union used for fast testing of specific double values.
union DoubleRepresentation {
  double  value;
  int64_t bits;
  DoubleRepresentation(double x) { value = x; }
};
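
// Illustrative use: a bit-level comparison against a specific double value.
//
//   DoubleRepresentation rep(1.0);
//   // On IEEE 754 hosts rep.bits == V8_2PART_UINT64_C(0x3FF00000, 00000000).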


// Union used for customized checking of the IEEE double types
// inlined within v8 runtime, rather than going to the underlying
// platform headers and libraries.
union IeeeDoubleLittleEndianArchType {
  double d;
  struct {
    unsigned int man_low  :32;
    unsigned int man_high :20;
    unsigned int exp      :11;
    unsigned int sign     :1;
  } bits;
};


union IeeeDoubleBigEndianArchType {
  double d;
  struct {
    unsigned int sign     :1;
    unsigned int exp      :11;
    unsigned int man_high :20;
    unsigned int man_low  :32;
  } bits;
};


// AccessorCallback
struct AccessorDescriptor {
  Object* (*getter)(Object* object, void* data);
  Object* (*setter)(JSObject* object, Object* value, void* data);
  void* data;
};


// Logging and profiling.
// A StateTag represents a possible state of the VM. When compiled with
// ENABLE_VMSTATE_TRACKING, the logger maintains a stack of these.
// Creating a VMState object enters a state by pushing on the stack, and
// destroying a VMState object leaves a state by popping the current state
// from the stack.

#define STATE_TAG_LIST(V) \
  V(JS)                   \
  V(GC)                   \
  V(COMPILER)             \
  V(OTHER)                \
  V(EXTERNAL)

enum StateTag {
#define DEF_STATE_TAG(name) name,
  STATE_TAG_LIST(DEF_STATE_TAG)
#undef DEF_STATE_TAG
  // Pseudo-types.
  state_tag_count
};
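
// Illustrative expansion of the X-macro pattern above: STATE_TAG_LIST applied
// to DEF_STATE_TAG produces the enumerators JS, GC, COMPILER, OTHER and
// EXTERNAL, so state_tag_count evaluates to 5.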


// -----------------------------------------------------------------------------
// Macros

// Testers for the tag bits of a value.

#define HAS_SMI_TAG(value) \
  ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)

#define HAS_FAILURE_TAG(value) \
  ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
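
// Illustrative reading (assuming the smi tag constants defined elsewhere in
// v8, where kSmiTag is 0 and kSmiTagMask covers the low tag bit):
// HAS_SMI_TAG(value) is true exactly when the low tag bit of the
// pointer-sized value is clear, i.e. the value encodes a small integer
// rather than a tagged heap pointer.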

// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer.
#define OBJECT_POINTER_ALIGN(value) \
  (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)

// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
#define POINTER_SIZE_ALIGN(value) \
  (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)

// MAP_POINTER_ALIGN returns the value aligned as a map pointer.
#define MAP_POINTER_ALIGN(value) \
  (((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
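
// Illustrative arithmetic (assuming a 32-bit host, where kObjectAlignment is
// 4 and kObjectAlignmentMask is 3): OBJECT_POINTER_ALIGN(13) evaluates to
// (13 + 3) & ~3 == 16, i.e. values are rounded up to the next multiple of the
// object alignment.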

// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
// corresponds to 'offsetof' (in stddef.h), except that it doesn't
// use 0 or NULL, which causes a problem with the compiler warnings
// we have enabled (which is also why 'offsetof' doesn't seem to work).
// Here we simply use the non-zero value 4, which seems to work.
#define OFFSET_OF(type, field) \
  (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
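
// Illustrative use: OFFSET_OF(CodeDesc, instr_size) yields the byte offset of
// the instr_size field within CodeDesc, much like
// offsetof(CodeDesc, instr_size) would.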


// The expression ARRAY_SIZE(a) is a compile-time constant of type
// size_t which represents the number of elements of the given
// array. You should only use ARRAY_SIZE on statically allocated
// arrays.
#define ARRAY_SIZE(a)                               \
  ((sizeof(a) / sizeof(*(a))) /                     \
   static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
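
// Illustrative use (hypothetical array):
//
//   static const int kSmallPrimes[] = {2, 3, 5, 7};
//   // ARRAY_SIZE(kSmallPrimes) is the compile-time constant 4.
//
// The second factor divides by zero when sizeof(a) is not a multiple of
// sizeof(*(a)), which helps catch some cases of passing a pointer instead of
// an array.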


// The USE(x) template is used to silence C++ compiler warnings
// issued for (yet) unused variables (typically parameters).
template <typename T>
static inline void USE(T) { }


// FUNCTION_ADDR(f) gets the address of a C function f.
#define FUNCTION_ADDR(f) \
  (reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))


// FUNCTION_CAST<F>(addr) casts an address into a function
// of type F. Used to invoke generated code from within C.
template <typename F>
F FUNCTION_CAST(Address addr) {
  return reinterpret_cast<F>(reinterpret_cast<intptr_t>(addr));
}
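
// Illustrative use (hypothetical function type and entry address):
//
//   typedef int32_t (*SimpleEntry)();
//   int32_t result = FUNCTION_CAST<SimpleEntry>(entry)();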


// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
  TypeName(const TypeName&);               \
  void operator=(const TypeName&)


// A macro to disallow all the implicit constructors, namely the
// default constructor, copy constructor and operator= functions.
//
// This should be used in the private: declarations for a class
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
  TypeName();                                    \
  DISALLOW_COPY_AND_ASSIGN(TypeName)
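
// Illustrative use (hypothetical class of static helpers):
//
//   class StaticHelpers {
//    public:
//     static void DoSomething();
//    private:
//     DISALLOW_IMPLICIT_CONSTRUCTORS(StaticHelpers);
//   };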


// Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk")
// inside a C++ class and new and delete will be overloaded so logging is
// performed.
// This file (globals.h) is included before log.h, so we use direct calls to
// the Logger rather than the LOG macro.
#ifdef DEBUG
#define TRACK_MEMORY(name) \
  void* operator new(size_t size) { \
    void* result = ::operator new(size); \
    Logger::NewEvent(name, result, size); \
    return result; \
  } \
  void operator delete(void* object) { \
    Logger::DeleteEvent(name, object); \
    ::operator delete(object); \
  }
#else
#define TRACK_MEMORY(name)
#endif

// Macro used to help GCC inline more aggressively. Don't bother for debug
// builds: on GCC 3.4.5, using __attribute__((always_inline)) causes
// compilation errors in debug builds.
#if defined(__GNUC__) && !defined(DEBUG)
#if (__GNUC__ >= 4)
#define INLINE(header) inline header __attribute__((always_inline))
#define NO_INLINE(header) header __attribute__((noinline))
#else
#define INLINE(header) inline __attribute__((always_inline)) header
#define NO_INLINE(header) __attribute__((noinline)) header
#endif
#else
#define INLINE(header) inline header
#define NO_INLINE(header) header
#endif


#if defined(__GNUC__) && __GNUC__ >= 4
#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
#else
#define MUST_USE_RESULT
#endif


// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
// On X86/X64, values below 32 are bits in EDX, values of 32 and above are
// bits in ECX.
enum CpuFeature { SSE4_1 = 32 + 19,  // x86
                  SSE3 = 32 + 0,     // x86
                  SSE2 = 26,         // x86
                  CMOV = 15,         // x86
                  RDTSC = 4,         // x86
                  CPUID = 10,        // x86
                  VFP3 = 1,          // ARM
                  ARMv7 = 2,         // ARM
                  SAHF = 0};         // x86

} }  // namespace v8::internal

#endif  // V8_GLOBALS_H_