blob: 981ea16d722b7fe724e9e373cc16073cf21084d8 [file] [log] [blame]
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28#ifndef V8_GLOBALS_H_
29#define V8_GLOBALS_H_
30
31namespace v8 {
32namespace internal {
33
34// Processor architecture detection. For more info on what's defined, see:
35// http://msdn.microsoft.com/en-us/library/b0084kay.aspx
36// http://www.agner.org/optimize/calling_conventions.pdf
37// or with gcc, run: "echo | gcc -E -dM -"
38#if defined(_M_X64) || defined(__x86_64__)
39#define V8_HOST_ARCH_X64 1
40#define V8_HOST_ARCH_64_BIT 1
41#define V8_HOST_CAN_READ_UNALIGNED 1
42#elif defined(_M_IX86) || defined(__i386__)
43#define V8_HOST_ARCH_IA32 1
44#define V8_HOST_ARCH_32_BIT 1
45#define V8_HOST_CAN_READ_UNALIGNED 1
46#elif defined(__ARMEL__)
47#define V8_HOST_ARCH_ARM 1
48#define V8_HOST_ARCH_32_BIT 1
Kristian Monsen25f61362010-05-21 11:50:48 +010049// Some CPU-OS combinations allow unaligned access on ARM. We assume
50// that unaligned accesses are not allowed unless the build system
51// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
52#if CAN_USE_UNALIGNED_ACCESSES
53#define V8_HOST_CAN_READ_UNALIGNED 1
54#endif
Andrei Popescu31002712010-02-23 13:46:05 +000055#elif defined(_MIPS_ARCH_MIPS32R2)
56#define V8_HOST_ARCH_MIPS 1
57#define V8_HOST_ARCH_32_BIT 1
Steve Blocka7e24c12009-10-30 11:49:00 +000058#else
Steve Block6ded16b2010-05-10 14:33:55 +010059#error Host architecture was not detected as supported by v8
Steve Blocka7e24c12009-10-30 11:49:00 +000060#endif
61
Steve Block6ded16b2010-05-10 14:33:55 +010062// Check for supported combinations of host and target architectures.
63#if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32)
64#error Target architecture ia32 is only supported on ia32 host
65#endif
66#if defined(V8_TARGET_ARCH_X64) && !defined(V8_HOST_ARCH_X64)
67#error Target architecture x64 is only supported on x64 host
68#endif
69#if (defined(V8_TARGET_ARCH_ARM) && \
70 !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_ARM)))
71#error Target architecture arm is only supported on arm and ia32 host
72#endif
73#if (defined(V8_TARGET_ARCH_MIPS) && \
74 !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_MIPS)))
75#error Target architecture mips is only supported on mips and ia32 host
76#endif
77
78// Define unaligned read for the target architectures supporting it.
Steve Blocka7e24c12009-10-30 11:49:00 +000079#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
80#define V8_TARGET_CAN_READ_UNALIGNED 1
81#elif V8_TARGET_ARCH_ARM
Kristian Monsen25f61362010-05-21 11:50:48 +010082// Some CPU-OS combinations allow unaligned access on ARM. We assume
83// that unaligned accesses are not allowed unless the build system
84// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
85#if CAN_USE_UNALIGNED_ACCESSES
86#define V8_TARGET_CAN_READ_UNALIGNED 1
87#endif
Andrei Popescu31002712010-02-23 13:46:05 +000088#elif V8_TARGET_ARCH_MIPS
Steve Blocka7e24c12009-10-30 11:49:00 +000089#else
Steve Block6ded16b2010-05-10 14:33:55 +010090#error Target architecture is not supported by v8
Steve Blocka7e24c12009-10-30 11:49:00 +000091#endif
92
// Support for alternative bool type. This is only enabled if the code is
// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
// For instance, 'bool b = "false";' results in b == true! This is a hidden
// source of bugs.
// However, redefining the bool type does have some negative impact on some
// platforms. It gives rise to compiler warnings (i.e. with
// MSVC) in the API header files when mixing code that uses the standard
// bool with code that uses the redefined version.
// This does not actually belong in the platform code, but needs to be
// defined here because the platform code uses bool, and platform.h is
// included very early in the main include file.

#ifdef USE_MYBOOL
typedef unsigned int __my_bool__;
#define bool __my_bool__  // use 'indirection' to avoid name clashes
#endif

// A byte is the smallest addressable unit; an Address is an untyped
// pointer into the heap or code space, used throughout the VM.
typedef uint8_t byte;
typedef byte* Address;
112
// Define our own macros for writing 64-bit constants. This is less fragile
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
#if V8_HOST_ARCH_64_BIT
#ifdef _MSC_VER
#define V8_UINT64_C(x)  (x ## UI64)
#define V8_INT64_C(x)   (x ## I64)
#define V8_PTR_PREFIX "ll"
#else  // _MSC_VER
#define V8_UINT64_C(x)  (x ## UL)
#define V8_INT64_C(x)   (x ## L)
#define V8_PTR_PREFIX "l"
#endif  // _MSC_VER
#else  // V8_HOST_ARCH_64_BIT
#define V8_PTR_PREFIX ""
#endif  // V8_HOST_ARCH_64_BIT

// The following macro works on both 32 and 64-bit platforms.
// Usage: instead of writing 0x1234567890123456
//      write V8_2PART_UINT64_C(0x12345678,90123456);
// The second half is token-pasted onto "0x", so it must be given without
// a 0x prefix and padded to eight hex digits.
#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))

// printf format specifiers for pointer-sized integers.
#define V8PRIxPTR V8_PTR_PREFIX "x"
#define V8PRIdPTR V8_PTR_PREFIX "d"

// Fix for Mac OS X defining uintptr_t as "unsigned long":
#if defined(__APPLE__) && defined(__MACH__)
#undef V8PRIxPTR
#define V8PRIxPTR "lx"
#endif

#if (defined(__APPLE__) && defined(__MACH__)) || \
    defined(__FreeBSD__) || defined(__OpenBSD__)
#define USING_BSD_ABI
#endif
148
// Code-point values in Unicode 4.0 are 21 bits wide, so a full code point
// needs uc32; uc16 holds a single 16-bit code unit.
typedef uint16_t uc16;
typedef int32_t uc32;
152
// -----------------------------------------------------------------------------
// Constants

// Sizes in bytes.
const int KB = 1024;
const int MB = KB * KB;
const int GB = KB * KB * KB;
const int kMaxInt = 0x7FFFFFFF;
const int kMinInt = -kMaxInt - 1;

const uint32_t kMaxUInt32 = 0xFFFFFFFFu;

const int kCharSize    = sizeof(char);      // NOLINT
const int kShortSize   = sizeof(short);     // NOLINT
const int kIntSize     = sizeof(int);       // NOLINT
const int kDoubleSize  = sizeof(double);    // NOLINT
const int kPointerSize = sizeof(void*);     // NOLINT
const int kIntptrSize  = sizeof(intptr_t);  // NOLINT

#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
#else
const int kPointerSizeLog2 = 2;
const intptr_t kIntptrSignBit = 0x80000000;
#endif

// Mask for the sign bit in a smi.
const intptr_t kSmiSignMask = kIntptrSignBit;

// Heap objects are aligned to the pointer size.
const int kObjectAlignmentBits = kPointerSizeLog2;
const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;

// Desired alignment for pointers.
const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;

// Desired alignment for maps (stricter on 32-bit hosts).
#if V8_HOST_ARCH_64_BIT
const intptr_t kMapAlignmentBits = kObjectAlignmentBits;
#else
const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3;
#endif
const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
const intptr_t kMapAlignmentMask = kMapAlignment - 1;

// Tag information for Failure: low kFailureTagSize bits equal kFailureTag.
const int kFailureTag = 3;
const int kFailureTagSize = 2;
const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;


const int kBitsPerByte = 8;
const int kBitsPerByteLog2 = 3;
const int kBitsPerPointer = kPointerSize * kBitsPerByte;
const int kBitsPerInt = kIntSize * kBitsPerByte;

// IEEE 754 single precision floating point number bit layout.
const uint32_t kBinary32SignMask = 0x80000000u;
const uint32_t kBinary32ExponentMask = 0x7f800000u;
const uint32_t kBinary32MantissaMask = 0x007fffffu;
const int kBinary32ExponentBias = 127;
const int kBinary32MaxExponent = 0xFE;
const int kBinary32MinExponent = 0x01;
const int kBinary32MantissaBits = 23;
const int kBinary32ExponentShift = 23;
Steve Blocka7e24c12009-10-30 11:49:00 +0000219
220// Zap-value: The value used for zapping dead objects.
221// Should be a recognizable hex value tagged as a heap object pointer.
222#ifdef V8_HOST_ARCH_64_BIT
223const Address kZapValue =
224 reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeed));
225const Address kHandleZapValue =
226 reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
227const Address kFromSpaceZapValue =
228 reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
229#else
230const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
231const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
232const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
233#endif
234
235
Leon Clarkee46be812010-01-19 14:06:41 +0000236// Number of bits to represent the page size for paged spaces. The value of 13
237// gives 8K bytes per page.
238const int kPageSizeBits = 13;
239
Steve Block6ded16b2010-05-10 14:33:55 +0100240// On Intel architecture, cache line size is 64 bytes.
241// On ARM it may be less (32 bytes), but as far this constant is
242// used for aligning data, it doesn't hurt to align on a greater value.
243const int kProcessorCacheLineSize = 64;
Leon Clarkee46be812010-01-19 14:06:41 +0000244
Steve Blockd0582a62009-12-15 09:54:21 +0000245// Constants relevant to double precision floating point numbers.
246
247// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
248// other bits set.
249const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
250// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
251const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
252
253
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
// (sorted alphabetically)

class AccessorInfo;
class Allocation;
class Arguments;
class Assembler;
class AssertNoAllocation;
class BreakableStatement;
class Code;
class CodeGenerator;
class CodeStub;
class Context;
class Debug;
class Debugger;
class DebugInfo;
class Descriptor;
class DescriptorArray;
class Expression;
class ExternalReference;
class FixedArray;
class FunctionEntry;
class FunctionLiteral;
class FunctionTemplateInfo;
class NumberDictionary;
class StringDictionary;
class FreeStoreAllocationPolicy;
template <typename T> class Handle;
class Heap;
class HeapObject;
class IC;
class InterceptorInfo;
class IterationStatement;
class Array;
class JSArray;
class JSFunction;
class JSObject;
class LargeObjectSpace;
template <typename T, class P = FreeStoreAllocationPolicy> class List;
class LookupResult;
class MacroAssembler;
class Map;
class MapSpace;
class MarkCompactCollector;
class NewSpace;
class NodeVisitor;
class Object;
class OldSpace;
class Property;
class Proxy;
class RegExpNode;
struct RegExpCompileData;
class RegExpTree;
class RegExpCompiler;
class RegExpVisitor;
class Scope;
template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
class Script;
class Slot;
class Smi;
template <typename Config, class Allocator = FreeStoreAllocationPolicy>
    class SplayTree;
class Statement;
class String;
class Struct;
class SwitchStatement;
class AstVisitor;
class Variable;
class VariableProxy;
class RelocInfo;
class Deserializer;
class MessageLocation;
class ObjectGroup;
class TickSample;
class VirtualMemory;
class Mutex;
class ZoneScopeInfo;

// Callback for weak handle processing: returns whether the slot should be
// retained.
typedef bool (*WeakSlotCallback)(Object** pointer);
334
// -----------------------------------------------------------------------------
// Miscellaneous

// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
// consecutive.
enum AllocationSpace {
  NEW_SPACE,          // Semispaces collected with copying collector.
  OLD_POINTER_SPACE,  // May contain pointers to new space.
  OLD_DATA_SPACE,     // Must not have pointers to new space.
  CODE_SPACE,         // No pointers to new space, marked executable.
  MAP_SPACE,          // Only and all map objects.
  CELL_SPACE,         // Only and all cell objects.
  LO_SPACE,           // Promoted large objects.

  FIRST_SPACE = NEW_SPACE,
  LAST_SPACE = LO_SPACE,
  FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
  LAST_PAGED_SPACE = CELL_SPACE
};
// Number of bits needed to encode an AllocationSpace value, and its mask.
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;


// A flag that indicates whether objects should be pretenured when
// allocated (allocated directly into the old generation) or not
// (allocated in the young generation if the object size and type
// allows).
enum PretenureFlag { NOT_TENURED, TENURED };

enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };

enum Executability { NOT_EXECUTABLE, EXECUTABLE };

enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };

// Flag indicating whether code is built into the VM (one of the natives files).
enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
372
Steve Blocka7e24c12009-10-30 11:49:00 +0000373
374// A CodeDesc describes a buffer holding instructions and relocation
375// information. The instructions start at the beginning of the buffer
376// and grow forward, the relocation information starts at the end of
377// the buffer and grows backward.
378//
379// |<--------------- buffer_size ---------------->|
380// |<-- instr_size -->| |<-- reloc_size -->|
381// +==================+========+==================+
382// | instructions | free | reloc info |
383// +==================+========+==================+
384// ^
385// |
386// buffer
387
388struct CodeDesc {
389 byte* buffer;
390 int buffer_size;
391 int instr_size;
392 int reloc_size;
393 Assembler* origin;
394};
395
396
397// Callback function on object slots, used for iterating heap object slots in
398// HeapObjects, global pointers to heap objects, etc. The callback allows the
399// callback function to change the value of the slot.
400typedef void (*ObjectSlotCallback)(HeapObject** pointer);
401
402
403// Callback function used for iterating objects in heap spaces,
404// for example, scanning heap objects.
405typedef int (*HeapObjectCallback)(HeapObject* obj);
406
407
408// Callback function used for checking constraints when copying/relocating
409// objects. Returns true if an object can be copied/relocated from its
410// old_addr to a new_addr.
411typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);
412
413
414// Callback function on inline caches, used for iterating over inline caches
415// in compiled code.
416typedef void (*InlineCacheCallback)(Code* code, Address ic);
417
418
// State for inline cache call sites. Aliased as IC::State.
enum InlineCacheState {
  // Has never been executed.
  UNINITIALIZED,
  // Has been executed but monomorphic state has been delayed.
  PREMONOMORPHIC,
  // Has been executed and only one receiver type has been seen.
  MONOMORPHIC,
  // Like MONOMORPHIC but check failed due to prototype.
  MONOMORPHIC_PROTOTYPE_FAILURE,
  // Multiple receiver types have been seen.
  MEGAMORPHIC,
  // Special states for debug break or step in prepare stubs.
  DEBUG_BREAK,
  DEBUG_PREPARE_STEP_IN
};


// Whether the call site being compiled sits inside a loop.
enum InLoopFlag {
  NOT_IN_LOOP,
  IN_LOOP
};


// Bit flags passed to the CallFunction stub.
enum CallFunctionFlags {
  NO_CALL_FUNCTION_FLAGS = 0,
  RECEIVER_MIGHT_BE_VALUE = 1 << 0  // Receiver might not be a JSObject.
};
447
448
// Type of properties.
// Order of properties is significant.
// Must fit in the BitField PropertyDetails::TypeField.
// A copy of this is in mirror-debugger.js.
enum PropertyType {
  NORMAL              = 0,  // only in slow mode
  FIELD               = 1,  // only in fast mode
  CONSTANT_FUNCTION   = 2,  // only in fast mode
  CALLBACKS           = 3,
  INTERCEPTOR         = 4,  // only in lookup results, not in descriptors.
  MAP_TRANSITION      = 5,  // only in fast mode
  CONSTANT_TRANSITION = 6,  // only in fast mode
  NULL_DESCRIPTOR     = 7,  // only in fast mode
  // All properties before MAP_TRANSITION are real.
  FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION,
  // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
  // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
  // nonexistent properties.
  NONEXISTENT = NULL_DESCRIPTOR
};


// Whether to remove map transitions and constant transitions from a
// DescriptorArray.
enum TransitionFlag {
  REMOVE_TRANSITIONS,
  KEEP_TRANSITIONS
};
477
478
// Union used for fast testing of specific double values: write through
// 'value', inspect the raw IEEE 754 bit pattern through 'bits'.
union DoubleRepresentation {
  double value;
  int64_t bits;
  DoubleRepresentation(double x) { value = x; }  // NOLINT: implicit by design.
};
485
486
487// AccessorCallback
488struct AccessorDescriptor {
489 Object* (*getter)(Object* object, void* data);
490 Object* (*setter)(JSObject* object, Object* value, void* data);
491 void* data;
492};
493
494
// Logging and profiling.
// A StateTag represents a possible state of the VM. When compiled with
// ENABLE_VMSTATE_TRACKING, the logger maintains a stack of these.
// Creating a VMState object enters a state by pushing on the stack, and
// destroying a VMState object leaves a state by popping the current state
// from the stack.

// X-macro list of VM states; expand with V(name) per tag.
#define STATE_TAG_LIST(V) \
  V(JS)                   \
  V(GC)                   \
  V(COMPILER)             \
  V(OTHER)                \
  V(EXTERNAL)

enum StateTag {
#define DEF_STATE_TAG(name) name,
  STATE_TAG_LIST(DEF_STATE_TAG)
#undef DEF_STATE_TAG
  // Pseudo-types.
  state_tag_count
};
516
517
// -----------------------------------------------------------------------------
// Macros

// Testers for test.

#define HAS_SMI_TAG(value) \
  ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)

#define HAS_FAILURE_TAG(value) \
  ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)

// OBJECT_SIZE_ALIGN returns the value aligned HeapObject size
#define OBJECT_SIZE_ALIGN(value) \
  (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)

// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
#define POINTER_SIZE_ALIGN(value) \
  (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)

// MAP_SIZE_ALIGN returns the value aligned as a map pointer.
#define MAP_SIZE_ALIGN(value) \
  (((value) + kMapAlignmentMask) & ~kMapAlignmentMask)

// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
// corresponds to 'offsetof' (in stddef.h), except that it doesn't
// use 0 or NULL, which causes a problem with the compiler warnings
// we have enabled (which is also why 'offsetof' doesn't seem to work).
// Here we simply use the non-zero value 4, which seems to work.
#define OFFSET_OF(type, field) \
  (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)


// The expression ARRAY_SIZE(a) is a compile-time constant of type
// size_t which represents the number of elements of the given
// array. You should only use ARRAY_SIZE on statically allocated
// arrays. The division in the second factor forces a divide-by-zero
// error if 'a' is a pointer rather than an array.
#define ARRAY_SIZE(a)                                   \
  ((sizeof(a) / sizeof(*(a))) /                         \
   static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
558
559
// The USE(x) template is used to silence C++ compiler warnings
// issued for (yet) unused variables (typically parameters).
// The argument is taken by value and discarded.
template <typename T>
static inline void USE(T) { }
564
565
566// FUNCTION_ADDR(f) gets the address of a C function f.
567#define FUNCTION_ADDR(f) \
568 (reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))
569
570
571// FUNCTION_CAST<F>(addr) casts an address into a function
572// of type F. Used to invoke generated code from within C.
573template <typename F>
574F FUNCTION_CAST(Address addr) {
575 return reinterpret_cast<F>(reinterpret_cast<intptr_t>(addr));
576}
577
578
// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
  TypeName(const TypeName&);               \
  void operator=(const TypeName&)


// A macro to disallow all the implicit constructors, namely the
// default constructor, copy constructor and operator= functions.
//
// This should be used in the private: declarations for a class
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
  TypeName();                                    \
  DISALLOW_COPY_AND_ASSIGN(TypeName)


// Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk")
// inside a C++ class and new and delete will be overloaded so logging is
// performed.
// This file (globals.h) is included before log.h, so we use direct calls to
// the Logger rather than the LOG macro.
#ifdef DEBUG
#define TRACK_MEMORY(name) \
  void* operator new(size_t size) { \
    void* result = ::operator new(size); \
    Logger::NewEvent(name, result, size); \
    return result; \
  } \
  void operator delete(void* object) { \
    Logger::DeleteEvent(name, object); \
    ::operator delete(object); \
  }
#else
#define TRACK_MEMORY(name)
#endif

// define used for helping GCC to make better inlining. Don't bother for debug
// builds. On GCC 3.4.5 using __attribute__((always_inline)) causes compilation
// errors in debug build.
#if defined(__GNUC__) && !defined(DEBUG)
#if (__GNUC__ >= 4)
#define INLINE(header) inline header __attribute__((always_inline))
#else
#define INLINE(header) inline __attribute__((always_inline)) header
#endif
#else
#define INLINE(header) inline header
#endif
629
// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
enum CpuFeature { SSE3 = 32,   // x86
                  SSE2 = 26,   // x86
                  CMOV = 15,   // x86
                  RDTSC = 4,   // x86
                  CPUID = 10,  // x86
                  VFP3 = 1,    // ARM
                  ARMv7 = 2,   // ARM
                  SAHF = 0};   // x86
641
Steve Blocka7e24c12009-10-30 11:49:00 +0000642} } // namespace v8::internal
643
644#endif // V8_GLOBALS_H_