// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_ISOLATE_H_
#define V8_ISOLATE_H_

#include "../include/v8-debug.h"
#include "allocation.h"
#include "apiutils.h"
#include "atomicops.h"
#include "builtins.h"
#include "contexts.h"
#include "execution.h"
#include "frames.h"
#include "date.h"
#include "global-handles.h"
#include "handles.h"
#include "hashmap.h"
#include "heap.h"
#include "regexp-stack.h"
#include "runtime-profiler.h"
#include "runtime.h"
#include "zone.h"

namespace v8 {
namespace internal {

class Bootstrapper;
class CodeGenerator;
class CodeRange;
class CompilationCache;
class ContextSlotCache;
class ContextSwitcher;
class Counters;
class CpuFeatures;
class CpuProfiler;
class DeoptimizerData;
class Deserializer;
class EmptyStatement;
class ExternalReferenceTable;
class Factory;
class FunctionInfoListener;
class HandleScopeImplementer;
class HeapProfiler;
class InlineRuntimeFunctionsTable;
class NoAllocationStringAllocator;
class InnerPointerToCodeCache;
class PreallocatedMemoryThread;
class RegExpStack;
class SaveContext;
class UnicodeCache;
class StringInputBuffer;
class StringTracker;
class StubCache;
class ThreadManager;
class ThreadState;
class ThreadVisitor;  // Defined in v8threads.h
class VMState;

// 'void function pointer', used to roundtrip the
// ExternalReference::ExternalReferenceRedirector since we cannot include
// assembler.h, where it is defined, here.
typedef void* ExternalReferenceRedirectorPointer();


#ifdef ENABLE_DEBUGGER_SUPPORT
class Debug;
class Debugger;
class DebuggerAgent;
#endif

#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
    !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
class Redirection;
class Simulator;
#endif


// Static indirection table for handles to constants. If a frame element or a
// Result represents a constant, the data contains an index into this table of
// handles to the actual constants.
typedef ZoneList<Handle<Object> > ZoneObjectList;

#define RETURN_IF_SCHEDULED_EXCEPTION(isolate)         \
  do {                                                 \
    Isolate* __isolate__ = (isolate);                  \
    if (__isolate__->has_scheduled_exception()) {      \
      return __isolate__->PromoteScheduledException(); \
    }                                                  \
  } while (false)

#define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value) \
  do {                                                     \
    if ((call).is_null()) {                                \
      ASSERT((isolate)->has_pending_exception());          \
      return (value);                                      \
    }                                                      \
  } while (false)

#define CHECK_NOT_EMPTY_HANDLE(isolate, call)    \
  do {                                           \
    ASSERT(!(isolate)->has_pending_exception()); \
    CHECK(!(call).is_null());                    \
    CHECK(!(isolate)->has_pending_exception());  \
  } while (false)

#define RETURN_IF_EMPTY_HANDLE(isolate, call) \
  RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception())

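// Illustrative usage sketch for the handle-checking macros above; it is not
// taken from the V8 sources. ExampleRuntimeFunction() and AllocateSomething()
// are hypothetical helpers standing in for any call that may return an empty
// handle while leaving an exception pending on the isolate.
//
//   MaybeObject* ExampleRuntimeFunction(Isolate* isolate) {
//     Handle<Object> result = AllocateSomething(isolate);
//     // Returns Failure::Exception() if the handle is empty and an
//     // exception is already pending.
//     RETURN_IF_EMPTY_HANDLE(isolate, result);
//     return *result;
//   }
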
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C)                \
  C(Handler, handler)                                   \
  C(CEntryFP, c_entry_fp)                               \
  C(Context, context)                                   \
  C(PendingException, pending_exception)                \
  C(ExternalCaughtException, external_caught_exception) \
  C(JSEntrySP, js_entry_sp)


// Platform-independent, reliable thread identifier.
class ThreadId {
 public:
  // Creates an invalid ThreadId.
  ThreadId() : id_(kInvalidId) {}

  // Returns ThreadId for current thread.
  static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }

  // Returns invalid ThreadId (guaranteed not to be equal to any thread).
  static ThreadId Invalid() { return ThreadId(kInvalidId); }

  // Compares ThreadIds for equality.
  INLINE(bool Equals(const ThreadId& other) const) {
    return id_ == other.id_;
  }

  // Checks whether this ThreadId refers to any thread.
  INLINE(bool IsValid() const) {
    return id_ != kInvalidId;
  }

  // Converts ThreadId to an integer representation
  // (required for public API: V8::V8::GetCurrentThreadId).
  int ToInteger() const { return id_; }

  // Converts an integer representation back into a ThreadId
  // (required for public API: V8::V8::TerminateExecution).
  static ThreadId FromInteger(int id) { return ThreadId(id); }

 private:
  static const int kInvalidId = -1;

  explicit ThreadId(int id) : id_(id) {}

  static int AllocateThreadId();

  static int GetCurrentThreadId();

  int id_;

  static Atomic32 highest_thread_id_;

  friend class Isolate;
};


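// Illustrative usage sketch, not from the V8 sources: ThreadId is a small
// value type, so a typical check compares the current thread against a stored
// id. IsRunningOn() is a hypothetical helper.
//
//   static bool IsRunningOn(ThreadId stored_id) {
//     return ThreadId::Current().Equals(stored_id);
//   }
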
class ThreadLocalTop BASE_EMBEDDED {
 public:
  // Does early low-level initialization that does not depend on the
  // isolate being present.
  ThreadLocalTop();

  // Initialize the thread data.
  void Initialize();

  // Get the top C++ try catch handler or NULL if none are registered.
  //
  // This method is not guaranteed to return an address that can be
  // used for comparison with addresses into the JS stack. If such an
  // address is needed, use try_catch_handler_address.
  v8::TryCatch* TryCatchHandler();

  // Get the address of the top C++ try catch handler or NULL if
  // none are registered.
  //
  // This method always returns an address that can be compared to
  // pointers into the JavaScript stack. When running on actual
  // hardware, try_catch_handler_address and TryCatchHandler return
  // the same pointer. When running on a simulator with a separate JS
  // stack, try_catch_handler_address returns a JS stack address that
  // corresponds to the place on the JS stack where the C++ handler
  // would have been if the stack were not separate.
  inline Address try_catch_handler_address() {
    return try_catch_handler_address_;
  }

  // Set the address of the top C++ try catch handler.
  inline void set_try_catch_handler_address(Address address) {
    try_catch_handler_address_ = address;
  }

  void Free() {
    ASSERT(!has_pending_message_);
    ASSERT(!external_caught_exception_);
    ASSERT(try_catch_handler_address_ == NULL);
  }

  Isolate* isolate_;
  // The context where the current execution method is created and for
  // variable lookups.
  Context* context_;
  ThreadId thread_id_;
  MaybeObject* pending_exception_;
  bool has_pending_message_;
  Object* pending_message_obj_;
  Script* pending_message_script_;
  int pending_message_start_pos_;
  int pending_message_end_pos_;
  // Use a separate value for scheduled exceptions to preserve the
  // invariants that hold about pending_exception. We may want to
  // unify them later.
  MaybeObject* scheduled_exception_;
  bool external_caught_exception_;
  SaveContext* save_context_;
  v8::TryCatch* catcher_;

  // Stack.
  Address c_entry_fp_;  // the frame pointer of the top c entry frame
  Address handler_;     // try-blocks are chained through the stack

#ifdef USE_SIMULATOR
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
  Simulator* simulator_;
#endif
#endif  // USE_SIMULATOR

  Address js_entry_sp_;  // the stack pointer of the bottom JS entry frame
  Address external_callback_;  // the external callback we're currently in
  StateTag current_vm_state_;

  // Generated code scratch locations.
  int32_t formal_count_;

  // Callback function to report unsafe JS accesses.
  v8::FailedAccessCheckCallback failed_access_check_callback_;

  // Head of the list of live LookupResults.
  LookupResult* top_lookup_result_;

  // Whether out of memory exceptions should be ignored.
  bool ignore_out_of_memory_;

 private:
  void InitializeInternal();

  Address try_catch_handler_address_;
};


286#ifdef ENABLE_DEBUGGER_SUPPORT
287
288#define ISOLATE_DEBUGGER_INIT_LIST(V) \
289 V(v8::Debug::EventCallback, debug_event_callback, NULL) \
290 V(DebuggerAgent*, debugger_agent_instance, NULL)
291#else
292
293#define ISOLATE_DEBUGGER_INIT_LIST(V)
294
295#endif
296
297#ifdef DEBUG
298
299#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
300 V(CommentStatistic, paged_space_comments_statistics, \
301 CommentStatistic::kMaxComments + 1)
302#else
303
304#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
305
306#endif
307
Steve Block44f0eee2011-05-26 01:26:41 +0100308#define ISOLATE_INIT_ARRAY_LIST(V) \
309 /* SerializerDeserializer state. */ \
310 V(Object*, serialize_partial_snapshot_cache, kPartialSnapshotCacheCapacity) \
311 V(int, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
312 V(int, bad_char_shift_table, kUC16AlphabetSize) \
313 V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
314 V(int, suffix_table, (kBMMaxShift + 1)) \
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000315 V(uint32_t, private_random_seed, 2) \
Steve Block44f0eee2011-05-26 01:26:41 +0100316 ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
317
318typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
319
#define ISOLATE_INIT_LIST(V)                                                   \
  /* SerializerDeserializer state. */                                          \
  V(int, serialize_partial_snapshot_cache_length, 0)                           \
  /* Assembler state. */                                                       \
  /* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */    \
  V(byte*, assembler_spare_buffer, NULL)                                       \
  V(FatalErrorCallback, exception_behavior, NULL)                              \
  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL)     \
  V(v8::Debug::MessageHandler, message_handler, NULL)                          \
  /* To distinguish the function templates, so that we can find them in the */ \
  /* function cache of the global context. */                                  \
  V(int, next_serial_number, 0)                                                \
  V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL)  \
  V(bool, always_allow_natives_syntax, false)                                  \
  /* Part of the state of liveedit. */                                         \
  V(FunctionInfoListener*, active_function_info_listener, NULL)                \
  /* State for Relocatable. */                                                 \
  V(Relocatable*, relocatable_top, NULL)                                       \
  /* State for CodeEntry in profile-generator. */                              \
  V(CodeGenerator*, current_code_generator, NULL)                              \
  V(bool, jump_target_compiling_deferred_code, false)                          \
  V(DebugObjectCache*, string_stream_debug_object_cache, NULL)                 \
  V(Object*, string_stream_current_security_token, NULL)                       \
  /* TODO(isolates): Release this on destruction? */                           \
  V(int*, irregexp_interpreter_backtrack_stack_cache, NULL)                    \
  /* Serializer state. */                                                      \
  V(ExternalReferenceTable*, external_reference_table, NULL)                   \
  /* AstNode state. */                                                         \
  V(int, ast_node_id, 0)                                                       \
  V(unsigned, ast_node_count, 0)                                               \
  /* SafeStackFrameIterator activations count. */                              \
  V(int, safe_stack_iterator_counter, 0)                                       \
  V(uint64_t, enabled_cpu_features, 0)                                         \
  V(CpuProfiler*, cpu_profiler, NULL)                                          \
  V(HeapProfiler*, heap_profiler, NULL)                                        \
  ISOLATE_DEBUGGER_INIT_LIST(V)

class Isolate {
  // These forward declarations are required to make the friend declarations in
  // PerIsolateThreadData work on some older versions of gcc.
  class ThreadDataTable;
  class EntryStackItem;
 public:
  ~Isolate();

  // A thread has a PerIsolateThreadData instance for each isolate that it has
  // entered. That instance is allocated when the isolate is initially entered
  // and reused on subsequent entries.
  class PerIsolateThreadData {
   public:
    PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
        : isolate_(isolate),
          thread_id_(thread_id),
          stack_limit_(0),
          thread_state_(NULL),
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
    !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
          simulator_(NULL),
#endif
          next_(NULL),
          prev_(NULL) { }
    Isolate* isolate() const { return isolate_; }
    ThreadId thread_id() const { return thread_id_; }
    void set_stack_limit(uintptr_t value) { stack_limit_ = value; }
    uintptr_t stack_limit() const { return stack_limit_; }
    ThreadState* thread_state() const { return thread_state_; }
    void set_thread_state(ThreadState* value) { thread_state_ = value; }

#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
    !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
    Simulator* simulator() const { return simulator_; }
    void set_simulator(Simulator* simulator) {
      simulator_ = simulator;
    }
#endif

    bool Matches(Isolate* isolate, ThreadId thread_id) const {
      return isolate_ == isolate && thread_id_.Equals(thread_id);
    }

   private:
    Isolate* isolate_;
    ThreadId thread_id_;
    uintptr_t stack_limit_;
    ThreadState* thread_state_;

#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
    !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
    Simulator* simulator_;
#endif

    PerIsolateThreadData* next_;
    PerIsolateThreadData* prev_;

    friend class Isolate;
    friend class ThreadDataTable;
    friend class EntryStackItem;

    DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
  };


  enum AddressId {
#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
    FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
#undef DECLARE_ENUM
    kIsolateAddressCount
  };

  // Returns the PerIsolateThreadData for the current thread (or NULL if one is
  // not currently set).
  static PerIsolateThreadData* CurrentPerIsolateThreadData() {
    return reinterpret_cast<PerIsolateThreadData*>(
        Thread::GetThreadLocal(per_isolate_thread_data_key_));
  }

  // Returns the isolate inside which the current thread is running.
  INLINE(static Isolate* Current()) {
    Isolate* isolate = reinterpret_cast<Isolate*>(
        Thread::GetExistingThreadLocal(isolate_key_));
    ASSERT(isolate != NULL);
    return isolate;
  }

  INLINE(static Isolate* UncheckedCurrent()) {
    return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
  }

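  // Illustrative usage sketch, not from the V8 sources: internal code
  // typically grabs the isolate of the running thread via Isolate::Current()
  // and then reaches individual subsystems through the accessors declared
  // further down, e.g.:
  //
  //   Isolate* isolate = Isolate::Current();
  //   Heap* heap = isolate->heap();
  //   Factory* factory = isolate->factory();
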
  // Usually called by Init(), but can be called early e.g. to allow
  // testing components that require logging but not the whole
  // isolate.
  //
  // Safe to call more than once.
  void InitializeLoggingAndCounters();

  bool Init(Deserializer* des);

  bool IsInitialized() { return state_ == INITIALIZED; }

  // True if at least one thread Enter'ed this isolate.
  bool IsInUse() { return entry_stack_ != NULL; }

  // Destroys the non-default isolates.
  // Sets the default isolate into the "has_been_disposed" state rather than
  // destroying it, for legacy API reasons.
  void TearDown();

  bool IsDefaultIsolate() const { return this == default_isolate_; }

  // Ensures that process-wide resources and the default isolate have been
  // allocated. It is only necessary to call this method in rare cases, for
  // example if you are using V8 from within the body of a static initializer.
  // Safe to call multiple times.
  static void EnsureDefaultIsolate();

  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, return null.
  PerIsolateThreadData* FindPerThreadDataForThisThread();

#ifdef ENABLE_DEBUGGER_SUPPORT
  // Get the debugger from the default isolate. Preinitializes the
  // default isolate if needed.
  static Debugger* GetDefaultIsolateDebugger();
#endif

  // Get the stack guard from the default isolate. Preinitializes the
  // default isolate if needed.
  static StackGuard* GetDefaultIsolateStackGuard();

  // Returns the key used to store the pointer to the current isolate.
  // Used internally for V8 threads that do not execute JavaScript but still
  // are part of the domain of an isolate (like the context switcher).
  static Thread::LocalStorageKey isolate_key() {
    return isolate_key_;
  }

  // Returns the key used to store process-wide thread IDs.
  static Thread::LocalStorageKey thread_id_key() {
    return thread_id_key_;
  }

  static Thread::LocalStorageKey per_isolate_thread_data_key();

  // If a client attempts to create a Locker without specifying an isolate,
  // we assume that the client is using legacy behavior. Set up the current
  // thread to be inside the implicit isolate (or fail a check if we have
  // switched to non-legacy behavior).
  static void EnterDefaultIsolate();

  // Mutex for serializing access to break control structures.
  Mutex* break_access() { return break_access_; }

  // Mutex for serializing access to debugger.
  Mutex* debugger_access() { return debugger_access_; }

  Address get_address_from_id(AddressId id);

  // Access to top context (where the current function object was created).
  Context* context() { return thread_local_top_.context_; }
  void set_context(Context* context) {
    ASSERT(context == NULL || context->IsContext());
    thread_local_top_.context_ = context;
  }
  Context** context_address() { return &thread_local_top_.context_; }

  SaveContext* save_context() { return thread_local_top_.save_context_; }
  void set_save_context(SaveContext* save) {
    thread_local_top_.save_context_ = save;
  }

  // Access to current thread id.
  ThreadId thread_id() { return thread_local_top_.thread_id_; }
  void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }

  // Interface to pending exception.
  MaybeObject* pending_exception() {
    ASSERT(has_pending_exception());
    return thread_local_top_.pending_exception_;
  }
  bool external_caught_exception() {
    return thread_local_top_.external_caught_exception_;
  }
  void set_external_caught_exception(bool value) {
    thread_local_top_.external_caught_exception_ = value;
  }
  void set_pending_exception(MaybeObject* exception) {
    thread_local_top_.pending_exception_ = exception;
  }
  void clear_pending_exception() {
    thread_local_top_.pending_exception_ = heap_.the_hole_value();
  }
  MaybeObject** pending_exception_address() {
    return &thread_local_top_.pending_exception_;
  }
  bool has_pending_exception() {
    return !thread_local_top_.pending_exception_->IsTheHole();
  }
  void clear_pending_message() {
    thread_local_top_.has_pending_message_ = false;
    thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
    thread_local_top_.pending_message_script_ = NULL;
  }
  v8::TryCatch* try_catch_handler() {
    return thread_local_top_.TryCatchHandler();
  }
  Address try_catch_handler_address() {
    return thread_local_top_.try_catch_handler_address();
  }
  bool* external_caught_exception_address() {
    return &thread_local_top_.external_caught_exception_;
  }
  v8::TryCatch* catcher() {
    return thread_local_top_.catcher_;
  }
  void set_catcher(v8::TryCatch* catcher) {
    thread_local_top_.catcher_ = catcher;
  }

  MaybeObject** scheduled_exception_address() {
    return &thread_local_top_.scheduled_exception_;
  }
  MaybeObject* scheduled_exception() {
    ASSERT(has_scheduled_exception());
    return thread_local_top_.scheduled_exception_;
  }
  bool has_scheduled_exception() {
    return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
  }
  void clear_scheduled_exception() {
    thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
  }

  bool IsExternallyCaught();

  bool is_catchable_by_javascript(MaybeObject* exception) {
    return (exception != Failure::OutOfMemoryException()) &&
        (exception != heap()->termination_exception());
  }

  // JS execution stack (see frames.h).
  static Address c_entry_fp(ThreadLocalTop* thread) {
    return thread->c_entry_fp_;
  }
  static Address handler(ThreadLocalTop* thread) { return thread->handler_; }

  inline Address* c_entry_fp_address() {
    return &thread_local_top_.c_entry_fp_;
  }
  inline Address* handler_address() { return &thread_local_top_.handler_; }

  // Bottom JS entry (see StackTracer::Trace in log.cc).
  static Address js_entry_sp(ThreadLocalTop* thread) {
    return thread->js_entry_sp_;
  }
  inline Address* js_entry_sp_address() {
    return &thread_local_top_.js_entry_sp_;
  }

  // Generated code scratch locations.
  void* formal_count_address() { return &thread_local_top_.formal_count_; }

  // Returns the global object of the current context. It could be
  // a builtin object, or a JS global object.
  Handle<GlobalObject> global() {
    return Handle<GlobalObject>(context()->global());
  }

  // Returns the global proxy object of the current context.
  Object* global_proxy() {
    return context()->global_proxy();
  }

  Handle<JSBuiltinsObject> js_builtins_object() {
    return Handle<JSBuiltinsObject>(thread_local_top_.context_->builtins());
  }

  static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
  void FreeThreadResources() { thread_local_top_.Free(); }

  // This method is called by the api after operations that may throw
  // exceptions. If an exception was thrown and not handled by an external
  // handler, the exception is scheduled to be rethrown when we return to
  // running JavaScript code. If an exception is scheduled, true is returned.
  bool OptionalRescheduleException(bool is_bottom_call);

  class ExceptionScope {
   public:
    explicit ExceptionScope(Isolate* isolate) :
      // Scope currently can only be used for regular exceptions, not
      // failures like OOM or termination exception.
      isolate_(isolate),
      pending_exception_(isolate_->pending_exception()->ToObjectUnchecked()),
      catcher_(isolate_->catcher())
    { }

    ~ExceptionScope() {
      isolate_->set_catcher(catcher_);
      isolate_->set_pending_exception(*pending_exception_);
    }

   private:
    Isolate* isolate_;
    Handle<Object> pending_exception_;
    v8::TryCatch* catcher_;
  };

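  // Illustrative usage sketch, not from the V8 sources: ExceptionScope
  // captures the pending exception and the active catcher on entry and
  // reinstates both when it goes out of scope, so the code in between can run
  // without clobbering them. DoUnrelatedWork() is a hypothetical placeholder.
  //
  //   {
  //     ExceptionScope scope(isolate);    // saves pending exception + catcher
  //     isolate->clear_pending_exception();
  //     DoUnrelatedWork(isolate);         // may throw and handle its own errors
  //   }                                   // destructor restores the saved state
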
  void SetCaptureStackTraceForUncaughtExceptions(
      bool capture,
      int frame_limit,
      StackTrace::StackTraceOptions options);

  // Tells whether the current context has experienced an out of memory
  // exception.
  bool is_out_of_memory();
  bool ignore_out_of_memory() {
    return thread_local_top_.ignore_out_of_memory_;
  }
  void set_ignore_out_of_memory(bool value) {
    thread_local_top_.ignore_out_of_memory_ = value;
  }

  void PrintCurrentStackTrace(FILE* out);
  void PrintStackTrace(FILE* out, char* thread_data);
  void PrintStack(StringStream* accumulator);
  void PrintStack();
  Handle<String> StackTraceString();
  Handle<JSArray> CaptureCurrentStackTrace(
      int frame_limit,
      StackTrace::StackTraceOptions options);

  void CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object);

  // Returns whether the top context may access the given global object. If
  // the result is false, the pending exception is guaranteed to be
  // set.
  bool MayNamedAccess(JSObject* receiver,
                      Object* key,
                      v8::AccessType type);
  bool MayIndexedAccess(JSObject* receiver,
                        uint32_t index,
                        v8::AccessType type);

  void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
  void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);

  // Exception throwing support. The caller should use the result
  // of Throw() as its return value.
  Failure* Throw(Object* exception, MessageLocation* location = NULL);
  // Re-throw an exception. This involves no error reporting since
  // error reporting was handled when the exception was thrown
  // originally.
  Failure* ReThrow(MaybeObject* exception, MessageLocation* location = NULL);
  void ScheduleThrow(Object* exception);
  void ReportPendingMessages();
  Failure* ThrowIllegalOperation();

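  // Illustrative usage sketch, not from the V8 sources: the Throw() contract
  // described above means a runtime function returns the Failure produced by
  // Throw() so the caller observes the pending exception.
  // MakeExampleError() is a hypothetical helper that builds the error object.
  //
  //   MaybeObject* ExampleThrowingFunction(Isolate* isolate) {
  //     Object* error = MakeExampleError(isolate);
  //     return isolate->Throw(error);
  //   }
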
  // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
  Failure* PromoteScheduledException();
  void DoThrow(Object* exception, MessageLocation* location);
  // Checks if exception should be reported and finds out if it's
  // caught externally.
  bool ShouldReportException(bool* can_be_caught_externally,
                             bool catchable_by_javascript);

  // Attempts to compute the current source location, storing the
  // result in the target out parameter.
  void ComputeLocation(MessageLocation* target);

  // Override command line flag.
  void TraceException(bool flag);

  // Out of resource exception helpers.
  Failure* StackOverflow();
  Failure* TerminateExecution();

  // Administration
  void Iterate(ObjectVisitor* v);
  void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
  char* Iterate(ObjectVisitor* v, char* t);
  void IterateThread(ThreadVisitor* v);
  void IterateThread(ThreadVisitor* v, char* t);


  // Returns the current global context.
  Handle<Context> global_context();

  // Returns the global context of the calling JavaScript code. That
  // is, the global context of the top-most JavaScript frame.
  Handle<Context> GetCallingGlobalContext();

  void RegisterTryCatchHandler(v8::TryCatch* that);
  void UnregisterTryCatchHandler(v8::TryCatch* that);

  char* ArchiveThread(char* to);
  char* RestoreThread(char* from);

  static const char* const kStackOverflowMessage;

  static const int kUC16AlphabetSize = 256;  // See StringSearchBase.
  static const int kBMMaxShift = 250;        // See StringSearchBase.

  // Accessors.
#define GLOBAL_ACCESSOR(type, name, initialvalue)                \
  inline type name() const {                                     \
    ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
    return name##_;                                              \
  }                                                              \
  inline void set_##name(type value) {                           \
    ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
    name##_ = value;                                             \
  }
  ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
#undef GLOBAL_ACCESSOR

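  // Illustrative note, stated as an expansion example rather than verbatim
  // generated code: for an ISOLATE_INIT_LIST entry such as
  // V(int, next_serial_number, 0), the GLOBAL_ACCESSOR macro above expands
  // (ignoring the debug-offset asserts) to roughly this pair of accessors,
  // backed by the next_serial_number_ field declared later via
  // GLOBAL_BACKING_STORE:
  //
  //   inline int next_serial_number() const { return next_serial_number_; }
  //   inline void set_next_serial_number(int value) {
  //     next_serial_number_ = value;
  //   }
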
#define GLOBAL_ARRAY_ACCESSOR(type, name, length)                \
  inline type* name() {                                          \
    ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
    return &(name##_)[0];                                        \
  }
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR

#define GLOBAL_CONTEXT_FIELD_ACCESSOR(index, type, name)      \
  Handle<type> name() {                                       \
    return Handle<type>(context()->global_context()->name()); \
  }
  GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSOR)
#undef GLOBAL_CONTEXT_FIELD_ACCESSOR

  Bootstrapper* bootstrapper() { return bootstrapper_; }
  Counters* counters() {
    // Call InitializeLoggingAndCounters() if logging is needed before
    // the isolate is fully initialized.
    ASSERT(counters_ != NULL);
    return counters_;
  }
  CodeRange* code_range() { return code_range_; }
  RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
  CompilationCache* compilation_cache() { return compilation_cache_; }
  Logger* logger() {
    // Call InitializeLoggingAndCounters() if logging is needed before
    // the isolate is fully initialized.
    ASSERT(logger_ != NULL);
    return logger_;
  }
  StackGuard* stack_guard() { return &stack_guard_; }
  Heap* heap() { return &heap_; }
  StatsTable* stats_table();
  StubCache* stub_cache() { return stub_cache_; }
  DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
  ThreadLocalTop* thread_local_top() { return &thread_local_top_; }

  TranscendentalCache* transcendental_cache() const {
    return transcendental_cache_;
  }

  MemoryAllocator* memory_allocator() {
    return memory_allocator_;
  }

  KeyedLookupCache* keyed_lookup_cache() {
    return keyed_lookup_cache_;
  }

  ContextSlotCache* context_slot_cache() {
    return context_slot_cache_;
  }

  DescriptorLookupCache* descriptor_lookup_cache() {
    return descriptor_lookup_cache_;
  }

  v8::ImplementationUtilities::HandleScopeData* handle_scope_data() {
    return &handle_scope_data_;
  }
  HandleScopeImplementer* handle_scope_implementer() {
    ASSERT(handle_scope_implementer_);
    return handle_scope_implementer_;
  }
  Zone* zone() { return &zone_; }

  UnicodeCache* unicode_cache() {
    return unicode_cache_;
  }

  InnerPointerToCodeCache* inner_pointer_to_code_cache() {
    return inner_pointer_to_code_cache_;
  }

  StringInputBuffer* write_input_buffer() { return write_input_buffer_; }

  GlobalHandles* global_handles() { return global_handles_; }

  ThreadManager* thread_manager() { return thread_manager_; }

  ContextSwitcher* context_switcher() { return context_switcher_; }

  void set_context_switcher(ContextSwitcher* switcher) {
    context_switcher_ = switcher;
  }

  StringTracker* string_tracker() { return string_tracker_; }

  unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
    return &jsregexp_uncanonicalize_;
  }

  unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
    return &jsregexp_canonrange_;
  }

  StringInputBuffer* objects_string_compare_buffer_a() {
    return &objects_string_compare_buffer_a_;
  }

  StringInputBuffer* objects_string_compare_buffer_b() {
    return &objects_string_compare_buffer_b_;
  }

  StaticResource<StringInputBuffer>* objects_string_input_buffer() {
    return &objects_string_input_buffer_;
  }

  RuntimeState* runtime_state() { return &runtime_state_; }

  void set_fp_stubs_generated(bool value) {
    fp_stubs_generated_ = value;
  }

  bool fp_stubs_generated() { return fp_stubs_generated_; }

  StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
    return &compiler_safe_string_input_buffer_;
  }

  Builtins* builtins() { return &builtins_; }

  void NotifyExtensionInstalled() {
    has_installed_extensions_ = true;
  }

  bool has_installed_extensions() { return has_installed_extensions_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      regexp_macro_assembler_canonicalize() {
    return &regexp_macro_assembler_canonicalize_;
  }

  RegExpStack* regexp_stack() { return regexp_stack_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      interp_canonicalize_mapping() {
    return &interp_canonicalize_mapping_;
  }

  void* PreallocatedStorageNew(size_t size);
  void PreallocatedStorageDelete(void* p);
  void PreallocatedStorageInit(size_t size);

#ifdef ENABLE_DEBUGGER_SUPPORT
  Debugger* debugger() {
    if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
    return debugger_;
  }
  Debug* debug() {
    if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
    return debug_;
  }
#endif

  inline bool IsDebuggerActive();
  inline bool DebuggerHasBreakPoints();

#ifdef DEBUG
  HistogramInfo* heap_histograms() { return heap_histograms_; }

  JSObject::SpillInformation* js_spill_information() {
    return &js_spill_information_;
  }

  int* code_kind_statistics() { return code_kind_statistics_; }
#endif

#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
    defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
  bool simulator_initialized() { return simulator_initialized_; }
  void set_simulator_initialized(bool initialized) {
    simulator_initialized_ = initialized;
  }

  HashMap* simulator_i_cache() { return simulator_i_cache_; }
  void set_simulator_i_cache(HashMap* hash_map) {
    simulator_i_cache_ = hash_map;
  }

  Redirection* simulator_redirection() {
    return simulator_redirection_;
  }
  void set_simulator_redirection(Redirection* redirection) {
    simulator_redirection_ = redirection;
  }
#endif

  Factory* factory() { return reinterpret_cast<Factory*>(this); }

  // SerializerDeserializer state.
  static const int kPartialSnapshotCacheCapacity = 1400;

  static const int kJSRegexpStaticOffsetsVectorSize = 50;

  Address external_callback() {
    return thread_local_top_.external_callback_;
  }
  void set_external_callback(Address callback) {
    thread_local_top_.external_callback_ = callback;
  }

  StateTag current_vm_state() {
    return thread_local_top_.current_vm_state_;
  }

  void SetCurrentVMState(StateTag state) {
    if (RuntimeProfiler::IsEnabled()) {
      // Make sure thread local top is initialized.
      ASSERT(thread_local_top_.isolate_ == this);
      StateTag current_state = thread_local_top_.current_vm_state_;
      if (current_state != JS && state == JS) {
        // Non-JS -> JS transition.
        RuntimeProfiler::IsolateEnteredJS(this);
      } else if (current_state == JS && state != JS) {
        // JS -> non-JS transition.
        ASSERT(RuntimeProfiler::IsSomeIsolateInJS());
        RuntimeProfiler::IsolateExitedJS(this);
      } else {
        // Other types of state transitions are not interesting to the
        // runtime profiler, because they don't affect whether we're
        // in JS or not.
        ASSERT((current_state == JS) == (state == JS));
      }
    }
    thread_local_top_.current_vm_state_ = state;
  }

  void SetData(void* data) { embedder_data_ = data; }
  void* GetData() { return embedder_data_; }

  LookupResult* top_lookup_result() {
    return thread_local_top_.top_lookup_result_;
  }
  void SetTopLookupResult(LookupResult* top) {
    thread_local_top_.top_lookup_result_ = top;
  }

  bool context_exit_happened() {
    return context_exit_happened_;
  }
  void set_context_exit_happened(bool context_exit_happened) {
    context_exit_happened_ = context_exit_happened;
  }

  double time_millis_since_init() {
    return OS::TimeCurrentMillis() - time_millis_at_init_;
  }

  DateCache* date_cache() {
    return date_cache_;
  }

  void set_date_cache(DateCache* date_cache) {
    if (date_cache != date_cache_) {
      delete date_cache_;
    }
    date_cache_ = date_cache;
  }

 private:
  Isolate();

  friend struct GlobalState;
  friend struct InitializeGlobalState;

  // The per-process lock should be acquired before the ThreadDataTable is
  // modified.
  class ThreadDataTable {
   public:
    ThreadDataTable();
    ~ThreadDataTable();

    PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
    void Insert(PerIsolateThreadData* data);
    void Remove(Isolate* isolate, ThreadId thread_id);
    void Remove(PerIsolateThreadData* data);
    void RemoveAllThreads(Isolate* isolate);

   private:
    PerIsolateThreadData* list_;
  };

  // These items form a stack synchronously with threads Enter'ing and Exit'ing
  // the Isolate. The top of the stack points to a thread which is currently
  // running the Isolate. When the stack is empty, the Isolate is considered
  // not entered by any thread and can be Disposed.
  // If the same thread enters the Isolate more than once, the entry_count_
  // is incremented rather than a new item pushed to the stack.
  class EntryStackItem {
   public:
    EntryStackItem(PerIsolateThreadData* previous_thread_data,
                   Isolate* previous_isolate,
                   EntryStackItem* previous_item)
        : entry_count(1),
          previous_thread_data(previous_thread_data),
          previous_isolate(previous_isolate),
          previous_item(previous_item) { }

    int entry_count;
    PerIsolateThreadData* previous_thread_data;
    Isolate* previous_isolate;
    EntryStackItem* previous_item;

   private:
    DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
  };

  // This mutex protects highest_thread_id_, thread_data_table_ and
  // default_isolate_.
  static Mutex* process_wide_mutex_;

  static Thread::LocalStorageKey per_isolate_thread_data_key_;
  static Thread::LocalStorageKey isolate_key_;
  static Thread::LocalStorageKey thread_id_key_;
  static Isolate* default_isolate_;
  static ThreadDataTable* thread_data_table_;

  void Deinit();

  static void SetIsolateThreadLocals(Isolate* isolate,
                                     PerIsolateThreadData* data);

  enum State {
    UNINITIALIZED,    // Some components may not have been allocated.
    INITIALIZED       // All components are fully initialized.
  };

  State state_;
  EntryStackItem* entry_stack_;

  // Allocate and insert PerIsolateThreadData into the ThreadDataTable
  // (regardless of whether such data already exists).
  PerIsolateThreadData* AllocatePerIsolateThreadData(ThreadId thread_id);

  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, allocate a new one.
  PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();

  // PreInits and returns a default isolate. Needed when a new thread tries
  // to create a Locker for the first time (the lock itself is in the isolate).
  static Isolate* GetDefaultIsolateForLocking();

  // Initializes the current thread to run this Isolate.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time; this should be prevented using external locking.
  void Enter();

  // Exits the current thread. The previously entered Isolate is restored
  // for the thread.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time; this should be prevented using external locking.
  void Exit();

  void PreallocatedMemoryThreadStart();
  void PreallocatedMemoryThreadStop();
  void InitializeThreadLocal();

  void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
  void MarkCompactPrologue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);
  void MarkCompactEpilogue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);

  void FillCache();

  void PropagatePendingExceptionToExternalTryCatch();

  void InitializeDebugger();

  // Traverse prototype chain to find out whether the object is derived from
  // the Error object.
  bool IsErrorObject(Handle<Object> obj);

  int stack_trace_nesting_level_;
  StringStream* incomplete_message_;
  // The preallocated memory thread singleton.
  PreallocatedMemoryThread* preallocated_memory_thread_;
  Address isolate_addresses_[kIsolateAddressCount + 1];  // NOLINT
  NoAllocationStringAllocator* preallocated_message_space_;

  Bootstrapper* bootstrapper_;
  RuntimeProfiler* runtime_profiler_;
  CompilationCache* compilation_cache_;
  Counters* counters_;
  CodeRange* code_range_;
  Mutex* break_access_;
  Atomic32 debugger_initialized_;
  Mutex* debugger_access_;
  Heap heap_;
  Logger* logger_;
  StackGuard stack_guard_;
  StatsTable* stats_table_;
  StubCache* stub_cache_;
  DeoptimizerData* deoptimizer_data_;
  ThreadLocalTop thread_local_top_;
  bool capture_stack_trace_for_uncaught_exceptions_;
  int stack_trace_for_uncaught_exceptions_frame_limit_;
  StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
  TranscendentalCache* transcendental_cache_;
  MemoryAllocator* memory_allocator_;
  KeyedLookupCache* keyed_lookup_cache_;
  ContextSlotCache* context_slot_cache_;
  DescriptorLookupCache* descriptor_lookup_cache_;
  v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
  HandleScopeImplementer* handle_scope_implementer_;
  UnicodeCache* unicode_cache_;
  Zone zone_;
  PreallocatedStorage in_use_list_;
  PreallocatedStorage free_list_;
  bool preallocated_storage_preallocated_;
  InnerPointerToCodeCache* inner_pointer_to_code_cache_;
  StringInputBuffer* write_input_buffer_;
  GlobalHandles* global_handles_;
  ContextSwitcher* context_switcher_;
  ThreadManager* thread_manager_;
  RuntimeState runtime_state_;
  bool fp_stubs_generated_;
  StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
  Builtins builtins_;
  bool has_installed_extensions_;
  StringTracker* string_tracker_;
  unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
  unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
  StringInputBuffer objects_string_compare_buffer_a_;
  StringInputBuffer objects_string_compare_buffer_b_;
  StaticResource<StringInputBuffer> objects_string_input_buffer_;
  unibrow::Mapping<unibrow::Ecma262Canonicalize>
      regexp_macro_assembler_canonicalize_;
  RegExpStack* regexp_stack_;

  DateCache* date_cache_;

  unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
  void* embedder_data_;

  // The garbage collector should be a little more aggressive when it knows
  // that a context was recently exited.
  bool context_exit_happened_;

  // Time stamp at initialization.
  double time_millis_at_init_;

#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
    defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
  bool simulator_initialized_;
  HashMap* simulator_i_cache_;
  Redirection* simulator_redirection_;
#endif

#ifdef DEBUG
  // A static array of histogram info for each type.
  HistogramInfo heap_histograms_[LAST_TYPE + 1];
  JSObject::SpillInformation js_spill_information_;
  int code_kind_statistics_[Code::NUMBER_OF_KINDS];
#endif

#ifdef ENABLE_DEBUGGER_SUPPORT
  Debugger* debugger_;
  Debug* debug_;
#endif

#define GLOBAL_BACKING_STORE(type, name, initialvalue) \
  type name##_;
  ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
#undef GLOBAL_BACKING_STORE

#define GLOBAL_ARRAY_BACKING_STORE(type, name, length) \
  type name##_[length];
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
#undef GLOBAL_ARRAY_BACKING_STORE

#ifdef DEBUG
  // This class is huge and has a number of fields controlled by
  // preprocessor defines. Make sure the offsets of these fields agree
  // between compilation units.
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
  static const intptr_t name##_debug_offset_;
  ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
  ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif

  friend class ExecutionAccess;
  friend class IsolateInitializer;
  friend class ThreadManager;
  friend class Simulator;
  friend class StackGuard;
  friend class ThreadId;
  friend class TestMemoryAllocatorScope;
  friend class v8::Isolate;
  friend class v8::Locker;
  friend class v8::Unlocker;

  DISALLOW_COPY_AND_ASSIGN(Isolate);
};


// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
// class as a work around for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
class SaveContext BASE_EMBEDDED {
 public:
  inline explicit SaveContext(Isolate* isolate);

  ~SaveContext() {
    if (context_.is_null()) {
      Isolate* isolate = Isolate::Current();
      isolate->set_context(NULL);
      isolate->set_save_context(prev_);
    } else {
      Isolate* isolate = context_->GetIsolate();
      isolate->set_context(*context_);
      isolate->set_save_context(prev_);
    }
  }

  Handle<Context> context() { return context_; }
  SaveContext* prev() { return prev_; }

  // Returns true if this save context is below a given JavaScript frame.
  bool IsBelowFrame(JavaScriptFrame* frame) {
    return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
  }

 private:
  Handle<Context> context_;
#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
  Handle<Context> dummy_;
#endif
  SaveContext* prev_;
  Address c_entry_fp_;
};


class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
 public:
  AssertNoContextChange() :
      scope_(Isolate::Current()),
      context_(Isolate::Current()->context(), Isolate::Current()) {
  }

  ~AssertNoContextChange() {
    ASSERT(Isolate::Current()->context() == *context_);
  }

 private:
  HandleScope scope_;
  Handle<Context> context_;
#else
 public:
  AssertNoContextChange() { }
#endif
};


class ExecutionAccess BASE_EMBEDDED {
 public:
  explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
    Lock(isolate);
  }
  ~ExecutionAccess() { Unlock(isolate_); }

  static void Lock(Isolate* isolate) { isolate->break_access_->Lock(); }
  static void Unlock(Isolate* isolate) { isolate->break_access_->Unlock(); }

  static bool TryLock(Isolate* isolate) {
    return isolate->break_access_->TryLock();
  }

 private:
  Isolate* isolate_;
};


// Support for checking for stack-overflows in C++ code.
class StackLimitCheck BASE_EMBEDDED {
 public:
  explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }

  bool HasOverflowed() const {
    StackGuard* stack_guard = isolate_->stack_guard();
    // Stack has overflowed in C++ code only if stack pointer exceeds the C++
    // stack guard and the limits are not set to interrupt values.
    // TODO(214): Stack overflows are ignored if an interrupt is pending. This
    // code should probably always use the initial C++ limit.
    return (reinterpret_cast<uintptr_t>(this) < stack_guard->climit()) &&
           stack_guard->IsStackOverflow();
  }
 private:
  Isolate* isolate_;
};


// Support for temporarily postponing interrupts. When the outermost
// postpone scope is left the interrupts will be re-enabled and any
// interrupts that occurred while in the scope will be taken into
// account.
class PostponeInterruptsScope BASE_EMBEDDED {
 public:
  explicit PostponeInterruptsScope(Isolate* isolate)
      : stack_guard_(isolate->stack_guard()) {
    stack_guard_->thread_local_.postpone_interrupts_nesting_++;
    stack_guard_->DisableInterrupts();
  }

  ~PostponeInterruptsScope() {
    if (--stack_guard_->thread_local_.postpone_interrupts_nesting_ == 0) {
      stack_guard_->EnableInterrupts();
    }
  }
 private:
  StackGuard* stack_guard_;
};


// Temporary macros for accessing current isolate and its subobjects.
// They provide better readability, especially when used a lot in the code.
#define HEAP (v8::internal::Isolate::Current()->heap())
#define FACTORY (v8::internal::Isolate::Current()->factory())
#define ISOLATE (v8::internal::Isolate::Current())
#define ZONE (v8::internal::Isolate::Current()->zone())
#define LOGGER (v8::internal::Isolate::Current()->logger())


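// Illustrative usage sketch, not from the V8 sources: the macros above are
// shorthand for going through Isolate::Current(), so a call site can read,
// for example (NewExampleString() and the string contents are hypothetical):
//
//   Handle<String> NewExampleString() {
//     return FACTORY->NewStringFromAscii(CStrVector("example"));
//   }
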
// Tells whether the global context is marked with out of memory.
inline bool Context::has_out_of_memory() {
  return global_context()->out_of_memory()->IsTrue();
}


// Mark the global context with out of memory.
inline void Context::mark_out_of_memory() {
  global_context()->set_out_of_memory(HEAP->true_value());
}


} }  // namespace v8::internal

#endif  // V8_ISOLATE_H_