// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_ISOLATE_H_
#define V8_ISOLATE_H_

#include "../include/v8-debug.h"
#include "allocation.h"
#include "apiutils.h"
#include "atomicops.h"
#include "builtins.h"
#include "contexts.h"
#include "execution.h"
#include "frames.h"
#include "date.h"
#include "global-handles.h"
#include "handles.h"
#include "hashmap.h"
#include "heap.h"
#include "regexp-stack.h"
#include "runtime-profiler.h"
#include "runtime.h"
#include "zone.h"

namespace v8 {
namespace internal {

class Bootstrapper;
class CodeGenerator;
class CodeRange;
class CompilationCache;
class ContextSlotCache;
class ContextSwitcher;
class Counters;
class CpuFeatures;
class CpuProfiler;
class DeoptimizerData;
class Deserializer;
class EmptyStatement;
class ExternalReferenceTable;
class Factory;
class FunctionInfoListener;
class HandleScopeImplementer;
class HeapProfiler;
class InlineRuntimeFunctionsTable;
class NoAllocationStringAllocator;
class InnerPointerToCodeCache;
class PreallocatedMemoryThread;
class RegExpStack;
class SaveContext;
class UnicodeCache;
class StringInputBuffer;
class StringTracker;
class StubCache;
class ThreadManager;
class ThreadState;
class ThreadVisitor;  // Defined in v8threads.h
class VMState;

// A 'void function pointer' type, used to round-trip
// ExternalReference::ExternalReferenceRedirector since we cannot include
// assembler.h, where it is defined, here.
typedef void* ExternalReferenceRedirectorPointer();


#ifdef ENABLE_DEBUGGER_SUPPORT
class Debug;
class Debugger;
class DebuggerAgent;
#endif

#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
    !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
class Redirection;
class Simulator;
#endif


// Static indirection table for handles to constants. If a frame element or
// a Result represents a constant, its data contains an index into this table
// of handles to the actual constants.
typedef ZoneList<Handle<Object> > ZoneObjectList;

#define RETURN_IF_SCHEDULED_EXCEPTION(isolate)            \
  do {                                                    \
    Isolate* __isolate__ = (isolate);                     \
    if (__isolate__->has_scheduled_exception()) {         \
      return __isolate__->PromoteScheduledException();    \
    }                                                     \
  } while (false)

#define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value)  \
  do {                                                      \
    if ((call).is_null()) {                                 \
      ASSERT((isolate)->has_pending_exception());           \
      return (value);                                       \
    }                                                       \
  } while (false)

#define CHECK_NOT_EMPTY_HANDLE(isolate, call)      \
  do {                                             \
    ASSERT(!(isolate)->has_pending_exception());   \
    CHECK(!(call).is_null());                      \
    CHECK(!(isolate)->has_pending_exception());    \
  } while (false)

#define RETURN_IF_EMPTY_HANDLE(isolate, call)  \
  RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception())

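// Illustrative (hypothetical) usage sketch, not taken from this file: inside
// runtime code that produces handles, the macros above are typically used to
// bail out as soon as an operation has failed and left a pending exception on
// the isolate. The function and helper names below are made up for
// illustration only.
//
//   MaybeObject* HypotheticalRuntimeHelper(Isolate* isolate,
//                                          Handle<JSObject> receiver) {
//     Handle<Object> result = SomeOperationReturningHandle(receiver);
//     // Returns Failure::Exception() if |result| is an empty handle.
//     RETURN_IF_EMPTY_HANDLE(isolate, result);
//     // Re-throws an exception scheduled by a callback, if any.
//     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
//     return *result;
//   }
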
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C)                \
  C(Handler, handler)                                   \
  C(CEntryFP, c_entry_fp)                               \
  C(Context, context)                                   \
  C(PendingException, pending_exception)                \
  C(ExternalCaughtException, external_caught_exception) \
  C(JSEntrySP, js_entry_sp)


// Platform-independent, reliable thread identifier.
class ThreadId {
 public:
  // Creates an invalid ThreadId.
  ThreadId() : id_(kInvalidId) {}

  // Returns ThreadId for current thread.
  static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }

  // Returns invalid ThreadId (guaranteed not to be equal to any thread).
  static ThreadId Invalid() { return ThreadId(kInvalidId); }

  // Compares ThreadIds for equality.
  INLINE(bool Equals(const ThreadId& other) const) {
    return id_ == other.id_;
  }

  // Checks whether this ThreadId refers to any thread.
  INLINE(bool IsValid() const) {
    return id_ != kInvalidId;
  }

  // Converts ThreadId to an integer representation
  // (required for public API: v8::V8::GetCurrentThreadId).
  int ToInteger() const { return id_; }

  // Creates a ThreadId from an integer representation
  // (required for public API: v8::V8::TerminateExecution).
  static ThreadId FromInteger(int id) { return ThreadId(id); }

 private:
  static const int kInvalidId = -1;

  explicit ThreadId(int id) : id_(id) {}

  static int AllocateThreadId();

  static int GetCurrentThreadId();

  int id_;

  static Atomic32 highest_thread_id_;

  friend class Isolate;
};

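// Illustrative (hypothetical) usage sketch: ThreadId values are compared with
// Equals() rather than ==, and the integer round-trip exists only for the
// public API. For example:
//
//   ThreadId current = ThreadId::Current();
//   ASSERT(current.IsValid());
//   int raw = current.ToInteger();              // handed out through the API
//   ThreadId restored = ThreadId::FromInteger(raw);
//   ASSERT(restored.Equals(current));
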

class ThreadLocalTop BASE_EMBEDDED {
 public:
  // Does early low-level initialization that does not depend on the
  // isolate being present.
  ThreadLocalTop();

  // Initialize the thread data.
  void Initialize();

  // Get the top C++ try catch handler or NULL if none are registered.
  //
  // This method is not guaranteed to return an address that can be
  // used for comparison with addresses into the JS stack. If such an
  // address is needed, use try_catch_handler_address.
  v8::TryCatch* TryCatchHandler();

  // Get the address of the top C++ try catch handler or NULL if
  // none are registered.
  //
  // This method always returns an address that can be compared to
  // pointers into the JavaScript stack. When running on actual
  // hardware, try_catch_handler_address and TryCatchHandler return
  // the same pointer. When running on a simulator with a separate JS
  // stack, try_catch_handler_address returns a JS stack address that
  // corresponds to the place on the JS stack where the C++ handler
  // would have been if the stack were not separate.
  inline Address try_catch_handler_address() {
    return try_catch_handler_address_;
  }

  // Set the address of the top C++ try catch handler.
  inline void set_try_catch_handler_address(Address address) {
    try_catch_handler_address_ = address;
  }

  void Free() {
    ASSERT(!has_pending_message_);
    ASSERT(!external_caught_exception_);
    ASSERT(try_catch_handler_address_ == NULL);
  }

  Isolate* isolate_;
  // The context where the current execution method is created and for variable
  // lookups.
  Context* context_;
  ThreadId thread_id_;
  MaybeObject* pending_exception_;
  bool has_pending_message_;
  Object* pending_message_obj_;
  Script* pending_message_script_;
  int pending_message_start_pos_;
  int pending_message_end_pos_;
  // Use a separate value for scheduled exceptions to preserve the
  // invariants that hold about pending_exception. We may want to
  // unify them later.
  MaybeObject* scheduled_exception_;
  bool external_caught_exception_;
  SaveContext* save_context_;
  v8::TryCatch* catcher_;

  // Stack.
  Address c_entry_fp_;  // the frame pointer of the top c entry frame
  Address handler_;     // try-blocks are chained through the stack

#ifdef USE_SIMULATOR
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
  Simulator* simulator_;
#endif
#endif  // USE_SIMULATOR

  Address js_entry_sp_;  // the stack pointer of the bottom JS entry frame
  Address external_callback_;  // the external callback we're currently in
  StateTag current_vm_state_;

  // Generated code scratch locations.
  int32_t formal_count_;

  // Callback function to report unsafe JS accesses.
  v8::FailedAccessCheckCallback failed_access_check_callback_;

  // Head of the list of live LookupResults.
  LookupResult* top_lookup_result_;

  // Whether out of memory exceptions should be ignored.
  bool ignore_out_of_memory_;

 private:
  void InitializeInternal();

  Address try_catch_handler_address_;
};


#ifdef ENABLE_DEBUGGER_SUPPORT

#define ISOLATE_DEBUGGER_INIT_LIST(V)                      \
  V(v8::Debug::EventCallback, debug_event_callback, NULL)  \
  V(DebuggerAgent*, debugger_agent_instance, NULL)
#else

#define ISOLATE_DEBUGGER_INIT_LIST(V)

#endif

#ifdef DEBUG

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)                 \
  V(CommentStatistic, paged_space_comments_statistics,   \
    CommentStatistic::kMaxComments + 1)
#else

#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

#endif

#define ISOLATE_INIT_ARRAY_LIST(V)                                            \
  /* SerializerDeserializer state. */                                         \
  V(Object*, serialize_partial_snapshot_cache, kPartialSnapshotCacheCapacity) \
  V(int, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize)    \
  V(int, bad_char_shift_table, kUC16AlphabetSize)                             \
  V(int, good_suffix_shift_table, (kBMMaxShift + 1))                          \
  V(int, suffix_table, (kBMMaxShift + 1))                                     \
  V(uint32_t, private_random_seed, 2)                                         \
  ISOLATE_INIT_DEBUG_ARRAY_LIST(V)

typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;

#define ISOLATE_INIT_LIST(V)                                                   \
  /* SerializerDeserializer state. */                                          \
  V(int, serialize_partial_snapshot_cache_length, 0)                           \
  /* Assembler state. */                                                       \
  /* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */    \
  V(byte*, assembler_spare_buffer, NULL)                                       \
  V(FatalErrorCallback, exception_behavior, NULL)                              \
  V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL)     \
  V(v8::Debug::MessageHandler, message_handler, NULL)                          \
  /* To distinguish the function templates, so that we can find them in the */ \
  /* function cache of the global context. */                                  \
  V(int, next_serial_number, 0)                                                \
  V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL)  \
  V(bool, always_allow_natives_syntax, false)                                  \
  /* Part of the state of liveedit. */                                         \
  V(FunctionInfoListener*, active_function_info_listener, NULL)                \
  /* State for Relocatable. */                                                 \
  V(Relocatable*, relocatable_top, NULL)                                       \
  /* State for CodeEntry in profile-generator. */                              \
  V(CodeGenerator*, current_code_generator, NULL)                              \
  V(bool, jump_target_compiling_deferred_code, false)                          \
  V(DebugObjectCache*, string_stream_debug_object_cache, NULL)                 \
  V(Object*, string_stream_current_security_token, NULL)                       \
  /* TODO(isolates): Release this on destruction? */                           \
  V(int*, irregexp_interpreter_backtrack_stack_cache, NULL)                    \
  /* Serializer state. */                                                      \
  V(ExternalReferenceTable*, external_reference_table, NULL)                   \
  /* AstNode state. */                                                         \
  V(int, ast_node_id, 0)                                                       \
  V(unsigned, ast_node_count, 0)                                               \
  /* SafeStackFrameIterator activations count. */                              \
  V(int, safe_stack_iterator_counter, 0)                                       \
  V(uint64_t, enabled_cpu_features, 0)                                         \
  V(CpuProfiler*, cpu_profiler, NULL)                                          \
  V(HeapProfiler*, heap_profiler, NULL)                                        \
  ISOLATE_DEBUGGER_INIT_LIST(V)

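// Illustrative note (not part of the original header text): each
// V(type, name, initial_value) entry in ISOLATE_INIT_LIST becomes a
// `type name_;` backing field plus name()/set_name() accessors on Isolate via
// the GLOBAL_ACCESSOR and GLOBAL_BACKING_STORE macros defined further down.
// For example, V(int, next_serial_number, 0) roughly expands to:
//
//   int next_serial_number_;                          // storage
//   inline int next_serial_number() const;            // getter
//   inline void set_next_serial_number(int value);    // setter
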
class Isolate {
  // These forward declarations are required to make the friend declarations in
  // PerIsolateThreadData work on some older versions of gcc.
  class ThreadDataTable;
  class EntryStackItem;
 public:
  ~Isolate();

  // A thread has a PerIsolateThreadData instance for each isolate that it has
  // entered. That instance is allocated when the isolate is initially entered
  // and reused on subsequent entries.
  class PerIsolateThreadData {
   public:
    PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
        : isolate_(isolate),
          thread_id_(thread_id),
          stack_limit_(0),
          thread_state_(NULL),
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
    !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
          simulator_(NULL),
#endif
          next_(NULL),
          prev_(NULL) { }
    Isolate* isolate() const { return isolate_; }
    ThreadId thread_id() const { return thread_id_; }
    void set_stack_limit(uintptr_t value) { stack_limit_ = value; }
    uintptr_t stack_limit() const { return stack_limit_; }
    ThreadState* thread_state() const { return thread_state_; }
    void set_thread_state(ThreadState* value) { thread_state_ = value; }

#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
    !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
    Simulator* simulator() const { return simulator_; }
    void set_simulator(Simulator* simulator) {
      simulator_ = simulator;
    }
#endif

    bool Matches(Isolate* isolate, ThreadId thread_id) const {
      return isolate_ == isolate && thread_id_.Equals(thread_id);
    }

   private:
    Isolate* isolate_;
    ThreadId thread_id_;
    uintptr_t stack_limit_;
    ThreadState* thread_state_;

#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
    !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
    Simulator* simulator_;
#endif

    PerIsolateThreadData* next_;
    PerIsolateThreadData* prev_;

    friend class Isolate;
    friend class ThreadDataTable;
    friend class EntryStackItem;

    DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
  };


  enum AddressId {
#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
    FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
#undef DECLARE_ENUM
    kIsolateAddressCount
  };

  // Returns the PerIsolateThreadData for the current thread (or NULL if one is
  // not currently set).
  static PerIsolateThreadData* CurrentPerIsolateThreadData() {
    return reinterpret_cast<PerIsolateThreadData*>(
        Thread::GetThreadLocal(per_isolate_thread_data_key()));
  }

  // Returns the isolate inside which the current thread is running.
  INLINE(static Isolate* Current()) {
    const Thread::LocalStorageKey key = isolate_key();
    Isolate* isolate = reinterpret_cast<Isolate*>(
        Thread::GetExistingThreadLocal(key));
    if (!isolate) {
      EnsureDefaultIsolate();
      isolate = reinterpret_cast<Isolate*>(
          Thread::GetExistingThreadLocal(key));
    }
    ASSERT(isolate != NULL);
    return isolate;
  }

  INLINE(static Isolate* UncheckedCurrent()) {
    return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key()));
  }

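  // Illustrative (hypothetical) usage sketch: code that is not handed an
  // Isolate* explicitly usually recovers the one bound to the running thread
  // via Current(), e.g.
  //
  //   Isolate* isolate = Isolate::Current();
  //   Heap* heap = isolate->heap();
  //   Factory* factory = isolate->factory();
  //
  // UncheckedCurrent() differs in that it returns NULL instead of lazily
  // creating and entering the default isolate.
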
  // Usually called by Init(), but can be called early e.g. to allow
  // testing components that require logging but not the whole
  // isolate.
  //
  // Safe to call more than once.
  void InitializeLoggingAndCounters();

  bool Init(Deserializer* des);

  bool IsInitialized() { return state_ == INITIALIZED; }

  // True if at least one thread Enter'ed this isolate.
  bool IsInUse() { return entry_stack_ != NULL; }

  // Destroys the non-default isolates.
  // Sets the default isolate into a "has_been_disposed" state rather than
  // destroying it, for legacy API reasons.
  void TearDown();

  bool IsDefaultIsolate() const;

  // Ensures that process-wide resources and the default isolate have been
  // allocated. It is only necessary to call this method in rare cases, for
  // example if you are using V8 from within the body of a static initializer.
  // Safe to call multiple times.
  static void EnsureDefaultIsolate();

  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, return NULL.
  PerIsolateThreadData* FindPerThreadDataForThisThread();

#ifdef ENABLE_DEBUGGER_SUPPORT
  // Get the debugger from the default isolate. Preinitializes the
  // default isolate if needed.
  static Debugger* GetDefaultIsolateDebugger();
#endif

  // Get the stack guard from the default isolate. Preinitializes the
  // default isolate if needed.
  static StackGuard* GetDefaultIsolateStackGuard();

  // Returns the key used to store the pointer to the current isolate.
  // Used internally for V8 threads that do not execute JavaScript but still
  // are part of the domain of an isolate (like the context switcher).
  static Thread::LocalStorageKey isolate_key();

  // Returns the key used to store process-wide thread IDs.
  static Thread::LocalStorageKey thread_id_key();

  static Thread::LocalStorageKey per_isolate_thread_data_key();

  // If a client attempts to create a Locker without specifying an isolate,
  // we assume that the client is using legacy behavior. Set up the current
  // thread to be inside the implicit isolate (or fail a check if we have
  // switched to non-legacy behavior).
  static void EnterDefaultIsolate();

  // Mutex for serializing access to break control structures.
  Mutex* break_access() { return break_access_; }

  // Mutex for serializing access to the debugger.
  Mutex* debugger_access() { return debugger_access_; }

  Address get_address_from_id(AddressId id);

  // Access to top context (where the current function object was created).
  Context* context() { return thread_local_top_.context_; }
  void set_context(Context* context) {
    ASSERT(context == NULL || context->IsContext());
    thread_local_top_.context_ = context;
  }
  Context** context_address() { return &thread_local_top_.context_; }

  SaveContext* save_context() { return thread_local_top_.save_context_; }
  void set_save_context(SaveContext* save) {
    thread_local_top_.save_context_ = save;
  }

  // Access to current thread id.
  ThreadId thread_id() { return thread_local_top_.thread_id_; }
  void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }

  // Interface to pending exception.
  MaybeObject* pending_exception() {
    ASSERT(has_pending_exception());
    return thread_local_top_.pending_exception_;
  }
  bool external_caught_exception() {
    return thread_local_top_.external_caught_exception_;
  }
  void set_external_caught_exception(bool value) {
    thread_local_top_.external_caught_exception_ = value;
  }
  void set_pending_exception(MaybeObject* exception) {
    thread_local_top_.pending_exception_ = exception;
  }
  void clear_pending_exception() {
    thread_local_top_.pending_exception_ = heap_.the_hole_value();
  }
  MaybeObject** pending_exception_address() {
    return &thread_local_top_.pending_exception_;
  }
  bool has_pending_exception() {
    return !thread_local_top_.pending_exception_->IsTheHole();
  }
  void clear_pending_message() {
    thread_local_top_.has_pending_message_ = false;
    thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
    thread_local_top_.pending_message_script_ = NULL;
  }
  v8::TryCatch* try_catch_handler() {
    return thread_local_top_.TryCatchHandler();
  }
  Address try_catch_handler_address() {
    return thread_local_top_.try_catch_handler_address();
  }
  bool* external_caught_exception_address() {
    return &thread_local_top_.external_caught_exception_;
  }
  v8::TryCatch* catcher() {
    return thread_local_top_.catcher_;
  }
  void set_catcher(v8::TryCatch* catcher) {
    thread_local_top_.catcher_ = catcher;
  }

  MaybeObject** scheduled_exception_address() {
    return &thread_local_top_.scheduled_exception_;
  }
  MaybeObject* scheduled_exception() {
    ASSERT(has_scheduled_exception());
    return thread_local_top_.scheduled_exception_;
  }
  bool has_scheduled_exception() {
    return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
  }
  void clear_scheduled_exception() {
    thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
  }

  bool IsExternallyCaught();

  bool is_catchable_by_javascript(MaybeObject* exception) {
    return (exception != Failure::OutOfMemoryException()) &&
        (exception != heap()->termination_exception());
  }

  // JS execution stack (see frames.h).
  static Address c_entry_fp(ThreadLocalTop* thread) {
    return thread->c_entry_fp_;
  }
  static Address handler(ThreadLocalTop* thread) { return thread->handler_; }

  inline Address* c_entry_fp_address() {
    return &thread_local_top_.c_entry_fp_;
  }
  inline Address* handler_address() { return &thread_local_top_.handler_; }

  // Bottom JS entry (see StackTracer::Trace in log.cc).
  static Address js_entry_sp(ThreadLocalTop* thread) {
    return thread->js_entry_sp_;
  }
  inline Address* js_entry_sp_address() {
    return &thread_local_top_.js_entry_sp_;
  }

  // Generated code scratch locations.
  void* formal_count_address() { return &thread_local_top_.formal_count_; }

  // Returns the global object of the current context. It could be
  // a builtin object, or a JS global object.
  Handle<GlobalObject> global() {
    return Handle<GlobalObject>(context()->global());
  }

  // Returns the global proxy object of the current context.
  Object* global_proxy() {
    return context()->global_proxy();
  }

  Handle<JSBuiltinsObject> js_builtins_object() {
    return Handle<JSBuiltinsObject>(thread_local_top_.context_->builtins());
  }

  static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
  void FreeThreadResources() { thread_local_top_.Free(); }

  // This method is called by the API after operations that may throw
  // exceptions. If an exception was thrown and not handled by an external
  // handler, the exception is scheduled to be rethrown when we return to
  // running JavaScript code. Returns true if an exception was scheduled.
  bool OptionalRescheduleException(bool is_bottom_call);

  class ExceptionScope {
   public:
    explicit ExceptionScope(Isolate* isolate) :
        // Scope currently can only be used for regular exceptions, not
        // failures like OOM or termination exception.
        isolate_(isolate),
        pending_exception_(isolate_->pending_exception()->ToObjectUnchecked()),
        catcher_(isolate_->catcher())
    { }

    ~ExceptionScope() {
      isolate_->set_catcher(catcher_);
      isolate_->set_pending_exception(*pending_exception_);
    }

   private:
    Isolate* isolate_;
    Handle<Object> pending_exception_;
    v8::TryCatch* catcher_;
  };

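  // Illustrative (hypothetical) usage sketch: ExceptionScope saves the
  // pending exception and catcher on entry and restores them on exit, so a
  // nested operation can run without clobbering the exception state, e.g.
  //
  //   {
  //     ExceptionScope scope(isolate);    // stashes the pending exception
  //     PerformSomeNestedOperation();     // hypothetical helper
  //   }                                   // original exception restored
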
  void SetCaptureStackTraceForUncaughtExceptions(
      bool capture,
      int frame_limit,
      StackTrace::StackTraceOptions options);

  // Tells whether the current context has experienced an out of memory
  // exception.
  bool is_out_of_memory();
  bool ignore_out_of_memory() {
    return thread_local_top_.ignore_out_of_memory_;
  }
  void set_ignore_out_of_memory(bool value) {
    thread_local_top_.ignore_out_of_memory_ = value;
  }

  void PrintCurrentStackTrace(FILE* out);
  void PrintStackTrace(FILE* out, char* thread_data);
  void PrintStack(StringStream* accumulator);
  void PrintStack();
  Handle<String> StackTraceString();
  Handle<JSArray> CaptureCurrentStackTrace(
      int frame_limit,
      StackTrace::StackTraceOptions options);

  void CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object);

  // Returns whether the top context may access the given global object. If
  // the result is false, the pending exception is guaranteed to be set.
  bool MayNamedAccess(JSObject* receiver,
                      Object* key,
                      v8::AccessType type);
  bool MayIndexedAccess(JSObject* receiver,
                        uint32_t index,
                        v8::AccessType type);

  void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
  void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);

  // Exception throwing support. The caller should use the result
  // of Throw() as its return value.
  Failure* Throw(Object* exception, MessageLocation* location = NULL);
  // Re-throw an exception. This involves no error reporting since
  // error reporting was handled when the exception was thrown
  // originally.
  Failure* ReThrow(MaybeObject* exception, MessageLocation* location = NULL);
  void ScheduleThrow(Object* exception);
  void ReportPendingMessages();
  Failure* ThrowIllegalOperation();

  // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
  Failure* PromoteScheduledException();
  void DoThrow(Object* exception, MessageLocation* location);
  // Checks if the exception should be reported and finds out if it is
  // caught externally.
  bool ShouldReportException(bool* can_be_caught_externally,
                             bool catchable_by_javascript);

  // Attempts to compute the current source location, storing the
  // result in the target out parameter.
  void ComputeLocation(MessageLocation* target);

  // Override command line flag.
  void TraceException(bool flag);

  // Out of resource exception helpers.
  Failure* StackOverflow();
  Failure* TerminateExecution();

  // Administration.
  void Iterate(ObjectVisitor* v);
  void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
  char* Iterate(ObjectVisitor* v, char* t);
  void IterateThread(ThreadVisitor* v);
  void IterateThread(ThreadVisitor* v, char* t);


  // Returns the current global context.
  Handle<Context> global_context();

  // Returns the global context of the calling JavaScript code. That
  // is, the global context of the top-most JavaScript frame.
  Handle<Context> GetCallingGlobalContext();

  void RegisterTryCatchHandler(v8::TryCatch* that);
  void UnregisterTryCatchHandler(v8::TryCatch* that);

  char* ArchiveThread(char* to);
  char* RestoreThread(char* from);

  static const char* const kStackOverflowMessage;

  static const int kUC16AlphabetSize = 256;  // See StringSearchBase.
  static const int kBMMaxShift = 250;        // See StringSearchBase.

  // Accessors.
#define GLOBAL_ACCESSOR(type, name, initialvalue)                  \
  inline type name() const {                                       \
    ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);   \
    return name##_;                                                \
  }                                                                \
  inline void set_##name(type value) {                             \
    ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);   \
    name##_ = value;                                               \
  }
  ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
#undef GLOBAL_ACCESSOR

#define GLOBAL_ARRAY_ACCESSOR(type, name, length)                  \
  inline type* name() {                                            \
    ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);   \
    return &(name##_)[0];                                          \
  }
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR

#define GLOBAL_CONTEXT_FIELD_ACCESSOR(index, type, name)      \
  Handle<type> name() {                                       \
    return Handle<type>(context()->global_context()->name()); \
  }
  GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSOR)
#undef GLOBAL_CONTEXT_FIELD_ACCESSOR

  Bootstrapper* bootstrapper() { return bootstrapper_; }
  Counters* counters() {
    // Call InitializeLoggingAndCounters() if logging is needed before
    // the isolate is fully initialized.
    ASSERT(counters_ != NULL);
    return counters_;
  }
  CodeRange* code_range() { return code_range_; }
  RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
  CompilationCache* compilation_cache() { return compilation_cache_; }
  Logger* logger() {
    // Call InitializeLoggingAndCounters() if logging is needed before
    // the isolate is fully initialized.
    ASSERT(logger_ != NULL);
    return logger_;
  }
  StackGuard* stack_guard() { return &stack_guard_; }
  Heap* heap() { return &heap_; }
  StatsTable* stats_table();
  StubCache* stub_cache() { return stub_cache_; }
  DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
  ThreadLocalTop* thread_local_top() { return &thread_local_top_; }

  TranscendentalCache* transcendental_cache() const {
    return transcendental_cache_;
  }

  MemoryAllocator* memory_allocator() {
    return memory_allocator_;
  }

  KeyedLookupCache* keyed_lookup_cache() {
    return keyed_lookup_cache_;
  }

  ContextSlotCache* context_slot_cache() {
    return context_slot_cache_;
  }

  DescriptorLookupCache* descriptor_lookup_cache() {
    return descriptor_lookup_cache_;
  }

  v8::ImplementationUtilities::HandleScopeData* handle_scope_data() {
    return &handle_scope_data_;
  }
  HandleScopeImplementer* handle_scope_implementer() {
    ASSERT(handle_scope_implementer_);
    return handle_scope_implementer_;
  }
  Zone* zone() { return &zone_; }

  UnicodeCache* unicode_cache() {
    return unicode_cache_;
  }

  InnerPointerToCodeCache* inner_pointer_to_code_cache() {
    return inner_pointer_to_code_cache_;
  }

  StringInputBuffer* write_input_buffer() { return write_input_buffer_; }

  GlobalHandles* global_handles() { return global_handles_; }

  ThreadManager* thread_manager() { return thread_manager_; }

  ContextSwitcher* context_switcher() { return context_switcher_; }

  void set_context_switcher(ContextSwitcher* switcher) {
    context_switcher_ = switcher;
  }

  StringTracker* string_tracker() { return string_tracker_; }

  unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
    return &jsregexp_uncanonicalize_;
  }

  unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
    return &jsregexp_canonrange_;
  }

  StringInputBuffer* objects_string_compare_buffer_a() {
    return &objects_string_compare_buffer_a_;
  }

  StringInputBuffer* objects_string_compare_buffer_b() {
    return &objects_string_compare_buffer_b_;
  }

  StaticResource<StringInputBuffer>* objects_string_input_buffer() {
    return &objects_string_input_buffer_;
  }

  RuntimeState* runtime_state() { return &runtime_state_; }

  void set_fp_stubs_generated(bool value) {
    fp_stubs_generated_ = value;
  }

  bool fp_stubs_generated() { return fp_stubs_generated_; }

  StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
    return &compiler_safe_string_input_buffer_;
  }

  Builtins* builtins() { return &builtins_; }

  void NotifyExtensionInstalled() {
    has_installed_extensions_ = true;
  }

  bool has_installed_extensions() { return has_installed_extensions_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      regexp_macro_assembler_canonicalize() {
    return &regexp_macro_assembler_canonicalize_;
  }

  RegExpStack* regexp_stack() { return regexp_stack_; }

  unibrow::Mapping<unibrow::Ecma262Canonicalize>*
      interp_canonicalize_mapping() {
    return &interp_canonicalize_mapping_;
  }

  void* PreallocatedStorageNew(size_t size);
  void PreallocatedStorageDelete(void* p);
  void PreallocatedStorageInit(size_t size);

#ifdef ENABLE_DEBUGGER_SUPPORT
  Debugger* debugger() {
    if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
    return debugger_;
  }
  Debug* debug() {
    if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
    return debug_;
  }
#endif

  inline bool IsDebuggerActive();
  inline bool DebuggerHasBreakPoints();

#ifdef DEBUG
  HistogramInfo* heap_histograms() { return heap_histograms_; }

  JSObject::SpillInformation* js_spill_information() {
    return &js_spill_information_;
  }

  int* code_kind_statistics() { return code_kind_statistics_; }
#endif

#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
    defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
  bool simulator_initialized() { return simulator_initialized_; }
  void set_simulator_initialized(bool initialized) {
    simulator_initialized_ = initialized;
  }

  HashMap* simulator_i_cache() { return simulator_i_cache_; }
  void set_simulator_i_cache(HashMap* hash_map) {
    simulator_i_cache_ = hash_map;
  }

  Redirection* simulator_redirection() {
    return simulator_redirection_;
  }
  void set_simulator_redirection(Redirection* redirection) {
    simulator_redirection_ = redirection;
  }
#endif

  Factory* factory() { return reinterpret_cast<Factory*>(this); }

  // SerializerDeserializer state.
  static const int kPartialSnapshotCacheCapacity = 1400;

  static const int kJSRegexpStaticOffsetsVectorSize = 50;

  Address external_callback() {
    return thread_local_top_.external_callback_;
  }
  void set_external_callback(Address callback) {
    thread_local_top_.external_callback_ = callback;
  }

  StateTag current_vm_state() {
    return thread_local_top_.current_vm_state_;
  }

  void SetCurrentVMState(StateTag state) {
    if (RuntimeProfiler::IsEnabled()) {
      // Make sure thread local top is initialized.
      ASSERT(thread_local_top_.isolate_ == this);
      StateTag current_state = thread_local_top_.current_vm_state_;
      if (current_state != JS && state == JS) {
        // Non-JS -> JS transition.
        RuntimeProfiler::IsolateEnteredJS(this);
      } else if (current_state == JS && state != JS) {
        // JS -> non-JS transition.
        ASSERT(RuntimeProfiler::IsSomeIsolateInJS());
        RuntimeProfiler::IsolateExitedJS(this);
      } else {
        // Other types of state transitions are not interesting to the
        // runtime profiler, because they don't affect whether we're
        // in JS or not.
        ASSERT((current_state == JS) == (state == JS));
      }
    }
    thread_local_top_.current_vm_state_ = state;
  }

  void SetData(void* data) { embedder_data_ = data; }
  void* GetData() { return embedder_data_; }

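  // Illustrative (hypothetical) usage sketch: SetData()/GetData() give the
  // embedder one opaque per-isolate slot, e.g.
  //
  //   struct EmbedderState { /* embedder-defined bookkeeping */ };
  //   isolate->SetData(new EmbedderState());
  //   ...
  //   EmbedderState* state =
  //       static_cast<EmbedderState*>(isolate->GetData());
  //
  // The pointer is treated as opaque; its lifetime is the embedder's
  // responsibility.
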
  LookupResult* top_lookup_result() {
    return thread_local_top_.top_lookup_result_;
  }
  void SetTopLookupResult(LookupResult* top) {
    thread_local_top_.top_lookup_result_ = top;
  }

  bool context_exit_happened() {
    return context_exit_happened_;
  }
  void set_context_exit_happened(bool context_exit_happened) {
    context_exit_happened_ = context_exit_happened;
  }

  double time_millis_since_init() {
    return OS::TimeCurrentMillis() - time_millis_at_init_;
  }

  DateCache* date_cache() {
    return date_cache_;
  }

  void set_date_cache(DateCache* date_cache) {
    if (date_cache != date_cache_) {
      delete date_cache_;
    }
    date_cache_ = date_cache;
  }

 private:
  Isolate();

  friend struct GlobalState;
  friend struct InitializeGlobalState;

  // The per-process lock should be acquired before the ThreadDataTable is
  // modified.
  class ThreadDataTable {
   public:
    ThreadDataTable();
    ~ThreadDataTable();

    PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
    void Insert(PerIsolateThreadData* data);
    void Remove(Isolate* isolate, ThreadId thread_id);
    void Remove(PerIsolateThreadData* data);
    void RemoveAllThreads(Isolate* isolate);

   private:
    PerIsolateThreadData* list_;
  };

  // These items form a stack that grows and shrinks as threads Enter and Exit
  // the Isolate. The top of the stack points to a thread which is currently
  // running the Isolate. When the stack is empty, the Isolate is considered
  // not entered by any thread and can be Disposed.
  // If the same thread enters the Isolate more than once, the entry_count_
  // is incremented rather than a new item being pushed onto the stack.
  class EntryStackItem {
   public:
    EntryStackItem(PerIsolateThreadData* previous_thread_data,
                   Isolate* previous_isolate,
                   EntryStackItem* previous_item)
        : entry_count(1),
          previous_thread_data(previous_thread_data),
          previous_isolate(previous_isolate),
          previous_item(previous_item) { }

    int entry_count;
    PerIsolateThreadData* previous_thread_data;
    Isolate* previous_isolate;
    EntryStackItem* previous_item;

   private:
    DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
  };

  void Deinit();

  static void SetIsolateThreadLocals(Isolate* isolate,
                                     PerIsolateThreadData* data);

  enum State {
    UNINITIALIZED,    // Some components may not have been allocated.
    INITIALIZED       // All components are fully initialized.
  };

  State state_;
  EntryStackItem* entry_stack_;

  // Allocate and insert PerIsolateThreadData into the ThreadDataTable
  // (regardless of whether such data already exists).
  PerIsolateThreadData* AllocatePerIsolateThreadData(ThreadId thread_id);

  // Find the PerThread for this particular (isolate, thread) combination.
  // If one does not yet exist, allocate a new one.
  PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();

  // Pre-initializes and returns the default isolate. Needed when a new thread
  // tries to create a Locker for the first time (the lock itself is in the
  // isolate).
  static Isolate* GetDefaultIsolateForLocking();

  // Initializes the current thread to run this Isolate.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time; this should be prevented using external locking.
  void Enter();

  // Exits the current thread. The previously entered Isolate is restored
  // for the thread.
  // Not thread-safe. Multiple threads should not Enter/Exit the same isolate
  // at the same time; this should be prevented using external locking.
  void Exit();

  void PreallocatedMemoryThreadStart();
  void PreallocatedMemoryThreadStop();
  void InitializeThreadLocal();

  void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
  void MarkCompactPrologue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);
  void MarkCompactEpilogue(bool is_compacting,
                           ThreadLocalTop* archived_thread_data);

  void FillCache();

  void PropagatePendingExceptionToExternalTryCatch();

  void InitializeDebugger();

  // Traverses the prototype chain to find out whether the object is derived
  // from the Error object.
  bool IsErrorObject(Handle<Object> obj);

  int stack_trace_nesting_level_;
  StringStream* incomplete_message_;
  // The preallocated memory thread singleton.
  PreallocatedMemoryThread* preallocated_memory_thread_;
  Address isolate_addresses_[kIsolateAddressCount + 1];  // NOLINT
  NoAllocationStringAllocator* preallocated_message_space_;

  Bootstrapper* bootstrapper_;
  RuntimeProfiler* runtime_profiler_;
  CompilationCache* compilation_cache_;
  Counters* counters_;
  CodeRange* code_range_;
  Mutex* break_access_;
  Atomic32 debugger_initialized_;
  Mutex* debugger_access_;
  Heap heap_;
  Logger* logger_;
  StackGuard stack_guard_;
  StatsTable* stats_table_;
  StubCache* stub_cache_;
  DeoptimizerData* deoptimizer_data_;
  ThreadLocalTop thread_local_top_;
  bool capture_stack_trace_for_uncaught_exceptions_;
  int stack_trace_for_uncaught_exceptions_frame_limit_;
  StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
  TranscendentalCache* transcendental_cache_;
  MemoryAllocator* memory_allocator_;
  KeyedLookupCache* keyed_lookup_cache_;
  ContextSlotCache* context_slot_cache_;
  DescriptorLookupCache* descriptor_lookup_cache_;
  v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
  HandleScopeImplementer* handle_scope_implementer_;
  UnicodeCache* unicode_cache_;
  Zone zone_;
  PreallocatedStorage in_use_list_;
  PreallocatedStorage free_list_;
  bool preallocated_storage_preallocated_;
  InnerPointerToCodeCache* inner_pointer_to_code_cache_;
  StringInputBuffer* write_input_buffer_;
  GlobalHandles* global_handles_;
  ContextSwitcher* context_switcher_;
  ThreadManager* thread_manager_;
  RuntimeState runtime_state_;
  bool fp_stubs_generated_;
  StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
  Builtins builtins_;
  bool has_installed_extensions_;
  StringTracker* string_tracker_;
  unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
  unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
  StringInputBuffer objects_string_compare_buffer_a_;
  StringInputBuffer objects_string_compare_buffer_b_;
  StaticResource<StringInputBuffer> objects_string_input_buffer_;
  unibrow::Mapping<unibrow::Ecma262Canonicalize>
      regexp_macro_assembler_canonicalize_;
  RegExpStack* regexp_stack_;

  DateCache* date_cache_;

  unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
  void* embedder_data_;

  // The garbage collector should be a little more aggressive when it knows
  // that a context was recently exited.
  bool context_exit_happened_;

  // Time stamp at initialization.
  double time_millis_at_init_;

#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
    defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
  bool simulator_initialized_;
  HashMap* simulator_i_cache_;
  Redirection* simulator_redirection_;
#endif

#ifdef DEBUG
  // A static array of histogram info for each type.
  HistogramInfo heap_histograms_[LAST_TYPE + 1];
  JSObject::SpillInformation js_spill_information_;
  int code_kind_statistics_[Code::NUMBER_OF_KINDS];
#endif

#ifdef ENABLE_DEBUGGER_SUPPORT
  Debugger* debugger_;
  Debug* debug_;
#endif

#define GLOBAL_BACKING_STORE(type, name, initialvalue)  \
  type name##_;
  ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
#undef GLOBAL_BACKING_STORE

#define GLOBAL_ARRAY_BACKING_STORE(type, name, length)  \
  type name##_[length];
  ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
#undef GLOBAL_ARRAY_BACKING_STORE

#ifdef DEBUG
  // This class is huge and has a number of fields controlled by
  // preprocessor defines. Make sure the offsets of these fields agree
  // between compilation units.
#define ISOLATE_FIELD_OFFSET(type, name, ignored)  \
  static const intptr_t name##_debug_offset_;
  ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
  ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif

  friend class ExecutionAccess;
  friend class IsolateInitializer;
  friend class ThreadManager;
  friend class Simulator;
  friend class StackGuard;
  friend class ThreadId;
  friend class TestMemoryAllocatorScope;
  friend class v8::Isolate;
  friend class v8::Locker;
  friend class v8::Unlocker;

  DISALLOW_COPY_AND_ASSIGN(Isolate);
};


// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
// class as a workaround for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
class SaveContext BASE_EMBEDDED {
 public:
  inline explicit SaveContext(Isolate* isolate);

  ~SaveContext() {
    if (context_.is_null()) {
      Isolate* isolate = Isolate::Current();
      isolate->set_context(NULL);
      isolate->set_save_context(prev_);
    } else {
      Isolate* isolate = context_->GetIsolate();
      isolate->set_context(*context_);
      isolate->set_save_context(prev_);
    }
  }

  Handle<Context> context() { return context_; }
  SaveContext* prev() { return prev_; }

  // Returns true if this save context is below a given JavaScript frame.
  bool IsBelowFrame(JavaScriptFrame* frame) {
    return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
  }

 private:
  Handle<Context> context_;
#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
  Handle<Context> dummy_;
#endif
  SaveContext* prev_;
  Address c_entry_fp_;
};

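// Illustrative (hypothetical) usage sketch: SaveContext is a stack-allocated
// guard that remembers the current context and restores it when the scope
// ends, e.g.
//
//   {
//     SaveContext save(isolate);              // remembers isolate->context()
//     isolate->set_context(*other_context);   // hypothetical other context
//     RunSomethingInOtherContext();           // hypothetical helper
//   }                                         // previous context restored
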

class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
 public:
  AssertNoContextChange() :
      scope_(Isolate::Current()),
      context_(Isolate::Current()->context(), Isolate::Current()) {
  }

  ~AssertNoContextChange() {
    ASSERT(Isolate::Current()->context() == *context_);
  }

 private:
  HandleScope scope_;
  Handle<Context> context_;
#else
 public:
  AssertNoContextChange() { }
#endif
};


class ExecutionAccess BASE_EMBEDDED {
 public:
  explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
    Lock(isolate);
  }
  ~ExecutionAccess() { Unlock(isolate_); }

  static void Lock(Isolate* isolate) { isolate->break_access_->Lock(); }
  static void Unlock(Isolate* isolate) { isolate->break_access_->Unlock(); }

  static bool TryLock(Isolate* isolate) {
    return isolate->break_access_->TryLock();
  }

 private:
  Isolate* isolate_;
};


// Support for checking for stack-overflows in C++ code.
class StackLimitCheck BASE_EMBEDDED {
 public:
  explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }

  bool HasOverflowed() const {
    StackGuard* stack_guard = isolate_->stack_guard();
    // The stack has overflowed in C++ code only if the stack pointer exceeds
    // the C++ stack guard and the limits are not set to interrupt values.
    // TODO(214): Stack overflows are ignored if an interrupt is pending. This
    // code should probably always use the initial C++ limit.
    return (reinterpret_cast<uintptr_t>(this) < stack_guard->climit()) &&
           stack_guard->IsStackOverflow();
  }

 private:
  Isolate* isolate_;
};

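// Illustrative (hypothetical) usage sketch: recursive C++ code (e.g. a
// recursive-descent parser) typically guards each level of recursion roughly
// like this:
//
//   StackLimitCheck check(isolate);
//   if (check.HasOverflowed()) return isolate->StackOverflow();
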

// Support for temporarily postponing interrupts. When the outermost
// postpone scope is left the interrupts will be re-enabled and any
// interrupts that occurred while in the scope will be taken into
// account.
class PostponeInterruptsScope BASE_EMBEDDED {
 public:
  explicit PostponeInterruptsScope(Isolate* isolate)
      : stack_guard_(isolate->stack_guard()) {
    stack_guard_->thread_local_.postpone_interrupts_nesting_++;
    stack_guard_->DisableInterrupts();
  }

  ~PostponeInterruptsScope() {
    if (--stack_guard_->thread_local_.postpone_interrupts_nesting_ == 0) {
      stack_guard_->EnableInterrupts();
    }
  }

 private:
  StackGuard* stack_guard_;
};

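// Illustrative (hypothetical) usage sketch: wrap a critical region so that
// stack-guard interrupts are deferred until the outermost scope exits, e.g.
//
//   {
//     PostponeInterruptsScope postpone(isolate);
//     PerformWorkThatMustNotBeInterrupted();    // hypothetical helper
//   }                                           // pending interrupts honored
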

// Temporary macros for accessing the current isolate and its subobjects.
// They provide better readability, especially when used heavily in the code.
#define HEAP (v8::internal::Isolate::Current()->heap())
#define FACTORY (v8::internal::Isolate::Current()->factory())
#define ISOLATE (v8::internal::Isolate::Current())
#define ZONE (v8::internal::Isolate::Current()->zone())
#define LOGGER (v8::internal::Isolate::Current()->logger())

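// Illustrative note (not part of the original header text): these macros
// re-fetch the thread's current isolate on every use, so
//
//   Heap* heap = HEAP;
//
// expands to
//
//   Heap* heap = v8::internal::Isolate::Current()->heap();
//
// In isolate-aware code it is often preferable to pass an Isolate* around and
// call isolate->heap() and friends directly instead of going through these
// globals.
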

// Tells whether the global context is marked with out of memory.
inline bool Context::has_out_of_memory() {
  return global_context()->out_of_memory()->IsTrue();
}


// Mark the global context with out of memory.
inline void Context::mark_out_of_memory() {
  global_context()->set_out_of_memory(HEAP->true_value());
}


} }  // namespace v8::internal

#endif  // V8_ISOLATE_H_