Upgrade to 3.29
Update V8 to 3.29.88.17 and update makefiles to support building on
all the relevant platforms.
Bug: 17370214
Change-Id: Ia3407c157fd8d72a93e23d8318ccaf6ecf77fa4e
diff --git a/src/isolate.h b/src/isolate.h
index 2ff1318..24d4b08 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -1,85 +1,74 @@
// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#ifndef V8_ISOLATE_H_
#define V8_ISOLATE_H_
-#include "../include/v8-debug.h"
-#include "allocation.h"
-#include "apiutils.h"
-#include "atomicops.h"
-#include "builtins.h"
-#include "contexts.h"
-#include "execution.h"
-#include "frames.h"
-#include "date.h"
-#include "global-handles.h"
-#include "handles.h"
-#include "hashmap.h"
-#include "heap.h"
-#include "regexp-stack.h"
-#include "runtime-profiler.h"
-#include "runtime.h"
-#include "zone.h"
+#include "include/v8-debug.h"
+#include "src/allocation.h"
+#include "src/assert-scope.h"
+#include "src/base/atomicops.h"
+#include "src/builtins.h"
+#include "src/contexts.h"
+#include "src/date.h"
+#include "src/execution.h"
+#include "src/frames.h"
+#include "src/global-handles.h"
+#include "src/handles.h"
+#include "src/hashmap.h"
+#include "src/heap/heap.h"
+#include "src/optimizing-compiler-thread.h"
+#include "src/regexp-stack.h"
+#include "src/runtime.h"
+#include "src/runtime-profiler.h"
+#include "src/zone.h"
namespace v8 {
+
+namespace base {
+class RandomNumberGenerator;
+}
+
namespace internal {
class Bootstrapper;
+class CallInterfaceDescriptorData;
class CodeGenerator;
class CodeRange;
+class CodeStubDescriptor;
+class CodeTracer;
class CompilationCache;
+class ConsStringIteratorOp;
class ContextSlotCache;
-class ContextSwitcher;
class Counters;
class CpuFeatures;
class CpuProfiler;
class DeoptimizerData;
class Deserializer;
class EmptyStatement;
+class ExternalCallbackScope;
class ExternalReferenceTable;
class Factory;
class FunctionInfoListener;
class HandleScopeImplementer;
class HeapProfiler;
+class HStatistics;
+class HTracer;
class InlineRuntimeFunctionsTable;
-class NoAllocationStringAllocator;
class InnerPointerToCodeCache;
-class PreallocatedMemoryThread;
+class MaterializedObjectStore;
+class CodeAgingHelper;
class RegExpStack;
class SaveContext;
-class UnicodeCache;
-class StringInputBuffer;
class StringTracker;
class StubCache;
+class SweeperThread;
class ThreadManager;
class ThreadState;
class ThreadVisitor; // Defined in v8threads.h
-class VMState;
+class UnicodeCache;
+template <StateTag Tag> class VMState;
// 'void function pointer', used to roundtrip the
// ExternalReference::ExternalReferenceRedirector since we can not include
@@ -87,14 +76,14 @@
typedef void* ExternalReferenceRedirectorPointer();
-#ifdef ENABLE_DEBUGGER_SUPPORT
class Debug;
class Debugger;
-class DebuggerAgent;
-#endif
+class PromiseOnStack;
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS64
class Redirection;
class Simulator;
#endif
@@ -108,7 +97,7 @@
// of handles to the actual constants.
typedef ZoneList<Handle<Object> > ZoneObjectList;
-#define RETURN_IF_SCHEDULED_EXCEPTION(isolate) \
+#define RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate) \
do { \
Isolate* __isolate__ = (isolate); \
if (__isolate__->has_scheduled_exception()) { \
@@ -116,23 +105,65 @@
} \
} while (false)
-#define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value) \
- do { \
- if ((call).is_null()) { \
- ASSERT((isolate)->has_pending_exception()); \
- return (value); \
- } \
+// Macros for MaybeHandle.
+
+#define RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, value) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ if (__isolate__->has_scheduled_exception()) { \
+ __isolate__->PromoteScheduledException(); \
+ return value; \
+ } \
} while (false)
-#define CHECK_NOT_EMPTY_HANDLE(isolate, call) \
- do { \
- ASSERT(!(isolate)->has_pending_exception()); \
- CHECK(!(call).is_null()); \
- CHECK(!(isolate)->has_pending_exception()); \
+#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())
+
+#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
+ do { \
+ if (!(call).ToHandle(&dst)) { \
+ DCHECK((isolate)->has_pending_exception()); \
+ return value; \
+ } \
} while (false)
-#define RETURN_IF_EMPTY_HANDLE(isolate, call) \
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception())
+#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE( \
+ isolate, dst, call, isolate->heap()->exception())
+
+#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
+
+#define THROW_NEW_ERROR(isolate, call, T) \
+ do { \
+ Handle<Object> __error__; \
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, __error__, isolate->factory()->call, \
+ T); \
+ return isolate->Throw<T>(__error__); \
+ } while (false)
+
+#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call) \
+ do { \
+ Handle<Object> __error__; \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, __error__, \
+ isolate->factory()->call); \
+ return isolate->Throw(*__error__); \
+ } while (false)
+
+#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
+ do { \
+ if ((call).is_null()) { \
+ DCHECK((isolate)->has_pending_exception()); \
+ return value; \
+ } \
+ } while (false)
+
+#define RETURN_FAILURE_ON_EXCEPTION(isolate, call) \
+ RETURN_ON_EXCEPTION_VALUE(isolate, call, isolate->heap()->exception())
+
+#define RETURN_ON_EXCEPTION(isolate, call, T) \
+ RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())
+
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
C(Handler, handler) \
@@ -184,12 +215,17 @@
int id_;
- static Atomic32 highest_thread_id_;
+ static base::Atomic32 highest_thread_id_;
friend class Isolate;
};
+#define FIELD_ACCESSOR(type, name) \
+ inline void set_##name(type v) { name##_ = v; } \
+ inline type name() const { return name##_; }
+
+
class ThreadLocalTop BASE_EMBEDDED {
public:
// Does early low-level initialization that does not depend on the
@@ -201,10 +237,10 @@
// Get the top C++ try catch handler or NULL if none are registered.
//
- // This method is not guarenteed to return an address that can be
+ // This method is not guaranteed to return an address that can be
// used for comparison with addresses into the JS stack. If such an
// address is needed, use try_catch_handler_address.
- v8::TryCatch* TryCatchHandler();
+ FIELD_ACCESSOR(v8::TryCatch*, try_catch_handler)
// Get the address of the top C++ try catch handler or NULL if
// none are registered.
@@ -216,36 +252,29 @@
// stack, try_catch_handler_address returns a JS stack address that
// corresponds to the place on the JS stack where the C++ handler
// would have been if the stack were not separate.
- inline Address try_catch_handler_address() {
- return try_catch_handler_address_;
+ Address try_catch_handler_address() {
+ return reinterpret_cast<Address>(
+ v8::TryCatch::JSStackComparableAddress(try_catch_handler()));
}
- // Set the address of the top C++ try catch handler.
- inline void set_try_catch_handler_address(Address address) {
- try_catch_handler_address_ = address;
- }
-
- void Free() {
- ASSERT(!has_pending_message_);
- ASSERT(!external_caught_exception_);
- ASSERT(try_catch_handler_address_ == NULL);
- }
+ void Free();
Isolate* isolate_;
// The context where the current execution method is created and for variable
// lookups.
Context* context_;
ThreadId thread_id_;
- MaybeObject* pending_exception_;
+ Object* pending_exception_;
bool has_pending_message_;
+ bool rethrowing_message_;
Object* pending_message_obj_;
- Script* pending_message_script_;
+ Object* pending_message_script_;
int pending_message_start_pos_;
int pending_message_end_pos_;
// Use a separate value for scheduled exceptions to preserve the
// invariants that hold about pending_exception. We may want to
// unify them later.
- MaybeObject* scheduled_exception_;
+ Object* scheduled_exception_;
bool external_caught_exception_;
SaveContext* save_context_;
v8::TryCatch* catcher_;
@@ -254,14 +283,18 @@
Address c_entry_fp_; // the frame pointer of the top c entry frame
Address handler_; // try-blocks are chained through the stack
+ // Throwing an exception may cause a Promise rejection. For this purpose
+ // we keep track of a stack of nested promises and the corresponding
+ // try-catch handlers.
+ PromiseOnStack* promise_on_stack_;
+
#ifdef USE_SIMULATOR
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
Simulator* simulator_;
#endif
-#endif // USE_SIMULATOR
Address js_entry_sp_; // the stack pointer of the bottom JS entry frame
- Address external_callback_; // the external callback we're currently in
+ // the external callback we're currently in
+ ExternalCallbackScope* external_callback_scope_;
StateTag current_vm_state_;
// Generated code scratch locations.
@@ -273,32 +306,35 @@
// Head of the list of live LookupResults.
LookupResult* top_lookup_result_;
- // Whether out of memory exceptions should be ignored.
- bool ignore_out_of_memory_;
-
private:
void InitializeInternal();
- Address try_catch_handler_address_;
+ v8::TryCatch* try_catch_handler_;
};
-#ifdef ENABLE_DEBUGGER_SUPPORT
+#if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
+ V8_TARGET_ARCH_ARM64 && !defined(__aarch64__) || \
+ V8_TARGET_ARCH_MIPS && !defined(__mips__) || \
+ V8_TARGET_ARCH_MIPS64 && !defined(__mips__)
-#define ISOLATE_DEBUGGER_INIT_LIST(V) \
- V(v8::Debug::EventCallback, debug_event_callback, NULL) \
- V(DebuggerAgent*, debugger_agent_instance, NULL)
+#define ISOLATE_INIT_SIMULATOR_LIST(V) \
+ V(bool, simulator_initialized, false) \
+ V(HashMap*, simulator_i_cache, NULL) \
+ V(Redirection*, simulator_redirection, NULL)
#else
-#define ISOLATE_DEBUGGER_INIT_LIST(V)
+#define ISOLATE_INIT_SIMULATOR_LIST(V)
#endif
+
#ifdef DEBUG
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
V(CommentStatistic, paged_space_comments_statistics, \
- CommentStatistic::kMaxComments + 1)
+ CommentStatistic::kMaxComments + 1) \
+ V(int, code_kind_statistics, Code::NUMBER_OF_KINDS)
#else
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
@@ -307,52 +343,53 @@
#define ISOLATE_INIT_ARRAY_LIST(V) \
/* SerializerDeserializer state. */ \
- V(Object*, serialize_partial_snapshot_cache, kPartialSnapshotCacheCapacity) \
- V(int, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
+ V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
V(int, bad_char_shift_table, kUC16AlphabetSize) \
V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
V(int, suffix_table, (kBMMaxShift + 1)) \
V(uint32_t, private_random_seed, 2) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
-typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
+typedef List<HeapObject*> DebugObjectCache;
#define ISOLATE_INIT_LIST(V) \
/* SerializerDeserializer state. */ \
V(int, serialize_partial_snapshot_cache_length, 0) \
+ V(int, serialize_partial_snapshot_cache_capacity, 0) \
+ V(Object**, serialize_partial_snapshot_cache, NULL) \
/* Assembler state. */ \
- /* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */ \
- V(byte*, assembler_spare_buffer, NULL) \
V(FatalErrorCallback, exception_behavior, NULL) \
+ V(LogEventCallback, event_logger, NULL) \
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \
- V(v8::Debug::MessageHandler, message_handler, NULL) \
/* To distinguish the function templates, so that we can find them in the */ \
- /* function cache of the global context. */ \
+ /* function cache of the native context. */ \
V(int, next_serial_number, 0) \
V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL) \
- V(bool, always_allow_natives_syntax, false) \
/* Part of the state of liveedit. */ \
V(FunctionInfoListener*, active_function_info_listener, NULL) \
/* State for Relocatable. */ \
V(Relocatable*, relocatable_top, NULL) \
- /* State for CodeEntry in profile-generator. */ \
- V(CodeGenerator*, current_code_generator, NULL) \
- V(bool, jump_target_compiling_deferred_code, false) \
V(DebugObjectCache*, string_stream_debug_object_cache, NULL) \
V(Object*, string_stream_current_security_token, NULL) \
- /* TODO(isolates): Release this on destruction? */ \
- V(int*, irregexp_interpreter_backtrack_stack_cache, NULL) \
/* Serializer state. */ \
V(ExternalReferenceTable*, external_reference_table, NULL) \
- /* AstNode state. */ \
- V(int, ast_node_id, 0) \
- V(unsigned, ast_node_count, 0) \
- /* SafeStackFrameIterator activations count. */ \
- V(int, safe_stack_iterator_counter, 0) \
- V(uint64_t, enabled_cpu_features, 0) \
- V(CpuProfiler*, cpu_profiler, NULL) \
- V(HeapProfiler*, heap_profiler, NULL) \
- ISOLATE_DEBUGGER_INIT_LIST(V)
+ V(int, pending_microtask_count, 0) \
+ V(bool, autorun_microtasks, true) \
+ V(HStatistics*, hstatistics, NULL) \
+ V(HStatistics*, tstatistics, NULL) \
+ V(HTracer*, htracer, NULL) \
+ V(CodeTracer*, code_tracer, NULL) \
+ V(bool, fp_stubs_generated, false) \
+ V(int, max_available_threads, 0) \
+ V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
+ V(InterruptCallback, api_interrupt_callback, NULL) \
+ V(void*, api_interrupt_callback_data, NULL) \
+ ISOLATE_INIT_SIMULATOR_LIST(V)
+
+#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
+ inline void set_##name(type v) { thread_local_top_.name##_ = v; } \
+ inline type name() const { return thread_local_top_.name##_; }
+
class Isolate {
// These forward declarations are required to make the friend declarations in
@@ -372,25 +409,26 @@
thread_id_(thread_id),
stack_limit_(0),
thread_state_(NULL),
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS64
simulator_(NULL),
#endif
next_(NULL),
prev_(NULL) { }
+ ~PerIsolateThreadData();
Isolate* isolate() const { return isolate_; }
ThreadId thread_id() const { return thread_id_; }
- void set_stack_limit(uintptr_t value) { stack_limit_ = value; }
- uintptr_t stack_limit() const { return stack_limit_; }
- ThreadState* thread_state() const { return thread_state_; }
- void set_thread_state(ThreadState* value) { thread_state_ = value; }
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
- Simulator* simulator() const { return simulator_; }
- void set_simulator(Simulator* simulator) {
- simulator_ = simulator;
- }
+ FIELD_ACCESSOR(uintptr_t, stack_limit)
+ FIELD_ACCESSOR(ThreadState*, thread_state)
+
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS64
+ FIELD_ACCESSOR(Simulator*, simulator)
#endif
bool Matches(Isolate* isolate, ThreadId thread_id) const {
@@ -403,8 +441,10 @@
uintptr_t stack_limit_;
ThreadState* thread_state_;
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
- !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
+#if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
+ !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+ !defined(__mips__) && V8_TARGET_ARCH_MIPS64
Simulator* simulator_;
#endif
@@ -422,27 +462,37 @@
enum AddressId {
#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
-#undef C
+#undef DECLARE_ENUM
kIsolateAddressCount
};
+ static void InitializeOncePerProcess();
+
// Returns the PerIsolateThreadData for the current thread (or NULL if one is
// not currently set).
static PerIsolateThreadData* CurrentPerIsolateThreadData() {
return reinterpret_cast<PerIsolateThreadData*>(
- Thread::GetThreadLocal(per_isolate_thread_data_key_));
+ base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
}
// Returns the isolate inside which the current thread is running.
INLINE(static Isolate* Current()) {
Isolate* isolate = reinterpret_cast<Isolate*>(
- Thread::GetExistingThreadLocal(isolate_key_));
- ASSERT(isolate != NULL);
+ base::Thread::GetExistingThreadLocal(isolate_key_));
+ DCHECK(isolate != NULL);
return isolate;
}
INLINE(static Isolate* UncheckedCurrent()) {
- return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
+ return reinterpret_cast<Isolate*>(
+ base::Thread::GetThreadLocal(isolate_key_));
+ }
+
+ // Like UncheckedCurrent, but skips the check that |isolate_key_| was
+ // initialized. Callers have to ensure that themselves.
+ INLINE(static Isolate* UnsafeCurrent()) {
+ return reinterpret_cast<Isolate*>(
+ base::Thread::GetThreadLocal(isolate_key_));
}
// Usually called by Init(), but can be called early e.g. to allow
@@ -464,103 +514,83 @@
// for legacy API reasons.
void TearDown();
- bool IsDefaultIsolate() const { return this == default_isolate_; }
-
- // Ensures that process-wide resources and the default isolate have been
- // allocated. It is only necessary to call this method in rare cases, for
- // example if you are using V8 from within the body of a static initializer.
- // Safe to call multiple times.
- static void EnsureDefaultIsolate();
+ static void GlobalTearDown();
// Find the PerThread for this particular (isolate, thread) combination
// If one does not yet exist, return null.
PerIsolateThreadData* FindPerThreadDataForThisThread();
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Get the debugger from the default isolate. Preinitializes the
- // default isolate if needed.
- static Debugger* GetDefaultIsolateDebugger();
-#endif
-
- // Get the stack guard from the default isolate. Preinitializes the
- // default isolate if needed.
- static StackGuard* GetDefaultIsolateStackGuard();
+ // Find the PerThread for given (isolate, thread) combination
+ // If one does not yet exist, return null.
+ PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);
// Returns the key used to store the pointer to the current isolate.
// Used internally for V8 threads that do not execute JavaScript but still
// are part of the domain of an isolate (like the context switcher).
- static Thread::LocalStorageKey isolate_key() {
+ static base::Thread::LocalStorageKey isolate_key() {
return isolate_key_;
}
// Returns the key used to store process-wide thread IDs.
- static Thread::LocalStorageKey thread_id_key() {
+ static base::Thread::LocalStorageKey thread_id_key() {
return thread_id_key_;
}
- static Thread::LocalStorageKey per_isolate_thread_data_key();
-
- // If a client attempts to create a Locker without specifying an isolate,
- // we assume that the client is using legacy behavior. Set up the current
- // thread to be inside the implicit isolate (or fail a check if we have
- // switched to non-legacy behavior).
- static void EnterDefaultIsolate();
+ static base::Thread::LocalStorageKey per_isolate_thread_data_key();
// Mutex for serializing access to break control structures.
- Mutex* break_access() { return break_access_; }
-
- // Mutex for serializing access to debugger.
- Mutex* debugger_access() { return debugger_access_; }
+ base::RecursiveMutex* break_access() { return &break_access_; }
Address get_address_from_id(AddressId id);
// Access to top context (where the current function object was created).
Context* context() { return thread_local_top_.context_; }
void set_context(Context* context) {
- ASSERT(context == NULL || context->IsContext());
+ DCHECK(context == NULL || context->IsContext());
thread_local_top_.context_ = context;
}
Context** context_address() { return &thread_local_top_.context_; }
- SaveContext* save_context() {return thread_local_top_.save_context_; }
- void set_save_context(SaveContext* save) {
- thread_local_top_.save_context_ = save;
- }
+ THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)
// Access to current thread id.
- ThreadId thread_id() { return thread_local_top_.thread_id_; }
- void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
+ THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)
// Interface to pending exception.
- MaybeObject* pending_exception() {
- ASSERT(has_pending_exception());
+ Object* pending_exception() {
+ DCHECK(has_pending_exception());
+ DCHECK(!thread_local_top_.pending_exception_->IsException());
return thread_local_top_.pending_exception_;
}
- bool external_caught_exception() {
- return thread_local_top_.external_caught_exception_;
+
+ void set_pending_exception(Object* exception_obj) {
+ DCHECK(!exception_obj->IsException());
+ thread_local_top_.pending_exception_ = exception_obj;
}
- void set_external_caught_exception(bool value) {
- thread_local_top_.external_caught_exception_ = value;
- }
- void set_pending_exception(MaybeObject* exception) {
- thread_local_top_.pending_exception_ = exception;
- }
+
void clear_pending_exception() {
+ DCHECK(!thread_local_top_.pending_exception_->IsException());
thread_local_top_.pending_exception_ = heap_.the_hole_value();
}
- MaybeObject** pending_exception_address() {
+
+ Object** pending_exception_address() {
return &thread_local_top_.pending_exception_;
}
+
bool has_pending_exception() {
+ DCHECK(!thread_local_top_.pending_exception_->IsException());
return !thread_local_top_.pending_exception_->IsTheHole();
}
+
+ THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
+
void clear_pending_message() {
thread_local_top_.has_pending_message_ = false;
thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
- thread_local_top_.pending_message_script_ = NULL;
+ thread_local_top_.pending_message_script_ = heap_.the_hole_value();
}
v8::TryCatch* try_catch_handler() {
- return thread_local_top_.TryCatchHandler();
+ return thread_local_top_.try_catch_handler();
}
Address try_catch_handler_address() {
return thread_local_top_.try_catch_handler_address();
@@ -568,34 +598,50 @@
bool* external_caught_exception_address() {
return &thread_local_top_.external_caught_exception_;
}
- v8::TryCatch* catcher() {
- return thread_local_top_.catcher_;
- }
- void set_catcher(v8::TryCatch* catcher) {
- thread_local_top_.catcher_ = catcher;
- }
- MaybeObject** scheduled_exception_address() {
+ THREAD_LOCAL_TOP_ACCESSOR(v8::TryCatch*, catcher)
+
+ Object** scheduled_exception_address() {
return &thread_local_top_.scheduled_exception_;
}
- MaybeObject* scheduled_exception() {
- ASSERT(has_scheduled_exception());
+
+ Address pending_message_obj_address() {
+ return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
+ }
+
+ Address has_pending_message_address() {
+ return reinterpret_cast<Address>(&thread_local_top_.has_pending_message_);
+ }
+
+ Address pending_message_script_address() {
+ return reinterpret_cast<Address>(
+ &thread_local_top_.pending_message_script_);
+ }
+
+ Object* scheduled_exception() {
+ DCHECK(has_scheduled_exception());
+ DCHECK(!thread_local_top_.scheduled_exception_->IsException());
return thread_local_top_.scheduled_exception_;
}
bool has_scheduled_exception() {
+ DCHECK(!thread_local_top_.scheduled_exception_->IsException());
return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
}
void clear_scheduled_exception() {
+ DCHECK(!thread_local_top_.scheduled_exception_->IsException());
thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
}
- bool IsExternallyCaught();
+ bool HasExternalTryCatch();
+ bool IsFinallyOnTop();
- bool is_catchable_by_javascript(MaybeObject* exception) {
- return (exception != Failure::OutOfMemoryException()) &&
- (exception != heap()->termination_exception());
+ bool is_catchable_by_javascript(Object* exception) {
+ return exception != heap()->termination_exception();
}
+ // Serializer.
+ void PushToPartialSnapshotCache(Object* obj);
+
// JS execution stack (see frames.h).
static Address c_entry_fp(ThreadLocalTop* thread) {
return thread->c_entry_fp_;
@@ -607,9 +653,9 @@
}
inline Address* handler_address() { return &thread_local_top_.handler_; }
- // Bottom JS entry (see StackTracer::Trace in log.cc).
- static Address js_entry_sp(ThreadLocalTop* thread) {
- return thread->js_entry_sp_;
+ // Bottom JS entry.
+ Address js_entry_sp() {
+ return thread_local_top_.js_entry_sp_;
}
inline Address* js_entry_sp_address() {
return &thread_local_top_.js_entry_sp_;
@@ -620,12 +666,12 @@
// Returns the global object of the current context. It could be
// a builtin object, or a JS global object.
- Handle<GlobalObject> global() {
- return Handle<GlobalObject>(context()->global());
+ Handle<GlobalObject> global_object() {
+ return Handle<GlobalObject>(context()->global_object());
}
// Returns the global proxy object of the current context.
- Object* global_proxy() {
+ JSObject* global_proxy() {
return context()->global_proxy();
}
@@ -642,13 +688,18 @@
// JavaScript code. If an exception is scheduled true is returned.
bool OptionalRescheduleException(bool is_bottom_call);
+ // Push and pop a promise and the current try-catch handler.
+ void PushPromise(Handle<JSObject> promise);
+ void PopPromise();
+ Handle<Object> GetPromiseOnStackOnThrow();
+
class ExceptionScope {
public:
explicit ExceptionScope(Isolate* isolate) :
- // Scope currently can only be used for regular exceptions, not
- // failures like OOM or termination exception.
+ // Scope currently can only be used for regular exceptions,
+ // not termination exception.
isolate_(isolate),
- pending_exception_(isolate_->pending_exception()->ToObjectUnchecked()),
+ pending_exception_(isolate_->pending_exception(), isolate_),
catcher_(isolate_->catcher())
{ }
@@ -668,53 +719,65 @@
int frame_limit,
StackTrace::StackTraceOptions options);
- // Tells whether the current context has experienced an out of memory
- // exception.
- bool is_out_of_memory();
- bool ignore_out_of_memory() {
- return thread_local_top_.ignore_out_of_memory_;
- }
- void set_ignore_out_of_memory(bool value) {
- thread_local_top_.ignore_out_of_memory_ = value;
- }
-
void PrintCurrentStackTrace(FILE* out);
- void PrintStackTrace(FILE* out, char* thread_data);
void PrintStack(StringStream* accumulator);
- void PrintStack();
+ void PrintStack(FILE* out);
Handle<String> StackTraceString();
+ NO_INLINE(void PushStackTraceAndDie(unsigned int magic,
+ Object* object,
+ Map* map,
+ unsigned int magic2));
Handle<JSArray> CaptureCurrentStackTrace(
int frame_limit,
StackTrace::StackTraceOptions options);
-
- void CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object);
+ Handle<Object> CaptureSimpleStackTrace(Handle<JSObject> error_object,
+ Handle<Object> caller);
+ void CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object);
+ void CaptureAndSetSimpleStackTrace(Handle<JSObject> error_object,
+ Handle<Object> caller);
// Returns if the top context may access the given global object. If
// the result is false, the pending exception is guaranteed to be
// set.
- bool MayNamedAccess(JSObject* receiver,
- Object* key,
+
+ bool MayNamedAccess(Handle<JSObject> receiver,
+ Handle<Object> key,
v8::AccessType type);
- bool MayIndexedAccess(JSObject* receiver,
+ bool MayIndexedAccess(Handle<JSObject> receiver,
uint32_t index,
v8::AccessType type);
void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
- void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);
+ void ReportFailedAccessCheck(Handle<JSObject> receiver, v8::AccessType type);
// Exception throwing support. The caller should use the result
// of Throw() as its return value.
- Failure* Throw(Object* exception, MessageLocation* location = NULL);
+ Object* Throw(Object* exception, MessageLocation* location = NULL);
+
+ template <typename T>
+ MUST_USE_RESULT MaybeHandle<T> Throw(Handle<Object> exception,
+ MessageLocation* location = NULL) {
+ Throw(*exception, location);
+ return MaybeHandle<T>();
+ }
+
// Re-throw an exception. This involves no error reporting since
// error reporting was handled when the exception was thrown
// originally.
- Failure* ReThrow(MaybeObject* exception, MessageLocation* location = NULL);
+ Object* ReThrow(Object* exception);
void ScheduleThrow(Object* exception);
+ // Re-set pending message, script and positions reported to the TryCatch
+ // back to the TLS for re-use when rethrowing.
+ void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
+ // Un-schedule an exception that was caught by a TryCatch handler.
+ void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler);
void ReportPendingMessages();
- Failure* ThrowIllegalOperation();
+ // Return pending location if any or unfilled structure.
+ MessageLocation GetMessageLocation();
+ Object* ThrowIllegalOperation();
// Promote a scheduled exception to pending. Asserts has_scheduled_exception.
- Failure* PromoteScheduledException();
+ Object* PromoteScheduledException();
void DoThrow(Object* exception, MessageLocation* location);
// Checks if exception should be reported and finds out if it's
// caught externally.
@@ -725,27 +788,27 @@
// result in the target out parameter.
void ComputeLocation(MessageLocation* target);
- // Override command line flag.
- void TraceException(bool flag);
-
// Out of resource exception helpers.
- Failure* StackOverflow();
- Failure* TerminateExecution();
+ Object* StackOverflow();
+ Object* TerminateExecution();
+ void CancelTerminateExecution();
+
+ void InvokeApiInterruptCallback();
// Administration
void Iterate(ObjectVisitor* v);
void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
char* Iterate(ObjectVisitor* v, char* t);
- void IterateThread(ThreadVisitor* v);
void IterateThread(ThreadVisitor* v, char* t);
- // Returns the current global context.
+ // Returns the current native and global context.
+ Handle<Context> native_context();
Handle<Context> global_context();
- // Returns the global context of the calling JavaScript code. That
- // is, the global context of the top-most JavaScript frame.
- Handle<Context> GetCallingGlobalContext();
+ // Returns the native context of the calling JavaScript code. That
+ // is, the native context of the top-most JavaScript frame.
+ Handle<Context> GetCallingNativeContext();
void RegisterTryCatchHandler(v8::TryCatch* that);
void UnregisterTryCatchHandler(v8::TryCatch* that);
@@ -761,11 +824,11 @@
// Accessors.
#define GLOBAL_ACCESSOR(type, name, initialvalue) \
inline type name() const { \
- ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
+ DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
return name##_; \
} \
inline void set_##name(type value) { \
- ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
+ DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
name##_ = value; \
}
ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
@@ -773,24 +836,27 @@
#define GLOBAL_ARRAY_ACCESSOR(type, name, length) \
inline type* name() { \
- ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
+ DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
return &(name##_)[0]; \
}
ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR
-#define GLOBAL_CONTEXT_FIELD_ACCESSOR(index, type, name) \
- Handle<type> name() { \
- return Handle<type>(context()->global_context()->name()); \
+#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
+ Handle<type> name() { \
+ return Handle<type>(native_context()->name(), this); \
+ } \
+ bool is_##name(type* value) { \
+ return native_context()->is_##name(value); \
}
- GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSOR)
-#undef GLOBAL_CONTEXT_FIELD_ACCESSOR
+ NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
+#undef NATIVE_CONTEXT_FIELD_ACCESSOR
Bootstrapper* bootstrapper() { return bootstrapper_; }
Counters* counters() {
// Call InitializeLoggingAndCounters() if logging is needed before
// the isolate is fully initialized.
- ASSERT(counters_ != NULL);
+ DCHECK(counters_ != NULL);
return counters_;
}
CodeRange* code_range() { return code_range_; }
@@ -799,18 +865,18 @@
Logger* logger() {
// Call InitializeLoggingAndCounters() if logging is needed before
// the isolate is fully initialized.
- ASSERT(logger_ != NULL);
+ DCHECK(logger_ != NULL);
return logger_;
}
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
StatsTable* stats_table();
StubCache* stub_cache() { return stub_cache_; }
+ CodeAgingHelper* code_aging_helper() { return code_aging_helper_; }
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
-
- TranscendentalCache* transcendental_cache() const {
- return transcendental_cache_;
+ MaterializedObjectStore* materialized_object_store() {
+ return materialized_object_store_;
}
MemoryAllocator* memory_allocator() {
@@ -829,14 +895,13 @@
return descriptor_lookup_cache_;
}
- v8::ImplementationUtilities::HandleScopeData* handle_scope_data() {
- return &handle_scope_data_;
- }
+ HandleScopeData* handle_scope_data() { return &handle_scope_data_; }
+
HandleScopeImplementer* handle_scope_implementer() {
- ASSERT(handle_scope_implementer_);
+ DCHECK(handle_scope_implementer_);
return handle_scope_implementer_;
}
- Zone* zone() { return &zone_; }
+ Zone* runtime_zone() { return &runtime_zone_; }
UnicodeCache* unicode_cache() {
return unicode_cache_;
@@ -846,18 +911,14 @@
return inner_pointer_to_code_cache_;
}
- StringInputBuffer* write_input_buffer() { return write_input_buffer_; }
+ ConsStringIteratorOp* write_iterator() { return write_iterator_; }
GlobalHandles* global_handles() { return global_handles_; }
+ EternalHandles* eternal_handles() { return eternal_handles_; }
+
ThreadManager* thread_manager() { return thread_manager_; }
- ContextSwitcher* context_switcher() { return context_switcher_; }
-
- void set_context_switcher(ContextSwitcher* switcher) {
- context_switcher_ = switcher;
- }
-
StringTracker* string_tracker() { return string_tracker_; }
unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
@@ -868,30 +929,20 @@
return &jsregexp_canonrange_;
}
- StringInputBuffer* objects_string_compare_buffer_a() {
- return &objects_string_compare_buffer_a_;
+ ConsStringIteratorOp* objects_string_compare_iterator_a() {
+ return &objects_string_compare_iterator_a_;
}
- StringInputBuffer* objects_string_compare_buffer_b() {
- return &objects_string_compare_buffer_b_;
+ ConsStringIteratorOp* objects_string_compare_iterator_b() {
+ return &objects_string_compare_iterator_b_;
}
- StaticResource<StringInputBuffer>* objects_string_input_buffer() {
- return &objects_string_input_buffer_;
+ StaticResource<ConsStringIteratorOp>* objects_string_iterator() {
+ return &objects_string_iterator_;
}
RuntimeState* runtime_state() { return &runtime_state_; }
- void set_fp_stubs_generated(bool value) {
- fp_stubs_generated_ = value;
- }
-
- bool fp_stubs_generated() { return fp_stubs_generated_; }
-
- StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
- return &compiler_safe_string_input_buffer_;
- }
-
Builtins* builtins() { return &builtins_; }
void NotifyExtensionInstalled() {
@@ -912,113 +963,57 @@
return &interp_canonicalize_mapping_;
}
- void* PreallocatedStorageNew(size_t size);
- void PreallocatedStorageDelete(void* p);
- void PreallocatedStorageInit(size_t size);
+ Debug* debug() { return debug_; }
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debugger* debugger() {
- if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
- return debugger_;
- }
- Debug* debug() {
- if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
- return debug_;
- }
-#endif
-
- inline bool IsDebuggerActive();
inline bool DebuggerHasBreakPoints();
+ CpuProfiler* cpu_profiler() const { return cpu_profiler_; }
+ HeapProfiler* heap_profiler() const { return heap_profiler_; }
+
#ifdef DEBUG
HistogramInfo* heap_histograms() { return heap_histograms_; }
JSObject::SpillInformation* js_spill_information() {
return &js_spill_information_;
}
-
- int* code_kind_statistics() { return code_kind_statistics_; }
-#endif
-
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
- defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
- bool simulator_initialized() { return simulator_initialized_; }
- void set_simulator_initialized(bool initialized) {
- simulator_initialized_ = initialized;
- }
-
- HashMap* simulator_i_cache() { return simulator_i_cache_; }
- void set_simulator_i_cache(HashMap* hash_map) {
- simulator_i_cache_ = hash_map;
- }
-
- Redirection* simulator_redirection() {
- return simulator_redirection_;
- }
- void set_simulator_redirection(Redirection* redirection) {
- simulator_redirection_ = redirection;
- }
#endif
Factory* factory() { return reinterpret_cast<Factory*>(this); }
- // SerializerDeserializer state.
- static const int kPartialSnapshotCacheCapacity = 1400;
+ static const int kJSRegexpStaticOffsetsVectorSize = 128;
- static const int kJSRegexpStaticOffsetsVectorSize = 50;
+ THREAD_LOCAL_TOP_ACCESSOR(ExternalCallbackScope*, external_callback_scope)
- Address external_callback() {
- return thread_local_top_.external_callback_;
+ THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)
+
+ void SetData(uint32_t slot, void* data) {
+ DCHECK(slot < Internals::kNumIsolateDataSlots);
+ embedder_data_[slot] = data;
}
- void set_external_callback(Address callback) {
- thread_local_top_.external_callback_ = callback;
+ void* GetData(uint32_t slot) {
+ DCHECK(slot < Internals::kNumIsolateDataSlots);
+ return embedder_data_[slot];
}
- StateTag current_vm_state() {
- return thread_local_top_.current_vm_state_;
+ THREAD_LOCAL_TOP_ACCESSOR(LookupResult*, top_lookup_result)
+
+ void enable_serializer() {
+ // The serializer can only be enabled before the isolate init.
+ DCHECK(state_ != INITIALIZED);
+ serializer_enabled_ = true;
}
- void SetCurrentVMState(StateTag state) {
- if (RuntimeProfiler::IsEnabled()) {
- // Make sure thread local top is initialized.
- ASSERT(thread_local_top_.isolate_ == this);
- StateTag current_state = thread_local_top_.current_vm_state_;
- if (current_state != JS && state == JS) {
- // Non-JS -> JS transition.
- RuntimeProfiler::IsolateEnteredJS(this);
- } else if (current_state == JS && state != JS) {
- // JS -> non-JS transition.
- ASSERT(RuntimeProfiler::IsSomeIsolateInJS());
- RuntimeProfiler::IsolateExitedJS(this);
- } else {
- // Other types of state transitions are not interesting to the
- // runtime profiler, because they don't affect whether we're
- // in JS or not.
- ASSERT((current_state == JS) == (state == JS));
- }
- }
- thread_local_top_.current_vm_state_ = state;
- }
+ bool serializer_enabled() const { return serializer_enabled_; }
- void SetData(void* data) { embedder_data_ = data; }
- void* GetData() { return embedder_data_; }
+ bool IsDead() { return has_fatal_error_; }
+ void SignalFatalError() { has_fatal_error_ = true; }
- LookupResult* top_lookup_result() {
- return thread_local_top_.top_lookup_result_;
- }
- void SetTopLookupResult(LookupResult* top) {
- thread_local_top_.top_lookup_result_ = top;
- }
+ bool use_crankshaft() const;
- bool context_exit_happened() {
- return context_exit_happened_;
- }
- void set_context_exit_happened(bool context_exit_happened) {
- context_exit_happened_ = context_exit_happened;
- }
+ bool initialized_from_snapshot() { return initialized_from_snapshot_; }
double time_millis_since_init() {
- return OS::TimeCurrentMillis() - time_millis_at_init_;
+ return base::OS::TimeCurrentMillis() - time_millis_at_init_;
}
DateCache* date_cache() {
@@ -1032,12 +1027,106 @@
date_cache_ = date_cache;
}
+ Map* get_initial_js_array_map(ElementsKind kind);
+
+ bool IsFastArrayConstructorPrototypeChainIntact();
+
+ CallInterfaceDescriptorData* call_descriptor_data(int index);
+
+ void IterateDeferredHandles(ObjectVisitor* visitor);
+ void LinkDeferredHandles(DeferredHandles* deferred_handles);
+ void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
+
+#ifdef DEBUG
+ bool IsDeferredHandle(Object** location);
+#endif // DEBUG
+
+ bool concurrent_recompilation_enabled() {
+ // Thread is only available with flag enabled.
+ DCHECK(optimizing_compiler_thread_ == NULL ||
+ FLAG_concurrent_recompilation);
+ return optimizing_compiler_thread_ != NULL;
+ }
+
+ bool concurrent_osr_enabled() const {
+ // Thread is only available with flag enabled.
+ DCHECK(optimizing_compiler_thread_ == NULL ||
+ FLAG_concurrent_recompilation);
+ return optimizing_compiler_thread_ != NULL && FLAG_concurrent_osr;
+ }
+
+ OptimizingCompilerThread* optimizing_compiler_thread() {
+ return optimizing_compiler_thread_;
+ }
+
+ int num_sweeper_threads() const {
+ return num_sweeper_threads_;
+ }
+
+ SweeperThread** sweeper_threads() {
+ return sweeper_thread_;
+ }
+
+ int id() const { return static_cast<int>(id_); }
+
+ HStatistics* GetHStatistics();
+ HStatistics* GetTStatistics();
+ HTracer* GetHTracer();
+ CodeTracer* GetCodeTracer();
+
+ FunctionEntryHook function_entry_hook() { return function_entry_hook_; }
+ void set_function_entry_hook(FunctionEntryHook function_entry_hook) {
+ function_entry_hook_ = function_entry_hook;
+ }
+
+ void* stress_deopt_count_address() { return &stress_deopt_count_; }
+
+ inline base::RandomNumberGenerator* random_number_generator();
+
+ // Given an address occupied by a live code object, return that object.
+ Object* FindCodeObject(Address a);
+
+ int NextOptimizationId() {
+ int id = next_optimization_id_++;
+ if (!Smi::IsValid(next_optimization_id_)) {
+ next_optimization_id_ = 0;
+ }
+ return id;
+ }
+
+ // Get (and lazily initialize) the registry for per-isolate symbols.
+ Handle<JSObject> GetSymbolRegistry();
+
+ void AddCallCompletedCallback(CallCompletedCallback callback);
+ void RemoveCallCompletedCallback(CallCompletedCallback callback);
+ void FireCallCompletedCallback();
+
+ void EnqueueMicrotask(Handle<Object> microtask);
+ void RunMicrotasks();
+
+ void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
+ void CountUsage(v8::Isolate::UseCounterFeature feature);
+
+ static Isolate* NewForTesting() { return new Isolate(); }
+
private:
Isolate();
friend struct GlobalState;
friend struct InitializeGlobalState;
+ enum State {
+ UNINITIALIZED, // Some components may not have been allocated.
+ INITIALIZED // All components are fully initialized.
+ };
+
+ // These fields are accessed through the API, offsets must be kept in sync
+ // with v8::internal::Internals (in include/v8.h) constants. This is also
+ // verified in Isolate::Init() using runtime checks.
+ void* embedder_data_[Internals::kNumIsolateDataSlots];
+ Heap heap_;
+ State state_; // Will be padded to kApiPointerSize.
+
// The per-process lock should be acquired before the ThreadDataTable is
// modified.
class ThreadDataTable {
@@ -1047,7 +1136,6 @@
PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
void Insert(PerIsolateThreadData* data);
- void Remove(Isolate* isolate, ThreadId thread_id);
void Remove(PerIsolateThreadData* data);
void RemoveAllThreads(Isolate* isolate);
@@ -1080,41 +1168,25 @@
DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
};
- // This mutex protects highest_thread_id_, thread_data_table_ and
- // default_isolate_.
- static Mutex* process_wide_mutex_;
+ static base::LazyMutex thread_data_table_mutex_;
- static Thread::LocalStorageKey per_isolate_thread_data_key_;
- static Thread::LocalStorageKey isolate_key_;
- static Thread::LocalStorageKey thread_id_key_;
- static Isolate* default_isolate_;
+ static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
+ static base::Thread::LocalStorageKey isolate_key_;
+ static base::Thread::LocalStorageKey thread_id_key_;
static ThreadDataTable* thread_data_table_;
+ // A global counter for all generated Isolates, might overflow.
+ static base::Atomic32 isolate_counter_;
+
void Deinit();
static void SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data);
- enum State {
- UNINITIALIZED, // Some components may not have been allocated.
- INITIALIZED // All components are fully initialized.
- };
-
- State state_;
- EntryStackItem* entry_stack_;
-
- // Allocate and insert PerIsolateThreadData into the ThreadDataTable
- // (regardless of whether such data already exists).
- PerIsolateThreadData* AllocatePerIsolateThreadData(ThreadId thread_id);
-
// Find the PerThread for this particular (isolate, thread) combination.
// If one does not yet exist, allocate a new one.
PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
- // PreInits and returns a default isolate. Needed when a new thread tries
- // to create a Locker for the first time (the lock itself is in the isolate).
- static Isolate* GetDefaultIsolateForLocking();
-
// Initializes the current thread to run this Isolate.
// Not thread-safe. Multiple threads should not Enter/Exit the same isolate
// at the same time, this should be prevented using external locking.
@@ -1126,11 +1198,8 @@
// at the same time, this should be prevented using external locking.
void Exit();
- void PreallocatedMemoryThreadStart();
- void PreallocatedMemoryThreadStop();
void InitializeThreadLocal();
- void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
void MarkCompactPrologue(bool is_compacting,
ThreadLocalTop* archived_thread_data);
void MarkCompactEpilogue(bool is_compacting,
@@ -1138,101 +1207,90 @@
void FillCache();
- void PropagatePendingExceptionToExternalTryCatch();
-
- void InitializeDebugger();
+ // Propagates the pending exception message to the external v8::TryCatch.
+ // Returns true if there is no external try-catch handler, or if the
+ // message was successfully propagated to it.
+ bool PropagatePendingExceptionToExternalTryCatch();
// Traverse prototype chain to find out whether the object is derived from
// the Error object.
bool IsErrorObject(Handle<Object> obj);
+ base::Atomic32 id_;
+ EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;
StringStream* incomplete_message_;
- // The preallocated memory thread singleton.
- PreallocatedMemoryThread* preallocated_memory_thread_;
Address isolate_addresses_[kIsolateAddressCount + 1]; // NOLINT
- NoAllocationStringAllocator* preallocated_message_space_;
-
Bootstrapper* bootstrapper_;
RuntimeProfiler* runtime_profiler_;
CompilationCache* compilation_cache_;
Counters* counters_;
CodeRange* code_range_;
- Mutex* break_access_;
- Atomic32 debugger_initialized_;
- Mutex* debugger_access_;
- Heap heap_;
+ base::RecursiveMutex break_access_;
+ base::Atomic32 debugger_initialized_;
Logger* logger_;
StackGuard stack_guard_;
StatsTable* stats_table_;
StubCache* stub_cache_;
+ CodeAgingHelper* code_aging_helper_;
DeoptimizerData* deoptimizer_data_;
+ MaterializedObjectStore* materialized_object_store_;
ThreadLocalTop thread_local_top_;
bool capture_stack_trace_for_uncaught_exceptions_;
int stack_trace_for_uncaught_exceptions_frame_limit_;
StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
- TranscendentalCache* transcendental_cache_;
MemoryAllocator* memory_allocator_;
KeyedLookupCache* keyed_lookup_cache_;
ContextSlotCache* context_slot_cache_;
DescriptorLookupCache* descriptor_lookup_cache_;
- v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
+ HandleScopeData handle_scope_data_;
HandleScopeImplementer* handle_scope_implementer_;
UnicodeCache* unicode_cache_;
- Zone zone_;
- PreallocatedStorage in_use_list_;
- PreallocatedStorage free_list_;
- bool preallocated_storage_preallocated_;
+ Zone runtime_zone_;
InnerPointerToCodeCache* inner_pointer_to_code_cache_;
- StringInputBuffer* write_input_buffer_;
+ ConsStringIteratorOp* write_iterator_;
GlobalHandles* global_handles_;
- ContextSwitcher* context_switcher_;
+ EternalHandles* eternal_handles_;
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
- bool fp_stubs_generated_;
- StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
Builtins builtins_;
bool has_installed_extensions_;
StringTracker* string_tracker_;
unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
- StringInputBuffer objects_string_compare_buffer_a_;
- StringInputBuffer objects_string_compare_buffer_b_;
- StaticResource<StringInputBuffer> objects_string_input_buffer_;
+ ConsStringIteratorOp objects_string_compare_iterator_a_;
+ ConsStringIteratorOp objects_string_compare_iterator_b_;
+ StaticResource<ConsStringIteratorOp> objects_string_iterator_;
unibrow::Mapping<unibrow::Ecma262Canonicalize>
regexp_macro_assembler_canonicalize_;
RegExpStack* regexp_stack_;
-
DateCache* date_cache_;
-
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
- void* embedder_data_;
+ CallInterfaceDescriptorData* call_descriptor_data_;
+ base::RandomNumberGenerator* random_number_generator_;
- // The garbage collector should be a little more aggressive when it knows
- // that a context was recently exited.
- bool context_exit_happened_;
+ // Whether the isolate has been created for snapshotting.
+ bool serializer_enabled_;
+
+ // True if fatal error has been signaled for this isolate.
+ bool has_fatal_error_;
+
+ // True if this isolate was initialized from a snapshot.
+ bool initialized_from_snapshot_;
// Time stamp at initialization.
double time_millis_at_init_;
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
- defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
- bool simulator_initialized_;
- HashMap* simulator_i_cache_;
- Redirection* simulator_redirection_;
-#endif
-
#ifdef DEBUG
// A static array of histogram info for each type.
HistogramInfo heap_histograms_[LAST_TYPE + 1];
JSObject::SpillInformation js_spill_information_;
- int code_kind_statistics_[Code::NUMBER_OF_KINDS];
#endif
-#ifdef ENABLE_DEBUGGER_SUPPORT
- Debugger* debugger_;
Debug* debug_;
-#endif
+ CpuProfiler* cpu_profiler_;
+ HeapProfiler* heap_profiler_;
+ FunctionEntryHook function_entry_hook_;
#define GLOBAL_BACKING_STORE(type, name, initialvalue) \
type name##_;
@@ -1255,13 +1313,32 @@
#undef ISOLATE_FIELD_OFFSET
#endif
+ DeferredHandles* deferred_handles_head_;
+ OptimizingCompilerThread* optimizing_compiler_thread_;
+ SweeperThread** sweeper_thread_;
+ int num_sweeper_threads_;
+
+ // Counts deopt points if deopt_every_n_times is enabled.
+ unsigned int stress_deopt_count_;
+
+ int next_optimization_id_;
+
+ // List of callbacks when a Call completes.
+ List<CallCompletedCallback> call_completed_callbacks_;
+
+ v8::Isolate::UseCounterCallback use_counter_callback_;
+
friend class ExecutionAccess;
+ friend class HandleScopeImplementer;
friend class IsolateInitializer;
+ friend class OptimizingCompilerThread;
+ friend class SweeperThread;
friend class ThreadManager;
friend class Simulator;
friend class StackGuard;
friend class ThreadId;
friend class TestMemoryAllocatorScope;
+ friend class TestCodeRangeScope;
friend class v8::Isolate;
friend class v8::Locker;
friend class v8::Unlocker;
@@ -1270,6 +1347,26 @@
};
+#undef FIELD_ACCESSOR
+#undef THREAD_LOCAL_TOP_ACCESSOR
+
+
+class PromiseOnStack {
+ public:
+ PromiseOnStack(StackHandler* handler, Handle<JSObject> promise,
+ PromiseOnStack* prev)
+ : handler_(handler), promise_(promise), prev_(prev) {}
+ StackHandler* handler() { return handler_; }
+ Handle<JSObject> promise() { return promise_; }
+ PromiseOnStack* prev() { return prev_; }
+
+ private:
+ StackHandler* handler_;
+ Handle<JSObject> promise_;
+ PromiseOnStack* prev_;
+};
+
+
// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
// class as a work around for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
@@ -1278,15 +1375,8 @@
inline explicit SaveContext(Isolate* isolate);
~SaveContext() {
- if (context_.is_null()) {
- Isolate* isolate = Isolate::Current();
- isolate->set_context(NULL);
- isolate->set_save_context(prev_);
- } else {
- Isolate* isolate = context_->GetIsolate();
- isolate->set_context(*context_);
- isolate->set_save_context(prev_);
- }
+ isolate_->set_context(context_.is_null() ? NULL : *context_);
+ isolate_->set_save_context(prev_);
}
Handle<Context> context() { return context_; }
@@ -1298,10 +1388,8 @@
}
private:
+ Isolate* isolate_;
Handle<Context> context_;
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
- Handle<Context> dummy_;
-#endif
SaveContext* prev_;
Address c_entry_fp_;
};
@@ -1310,21 +1398,19 @@
class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
public:
- AssertNoContextChange() :
- scope_(Isolate::Current()),
- context_(Isolate::Current()->context(), Isolate::Current()) {
- }
-
+ explicit AssertNoContextChange(Isolate* isolate)
+ : isolate_(isolate),
+ context_(isolate->context(), isolate) { }
~AssertNoContextChange() {
- ASSERT(Isolate::Current()->context() == *context_);
+ DCHECK(isolate_->context() == *context_);
}
private:
- HandleScope scope_;
+ Isolate* isolate_;
Handle<Context> context_;
#else
public:
- AssertNoContextChange() { }
+ explicit AssertNoContextChange(Isolate* isolate) { }
#endif
};
@@ -1336,11 +1422,11 @@
}
~ExecutionAccess() { Unlock(isolate_); }
- static void Lock(Isolate* isolate) { isolate->break_access_->Lock(); }
- static void Unlock(Isolate* isolate) { isolate->break_access_->Unlock(); }
+ static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
+ static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }
static bool TryLock(Isolate* isolate) {
- return isolate->break_access_->TryLock();
+ return isolate->break_access()->TryLock();
}
private:
@@ -1348,20 +1434,20 @@
};
-// Support for checking for stack-overflows in C++ code.
+// Support for checking for stack-overflows.
class StackLimitCheck BASE_EMBEDDED {
public:
explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
- bool HasOverflowed() const {
+ // Use this to check for stack-overflows in C++ code.
+ inline bool HasOverflowed() const {
StackGuard* stack_guard = isolate_->stack_guard();
- // Stack has overflowed in C++ code only if stack pointer exceeds the C++
- // stack guard and the limits are not set to interrupt values.
- // TODO(214): Stack overflows are ignored if a interrupt is pending. This
- // code should probably always use the initial C++ limit.
- return (reinterpret_cast<uintptr_t>(this) < stack_guard->climit()) &&
- stack_guard->IsStackOverflow();
+ return GetCurrentStackPosition() < stack_guard->real_climit();
}
+
+ // Use this to check for stack-overflow when entering runtime from JS code.
+ bool JsHasOverflowed() const;
+
private:
Isolate* isolate_;
};
@@ -1373,42 +1459,99 @@
// account.
class PostponeInterruptsScope BASE_EMBEDDED {
public:
- explicit PostponeInterruptsScope(Isolate* isolate)
- : stack_guard_(isolate->stack_guard()) {
- stack_guard_->thread_local_.postpone_interrupts_nesting_++;
- stack_guard_->DisableInterrupts();
+ PostponeInterruptsScope(Isolate* isolate,
+ int intercept_mask = StackGuard::ALL_INTERRUPTS)
+ : stack_guard_(isolate->stack_guard()),
+ intercept_mask_(intercept_mask),
+ intercepted_flags_(0) {
+ stack_guard_->PushPostponeInterruptsScope(this);
}
~PostponeInterruptsScope() {
- if (--stack_guard_->thread_local_.postpone_interrupts_nesting_ == 0) {
- stack_guard_->EnableInterrupts();
- }
+ stack_guard_->PopPostponeInterruptsScope();
}
+
+ // Find the bottom-most scope that intercepts this interrupt.
+ // Return whether the interrupt has been intercepted.
+ bool Intercept(StackGuard::InterruptFlag flag);
+
private:
StackGuard* stack_guard_;
+ int intercept_mask_;
+ int intercepted_flags_;
+ PostponeInterruptsScope* prev_;
+
+ friend class StackGuard;
};
-// Temporary macros for accessing current isolate and its subobjects.
-// They provide better readability, especially when used a lot in the code.
-#define HEAP (v8::internal::Isolate::Current()->heap())
-#define FACTORY (v8::internal::Isolate::Current()->factory())
-#define ISOLATE (v8::internal::Isolate::Current())
-#define ZONE (v8::internal::Isolate::Current()->zone())
-#define LOGGER (v8::internal::Isolate::Current()->logger())
+class CodeTracer FINAL : public Malloced {
+ public:
+ explicit CodeTracer(int isolate_id)
+ : file_(NULL),
+ scope_depth_(0) {
+ if (!ShouldRedirect()) {
+ file_ = stdout;
+ return;
+ }
+ if (FLAG_redirect_code_traces_to == NULL) {
+ SNPrintF(filename_,
+ "code-%d-%d.asm",
+ base::OS::GetCurrentProcessId(),
+ isolate_id);
+ } else {
+ StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length());
+ }
-// Tells whether the global context is marked with out of memory.
-inline bool Context::has_out_of_memory() {
- return global_context()->out_of_memory()->IsTrue();
-}
+ WriteChars(filename_.start(), "", 0, false);
+ }
+ class Scope {
+ public:
+ explicit Scope(CodeTracer* tracer) : tracer_(tracer) { tracer->OpenFile(); }
+ ~Scope() { tracer_->CloseFile(); }
-// Mark the global context with out of memory.
-inline void Context::mark_out_of_memory() {
- global_context()->set_out_of_memory(HEAP->true_value());
-}
+ FILE* file() const { return tracer_->file(); }
+ private:
+ CodeTracer* tracer_;
+ };
+
+ void OpenFile() {
+ if (!ShouldRedirect()) {
+ return;
+ }
+
+ if (file_ == NULL) {
+ file_ = base::OS::FOpen(filename_.start(), "a");
+ }
+
+ scope_depth_++;
+ }
+
+ void CloseFile() {
+ if (!ShouldRedirect()) {
+ return;
+ }
+
+ if (--scope_depth_ == 0) {
+ fclose(file_);
+ file_ = NULL;
+ }
+ }
+
+ FILE* file() const { return file_; }
+
+ private:
+ static bool ShouldRedirect() {
+ return FLAG_redirect_code_traces;
+ }
+
+ EmbeddedVector<char, 128> filename_;
+ FILE* file_;
+ int scope_depth_;
+};
} } // namespace v8::internal