Upgrade to V8 3.4
Merge 3.4.14.35
Simple merge; required updates to makefiles only.
Bug: 568872
Change-Id: I403a38452c547e06fcfa951c12eca12a1bc40978
diff --git a/src/SConscript b/src/SConscript
index fe21d02..b45a567 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -85,7 +85,6 @@
inspector.cc
interpreter-irregexp.cc
isolate.cc
- json-parser.cc
jsregexp.cc
lithium-allocator.cc
lithium.cc
@@ -127,7 +126,9 @@
utils.cc
v8-counters.cc
v8.cc
+ v8conversions.cc
v8threads.cc
+ v8utils.cc
variables.cc
version.cc
zone.cc
@@ -237,11 +238,19 @@
scanner-base.cc
token.cc
unicode.cc
+ utils.cc
""")
}
-D8_FILES = {
+D8_LIGHT_FILES = {
+ 'all': [
+ 'd8.cc'
+ ]
+}
+
+
+D8_FULL_FILES = {
'all': [
'd8.cc', 'd8-debug.cc'
],
@@ -313,26 +322,39 @@
env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')
+ def BuildJS2CEnv(type):
+ js2c_env = { 'TYPE': type, 'COMPRESSION': 'off' }
+ if 'COMPRESS_STARTUP_DATA_BZ2' in env['CPPDEFINES']:
+ js2c_env['COMPRESSION'] = 'bz2'
+ return js2c_env
+
# Build the standard platform-independent source files.
source_files = context.GetRelevantSources(SOURCES)
-
- d8_files = context.GetRelevantSources(D8_FILES)
- d8_js = env.JS2C('d8-js.cc', 'd8.js', TYPE='D8')
+ d8_js = env.JS2C('d8-js.cc', 'd8.js', **{'TYPE': 'D8', 'COMPRESSION': 'off'})
d8_js_obj = context.ConfigureObject(env, d8_js, CPPPATH=['.'])
- d8_objs = [context.ConfigureObject(env, [d8_files]), d8_js_obj]
+ if context.options['library'] == 'shared':
+ d8_files = context.GetRelevantSources(D8_LIGHT_FILES)
+ d8_objs = []
+ else:
+ d8_files = context.GetRelevantSources(D8_FULL_FILES)
+ d8_objs = [d8_js_obj]
+ d8_objs.append(context.ConfigureObject(env, [d8_files]))
# Combine the JavaScript library files into a single C++ file and
# compile it.
library_files = [s for s in LIBRARY_FILES]
library_files.append('macros.py')
- libraries_src = env.JS2C(['libraries.cc'], library_files, TYPE='CORE')
+ libraries_src = env.JS2C(
+ ['libraries.cc'], library_files, **BuildJS2CEnv('CORE'))
libraries_obj = context.ConfigureObject(env, libraries_src, CPPPATH=['.'])
# Combine the experimental JavaScript library files into a C++ file
# and compile it.
experimental_library_files = [ s for s in EXPERIMENTAL_LIBRARY_FILES ]
experimental_library_files.append('macros.py')
- experimental_libraries_src = env.JS2C(['experimental-libraries.cc'], experimental_library_files, TYPE='EXPERIMENTAL')
+ experimental_libraries_src = env.JS2C(['experimental-libraries.cc'],
+ experimental_library_files,
+ **BuildJS2CEnv('EXPERIMENTAL'))
experimental_libraries_obj = context.ConfigureObject(env, experimental_libraries_src, CPPPATH=['.'])
source_objs = context.ConfigureObject(env, source_files)
diff --git a/src/accessors.cc b/src/accessors.cc
index 255e3dd..e7d6aa0 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -688,7 +688,6 @@
index_(0) {
GetFunctions();
}
-
JSFunction* next() {
if (functions_.length() == 0) return NULL;
JSFunction* next_function = functions_[index_];
diff --git a/src/api.cc b/src/api.cc
index 5980854..b0e9775 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -38,6 +38,7 @@
#include "global-handles.h"
#include "heap-profiler.h"
#include "messages.h"
+#include "natives.h"
#include "parser.h"
#include "platform.h"
#include "profile-generator-inl.h"
@@ -53,16 +54,11 @@
#define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
-#ifdef ENABLE_VMSTATE_TRACKING
#define ENTER_V8(isolate) \
ASSERT((isolate)->IsInitialized()); \
i::VMState __state__((isolate), i::OTHER)
#define LEAVE_V8(isolate) \
i::VMState __state__((isolate), i::EXTERNAL)
-#else
-#define ENTER_V8(isolate) ((void) 0)
-#define LEAVE_V8(isolate) ((void) 0)
-#endif
namespace v8 {
@@ -97,13 +93,11 @@
} \
} while (false)
-// TODO(isolates): Add a parameter to this macro for an isolate.
-#define API_ENTRY_CHECK(msg) \
+#define API_ENTRY_CHECK(isolate, msg) \
do { \
if (v8::Locker::IsActive()) { \
- ApiCheck(i::Isolate::Current()->thread_manager()-> \
- IsLockedByCurrentThread(), \
+ ApiCheck(isolate->thread_manager()->IsLockedByCurrentThread(), \
msg, \
"Entering the V8 API without proper locking in place"); \
} \
@@ -115,9 +109,7 @@
static void DefaultFatalErrorHandler(const char* location,
const char* message) {
-#ifdef ENABLE_VMSTATE_TRACKING
i::VMState __state__(i::Isolate::Current(), i::OTHER);
-#endif
API_Fatal(location, message);
}
@@ -176,8 +168,8 @@
heap_stats.pending_global_handle_count = &pending_global_handle_count;
int near_death_global_handle_count;
heap_stats.near_death_global_handle_count = &near_death_global_handle_count;
- int destroyed_global_handle_count;
- heap_stats.destroyed_global_handle_count = &destroyed_global_handle_count;
+ int free_global_handle_count;
+ heap_stats.free_global_handle_count = &free_global_handle_count;
intptr_t memory_allocator_size;
heap_stats.memory_allocator_size = &memory_allocator_size;
intptr_t memory_allocator_capacity;
@@ -311,6 +303,46 @@
}
+StartupDataDecompressor::StartupDataDecompressor()
+ : raw_data(i::NewArray<char*>(V8::GetCompressedStartupDataCount())) {
+ for (int i = 0; i < V8::GetCompressedStartupDataCount(); ++i) {
+ raw_data[i] = NULL;
+ }
+}
+
+
+StartupDataDecompressor::~StartupDataDecompressor() {
+ for (int i = 0; i < V8::GetCompressedStartupDataCount(); ++i) {
+ i::DeleteArray(raw_data[i]);
+ }
+ i::DeleteArray(raw_data);
+}
+
+
+int StartupDataDecompressor::Decompress() {
+ int compressed_data_count = V8::GetCompressedStartupDataCount();
+ StartupData* compressed_data =
+ i::NewArray<StartupData>(compressed_data_count);
+ V8::GetCompressedStartupData(compressed_data);
+ for (int i = 0; i < compressed_data_count; ++i) {
+ char* decompressed = raw_data[i] =
+ i::NewArray<char>(compressed_data[i].raw_size);
+ if (compressed_data[i].compressed_size != 0) {
+ int result = DecompressData(decompressed,
+ &compressed_data[i].raw_size,
+ compressed_data[i].data,
+ compressed_data[i].compressed_size);
+ if (result != 0) return result;
+ } else {
+ ASSERT_EQ(0, compressed_data[i].raw_size);
+ }
+ compressed_data[i].data = decompressed;
+ }
+ V8::SetDecompressedStartupData(compressed_data);
+ return 0;
+}
+
+
StartupData::CompressionAlgorithm V8::GetCompressedStartupDataAlgorithm() {
#ifdef COMPRESS_STARTUP_DATA_BZ2
return StartupData::kBZip2;
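
For embedders building with COMPRESS_STARTUP_DATA_BZ2, the new StartupDataDecompressor above is meant to be subclassed with a concrete DecompressData. A minimal sketch, assuming the standard libbz2 API (error handling elided; the virtual hook's signature mirrors the call in Decompress()):

#include <bzlib.h>
#include <v8.h>

class BZip2Decompressor : public v8::StartupDataDecompressor {
 protected:
  // Called by Decompress() above for each compressed StartupData item.
  virtual int DecompressData(char* raw_data,
                             int* raw_data_size,
                             const char* compressed_data,
                             int compressed_data_size) {
    unsigned int dest_len = static_cast<unsigned int>(*raw_data_size);
    int result = BZ2_bzBuffToBuffDecompress(
        raw_data, &dest_len,
        const_cast<char*>(compressed_data), compressed_data_size,
        0 /* small */, 0 /* verbosity */);
    *raw_data_size = static_cast<int>(dest_len);
    return result;  // BZ_OK == 0, matching the "result != 0" check above.
  }
};

// Usage, before initializing V8:
//   BZip2Decompressor decompressor;
//   if (decompressor.Decompress() != 0) abort();
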
@@ -323,6 +355,8 @@
enum CompressedStartupDataItems {
kSnapshot = 0,
kSnapshotContext,
+ kLibraries,
+ kExperimentalLibraries,
kCompressedStartupDataCount
};
@@ -347,6 +381,21 @@
compressed_data[kSnapshotContext].compressed_size =
i::Snapshot::context_size();
compressed_data[kSnapshotContext].raw_size = i::Snapshot::context_raw_size();
+
+ i::Vector<const i::byte> libraries_source = i::Natives::GetScriptsSource();
+ compressed_data[kLibraries].data =
+ reinterpret_cast<const char*>(libraries_source.start());
+ compressed_data[kLibraries].compressed_size = libraries_source.length();
+ compressed_data[kLibraries].raw_size = i::Natives::GetRawScriptsSize();
+
+ i::Vector<const i::byte> exp_libraries_source =
+ i::ExperimentalNatives::GetScriptsSource();
+ compressed_data[kExperimentalLibraries].data =
+ reinterpret_cast<const char*>(exp_libraries_source.start());
+ compressed_data[kExperimentalLibraries].compressed_size =
+ exp_libraries_source.length();
+ compressed_data[kExperimentalLibraries].raw_size =
+ i::ExperimentalNatives::GetRawScriptsSize();
#endif
}
@@ -362,6 +411,20 @@
i::Snapshot::set_context_raw_data(
reinterpret_cast<const i::byte*>(
decompressed_data[kSnapshotContext].data));
+
+ ASSERT_EQ(i::Natives::GetRawScriptsSize(),
+ decompressed_data[kLibraries].raw_size);
+ i::Vector<const char> libraries_source(
+ decompressed_data[kLibraries].data,
+ decompressed_data[kLibraries].raw_size);
+ i::Natives::SetRawScriptsSource(libraries_source);
+
+ ASSERT_EQ(i::ExperimentalNatives::GetRawScriptsSize(),
+ decompressed_data[kExperimentalLibraries].raw_size);
+ i::Vector<const char> exp_libraries_source(
+ decompressed_data[kExperimentalLibraries].data,
+ decompressed_data[kExperimentalLibraries].raw_size);
+ i::ExperimentalNatives::SetRawScriptsSource(exp_libraries_source);
#endif
}
@@ -573,8 +636,8 @@
HandleScope::HandleScope() {
- API_ENTRY_CHECK("HandleScope::HandleScope");
i::Isolate* isolate = i::Isolate::Current();
+ API_ENTRY_CHECK(isolate, "HandleScope::HandleScope");
v8::ImplementationUtilities::HandleScopeData* current =
isolate->handle_scope_data();
isolate_ = isolate;
@@ -630,12 +693,11 @@
void Context::Enter() {
- // TODO(isolates): Context should have a pointer to isolate.
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ i::Isolate* isolate = env->GetIsolate();
if (IsDeadCheck(isolate, "v8::Context::Enter()")) return;
ENTER_V8(isolate);
- i::Handle<i::Context> env = Utils::OpenHandle(this);
isolate->handle_scope_implementer()->EnterContext(env);
isolate->handle_scope_implementer()->SaveContext(isolate->context());
@@ -644,7 +706,9 @@
void Context::Exit() {
- // TODO(isolates): Context should have a pointer to isolate.
+ // Exit is essentially a static function and doesn't use the
+ // receiver, so we have to get the current isolate from the thread
+ // local.
i::Isolate* isolate = i::Isolate::Current();
if (!isolate->IsInitialized()) return;
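
Enter() can now derive the isolate from the context handle itself, while Exit() still has to consult the thread-local current isolate, as the comment above explains. Both are normally driven through a stack-allocated scope; a minimal sketch of that RAII pairing (v8::Context::Scope in v8.h does essentially this):

class ContextScope {
 public:
  explicit ContextScope(v8::Handle<v8::Context> context)
      : context_(context) {
    context_->Enter();  // isolate derived from the context handle
  }
  ~ContextScope() {
    context_->Exit();   // isolate taken from the current thread
  }
 private:
  v8::Handle<v8::Context> context_;
};
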
@@ -662,41 +726,31 @@
void Context::SetData(v8::Handle<String> data) {
- // TODO(isolates): Context should have a pointer to isolate.
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ i::Isolate* isolate = env->GetIsolate();
if (IsDeadCheck(isolate, "v8::Context::SetData()")) return;
- ENTER_V8(isolate);
- {
- i::HandleScope scope(isolate);
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
- ASSERT(env->IsGlobalContext());
- if (env->IsGlobalContext()) {
- env->set_data(*raw_data);
- }
+ i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
+ ASSERT(env->IsGlobalContext());
+ if (env->IsGlobalContext()) {
+ env->set_data(*raw_data);
}
}
v8::Local<v8::Value> Context::GetData() {
- // TODO(isolates): Context should have a pointer to isolate.
- i::Isolate* isolate = i::Isolate::Current();
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ i::Isolate* isolate = env->GetIsolate();
if (IsDeadCheck(isolate, "v8::Context::GetData()")) {
return v8::Local<Value>();
}
- ENTER_V8(isolate);
i::Object* raw_result = NULL;
- {
- i::HandleScope scope(isolate);
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- ASSERT(env->IsGlobalContext());
- if (env->IsGlobalContext()) {
- raw_result = env->data();
- } else {
- return Local<Value>();
- }
+ ASSERT(env->IsGlobalContext());
+ if (env->IsGlobalContext()) {
+ raw_result = env->data();
+ } else {
+ return Local<Value>();
}
- i::Handle<i::Object> result(raw_result);
+ i::Handle<i::Object> result(raw_result, isolate);
return Utils::ToLocal(result);
}
@@ -823,6 +877,7 @@
i::Handle<i::FunctionTemplateInfo> info) {
info->set_tag(i::Smi::FromInt(Consts::FUNCTION_TEMPLATE));
info->set_flag(0);
+ info->set_prototype_attributes(i::Smi::FromInt(v8::None));
}
@@ -925,6 +980,7 @@
int TypeSwitch::match(v8::Handle<Value> value) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "TypeSwitch::match");
+ USE(isolate);
i::Handle<i::Object> obj = Utils::OpenHandle(*value);
i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
i::FixedArray* types = i::FixedArray::cast(info->types());
@@ -1044,6 +1100,17 @@
}
+void FunctionTemplate::SetPrototypeAttributes(int attributes) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetPrototypeAttributes()")) {
+ return;
+ }
+ ENTER_V8(isolate);
+ Utils::OpenHandle(this)->set_prototype_attributes(
+ i::Smi::FromInt(attributes));
+}
+
+
void FunctionTemplate::SetNamedInstancePropertyHandler(
NamedPropertyGetter getter,
NamedPropertySetter setter,
@@ -1545,26 +1612,27 @@
v8::TryCatch::TryCatch()
- : next_(i::Isolate::Current()->try_catch_handler_address()),
- exception_(HEAP->the_hole_value()),
+ : isolate_(i::Isolate::Current()),
+ next_(isolate_->try_catch_handler_address()),
+ exception_(isolate_->heap()->the_hole_value()),
message_(i::Smi::FromInt(0)),
is_verbose_(false),
can_continue_(true),
capture_message_(true),
rethrow_(false) {
- i::Isolate::Current()->RegisterTryCatchHandler(this);
+ isolate_->RegisterTryCatchHandler(this);
}
v8::TryCatch::~TryCatch() {
- i::Isolate* isolate = i::Isolate::Current();
+ ASSERT(isolate_ == i::Isolate::Current());
if (rethrow_) {
v8::HandleScope scope;
v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception());
- isolate->UnregisterTryCatchHandler(this);
+ isolate_->UnregisterTryCatchHandler(this);
v8::ThrowException(exc);
} else {
- isolate->UnregisterTryCatchHandler(this);
+ isolate_->UnregisterTryCatchHandler(this);
}
}
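
Caching isolate_ at construction lets every later TryCatch method assert that it runs on the isolate that created it. A sketch of the usage pattern those ASSERTs guard (Script::Compile and String::Utf8Value are the public API of this release):

#include <stdio.h>

void RunScript(v8::Handle<v8::String> source) {
  v8::HandleScope scope;
  v8::TryCatch try_catch;  // registers itself with the current isolate
  v8::Handle<v8::Script> script = v8::Script::Compile(source);
  if (script.IsEmpty()) {
    // Exception() must be called on the same isolate (and thread) that
    // constructed try_catch.
    v8::String::Utf8Value error(try_catch.Exception());
    printf("compile error: %s\n", *error);
    return;
  }
  script->Run();
}
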
@@ -1587,10 +1655,11 @@
v8::Local<Value> v8::TryCatch::Exception() const {
+ ASSERT(isolate_ == i::Isolate::Current());
if (HasCaught()) {
// Check for out of memory exception.
i::Object* exception = reinterpret_cast<i::Object*>(exception_);
- return v8::Utils::ToLocal(i::Handle<i::Object>(exception));
+ return v8::Utils::ToLocal(i::Handle<i::Object>(exception, isolate_));
} else {
return v8::Local<Value>();
}
@@ -1598,15 +1667,17 @@
v8::Local<Value> v8::TryCatch::StackTrace() const {
+ ASSERT(isolate_ == i::Isolate::Current());
if (HasCaught()) {
i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
if (!raw_obj->IsJSObject()) return v8::Local<Value>();
- v8::HandleScope scope;
- i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj));
- i::Handle<i::String> name = FACTORY->LookupAsciiSymbol("stack");
- if (!obj->HasProperty(*name))
- return v8::Local<Value>();
- return scope.Close(v8::Utils::ToLocal(i::GetProperty(obj, name)));
+ i::HandleScope scope(isolate_);
+ i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
+ i::Handle<i::String> name = isolate_->factory()->LookupAsciiSymbol("stack");
+ if (!obj->HasProperty(*name)) return v8::Local<Value>();
+ i::Handle<i::Object> value = i::GetProperty(obj, name);
+ if (value.is_null()) return v8::Local<Value>();
+ return v8::Utils::ToLocal(scope.CloseAndEscape(value));
} else {
return v8::Local<Value>();
}
@@ -1614,9 +1685,10 @@
v8::Local<v8::Message> v8::TryCatch::Message() const {
+ ASSERT(isolate_ == i::Isolate::Current());
if (HasCaught() && message_ != i::Smi::FromInt(0)) {
i::Object* message = reinterpret_cast<i::Object*>(message_);
- return v8::Utils::MessageToLocal(i::Handle<i::Object>(message));
+ return v8::Utils::MessageToLocal(i::Handle<i::Object>(message, isolate_));
} else {
return v8::Local<v8::Message>();
}
@@ -1624,7 +1696,8 @@
void v8::TryCatch::Reset() {
- exception_ = HEAP->the_hole_value();
+ ASSERT(isolate_ == i::Isolate::Current());
+ exception_ = isolate_->heap()->the_hole_value();
message_ = i::Smi::FromInt(0);
}
@@ -2094,6 +2167,65 @@
}
+bool Value::IsStringObject() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::IsStringObject()")) return false;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return obj->HasSpecificClassOf(isolate->heap()->String_symbol());
+}
+
+
+bool Value::IsNumberObject() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::IsNumberObject()")) return false;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return obj->HasSpecificClassOf(isolate->heap()->Number_symbol());
+}
+
+
+static i::Object* LookupBuiltin(i::Isolate* isolate,
+ const char* builtin_name) {
+ i::Handle<i::String> symbol =
+ isolate->factory()->LookupAsciiSymbol(builtin_name);
+ i::Handle<i::JSBuiltinsObject> builtins = isolate->js_builtins_object();
+ return builtins->GetPropertyNoExceptionThrown(*symbol);
+}
+
+
+static bool CheckConstructor(i::Isolate* isolate,
+ i::Handle<i::JSObject> obj,
+ const char* class_name) {
+ return obj->map()->constructor() == LookupBuiltin(isolate, class_name);
+}
+
+
+bool Value::IsNativeError() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::IsNativeError()")) return false;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (obj->IsJSObject()) {
+ i::Handle<i::JSObject> js_obj(i::JSObject::cast(*obj));
+ return CheckConstructor(isolate, js_obj, "$Error") ||
+ CheckConstructor(isolate, js_obj, "$EvalError") ||
+ CheckConstructor(isolate, js_obj, "$RangeError") ||
+ CheckConstructor(isolate, js_obj, "$ReferenceError") ||
+ CheckConstructor(isolate, js_obj, "$SyntaxError") ||
+ CheckConstructor(isolate, js_obj, "$TypeError") ||
+ CheckConstructor(isolate, js_obj, "$URIError");
+ } else {
+ return false;
+ }
+}
+
+
+bool Value::IsBooleanObject() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::IsBooleanObject()")) return false;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return obj->HasSpecificClassOf(isolate->heap()->Boolean_symbol());
+}
+
+
bool Value::IsRegExp() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsRegExp()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
@@ -2289,6 +2421,36 @@
}
+void v8::StringObject::CheckCast(v8::Value* that) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::StringObject::Cast()")) return;
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_symbol()),
+ "v8::StringObject::Cast()",
+ "Could not convert to StringObject");
+}
+
+
+void v8::NumberObject::CheckCast(v8::Value* that) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::NumberObject::Cast()")) return;
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_symbol()),
+ "v8::NumberObject::Cast()",
+ "Could not convert to NumberObject");
+}
+
+
+void v8::BooleanObject::CheckCast(v8::Value* that) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::BooleanObject::Cast()")) return;
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_symbol()),
+ "v8::BooleanObject::Cast()",
+ "Could not convert to BooleanObject");
+}
+
+
void v8::RegExp::CheckCast(v8::Value* that) {
if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
@@ -2632,6 +2794,26 @@
}
+PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::GetPropertyAttribute()",
+ return static_cast<PropertyAttribute>(NONE));
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+ if (!key_obj->IsString()) {
+ EXCEPTION_PREAMBLE(isolate);
+ key_obj = i::Execution::ToString(key_obj, &has_pending_exception);
+ EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE));
+ }
+ i::Handle<i::String> key_string = i::Handle<i::String>::cast(key_obj);
+ PropertyAttributes result = self->GetPropertyAttribute(*key_string);
+ if (result == ABSENT) return static_cast<PropertyAttribute>(NONE);
+ return static_cast<PropertyAttribute>(result);
+}
+
+
Local<Value> v8::Object::GetPrototype() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::GetPrototype()",
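
A short usage sketch for the new GetPropertyAttributes (IsWritable is a hypothetical helper; the PropertyAttribute flags are the public None/ReadOnly/DontEnum/DontDelete enum):

// Hypothetical helper: non-string keys are converted via ToString, as in
// the implementation above, and ABSENT is folded into None.
bool IsWritable(v8::Handle<v8::Object> obj, v8::Handle<v8::Value> key) {
  v8::PropertyAttribute attr = obj->GetPropertyAttributes(key);
  return (attr & v8::ReadOnly) == 0;
}
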
@@ -2697,6 +2879,25 @@
}
+Local<Array> v8::Object::GetOwnPropertyNames() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::GetOwnPropertyNames()",
+ return Local<v8::Array>());
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::FixedArray> value =
+ i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY);
+ // Because we use caching to speed up enumeration it is important
+ // to never change the result of the basic enumeration function so
+ // we clone the result.
+ i::Handle<i::FixedArray> elms = isolate->factory()->CopyFixedArray(value);
+ i::Handle<i::JSArray> result =
+ isolate->factory()->NewJSArrayWithElements(elms);
+ return Utils::ToLocal(scope.CloseAndEscape(result));
+}
+
+
Local<String> v8::Object::ObjectProtoToString() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::ObjectProtoToString()",
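
GetOwnPropertyNames mirrors the existing GetPropertyNames but stops at the receiver (i::LOCAL_ONLY) instead of walking the prototype chain. A usage sketch (DumpOwnProperties is illustrative):

#include <stdio.h>

void DumpOwnProperties(v8::Handle<v8::Object> obj) {
  v8::HandleScope scope;
  v8::Local<v8::Array> names = obj->GetOwnPropertyNames();
  for (uint32_t i = 0; i < names->Length(); ++i) {
    v8::String::Utf8Value name(names->Get(i));
    printf("own property: %s\n", *name);
  }
}
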
@@ -3392,6 +3593,7 @@
void Function::SetName(v8::Handle<v8::String> name) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
+ USE(isolate);
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
func->shared()->set_name(*Utils::OpenHandle(*name));
}
@@ -3751,6 +3953,11 @@
}
+void v8::V8::SetEntropySource(EntropySource source) {
+ i::V8::SetEntropySource(source);
+}
+
+
bool v8::V8::Dispose() {
i::Isolate* isolate = i::Isolate::Current();
if (!ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(),
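
SetEntropySource lets the embedder seed V8's random number generator. A sketch, assuming EntropySource is the function-pointer typedef taking a byte buffer and a length (UrandomEntropySource is a hypothetical name):

#include <stdio.h>

// Fill `buffer` with `length` random bytes; return true on success.
static bool UrandomEntropySource(unsigned char* buffer, size_t length) {
  FILE* fp = fopen("/dev/urandom", "rb");
  if (fp == NULL) return false;
  size_t n = fread(buffer, 1, length, fp);
  fclose(fp);
  return n == length;
}

// Install before V8::Initialize():
//   v8::V8::SetEntropySource(UrandomEntropySource);
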
@@ -4262,6 +4469,9 @@
if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
return false;
}
+ if (isolate->heap()->IsInGCPostProcessing()) {
+ return false;
+ }
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
isolate->heap()->external_string_table()->AddString(*obj);
@@ -4294,6 +4504,9 @@
if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
return false;
}
+ if (isolate->heap()->IsInGCPostProcessing()) {
+ return false;
+ }
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
isolate->heap()->external_string_table()->AddString(*obj);
@@ -4328,6 +4541,73 @@
}
+Local<v8::Value> v8::NumberObject::New(double value) {
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::NumberObject::New()");
+ LOG_API(isolate, "NumberObject::New");
+ ENTER_V8(isolate);
+ i::Handle<i::Object> number = isolate->factory()->NewNumber(value);
+ i::Handle<i::Object> obj = isolate->factory()->ToObject(number);
+ return Utils::ToLocal(obj);
+}
+
+
+double v8::NumberObject::NumberValue() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::NumberObject::NumberValue()")) return 0;
+ LOG_API(isolate, "NumberObject::NumberValue");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
+ return jsvalue->value()->Number();
+}
+
+
+Local<v8::Value> v8::BooleanObject::New(bool value) {
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::BooleanObject::New()");
+ LOG_API(isolate, "BooleanObject::New");
+ ENTER_V8(isolate);
+ i::Handle<i::Object> boolean(value ? isolate->heap()->true_value()
+ : isolate->heap()->false_value());
+ i::Handle<i::Object> obj = isolate->factory()->ToObject(boolean);
+ return Utils::ToLocal(obj);
+}
+
+
+bool v8::BooleanObject::BooleanValue() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::BooleanObject::BooleanValue()")) return 0;
+ LOG_API(isolate, "BooleanObject::BooleanValue");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
+ return jsvalue->value()->IsTrue();
+}
+
+
+Local<v8::Value> v8::StringObject::New(Handle<String> value) {
+ i::Isolate* isolate = i::Isolate::Current();
+ EnsureInitializedForIsolate(isolate, "v8::StringObject::New()");
+ LOG_API(isolate, "StringObject::New");
+ ENTER_V8(isolate);
+ i::Handle<i::Object> obj =
+ isolate->factory()->ToObject(Utils::OpenHandle(*value));
+ return Utils::ToLocal(obj);
+}
+
+
+Local<v8::String> v8::StringObject::StringValue() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::StringObject::StringValue()")) {
+ return Local<v8::String>();
+ }
+ LOG_API(isolate, "StringObject::StringValue");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
+ return Utils::ToLocal(
+ i::Handle<i::String>(i::String::cast(jsvalue->value())));
+}
+
+
Local<v8::Value> v8::Date::New(double time) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Date::New()");
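
The wrapper-object additions follow the Date pattern: New() boxes a primitive into a JSValue and the *Value() accessors unbox it. A round-trip sketch:

void WrapperRoundTrip() {
  v8::HandleScope scope;
  v8::Local<v8::Value> boxed = v8::NumberObject::New(3.14);
  if (boxed->IsNumberObject()) {
    // The cast is verified by NumberObject::CheckCast above in checked builds.
    double unboxed = v8::NumberObject::Cast(*boxed)->NumberValue();
    (void) unboxed;  // == 3.14; BooleanObject/StringObject work the same way.
  }
}
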
@@ -4726,73 +5006,20 @@
void V8::PauseProfiler() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- PauseProfilerEx(PROFILER_MODULE_CPU);
-#endif
+ i::Isolate* isolate = i::Isolate::Current();
+ isolate->logger()->PauseProfiler();
}
void V8::ResumeProfiler() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- ResumeProfilerEx(PROFILER_MODULE_CPU);
-#endif
+ i::Isolate* isolate = i::Isolate::Current();
+ isolate->logger()->ResumeProfiler();
}
bool V8::IsProfilerPaused() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- return LOGGER->GetActiveProfilerModules() & PROFILER_MODULE_CPU;
-#else
- return true;
-#endif
-}
-
-
-void V8::ResumeProfilerEx(int flags, int tag) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
- if (flags & PROFILER_MODULE_HEAP_SNAPSHOT) {
- // Snapshot mode: resume modules, perform GC, then pause only
- // those modules which haven't been started prior to making a
- // snapshot.
-
- // Make a GC prior to taking a snapshot.
- isolate->heap()->CollectAllGarbage(false);
- // Reset snapshot flag and CPU module flags.
- flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
- const int current_flags = isolate->logger()->GetActiveProfilerModules();
- isolate->logger()->ResumeProfiler(flags, tag);
- isolate->heap()->CollectAllGarbage(false);
- isolate->logger()->PauseProfiler(~current_flags & flags, tag);
- } else {
- isolate->logger()->ResumeProfiler(flags, tag);
- }
-#endif
-}
-
-
-void V8::PauseProfilerEx(int flags, int tag) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- LOGGER->PauseProfiler(flags, tag);
-#endif
-}
-
-
-int V8::GetActiveProfilerModules() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- return LOGGER->GetActiveProfilerModules();
-#else
- return PROFILER_MODULE_NONE;
-#endif
-}
-
-
-int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- ASSERT(max_size >= kMinimumSizeForLogLinesBuffer);
- return LOGGER->GetLogLines(from_pos, dest_buf, max_size);
-#endif
- return 0;
+ return isolate->logger()->IsProfilerPaused();
}
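
With the ENABLE_LOGGING_AND_PROFILING guards and the modular *Ex variants gone, pausing the CPU profiler is a plain toggle on the isolate's logger:

// Sketch: the profiler API is now unconditional.
void ToggleCpuProfiler() {
  if (v8::V8::IsProfilerPaused()) {
    v8::V8::ResumeProfiler();
  } else {
    v8::V8::PauseProfiler();
  }
}
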
@@ -4806,7 +5033,7 @@
void V8::TerminateExecution(int thread_id) {
i::Isolate* isolate = i::Isolate::Current();
if (!isolate->IsInitialized()) return;
- API_ENTRY_CHECK("V8::TerminateExecution()");
+ API_ENTRY_CHECK(isolate, "V8::TerminateExecution()");
// If the thread_id identifies the current thread just terminate
// execution right away. Otherwise, ask the thread manager to
// terminate the thread with the given id if any.
@@ -4829,9 +5056,10 @@
}
-bool V8::IsExecutionTerminating() {
- i::Isolate* isolate = i::Isolate::Current();
- return IsExecutionTerminatingCheck(isolate);
+bool V8::IsExecutionTerminating(Isolate* isolate) {
+ i::Isolate* i_isolate = isolate != NULL ?
+ reinterpret_cast<i::Isolate*>(isolate) : i::Isolate::Current();
+ return IsExecutionTerminatingCheck(i_isolate);
}
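
IsExecutionTerminating now accepts the isolate explicitly; presumably the declaration defaults the parameter to NULL so existing callers keep compiling. A sketch for a long-running native callback that honors TerminateExecution:

// Hypothetical helper: bail out cooperatively when termination is flagged.
bool ShouldBailOut(v8::Isolate* isolate) {
  return v8::V8::IsExecutionTerminating(isolate);
}
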
@@ -5255,8 +5483,6 @@
#endif // ENABLE_DEBUGGER_SUPPORT
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
Handle<String> CpuProfileNode::GetFunctionName() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetFunctionName");
@@ -5443,6 +5669,7 @@
reinterpret_cast<const i::HeapGraphEdge*>(edge));
}
+
HeapGraphEdge::Type HeapGraphEdge::GetType() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphEdge::GetType");
@@ -5511,19 +5738,10 @@
uint64_t HeapGraphNode::GetId() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphNode::GetId");
- ASSERT(ToInternal(this)->snapshot()->type() != i::HeapSnapshot::kAggregated);
return ToInternal(this)->id();
}
-int HeapGraphNode::GetInstancesCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapGraphNode::GetInstancesCount");
- ASSERT(ToInternal(this)->snapshot()->type() == i::HeapSnapshot::kAggregated);
- return static_cast<int>(ToInternal(this)->id());
-}
-
-
int HeapGraphNode::GetSelfSize() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphNode::GetSelfSize");
@@ -5630,6 +5848,21 @@
}
+int HeapSnapshot::GetNodesCount() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
+ return ToInternal(this)->entries()->length();
+}
+
+
+const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
+ return reinterpret_cast<const HeapGraphNode*>(
+ ToInternal(this)->entries()->at(index));
+}
+
+
void HeapSnapshot::Serialize(OutputStream* stream,
HeapSnapshot::SerializationFormat format) const {
i::Isolate* isolate = i::Isolate::Current();
@@ -5681,9 +5914,6 @@
case HeapSnapshot::kFull:
internal_type = i::HeapSnapshot::kFull;
break;
- case HeapSnapshot::kAggregated:
- internal_type = i::HeapSnapshot::kAggregated;
- break;
default:
UNREACHABLE();
}
@@ -5706,7 +5936,6 @@
callback);
}
-#endif // ENABLE_LOGGING_AND_PROFILING
v8::Testing::StressType internal::Testing::stress_type_ =
diff --git a/src/api.h b/src/api.h
index 5846414..8d2e778 100644
--- a/src/api.h
+++ b/src/api.h
@@ -398,7 +398,6 @@
// data.
class HandleScopeImplementer {
public:
-
explicit HandleScopeImplementer(Isolate* isolate)
: isolate_(isolate),
blocks_(0),
diff --git a/src/apinatives.js b/src/apinatives.js
index ca2bbf5..193863f 100644
--- a/src/apinatives.js
+++ b/src/apinatives.js
@@ -73,7 +73,15 @@
if (name) %FunctionSetName(fun, name);
cache[serialNumber] = fun;
var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
- fun.prototype = prototype ? Instantiate(prototype) : {};
+ var attributes = %GetTemplateField(data, kApiPrototypeAttributesOffset);
+ if (attributes != NONE) {
+ %IgnoreAttributesAndSetProperty(
+ fun, "prototype",
+ prototype ? Instantiate(prototype) : {},
+ attributes);
+ } else {
+ fun.prototype = prototype ? Instantiate(prototype) : {};
+ }
%SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
var parent = %GetTemplateField(data, kApiParentTemplateOffset);
if (parent) {
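
This is the JavaScript half of the new FunctionTemplate::SetPrototypeAttributes: when the template carries non-default attributes, the prototype is installed through %IgnoreAttributesAndSetProperty rather than plain assignment. From C++ the feature would be driven roughly like this (ConfigureTemplate is a hypothetical helper):

void ConfigureTemplate(v8::Local<v8::FunctionTemplate> templ) {
  // Make fun.prototype read-only and non-deletable; instantiation then
  // takes the %IgnoreAttributesAndSetProperty path above.
  templ->SetPrototypeAttributes(v8::ReadOnly | v8::DontDelete);
}
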
diff --git a/src/arguments.h b/src/arguments.h
index a080581..72bbe1d 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -63,6 +63,14 @@
return Handle<S>(reinterpret_cast<S**>(value));
}
+ int smi_at(int index) {
+ return Smi::cast((*this)[index])->value();
+ }
+
+ double number_at(int index) {
+ return (*this)[index]->Number();
+ }
+
// Get the total number of arguments including the receiver.
int length() const { return length_; }
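
smi_at and number_at give runtime code typed access to tagged arguments. A hypothetical runtime-style helper (the name and logic are illustrative, not from the source):

// args[0] is assumed to hold a Smi index and args[1] a heap number or Smi.
static double ScaleElement(v8::internal::Arguments& args) {
  int index = args.smi_at(0);         // Smi::cast((*this)[0])->value()
  double factor = args.number_at(1);  // (*this)[1]->Number()
  return index * factor;
}
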
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index c7050a7..89df079 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -320,13 +320,13 @@
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
- num_prinfo_ = 0;
+ num_pending_reloc_info_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
- last_const_pool_end_ = 0;
+ first_const_pool_use_ = -1;
last_bound_pos_ = 0;
- ast_id_for_reloc_info_ = kNoASTId;
+ ClearRecordedAstId();
}
@@ -346,7 +346,7 @@
void Assembler::GetCode(CodeDesc* desc) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
- ASSERT(num_prinfo_ == 0);
+ ASSERT(num_pending_reloc_info_ == 0);
// Setup code descriptor.
desc->buffer = buffer_;
@@ -873,7 +873,7 @@
emit(instr | rn.code()*B16 | rd.code()*B12);
if (rn.is(pc) || x.rm_.is(pc)) {
// Block constant pool emission for one instruction after reading pc.
- BlockConstPoolBefore(pc_offset() + kInstrSize);
+ BlockConstPoolFor(1);
}
}
@@ -997,7 +997,7 @@
// Block the emission of the constant pool, since the branch instruction must
// be emitted at the pc offset recorded by the label.
- BlockConstPoolBefore(pc_offset() + kInstrSize);
+ BlockConstPoolFor(1);
return target_pos - (pc_offset() + kPcLoadDelta);
}
@@ -1493,15 +1493,17 @@
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
ASSERT(code >= kDefaultStopCode);
- // The Simulator will handle the stop instruction and get the message address.
- // It expects to find the address just after the svc instruction.
- BlockConstPoolFor(2);
- if (code >= 0) {
- svc(kStopCode + code, cond);
- } else {
- svc(kStopCode + kMaxStopCode, cond);
+ {
+ // The Simulator will handle the stop instruction and get the message
+ // address. It expects to find the address just after the svc instruction.
+ BlockConstPoolScope block_const_pool(this);
+ if (code >= 0) {
+ svc(kStopCode + code, cond);
+ } else {
+ svc(kStopCode + kMaxStopCode, cond);
+ }
+ emit(reinterpret_cast<Instr>(msg));
}
- emit(reinterpret_cast<Instr>(msg));
#else // def __arm__
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
if (cond != al) {
@@ -2406,11 +2408,6 @@
}
-void Assembler::BlockConstPoolFor(int instructions) {
- BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
-}
-
-
// Debugging.
void Assembler::RecordJSReturn() {
positions_recorder()->WriteRecordedPositions();
@@ -2474,8 +2471,8 @@
// to relocate any emitted relocation entries.
// Relocate pending relocation entries.
- for (int i = 0; i < num_prinfo_; i++) {
- RelocInfo& rinfo = prinfo_[i];
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION);
if (rinfo.rmode() != RelocInfo::JS_RETURN) {
@@ -2489,7 +2486,7 @@
// No relocation info should be pending while using db. db is used
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
- ASSERT(num_prinfo_ == 0);
+ ASSERT(num_pending_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@@ -2500,7 +2497,7 @@
// No relocation info should be pending while using dd. dd is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dd.
- ASSERT(num_prinfo_ == 0);
+ ASSERT(num_pending_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
@@ -2517,11 +2514,14 @@
|| RelocInfo::IsPosition(rmode));
// These modes do not need an entry in the constant pool.
} else {
- ASSERT(num_prinfo_ < kMaxNumPRInfo);
- prinfo_[num_prinfo_++] = rinfo;
+ ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
+ if (num_pending_reloc_info_ == 0) {
+ first_const_pool_use_ = pc_offset();
+ }
+ pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
- BlockConstPoolBefore(pc_offset() + kInstrSize);
+ BlockConstPoolFor(1);
}
if (rinfo.rmode() != RelocInfo::NONE) {
// Don't record external references unless the heap will be serialized.
@@ -2537,9 +2537,8 @@
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- ASSERT(ast_id_for_reloc_info_ != kNoASTId);
- RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_);
- ast_id_for_reloc_info_ = kNoASTId;
+ RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
+ ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
reloc_info_writer.Write(&rinfo);
@@ -2548,111 +2547,112 @@
}
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
- // Calculate the offset of the next check. It will be overwritten
- // when a const pool is generated or when const pools are being
- // blocked for a specific range.
- next_buffer_check_ = pc_offset() + kCheckConstInterval;
-
- // There is nothing to do if there are no pending relocation info entries.
- if (num_prinfo_ == 0) return;
-
- // We emit a constant pool at regular intervals of about kDistBetweenPools
- // or when requested by parameter force_emit (e.g. after each function).
- // We prefer not to emit a jump unless the max distance is reached or if we
- // are running low on slots, which can happen if a lot of constants are being
- // emitted (e.g. --debug-code and many static references).
- int dist = pc_offset() - last_const_pool_end_;
- if (!force_emit && dist < kMaxDistBetweenPools &&
- (require_jump || dist < kDistBetweenPools) &&
- // TODO(1236125): Cleanup the "magic" number below. We know that
- // the code generation will test every kCheckConstIntervalInst.
- // Thus we are safe as long as we generate less than 7 constant
- // entries per instruction.
- (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
- return;
+void Assembler::BlockConstPoolFor(int instructions) {
+ int pc_limit = pc_offset() + instructions * kInstrSize;
+ if (no_const_pool_before_ < pc_limit) {
+    // If there are some pending entries, the constant pool cannot be
+    // blocked further than first_const_pool_use_ + kMaxDistToPool.
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
+ no_const_pool_before_ = pc_limit;
}
- // If we did not return by now, we need to emit the constant pool soon.
+ if (next_buffer_check_ < no_const_pool_before_) {
+ next_buffer_check_ = no_const_pool_before_;
+ }
+}
- // However, some small sequences of instructions must not be broken up by the
- // insertion of a constant pool; such sequences are protected by setting
- // either const_pool_blocked_nesting_ or no_const_pool_before_, which are
- // both checked here. Also, recursive calls to CheckConstPool are blocked by
- // no_const_pool_before_.
- if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
- // Emission is currently blocked; make sure we try again as soon as
- // possible.
- if (const_pool_blocked_nesting_ > 0) {
- next_buffer_check_ = pc_offset() + kInstrSize;
- } else {
- next_buffer_check_ = no_const_pool_before_;
- }
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+  // Some short sequences of instructions must not be broken up by constant
+  // pool emission; such sequences are protected by calls to BlockConstPoolFor
+  // and BlockConstPoolScope.
+ if (is_const_pool_blocked()) {
// Something is wrong if emission is forced and blocked at the same time.
ASSERT(!force_emit);
return;
}
- int jump_instr = require_jump ? kInstrSize : 0;
+ // There is nothing to do if there are no pending constant pool entries.
+ if (num_pending_reloc_info_ == 0) {
+ // Calculate the offset of the next check.
+ next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+ return;
+ }
+
+ // We emit a constant pool when:
+ // * requested to do so by parameter force_emit (e.g. after each function).
+ // * the distance to the first instruction accessing the constant pool is
+ // kAvgDistToPool or more.
+ // * no jump is required and the distance to the first instruction accessing
+ // the constant pool is at least kMaxDistToPool / 2.
+ ASSERT(first_const_pool_use_ >= 0);
+ int dist = pc_offset() - first_const_pool_use_;
+ if (!force_emit && dist < kAvgDistToPool &&
+ (require_jump || (dist < (kMaxDistToPool / 2)))) {
+ return;
+ }
// Check that the code buffer is large enough before emitting the constant
- // pool and relocation information (include the jump over the pool and the
- // constant pool marker).
- int max_needed_space =
- jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
- while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
+  // pool (including the jump over the pool, the constant pool marker and
+  // the gap to the relocation information).
+ int jump_instr = require_jump ? kInstrSize : 0;
+ int needed_space = jump_instr + kInstrSize +
+ num_pending_reloc_info_ * kInstrSize + kGap;
+ while (buffer_space() <= needed_space) GrowBuffer();
- // Block recursive calls to CheckConstPool.
- BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
- num_prinfo_*kInstrSize);
- // Don't bother to check for the emit calls below.
- next_buffer_check_ = no_const_pool_before_;
+ {
+ // Block recursive calls to CheckConstPool.
+ BlockConstPoolScope block_const_pool(this);
- // Emit jump over constant pool if necessary.
- Label after_pool;
- if (require_jump) b(&after_pool);
-
- RecordComment("[ Constant Pool");
-
- // Put down constant pool marker "Undefined instruction" as specified by
- // A5.6 (ARMv7) Instruction set encoding.
- emit(kConstantPoolMarker | num_prinfo_);
-
- // Emit constant pool entries.
- for (int i = 0; i < num_prinfo_; i++) {
- RelocInfo& rinfo = prinfo_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION &&
- rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
- Instr instr = instr_at(rinfo.pc());
-
- // Instruction to patch must be a ldr/str [pc, #offset].
- // P and U set, B and W clear, Rn == pc, offset12 still 0.
- ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
- (2*B25 | P | U | pc.code()*B16));
- int delta = pc_ - rinfo.pc() - 8;
- ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
- if (delta < 0) {
- instr &= ~U;
- delta = -delta;
+ // Emit jump over constant pool if necessary.
+ Label after_pool;
+ if (require_jump) {
+ b(&after_pool);
}
- ASSERT(is_uint12(delta));
- instr_at_put(rinfo.pc(), instr + delta);
- emit(rinfo.data());
- }
- num_prinfo_ = 0;
- last_const_pool_end_ = pc_offset();
- RecordComment("]");
+ RecordComment("[ Constant Pool");
- if (after_pool.is_linked()) {
- bind(&after_pool);
+ // Put down constant pool marker "Undefined instruction" as specified by
+ // A5.6 (ARMv7) Instruction set encoding.
+ emit(kConstantPoolMarker | num_pending_reloc_info_);
+
+ // Emit constant pool entries.
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION &&
+ rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
+
+ Instr instr = instr_at(rinfo.pc());
+ // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+ ASSERT(IsLdrPcImmediateOffset(instr) &&
+ GetLdrRegisterImmediateOffset(instr) == 0);
+
+ int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+ // 0 is the smallest delta:
+ // ldr rd, [pc, #0]
+ // constant pool marker
+ // data
+ ASSERT(is_uint12(delta));
+
+ instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+ emit(rinfo.data());
+ }
+
+ num_pending_reloc_info_ = 0;
+ first_const_pool_use_ = -1;
+
+ RecordComment("]");
+
+ if (after_pool.is_linked()) {
+ bind(&after_pool);
+ }
}
// Since a constant pool was just emitted, move the check offset forward by
// the standard interval.
- next_buffer_check_ = pc_offset() + kCheckConstInterval;
+ next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 2ab46b3..97d4226 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -167,13 +167,14 @@
// Double word VFP register.
struct DwVfpRegister {
- // d0 has been excluded from allocation. This is following ia32
- // where xmm0 is excluded. This should be revisited.
- // Currently d0 is used as a scratch register.
- // d1 has also been excluded from allocation to be used as a scratch
- // register as well.
static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 15;
+ // A few double registers are reserved: one as a scratch register and one to
+  // hold 0.0, which does not fit in the immediate field of vmov instructions.
+ // d14: 0.0
+ // d15: scratch register.
+ static const int kNumReservedRegisters = 2;
+ static const int kNumAllocatableRegisters = kNumRegisters -
+ kNumReservedRegisters;
static int ToAllocationIndex(DwVfpRegister reg) {
ASSERT(reg.code() != 0);
@@ -188,6 +189,7 @@
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
+ "d0",
"d1",
"d2",
"d3",
@@ -200,9 +202,7 @@
"d10",
"d11",
"d12",
- "d13",
- "d14",
- "d15"
+ "d13"
};
return names[index];
}
@@ -306,6 +306,7 @@
// Aliases for double registers.
const DwVfpRegister kFirstCalleeSavedDoubleReg = d8;
const DwVfpRegister kLastCalleeSavedDoubleReg = d15;
+const DwVfpRegister kDoubleRegZero = d14;
// Coprocessor register
@@ -377,7 +378,6 @@
INLINE(explicit Operand(int32_t immediate,
RelocInfo::Mode rmode = RelocInfo::NONE));
INLINE(explicit Operand(const ExternalReference& f));
- INLINE(explicit Operand(const char* s));
explicit Operand(Handle<Object> handle);
INLINE(explicit Operand(Smi* value));
@@ -455,6 +455,7 @@
Register rn() const { return rn_; }
Register rm() const { return rm_; }
+ AddrMode am() const { return am_; }
bool OffsetIsUint12Encodable() const {
return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
@@ -504,6 +505,7 @@
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
+
public:
explicit Scope(CpuFeature f) {
unsigned mask = 1u << f;
@@ -523,10 +525,12 @@
isolate_->set_enabled_cpu_features(old_enabled_);
}
}
+
private:
Isolate* isolate_;
unsigned old_enabled_;
#else
+
public:
explicit Scope(CpuFeature f) {}
#endif
@@ -1136,8 +1140,13 @@
void jmp(Label* L) { b(L, al); }
// Check the code size generated from label to here.
- int InstructionsGeneratedSince(Label* l) {
- return (pc_offset() - l->pos()) / kInstrSize;
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
+
+ // Check the number of instructions generated from label to here.
+ int InstructionsGeneratedSince(Label* label) {
+ return SizeOfCodeGeneratedSince(label) / kInstrSize;
}
// Check whether an immediate fits an addressing mode 1 instruction.
@@ -1159,10 +1168,6 @@
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
- // Postpone the generation of the constant pool for the specified number of
- // instructions.
- void BlockConstPoolFor(int instructions);
-
// Debugging
// Mark address of the ExitJSFrame code.
@@ -1173,7 +1178,17 @@
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
- void RecordAstId(unsigned ast_id) { ast_id_for_reloc_info_ = ast_id; }
+ void SetRecordedAstId(unsigned ast_id) {
+ ASSERT(recorded_ast_id_ == kNoASTId);
+ recorded_ast_id_ = ast_id;
+ }
+
+ unsigned RecordedAstId() {
+ ASSERT(recorded_ast_id_ != kNoASTId);
+ return recorded_ast_id_;
+ }
+
+ void ClearRecordedAstId() { recorded_ast_id_ = kNoASTId; }
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
@@ -1222,24 +1237,24 @@
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
- // Buffer size and constant pool distance are checked together at regular
- // intervals of kBufferCheckInterval emitted bytes
- static const int kBufferCheckInterval = 1*KB/2;
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB thereby defining a maximum distance between the instruction
- // and the accessed constant. We satisfy this constraint by limiting the
- // distance between pools.
- static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
- static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
+ // and the accessed constant.
+ static const int kMaxDistToPool = 4*KB;
+ static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
- // Check if is time to emit a constant pool for pending reloc info entries
+ // Postpone the generation of the constant pool for the specified number of
+ // instructions.
+ void BlockConstPoolFor(int instructions);
+
+  // Check if it is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
- unsigned ast_id_for_reloc_info_;
+ unsigned recorded_ast_id_;
bool emit_debug_code() const { return emit_debug_code_; }
@@ -1257,18 +1272,37 @@
// Patch branch instruction at pos to branch to given branch target pos
void target_at_put(int pos, int target_pos);
- // Block the emission of the constant pool before pc_offset
- void BlockConstPoolBefore(int pc_offset) {
- if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
+  // Prevent constant pool emission until EndBlockConstPool is called.
+  // Calls to this function can be nested but must be followed by an equal
+  // number of calls to EndBlockConstPool.
+ void StartBlockConstPool() {
+ if (const_pool_blocked_nesting_++ == 0) {
+ // Prevent constant pool checks happening by setting the next check to
+ // the biggest possible offset.
+ next_buffer_check_ = kMaxInt;
+ }
}
- void StartBlockConstPool() {
- const_pool_blocked_nesting_++;
- }
+  // Resume constant pool emission. Needs to be called as many times as
+ // StartBlockConstPool to have an effect.
void EndBlockConstPool() {
- const_pool_blocked_nesting_--;
+ if (--const_pool_blocked_nesting_ == 0) {
+ // Check the constant pool hasn't been blocked for too long.
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
+ // Two cases:
+ // * no_const_pool_before_ >= next_buffer_check_ and the emission is
+ // still blocked
+ // * no_const_pool_before_ < next_buffer_check_ and the next emit will
+ // trigger a check.
+ next_buffer_check_ = no_const_pool_before_;
+ }
}
- bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; }
+
+ bool is_const_pool_blocked() const {
+ return (const_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_const_pool_before_);
+ }
private:
// Code buffer:
@@ -1302,33 +1336,41 @@
// expensive. By default we only check again once a number of instructions
// has been generated. That also means that the sizing of the buffers is not
// an exact science, and that we rely on some slop to not overrun buffers.
- static const int kCheckConstIntervalInst = 32;
- static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
+ static const int kCheckPoolIntervalInst = 32;
+ static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
- // Pools are emitted after function return and in dead code at (more or less)
- // regular intervals of kDistBetweenPools bytes
- static const int kDistBetweenPools = 1*KB;
+  // Average distance between a constant pool and the first instruction
+  // accessing the constant pool. A longer distance should result in less
+  // I-cache pollution.
+ // In practice the distance will be smaller since constant pool emission is
+ // forced after function return and sometimes after unconditional branches.
+ static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
- // Keep track of the last emitted pool to guarantee a maximal distance
- int last_const_pool_end_; // pc offset following the last constant pool
+ // Keep track of the first instruction requiring a constant pool entry
+ // since the previous constant pool was emitted.
+ int first_const_pool_use_;
// Relocation info generation
// Each relocation is encoded as a variable size value
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
+
// Relocation info records are also used during code generation as temporary
// containers for constants and code target addresses until they are emitted
// to the constant pool. These pending relocation info records are temporarily
// stored in a separate buffer until a constant pool is emitted.
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
- RelocInfo prinfo_[kMaxNumPRInfo]; // the buffer of pending relocation info
- int num_prinfo_; // number of pending reloc info entries in the buffer
+
+  // The buffer of pending relocation info.
+ RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
+  // Number of pending reloc info entries in the buffer.
+ int num_pending_reloc_info_;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
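
The header changes document the invariant CheckConstPool now relies on: while blocked no pool is emitted, and on unblocking all pending entries must still be within kMaxDistToPool of the first instruction using them. The BlockConstPoolScope referenced throughout reduces to a small RAII pairing; a sketch consistent with the declarations above:

// Pair StartBlockConstPool/EndBlockConstPool so a short instruction
// sequence (e.g. in stop() or after RecordRelocInfo) is never split by
// constant pool emission.
class BlockConstPoolScope {
 public:
  explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
    assem_->StartBlockConstPool();
  }
  ~BlockConstPoolScope() { assem_->EndBlockConstPool(); }
 private:
  Assembler* assem_;
  DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
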
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 794b370..328102b 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -619,8 +619,7 @@
Label non_function_call;
// Check that the function is not a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &non_function_call);
+ __ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &non_function_call);
@@ -675,8 +674,7 @@
// Load the initial map and verify that it is in fact a map.
// r1: constructor function
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &rt_call);
+ __ JumpIfSmi(r2, &rt_call);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
__ b(ne, &rt_call);
@@ -946,12 +944,11 @@
// sp[0]: receiver (newly allocated object)
// sp[1]: constructor function
// sp[2]: number of arguments (smi-tagged)
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &use_receiver);
+ __ JumpIfSmi(r0, &use_receiver);
// If the type of the result (stored in its map) is less than
- // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r3, r3, FIRST_JS_OBJECT_TYPE);
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &exit);
// Throw away the result of the constructor invocation and use the
@@ -1047,8 +1044,7 @@
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));
if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall(),
- RelocInfo::CODE_TARGET);
+ __ Call(masm->isolate()->builtins()->JSConstructCall());
} else {
ParameterCount actual(r0);
__ InvokeFunction(r1, actual, CALL_FUNCTION,
@@ -1236,8 +1232,7 @@
// r0: actual number of arguments
Label non_function;
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &non_function);
+ __ JumpIfSmi(r1, &non_function);
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &non_function);
@@ -1257,8 +1252,7 @@
__ b(ne, &shift_arguments);
// Do not transform the receiver for native (Compilerhints already in r3).
- __ tst(r3, Operand(1 << (SharedFunctionInfo::kES5Native +
- kSmiTagSize)));
+ __ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &shift_arguments);
// Compute the receiver in non-strict mode.
@@ -1267,8 +1261,7 @@
// r0: actual number of arguments
// r1: function
// r2: first argument
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &convert_to_object);
+ __ JumpIfSmi(r2, &convert_to_object);
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ cmp(r2, r3);
@@ -1277,9 +1270,8 @@
__ cmp(r2, r3);
__ b(eq, &use_global_receiver);
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(r2, r3, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &shift_arguments);
__ bind(&convert_to_object);
@@ -1443,13 +1435,11 @@
__ b(ne, &push_receiver);
// Do not transform the receiver for strict mode functions.
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kES5Native +
- kSmiTagSize)));
+ __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &push_receiver);
// Compute the receiver in non-strict mode.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &call_to_object);
+ __ JumpIfSmi(r0, &call_to_object);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
__ b(eq, &use_global_receiver);
@@ -1459,9 +1449,8 @@
// Check if the receiver is already a JavaScript object.
// r0: receiver
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &push_receiver);
// Convert the receiver to a regular object.
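
The repeated tst/b(eq) pairs collapse into the JumpIfSmi macro instruction; its body is presumably the same two instructions. A sketch consistent with the replaced sequences:

// Jump to smi_label if value holds a smi (low tag bit clear).
void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) {
  tst(value, Operand(kSmiTagMask));
  b(eq, smi_label);
}
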
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 5e6c0c3..eaad9f2 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -69,8 +69,7 @@
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in r0.
Label check_heap_number, call_builtin;
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &check_heap_number);
+ __ JumpIfNotSmi(r0, &check_heap_number);
__ Ret();
__ bind(&check_heap_number);
@@ -158,7 +157,7 @@
__ ldr(r3, MemOperand(sp, 0));
// Setup the object header.
- __ LoadRoot(r2, Heap::kContextMapRootIndex);
+ __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
@@ -166,11 +165,10 @@
// Setup the fixed slots.
__ mov(r1, Operand(Smi::FromInt(0)));
__ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- // Copy the global object from the surrounding context.
+ // Copy the global object from the previous context.
__ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -187,7 +185,7 @@
// Need to collect. Call into runtime system.
__ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+ __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}
@@ -306,12 +304,6 @@
}
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "ConvertToDoubleStub"; }
-
-#ifdef DEBUG
- void Print() { PrintF("ConvertToDoubleStub\n"); }
-#endif
};
@@ -394,11 +386,11 @@
__ mov(scratch1, Operand(r0));
ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
__ push(lr);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub1.GetCode());
// Write Smi from r1 to r1 and r0 in double format.
__ mov(scratch1, Operand(r1));
ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub2.GetCode());
__ pop(lr);
}
}
@@ -475,7 +467,7 @@
__ mov(scratch1, Operand(object));
ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
__ push(lr);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub.GetCode());
__ pop(lr);
}
@@ -931,14 +923,14 @@
// They are both equal and they are not both Smis, so both of them are not
// Smis. If it's not a heap number, then return equal.
if (cond == lt || cond == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, slow);
} else {
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(eq, &heap_number);
// Comparing JS objects with <=, >= is complicated.
if (cond != eq) {
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(ge, slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
@@ -1029,8 +1021,7 @@
(lhs.is(r1) && rhs.is(r0)));
Label rhs_is_smi;
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &rhs_is_smi);
+ __ JumpIfSmi(rhs, &rhs_is_smi);
// Lhs is a Smi. Check whether the rhs is a heap number.
__ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
@@ -1061,7 +1052,7 @@
// Convert lhs to a double in r2, r3.
__ mov(r7, Operand(lhs));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub1.GetCode());
// Load rhs to a double in r0, r1.
__ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
__ pop(lr);
@@ -1103,7 +1094,7 @@
// Convert rhs to a double in r0, r1.
__ mov(r7, Operand(rhs));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub2.GetCode());
__ pop(lr);
}
// Fall through to both_loaded_as_doubles.
@@ -1220,14 +1211,14 @@
ASSERT((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));
- // If either operand is a JSObject or an oddball value, then they are
+ // If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
- // FIRST_JS_OBJECT_TYPE.
- __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE);
+ // FIRST_SPEC_OBJECT_TYPE.
+ __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, &first_non_object);
// Return non-zero (r0 is not zero)
@@ -1240,7 +1231,7 @@
__ cmp(r2, Operand(ODDBALL_TYPE));
__ b(eq, &return_not_equal);
- __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -1317,9 +1308,9 @@
__ Ret();
__ bind(&object_test);
- __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(lt, not_both_strings);
- __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, not_both_strings);
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
@@ -1458,8 +1449,7 @@
if (include_smi_compare_) {
Label not_two_smis, smi_done;
__ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &not_two_smis);
+ __ JumpIfNotSmi(r2, &not_two_smis);
__ mov(r1, Operand(r1, ASR, 1));
__ sub(r0, r1, Operand(r0, ASR, 1));
__ Ret();
@@ -1482,8 +1472,7 @@
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
__ and_(r2, lhs_, Operand(rhs_));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &not_smis);
+ __ JumpIfNotSmi(r2, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
// 2) Go to slow.
@@ -1614,15 +1603,13 @@
}
-// This stub does not handle the inlined cases (Smis, Booleans, undefined).
// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub uses VFP3 instructions.
CpuFeatures::Scope scope(VFP3);
- Label false_result;
- Label not_heap_number;
- Register scratch = r9.is(tos_) ? r7 : r9;
+ Label false_result, true_result, not_string;
+ const Register map = r9.is(tos_) ? r7 : r9;
// undefined -> false
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -1652,11 +1639,31 @@
__ cmp(tos_, ip);
__ b(eq, &false_result);
- // HeapNumber => false iff +0, -0, or NaN.
- __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, ip);
- __ b(&not_heap_number, ne);
+ // Get the map of the heap object.
+ __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
+
+ // Undetectable -> false.
+ __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ tst(ip, Operand(1 << Map::kIsUndetectable));
+ __ b(&false_result, ne);
+
+ // JavaScript object -> true.
+ __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
+ // "tos_" is a register and contains a non-zero value. Hence we implicitly
+ // return true if the greater than condition is satisfied.
+ __ Ret(ge);
+
+ // String value -> false iff empty.
+ __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
+ __ b(&not_string, ge);
+ __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+ // Return string length as boolean value, i.e. return false iff length is 0.
+ __ Ret();
+
+ __ bind(&not_string);
+ // HeapNumber -> false iff +0, -0, or NaN.
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ b(&true_result, ne);
__ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
__ VFPCompareAndSetFlags(d1, 0.0);
// "tos_" is a register, and contains a non zero value by default.
@@ -1666,72 +1673,27 @@
__ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
__ Ret();
- __ bind(&not_heap_number);
-
- // It can be an undetectable object.
- // Undetectable => false.
- __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(ip, Map::kBitFieldOffset));
- __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
- __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
- __ b(&false_result, eq);
-
- // JavaScript object => true.
- __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(gt);
-
- // Check for string
- __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(gt);
-
- // String value => false iff empty, i.e., length is zero
- __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
- // If length is zero, "tos_" contains zero ==> false.
- // If length is not zero, "tos_" contains a non-zero value ==> true.
+ // Return 1/0 for true/false in tos_.
+ __ bind(&true_result);
+ __ mov(tos_, Operand(1, RelocInfo::NONE));
__ Ret();
-
- // Return 0 in "tos_" for false .
__ bind(&false_result);
__ mov(tos_, Operand(0, RelocInfo::NONE));
__ Ret();
}
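
The rewritten ToBooleanStub now decides every heap-object case inline instead of punting everything but heap numbers to the runtime. A hedged C++ sketch of the same decision order the stub emits; the Value model below is invented for illustration, only the branch logic mirrors the stub:

#include <cmath>
#include <string>

// Illustrative tagged-value stand-in; not a V8 type.
struct Value {
  enum Kind { UNDEFINED, BOOLEAN_FALSE, NULL_VALUE, UNDETECTABLE,
              SPEC_OBJECT, STRING, HEAP_NUMBER } kind;
  std::string string_value;
  double number_value;
};

bool ToBoolean(const Value& v) {
  switch (v.kind) {
    case Value::UNDEFINED:     return false;  // undefined -> false
    case Value::BOOLEAN_FALSE: return false;  // false -> false
    case Value::NULL_VALUE:    return false;  // null -> false
    case Value::UNDETECTABLE:  return false;  // undetectable map bit -> false
    case Value::SPEC_OBJECT:   return true;   // spec object -> true
    case Value::STRING:        // string -> false iff empty
      return !v.string_value.empty();
    case Value::HEAP_NUMBER:   // heap number -> false iff +0, -0, or NaN
      return v.number_value != 0.0 && !std::isnan(v.number_value);
  }
  return false;
}
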
-Handle<Code> GetUnaryOpStub(int key, UnaryOpIC::TypeInfo type_info) {
- UnaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-const char* UnaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
+void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
switch (mode_) {
case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
}
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
- return name_;
+ stream->Add("UnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ UnaryOpIC::GetName(operand_type_));
}
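
GetName() with a lazily allocated, bootstrapper-owned buffer is replaced across all stubs by a virtual PrintName(StringStream*). A sketch of the pattern with a stand-in stream class; SimpleStream below is not V8's StringStream, whose Add() the new code uses with the same printf-style contract:

#include <cstdarg>
#include <cstdio>
#include <string>

// Stand-in for V8's StringStream, for illustration only.
class SimpleStream {
 public:
  void Add(const char* format, ...) {
    char buffer[128];
    va_list args;
    va_start(args, format);
    vsnprintf(buffer, sizeof(buffer), format, args);
    va_end(args);
    out_ += buffer;
  }
  const std::string& contents() const { return out_; }

 private:
  std::string out_;
};

// The stream owns the storage, so stubs drop the name_ field, the
// kMaxNameLength buffer, and the "OOM" fallback of the old GetName().
void PrintUnaryOpStubName(SimpleStream* stream,
                          const char* op_name,
                          const char* overwrite_name,
                          const char* operand_type_name) {
  stream->Add("UnaryOpStub_%s_%s_%s", op_name, overwrite_name,
              operand_type_name);
}
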
@@ -1755,22 +1717,14 @@
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- // Prepare to push argument.
- __ mov(r3, Operand(r0));
-
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ mov(r1, Operand(Smi::FromInt(op_)));
+ __ mov(r3, Operand(r0)); // the operand
+ __ mov(r2, Operand(Smi::FromInt(op_)));
+ __ mov(r1, Operand(Smi::FromInt(mode_)));
__ mov(r0, Operand(Smi::FromInt(operand_type_)));
-
__ Push(r3, r2, r1, r0);
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
- masm->isolate()),
- 4,
- 1);
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}
@@ -1903,6 +1857,8 @@
void UnaryOpStub::GenerateHeapNumberCodeBitNot(
MacroAssembler* masm, Label* slow) {
+ Label impossible;
+
EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
// Convert the heap number in r0 to an untagged integer in r1.
__ ConvertToInt32(r0, r1, r2, r3, d0, slow);
@@ -1921,17 +1877,27 @@
__ bind(&try_float);
if (mode_ == UNARY_NO_OVERWRITE) {
Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(r0, r2, r3, r6, &slow_allocate_heapnumber);
+ // Allocate a new heap number without zapping r0, which we need if it fails.
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
__ EnterInternalFrame();
- __ push(r1);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ pop(r1);
+ __ push(r0); // Push the heap number, not the untagged int32.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r2, r0); // Move the new heap number into r2.
+ // Get the heap number into r0, now that the new heap number is in r2.
+ __ pop(r0);
__ LeaveInternalFrame();
+ // Convert the heap number in r0 to an untagged integer in r1.
+ // This can't go slow-case because it's the same number we already
+ // converted once before.
+ __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible);
+ __ mvn(r1, Operand(r1));
+
__ bind(&heapnumber_allocated);
+ __ mov(r0, r2); // Move newly allocated heap number to r0.
}
if (CpuFeatures::IsSupported(VFP3)) {
@@ -1948,6 +1914,11 @@
WriteInt32ToHeapNumberStub stub(r1, r0, r2);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
+
+ __ bind(&impossible);
+ if (FLAG_debug_code) {
+ __ stop("Incorrect assumption in bit-not stub");
+ }
}
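
The fix above keeps the original heap number in r0 across the Runtime::kNumberAlloc call and re-derives the untagged value afterwards; re-converting the same number is deterministic, which is why the second ConvertToInt32 can target an "impossible" label. A simplified sketch of the BIT_NOT value computation itself, assuming the input fits in an int32 (V8's real conversion handles out-of-range doubles on the slow path):

#include <cstdint>
#include <cstdio>

// BIT_NOT on a heap number: untag to int32, invert, box the result.
int32_t BitNotValue(double heap_number_value) {
  int32_t untagged = static_cast<int32_t>(heap_number_value);
  return ~untagged;  // the __ mvn(r1, Operand(r1)) step
}

int main() {
  printf("%d\n", BitNotValue(5.0));   // prints: -6
  printf("%d\n", BitNotValue(-1.0));  // prints: 0
}
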
@@ -2002,14 +1973,6 @@
}
-Handle<Code> GetBinaryOpStub(int key,
- BinaryOpIC::TypeInfo type_info,
- BinaryOpIC::TypeInfo result_type_info) {
- BinaryOpStub stub(key, type_info, result_type_info);
- return stub.GetCode();
-}
-
-
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
@@ -2066,12 +2029,7 @@
}
-const char* BinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
+void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
@@ -2080,13 +2038,10 @@
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
- return name_;
+ stream->Add("BinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(operands_type_));
}
@@ -2444,8 +2399,7 @@
// Perform combined smi check on both operands.
__ orr(scratch1, left, Operand(right));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(scratch1, Operand(kSmiTagMask));
- __ b(ne, ¬_smis);
+ __ JumpIfNotSmi(scratch1, ¬_smis);
// If the smi-smi operation results in a smi return is generated.
GenerateSmiSmiOperation(masm);
@@ -2558,37 +2512,36 @@
case Token::MUL:
case Token::DIV:
case Token::MOD: {
- // Load both operands and check that they are 32-bit integer.
- // Jump to type transition if they are not. The registers r0 and r1 (right
- // and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP3) &&
- op_ != Token::MOD ?
- FloatingPointHelper::kVFPRegisters :
- FloatingPointHelper::kCoreRegisters;
+ // Load both operands and check that they are 32-bit integer.
+ // Jump to type transition if they are not. The registers r0 and r1 (right
+ // and left) are preserved for the runtime call.
+ FloatingPointHelper::Destination destination =
+ (CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD)
+ ? FloatingPointHelper::kVFPRegisters
+ : FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- right,
- destination,
- d7,
- r2,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- left,
- destination,
- d6,
- r4,
- r5,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ right,
+ destination,
+ d7,
+ r2,
+ r3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ s0,
+ &transition);
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ left,
+ destination,
+ d6,
+ r4,
+ r5,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ s0,
+ &transition);
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
@@ -2649,9 +2602,11 @@
// DIV just falls through to allocating a heap number.
}
- if (result_type_ >= (op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
- : BinaryOpIC::INT32) {
- __ bind(&return_heap_number);
+ __ bind(&return_heap_number);
+ // Return a heap number, or fall through to type transition or runtime
+ // call if we can't.
+ if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
+ : BinaryOpIC::INT32)) {
// We are using vfp registers so r5 is available.
heap_number_result = r5;
GenerateHeapResultAllocation(masm,
@@ -2825,7 +2780,11 @@
UNREACHABLE();
}
- if (transition.is_linked()) {
+ // We never expect DIV to yield an integer result, so we always generate
+ // type transition code for DIV operations expecting an integer result: the
+ // code will fall through to this type transition.
+ if (transition.is_linked() ||
+ ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
__ bind(&transition);
GenerateTypeTransition(masm);
}
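
The rationale in the comment can be made concrete: integer division rarely stays in the int32 domain, so a DIV site whose IC state still expects an INT32 result is treated as if the transition label had been reached. A small sketch of the representability question involved:

#include <cstdio>

// Returns true iff a / b is exactly representable as an int32, i.e. the
// case where a DIV result could stay an int32 instead of becoming a
// heap number.
bool DivStaysInt32(int a, int b, double* result) {
  if (b == 0) return false;  // division by zero yields Infinity/NaN
  *result = static_cast<double>(a) / b;
  return *result == static_cast<int>(*result);
}

int main() {
  double r;
  printf("%d\n", DivStaysInt32(6, 2, &r));  // 1: result 3 is an int32
  printf("%d\n", DivStaysInt32(3, 2, &r));  // 0: 1.5 forces a heap number
}
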
@@ -3394,15 +3353,10 @@
__ mov(r2, Operand(ExternalReference::isolate_address()));
-
- // TODO(1242173): To let the GC traverse the return address of the exit
- // frames, we need to know where the return address is. Right now,
- // we store it on the stack to be able to find it again, but we never
- // restore from it in case of changes, which makes it impossible to
- // support moving the C entry code stub. This should be fixed, but currently
- // this is OK because the CEntryStub gets generated so early in the V8 boot
- // sequence that it is not moving ever.
-
+ // To let the GC traverse the return address of the exit frames, we need to
+ // know where the return address is. The CEntryStub is unmovable, so
+ // we can store the address on the stack to be able to find it again and
+ // we never have to restore it, because it will not change.
// Compute the return address in lr to return to after the jump below. Pc is
// already at '+ 8' from the current instruction but return is after three
// instructions so add another 4 to pc to get the return address.
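
The "+ 8" and the extra "+ 4" follow from the classic ARM pipeline: reading pc yields the address of the current instruction plus 8, while the desired return point lies three 4-byte instructions ahead. A sketch of the offset arithmetic, assuming 4-byte ARM (non-Thumb) instructions:

#include <cstdio>

int main() {
  const int kInstrSize = 4;          // ARM instruction size
  int current = 0x1000;              // address of the instruction reading pc
  int pc_as_read = current + 8;      // ARM: pc reads as current + 8
  int return_point = current + 3 * kInstrSize;  // return is 3 instructions on
  printf("extra offset = %d\n", return_point - pc_as_read);  // prints: 4
}
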
@@ -3556,6 +3510,8 @@
CpuFeatures::Scope scope(VFP3);
// Save callee-saved vfp registers.
__ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
+ // Set up the reserved register for 0.0.
+ __ vmov(kDoubleRegZero, 0.0);
}
// Get address of argv, see stm above.
@@ -3590,7 +3546,6 @@
// Setup frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate);
@@ -3606,7 +3561,6 @@
__ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
__ bind(&cont);
__ push(ip);
-#endif
// Call a faked try-block that does the invoke.
__ bl(&invoke);
@@ -3667,7 +3621,6 @@
__ PopTryHandler();
__ bind(&exit); // r0 holds result
-#ifdef ENABLE_LOGGING_AND_PROFILING
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ pop(r5);
@@ -3677,7 +3630,6 @@
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
__ str(r6, MemOperand(r5));
__ bind(&non_outermost_js_2);
-#endif
// Restore the top frame descriptors from the stack.
__ pop(r3);
@@ -3940,12 +3892,233 @@
}
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
// Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
+ __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(ne, &runtime);
+
+ // Patch the arguments.length and the parameters pointer in the current frame.
+ __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ str(r2, MemOperand(sp, 0 * kPointerSize));
+ __ add(r3, r3, Operand(r2, LSL, 1));
+ __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ str(r3, MemOperand(sp, 1 * kPointerSize));
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ // Stack layout:
+ // sp[0] : number of parameters (tagged)
+ // sp[4] : address of receiver argument
+ // sp[8] : function
+ // Registers used over whole function:
+ // r6 : allocated object (tagged)
+ // r9 : mapped parameter count (tagged)
+
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+ // r1 = parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
+ __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(eq, &adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+ __ mov(r2, r1);
+ __ b(&try_allocate);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ add(r3, r3, Operand(r2, LSL, 1));
+ __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ str(r3, MemOperand(sp, 1 * kPointerSize));
+
+ // r1 = parameter count (tagged)
+ // r2 = argument count (tagged)
+ // Compute the mapped parameter count = min(r1, r2) in r1.
+ __ cmp(r1, Operand(r2));
+ __ mov(r1, Operand(r2), LeaveCC, gt);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, which has 2 extra words containing context and backing
+ // store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ __ cmp(r1, Operand(Smi::FromInt(0)));
+ __ mov(r9, Operand(0), LeaveCC, eq);
+ __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
+ __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
+
+ // 2. Backing store.
+ __ add(r9, r9, Operand(r2, LSL, 1));
+ __ add(r9, r9, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT);
+
+ // r0 = address of new object(s) (tagged)
+ // r2 = argument count (tagged)
+ // Get the arguments boilerplate from the current (global) context into r4.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
+
+ __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+ __ cmp(r1, Operand(0));
+ __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
+ __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
+
+ // r0 = address of new object (tagged)
+ // r1 = mapped parameter count (tagged)
+ // r2 = argument count (tagged)
+ // r4 = address of boilerplate object (tagged)
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ ldr(r3, FieldMemOperand(r4, i));
+ __ str(r3, FieldMemOperand(r0, i));
+ }
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
+ const int kCalleeOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ str(r3, FieldMemOperand(r0, kCalleeOffset));
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ str(r2, FieldMemOperand(r0, kLengthOffset));
+
+ // Setup the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, r4 will point there, otherwise
+ // it will point to the backing store.
+ __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
+ __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+
+ // r0 = address of new object (tagged)
+ // r1 = mapped parameter count (tagged)
+ // r2 = argument count (tagged)
+ // r4 = address of parameter map or backing store (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ cmp(r1, Operand(Smi::FromInt(0)));
+ // Move backing store address to r3, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mov(r3, r4, LeaveCC, eq);
+ __ b(eq, &skip_parameter_map);
+
+ __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
+ __ add(r6, r1, Operand(Smi::FromInt(2)));
+ __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ add(r6, r4, Operand(r1, LSL, 1));
+ __ add(r6, r6, Operand(kParameterMapHeaderSize));
+ __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ mov(r6, r1);
+ __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
+ __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ sub(r9, r9, Operand(r1));
+ __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
+ __ add(r3, r4, Operand(r6, LSL, 1));
+ __ add(r3, r3, Operand(kParameterMapHeaderSize));
+
+ // r6 = loop variable (tagged)
+ // r1 = mapping index (tagged)
+ // r3 = address of backing store (tagged)
+ // r4 = address of parameter map (tagged)
+ // r5 = temporary scratch (among other things, for address calculation)
+ // r7 = the hole value
+ __ jmp(&parameters_test);
+
+ __ bind(&parameters_loop);
+ __ sub(r6, r6, Operand(Smi::FromInt(1)));
+ __ mov(r5, Operand(r6, LSL, 1));
+ __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ str(r9, MemOperand(r4, r5));
+ __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ str(r7, MemOperand(r3, r5));
+ __ add(r9, r9, Operand(Smi::FromInt(1)));
+ __ bind(&parameters_test);
+ __ cmp(r6, Operand(Smi::FromInt(0)));
+ __ b(ne, &parameters_loop);
+
+ __ bind(&skip_parameter_map);
+ // r2 = argument count (tagged)
+ // r3 = address of backing store (tagged)
+ // r5 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
+ __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
+ __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
+
+ Label arguments_loop, arguments_test;
+ __ mov(r9, r1);
+ __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
+ __ sub(r4, r4, Operand(r9, LSL, 1));
+ __ jmp(&arguments_test);
+
+ __ bind(&arguments_loop);
+ __ sub(r4, r4, Operand(kPointerSize));
+ __ ldr(r6, MemOperand(r4, 0));
+ __ add(r5, r3, Operand(r9, LSL, 1));
+ __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
+ __ add(r9, r9, Operand(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ cmp(r9, Operand(r2));
+ __ b(lt, &arguments_loop);
+
+ // Return and remove the on-stack parameters.
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ // r2 = argument count (tagged)
+ __ bind(&runtime);
+ __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
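
The fast path above allocates three objects with a single AllocateInNewSpace call: an optional parameter map, the backing store, and the arguments object itself. A hedged C++ sketch of that size computation; kArgumentsObjectWords is an illustrative constant, and the header sizes mirror the kParameterMapHeaderSize definition in the stub:

#include <algorithm>
#include <cstdio>

const int kPointerSize = 4;            // 32-bit ARM
const int kFixedArrayHeaderWords = 2;  // map + length
const int kParameterMapHeaderWords =   // FixedArray header plus the
    kFixedArrayHeaderWords + 2;        // context and backing-store slots
const int kArgumentsObjectWords = 5;   // illustrative, not Heap's value

int ArgumentsAllocationBytes(int param_count, int arg_count) {
  int mapped = std::min(param_count, arg_count);  // mapped parameter count
  int words = 0;
  if (mapped > 0) words += kParameterMapHeaderWords + mapped;  // parameter map
  words += kFixedArrayHeaderWords + arg_count;                 // backing store
  words += kArgumentsObjectWords;                              // JSObject part
  return words * kPointerSize;
}

int main() {
  printf("%d bytes\n", ArgumentsAllocationBytes(2, 3));
}
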
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[4] : receiver displacement
+ // sp[8] : function
+ // Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
@@ -3973,40 +4146,31 @@
__ mov(r1, Operand(r1, LSR, kSmiTagSize));
__ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ add(r1, r1, Operand(GetArgumentsObjectSize() / kPointerSize));
+ __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
// Do the allocation of both objects in one go.
- __ AllocateInNewSpace(
- r1,
- r0,
- r2,
- r3,
- &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+ __ AllocateInNewSpace(r1,
+ r0,
+ r2,
+ r3,
+ &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT |
+ SIZE_IN_WORDS));
// Get the arguments boilerplate from the current (global) context.
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
- __ ldr(r4, MemOperand(r4,
- Context::SlotOffset(GetArgumentsBoilerplateIndex())));
+ __ ldr(r4, MemOperand(r4, Context::SlotOffset(
+ Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
__ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
- if (type_ == NEW_NON_STRICT) {
- // Setup the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- const int kCalleeOffset = JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize;
- __ str(r3, FieldMemOperand(r0, kCalleeOffset));
- }
-
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
__ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ Heap::kArgumentsLengthIndex * kPointerSize));
// If there are no actual arguments, we're done.
Label done;
@@ -4018,12 +4182,13 @@
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ add(r4, r0, Operand(GetArgumentsObjectSize()));
+ __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
__ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
__ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
__ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop.
+ // Untag the length for the loop.
+ __ mov(r1, Operand(r1, LSR, kSmiTagSize));
// Copy the fixed array slots.
Label loop;
@@ -4046,7 +4211,7 @@
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
@@ -4098,8 +4263,7 @@
// Check that the first argument is a JSRegExp object.
__ ldr(r0, MemOperand(sp, kJSRegExpOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &runtime);
+ __ JumpIfSmi(r0, &runtime);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
__ b(ne, &runtime);
@@ -4135,8 +4299,7 @@
// regexp_data: RegExp data (FixedArray)
// Check that the second argument is a string.
__ ldr(subject, MemOperand(sp, kSubjectOffset));
- __ tst(subject, Operand(kSmiTagMask));
- __ b(eq, &runtime);
+ __ JumpIfSmi(subject, &runtime);
Condition is_string = masm->IsObjectStringType(subject, r0);
__ b(NegateCondition(is_string), &runtime);
// Get the length of the string to r3.
@@ -4149,8 +4312,7 @@
// Check that the third argument is a positive smi less than the subject
// string length. A negative value will be greater (unsigned comparison).
__ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &runtime);
+ __ JumpIfNotSmi(r0, &runtime);
__ cmp(r3, Operand(r0));
__ b(ls, &runtime);
@@ -4159,8 +4321,7 @@
// regexp_data: RegExp data (FixedArray)
// Check that the fourth object is a JSArray object.
__ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &runtime);
+ __ JumpIfSmi(r0, &runtime);
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
__ b(ne, &runtime);
// Check that the JSArray is in fast case.
@@ -4419,8 +4580,7 @@
__ ldr(r1, MemOperand(sp, kPointerSize * 2));
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- __ tst(r1, Operand(kSmiTagMask));
- __ b(ne, &slowcase);
+ __ JumpIfNotSmi(r1, &slowcase);
__ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
__ b(hi, &slowcase);
// Smi-tagging is equivalent to multiplying by 2.
@@ -4562,6 +4722,7 @@
__ mov(r0, Operand(argc_)); // Setup the number of arguments.
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+ __ SetCallKind(r5, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
@@ -4569,16 +4730,9 @@
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
+void CompareStub::PrintName(StringStream* stream) {
ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
(lhs_.is(r1) && rhs_.is(r0)));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
const char* cc_name;
switch (cc_) {
case lt: cc_name = "LT"; break;
@@ -4589,40 +4743,14 @@
case ne: cc_name = "NE"; break;
default: cc_name = "UnknownCondition"; break;
}
-
- const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
- const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
-
- const char* strict_name = "";
- if (strict_ && (cc_ == eq || cc_ == ne)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- const char* include_smi_compare_name = "";
- if (!include_smi_compare_) {
- include_smi_compare_name = "_NO_SMI";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s%s%s",
- cc_name,
- lhs_name,
- rhs_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name,
- include_smi_compare_name);
- return name_;
+ bool is_equality = cc_ == eq || cc_ == ne;
+ stream->Add("CompareStub_%s", cc_name);
+ stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
+ stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
+ if (strict_ && is_equality) stream->Add("_STRICT");
+ if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
+ if (!include_number_compare_) stream->Add("_NO_NUMBER");
+ if (!include_smi_compare_) stream->Add("_NO_SMI");
}
@@ -5301,8 +5429,7 @@
// Make sure first argument is a sequential (or flat) string.
__ ldr(r5, MemOperand(sp, kStringOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(r5, Operand(kSmiTagMask));
- __ b(eq, &runtime);
+ __ JumpIfSmi(r5, &runtime);
Condition is_string = masm->IsObjectStringType(r5, r1);
__ b(NegateCondition(is_string), &runtime);
@@ -5939,8 +6066,7 @@
ASSERT(state_ == CompareIC::SMIS);
Label miss;
__ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &miss);
+ __ JumpIfNotSmi(r2, &miss);
if (GetCondition() == eq) {
// For equality we do not care about the sign of the result.
@@ -5964,8 +6090,7 @@
Label unordered;
Label miss;
__ and_(r2, r1, Operand(r0));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &generic_stub);
+ __ JumpIfSmi(r2, &generic_stub);
__ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &miss);
@@ -6114,8 +6239,7 @@
ASSERT(state_ == CompareIC::OBJECTS);
Label miss;
__ and_(r2, r1, Operand(r0));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r2, &miss);
__ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
__ b(ne, &miss);
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index fb05cd2..557f7e6 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -58,35 +58,14 @@
};
-class ToBooleanStub: public CodeStub {
- public:
- explicit ToBooleanStub(Register tos) : tos_(tos) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register tos_;
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return tos_.code(); }
-};
-
-
class UnaryOpStub: public CodeStub {
public:
- UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
+ UnaryOpStub(Token::Value op,
+ UnaryOverwriteMode mode,
+ UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
- operand_type_(UnaryOpIC::UNINITIALIZED),
- name_(NULL) {
- }
-
- UnaryOpStub(
- int key,
- UnaryOpIC::TypeInfo operand_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- operand_type_(operand_type),
- name_(NULL) {
+ operand_type_(operand_type) {
}
private:
@@ -96,20 +75,7 @@
// Operand type information determined at runtime.
UnaryOpIC::TypeInfo operand_type_;
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("UnaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- UnaryOpIC::GetName(operand_type_));
- }
-#endif
+ virtual void PrintName(StringStream* stream);
class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
class OpBits: public BitField<Token::Value, 1, 7> {};
@@ -163,8 +129,7 @@
: op_(op),
mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED),
- name_(NULL) {
+ result_type_(BinaryOpIC::UNINITIALIZED) {
use_vfp3_ = CpuFeatures::IsSupported(VFP3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -177,8 +142,7 @@
mode_(ModeBits::decode(key)),
use_vfp3_(VFP3Bits::decode(key)),
operands_type_(operands_type),
- result_type_(result_type),
- name_(NULL) { }
+ result_type_(result_type) { }
private:
enum SmiCodeGenerateHeapNumberResults {
@@ -194,20 +158,7 @@
BinaryOpIC::TypeInfo operands_type_;
BinaryOpIC::TypeInfo result_type_;
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("BinaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- BinaryOpIC::GetName(operands_type_));
- }
-#endif
+ virtual void PrintName(StringStream* stream);
// Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
@@ -391,12 +342,6 @@
}
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
-
-#ifdef DEBUG
- void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
-#endif
};
@@ -423,8 +368,6 @@
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
};
@@ -442,8 +385,6 @@
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
-
- const char* GetName() { return "RegExpCEntryStub"; }
};
@@ -464,14 +405,11 @@
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
-
- const char* GetName() { return "DirectCEntryStub"; }
};
class FloatingPointHelper : public AllStatic {
public:
-
enum Destination {
kVFPRegisters,
kCoreRegisters
@@ -649,13 +587,6 @@
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
-
-#ifdef DEBUG
- void Print() {
- PrintF("StringDictionaryLookupStub\n");
- }
-#endif
-
Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 01aa805..d27982a 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -58,9 +58,7 @@
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
-#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
-#endif
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 5b62d82..4b994e5 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -270,6 +270,9 @@
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
+#ifdef DEBUG
+ output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -385,6 +388,9 @@
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
+#ifdef DEBUG
+ output_frame->SetKind(Code::FUNCTION);
+#endif
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
@@ -519,7 +525,7 @@
// Set the continuation for the topmost frame.
- if (is_topmost) {
+ if (is_topmost && bailout_type_ != DEBUGGER) {
Builtins* builtins = isolate_->builtins();
Code* continuation = (bailout_type_ == EAGER)
? builtins->builtin(Builtins::kNotifyDeoptimized)
@@ -532,8 +538,28 @@
}
-#define __ masm()->
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers fp and sp are set to the correct values though.
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ input_->SetRegister(i, i * 4);
+ }
+ input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+ }
+}
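
FillInputFrame is new plumbing for debugger-triggered deoptimization: the register contents are mostly irrelevant because everything is spilled, so only sp, fp, and the raw frame slots matter. A sketch of the slot-copy step with a stand-in for FrameDescription:

#include <cstdint>
#include <cstring>
#include <vector>

// Stand-in for V8's FrameDescription, for illustration only.
struct FrameSnapshot {
  std::vector<uint32_t> slots;

  // Copy the live frame word by word, starting at top-of-stack, the way
  // the loop over input_->SetFrameSlot() does.
  void FillFrom(const uint32_t* tos, size_t frame_size_bytes) {
    slots.resize(frame_size_bytes / sizeof(uint32_t));
    std::memcpy(slots.data(), tos, frame_size_bytes);
  }
};

int main() {
  uint32_t fake_frame[4] = {1, 2, 3, 4};
  FrameSnapshot snapshot;
  snapshot.FillFrom(fake_frame, sizeof(fake_frame));
  return snapshot.slots[3] == 4 ? 0 : 1;
}
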
+
+
+#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 6116513..c3440eb 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -92,17 +92,19 @@
}
void EmitPatchInfo() {
- int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
- Register reg;
- reg.set_code(delta_to_patch_site / kOff12Mask);
- __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+ Register reg;
+ reg.set_code(delta_to_patch_site / kOff12Mask);
+ __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
#ifdef DEBUG
- info_emitted_ = true;
+ info_emitted_ = true;
#endif
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
}
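
The patch info encodes the distance back to an inlined-smi-code patch site inside a cmp instruction: the register field carries the quotient and the 12-bit immediate the remainder, while an unbound site now emits a plain nop as the "no inlined code" marker. A round-trip sketch of that encoding, assuming kOff12Mask spans the 12-bit immediate:

#include <cassert>
#include <cstdio>

const int kOff12Mask = (1 << 12) - 1;  // assumed 12-bit immediate span

struct EncodedDelta {
  int reg_code;   // quotient, stored in the cmp's register field
  int immediate;  // remainder, stored in the 12-bit immediate
};

EncodedDelta Encode(int delta_to_patch_site) {
  return { delta_to_patch_site / kOff12Mask,
           delta_to_patch_site % kOff12Mask };
}

int Decode(const EncodedDelta& e) {
  return e.reg_code * kOff12Mask + e.immediate;
}

int main() {
  for (int delta : {0, 17, 5000, 123456}) {
    assert(Decode(Encode(delta)) == delta);
  }
  printf("round-trip ok\n");
}
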
- bool is_bound() const { return patch_site_.is_bound(); }
-
private:
MacroAssembler* masm_;
Label patch_site_;
@@ -129,6 +131,7 @@
void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
+ scope_ = info->scope();
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -139,21 +142,21 @@
}
#endif
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). r5 is zero for method calls and non-zero for function
- // calls.
- if (info->is_strict_mode()) {
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). r5 is zero for method calls and non-zero for
+ // function calls.
+ if (info->is_strict_mode() || info->is_native()) {
Label ok;
__ cmp(r5, Operand(0));
__ b(eq, &ok);
- int receiver_offset = scope()->num_parameters() * kPointerSize;
+ int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ str(r2, MemOperand(sp, receiver_offset));
__ bind(&ok);
}
- int locals_count = scope()->num_stack_slots();
+ int locals_count = info->scope()->num_stack_slots();
__ Push(lr, fp, cp, r1);
if (locals_count > 0) {
@@ -173,7 +176,7 @@
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in r1.
@@ -182,14 +185,14 @@
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in both r0 and cp. It replaces the context
// passed to us. It's saved in the stack and kept live in cp.
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
+ int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
@@ -220,27 +223,28 @@
__ mov(r3, r1);
}
// Receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
__ add(r2, fp,
Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ mov(r1, Operand(Smi::FromInt(num_parameters)));
__ Push(r3, r2, r1);
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adaptor frame.
- ArgumentsAccessStub stub(
- is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT);
+ ArgumentsAccessStub::Type type;
+ if (is_strict_mode()) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ }
+ ArgumentsAccessStub stub(type);
__ CallStub(&stub);
- Variable* arguments_shadow = scope()->arguments_shadow();
- if (arguments_shadow != NULL) {
- // Duplicate the value; move-to-slot operation might clobber registers.
- __ mov(r3, r0);
- Move(arguments_shadow->AsSlot(), r3, r1, r2);
- }
Move(arguments->AsSlot(), r0, r1, r2);
}
@@ -345,7 +349,7 @@
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
- int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
+ int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
masm_->mov(sp, fp);
@@ -383,7 +387,7 @@
// For simplicity we always test the accumulator register.
codegen()->Move(result_register(), slot);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -417,7 +421,7 @@
if (true_label_ != fall_through_) __ b(true_label_);
} else {
__ LoadRoot(result_register(), index);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -464,7 +468,7 @@
} else {
// For simplicity we always test the accumulator register.
__ mov(result_register(), Operand(lit));
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -500,7 +504,7 @@
__ Drop(count);
__ Move(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -578,7 +582,8 @@
}
-void FullCodeGenerator::DoTest(Label* if_true,
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
Label* if_false,
Label* fall_through) {
if (CpuFeatures::IsSupported(VFP3)) {
@@ -712,10 +717,12 @@
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check that we're not inside a 'with'.
- __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
- __ cmp(r1, cp);
- __ Check(eq, "Unexpected declaration in current context.");
+ // Check that we're not inside a with or catch context.
+ __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
+ __ Check(ne, "Declaration in with context.");
+ __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
+ __ Check(ne, "Declaration in catch context.");
}
if (mode == Variable::CONST) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -769,7 +776,7 @@
// IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ EmitVariableLoad(prop->obj()->AsVariableProxy());
}
__ push(r0);
@@ -783,7 +790,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
// Value in r0 is ignored (declarations are statements).
}
}
@@ -857,7 +864,8 @@
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site, clause->CompareId());
+ __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ patch_site.EmitPatchInfo();
__ cmp(r0, Operand(0));
__ b(ne, &next_test);
@@ -912,8 +920,8 @@
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(r0, &convert);
- __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
- __ b(hs, &done_convert);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &done_convert);
__ bind(&convert);
__ push(r0);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
@@ -1105,7 +1113,7 @@
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var());
+ EmitVariableLoad(expr);
}
@@ -1127,8 +1135,7 @@
__ b(ne, slow);
}
// Load next context in chain.
- __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX));
- __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
+ __ ldr(next, ContextOperand(current, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
current = next;
}
@@ -1154,8 +1161,7 @@
__ tst(temp, temp);
__ b(ne, slow);
// Load next context in chain.
- __ ldr(next, ContextOperand(next, Context::CLOSURE_INDEX));
- __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
+ __ ldr(next, ContextOperand(next, Context::PREVIOUS_INDEX));
__ b(&loop);
__ bind(&fast);
}
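
Context chains are now linked directly through a PREVIOUS slot, so the dynamic-lookup walk no longer loads the closure and then the closure's context; each step is a single load. A sketch of the traversal with illustrative types:

// Illustrative context model; only the linkage shape mirrors V8.
struct Context {
  Context* previous;   // was reached via CLOSURE_INDEX + kContextOffset
  bool has_extension;  // an extension forces the slow path
};

// Walk the chain the way the fast path does, bailing out on extensions.
bool ChainIsExtensionFree(const Context* context) {
  for (const Context* c = context; c != nullptr; c = c->previous) {
    if (c->has_extension) return false;  // __ b(ne, slow)
  }
  return true;
}
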
@@ -1166,7 +1172,7 @@
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, mode, AstNode::kNoNumber);
+ __ Call(ic, mode);
}
@@ -1186,8 +1192,7 @@
__ tst(temp, temp);
__ b(ne, slow);
}
- __ ldr(next, ContextOperand(context, Context::CLOSURE_INDEX));
- __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
+ __ ldr(next, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
@@ -1248,7 +1253,7 @@
__ mov(r0, Operand(key_literal->handle()));
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ jmp(done);
}
}
@@ -1257,24 +1262,27 @@
}
-void FullCodeGenerator::EmitVariableLoad(Variable* var) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
- Slot* slot = var->AsSlot();
- Property* property = var->AsProperty();
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+ // Record position before possible IC call.
+ SetSourcePosition(proxy->position());
+ Variable* var = proxy->var();
- if (var->is_global() && !var->is_this()) {
+ // Three cases: non-this global variables, lookup slots, and all other
+ // types of slots.
+ Slot* slot = var->AsSlot();
+ ASSERT((var->is_global() && !var->is_this()) == (slot == NULL));
+
+ if (slot == NULL) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object (receiver) in r0.
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(r0);
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ } else if (slot->type() == Slot::LOOKUP) {
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -1290,7 +1298,7 @@
context()->Plug(r0);
- } else if (slot != NULL) {
+ } else {
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
? "Context slot"
: "Stack slot");
@@ -1306,32 +1314,6 @@
} else {
context()->Plug(slot);
}
- } else {
- Comment cmnt(masm_, "Rewritten parameter");
- ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->AsSlot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- Move(r1, object_slot);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ mov(r0, Operand(key_literal->handle()));
-
- // Call keyed load IC. It has arguments key and receiver in r0 and r1.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
- context()->Plug(r0);
}
}
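
With parameter rewriting gone, EmitVariableLoad shrinks to the three cases the new ASSERT documents: a global (exactly the variables with no slot), a LOOKUP slot, and a stack or context slot. A dispatch sketch with illustrative types:

enum class SlotType { LOOKUP, STACK, CONTEXT };

struct Slot {
  SlotType type;
};

// Mirrors the rewritten control flow: slot == nullptr iff the variable is
// a non-this global, per the ASSERT in EmitVariableLoad.
const char* ClassifyVariableLoad(const Slot* slot) {
  if (slot == nullptr) return "global: LoadIC against the global object";
  if (slot->type == SlotType::LOOKUP) return "lookup: dynamic context walk";
  return "stack/context slot: direct load";
}
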
@@ -1441,7 +1423,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1572,7 +1554,7 @@
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
@@ -1598,27 +1580,13 @@
break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
- __ push(r0);
- __ mov(r0, Operand(property->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
} else {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- __ ldr(r1, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
- __ mov(r0, Operand(property->key()->AsLiteral()->handle()));
- __ Push(r1, r0);
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
}
break;
}
@@ -1629,7 +1597,7 @@
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ EmitVariableLoad(expr->target()->AsVariableProxy());
PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
@@ -1695,7 +1663,7 @@
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1703,7 +1671,7 @@
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1730,7 +1698,8 @@
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
__ jmp(&done);
__ bind(&smi_case);
@@ -1811,7 +1780,9 @@
OverwriteMode mode) {
__ pop(r1);
BinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
context()->Plug(r0);
}
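
The unbound patch site here replaces the nop that the deleted EmitCallIC used to emit for call sites without inlined smi code. The convention, inferred from the old helper removed at the end of this file:

    // After an IC/stub call that may have an inlined smi fast path:
    //   bound JumpPatchSite   -> EmitPatchInfo() records the patchable site
    //   unbound JumpPatchSite -> EmitPatchInfo() encodes "no inlined smi code"
    // Previously: bound -> patch_site->EmitPatchInfo(); unbound -> __ nop();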
@@ -1825,7 +1796,7 @@
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
@@ -1851,30 +1822,20 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
break;
}
case KEYED_PROPERTY: {
__ push(r0); // Preserve value.
- if (prop->is_synthetic()) {
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
- { AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
- }
- __ mov(r2, r0);
- __ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ mov(r1, r0);
- __ pop(r2);
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ mov(r1, r0);
+ __ pop(r2);
__ pop(r0); // Restore value.
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
break;
}
}
@@ -1885,8 +1846,6 @@
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
- // Left-hand sides that rewrite to explicit property accesses do not reach
- // here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->AsSlot() != NULL);
@@ -1900,7 +1859,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1922,18 +1881,7 @@
__ b(ne, &skip);
__ str(result_register(), MemOperand(fp, SlotOffset(slot)));
break;
- case Slot::CONTEXT: {
- __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
- __ ldr(r2, ContextOperand(r1, slot->index()));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, ip);
- __ b(ne, &skip);
- __ str(r0, ContextOperand(r1, slot->index()));
- int offset = Context::SlotOffset(slot->index());
- __ mov(r3, r0); // Preserve the stored value in r0.
- __ RecordWrite(r1, Operand(offset), r3, r2);
- break;
- }
+ case Slot::CONTEXT:
case Slot::LOOKUP:
__ push(r0);
__ mov(r0, Operand(slot->var()->name()));
@@ -2009,7 +1957,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2055,7 +2003,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2107,7 +2055,7 @@
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
- EmitCallIC(ic, mode, expr->id());
+ __ Call(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2141,7 +2089,7 @@
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2181,7 +2129,8 @@
__ push(r1);
// Push the receiver of the enclosing function and do runtime call.
- __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
+ int receiver_offset = 2 + info_->scope()->num_parameters();
+ __ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize));
__ push(r1);
// Push the strict mode flag.
__ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
@@ -2300,9 +2249,9 @@
__ bind(&done);
// Push function.
__ push(r0);
- // Push global receiver.
- __ ldr(r1, GlobalObjectOperand());
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing the hole to the call function stub.
+ __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
__ push(r1);
__ bind(&call);
}
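
A note on the sentinel: the call-function stub is assumed to treat a hole receiver as "use the implicit global receiver" and to load it itself, which is what allows the two global-object loads to be dropped here. Assumed stub-side handling, as a sketch:

    //   if (receiver == the_hole) {
    //     receiver = global_object->global_receiver();  // resolved in the stub
    //   }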
@@ -2324,7 +2273,7 @@
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed EmitCallIC.
+ // for a regular property use EmitKeyedCallWithIC.
if (prop->is_synthetic()) {
// Do not visit the object and key subexpressions (they are shared
// by all occurrences of the same rewritten parameter).
@@ -2342,7 +2291,7 @@
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ ldr(r1, GlobalObjectOperand());
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ Push(r0, r1); // Function, receiver.
@@ -2468,9 +2417,9 @@
__ tst(r1, Operand(1 << Map::kIsUndetectable));
__ b(ne, if_false);
__ ldrb(r1, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ b(lt, if_false);
- __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+ __ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(le, if_true, if_false, fall_through);
@@ -2491,7 +2440,7 @@
&if_true, &if_false, &fall_through);
__ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
@@ -2588,8 +2537,7 @@
// If a valueOf property is not found on the object, check that its
// prototype is the unmodified String prototype. If not, the result is false.
__ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, if_false);
+ __ JumpIfSmi(r2, if_false);
__ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
__ ldr(r3, ContextOperand(cp, Context::GLOBAL_INDEX));
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
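
Many hunks in this patch fold the two-instruction smi test into the JumpIfSmi macro. Judging from every replaced sequence, the macro is equivalent to:

    //   __ tst(value, Operand(kSmiTagMask));
    //   __ b(eq, smi_label);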
@@ -2734,7 +2682,7 @@
// parameter count in r0.
VisitForAccumulatorValue(args->at(0));
__ mov(r1, r0);
- __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(r0);
@@ -2746,7 +2694,7 @@
Label exit;
// Get the number of formal parameters.
- __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2774,16 +2722,18 @@
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE); // Map is now in r0.
+ __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
+ // Map is now in r0.
__ b(lt, &null);
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ cmp(r1, Operand(JS_FUNCTION_TYPE));
- __ b(eq, &function);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
+ __ b(ge, &function);
// Check if the constructor in the map is a function.
__ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
@@ -2826,13 +2776,12 @@
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
}
-#endif
+
// Finally, we're expected to leave a value on the top of the stack.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
context()->Plug(r0);
@@ -3343,8 +3292,7 @@
__ b(eq, &ok);
// Fail if either is a non-HeapObject.
__ and_(tmp, left, Operand(right));
- __ tst(tmp, Operand(kSmiTagMask));
- __ b(eq, &fail);
+ __ JumpIfSmi(tmp, &fail);
__ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
__ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
__ cmp(tmp2, Operand(JS_REGEXP_TYPE));
@@ -3434,9 +3382,7 @@
__ b(ne, &bailout);
// Check that the array has fast elements.
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ tst(scratch2, Operand(1 << Map::kHasFastElements));
- __ b(eq, &bailout);
+ __ CheckFastElements(scratch1, scratch2, &bailout);
// If the array has length zero, return the empty string.
__ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
@@ -3634,6 +3580,39 @@
}
+void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // Load the function into r0.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Prepare for the test.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Test for strict mode function.
+ __ ldr(r1, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ b(ne, if_true);
+
+ // Test for native function.
+ __ tst(r1, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, if_true);
+
+ // Not native or strict-mode function.
+ __ b(if_false);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ context()->Plug(if_true, if_false);
+}
+
+
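
The two tst instructions in EmitIsNativeOrStrictMode test flag bits inside the smi-tagged compiler-hints word, which is why each bit index is shifted by kSmiTagSize. A standalone sketch of that addressing (the tag size constant and helper name are assumptions mirroring the assembly):

    #include <stdint.h>

    static const int kSmiTagSize = 1;  // assumed, as in the shifts above

    // A flag at bit N of the untagged hints word sits at bit N + kSmiTagSize
    // of the tagged (smi) word, so the code tests 1 << (N + kSmiTagSize).
    static bool TestCompilerHint(uint32_t tagged_hints, int bit) {
      return (tagged_hints & (1u << (bit + kSmiTagSize))) != 0;
    }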
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -3666,7 +3645,7 @@
isolate()->stub_cache()->ComputeCallInitialize(arg_count,
NOT_IN_LOOP,
mode);
- EmitCallIC(ic, mode, expr->id());
+ __ Call(ic, mode, expr->id());
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -3774,8 +3753,7 @@
Comment cmt(masm_, "[ UnaryOperation (ADD)");
VisitForAccumulatorValue(expr->expression());
Label no_conversion;
- __ tst(result_register(), Operand(kSmiTagMask));
- __ b(eq, &no_conversion);
+ __ JumpIfSmi(result_register(), &no_conversion);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
@@ -3809,7 +3787,7 @@
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(r0);
}
@@ -3826,7 +3804,7 @@
}
// Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->expression()->AsProperty();
@@ -3841,7 +3819,7 @@
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
@@ -3854,15 +3832,8 @@
__ push(r0);
EmitNamedPropertyLoad(prop);
} else {
- if (prop->is_arguments_access()) {
- VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
- __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
- __ push(r0);
- __ mov(r0, Operand(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
EmitKeyedPropertyLoad(prop);
@@ -3927,7 +3898,8 @@
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ patch_site.EmitPatchInfo();
__ bind(&done);
// Store the value returned in r0.
@@ -3958,7 +3930,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3975,7 +3947,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4001,7 +3973,7 @@
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL &&
@@ -4024,30 +3996,18 @@
context()->Plug(r0);
} else {
// This expression cannot throw a reference error at the top level.
- context()->HandleExpression(expr);
+ VisitInCurrentContext(expr);
}
}
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
- // Check for the pattern: typeof <expression> == <string literal>.
- Literal* right_literal = right->AsLiteral();
- if (right_literal == NULL) return false;
- Handle<Object> right_literal_value = right_literal->handle();
- if (!right_literal_value->IsString()) return false;
- UnaryOperation* left_unary = left->AsUnaryOperation();
- if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
- Handle<String> check = Handle<String>::cast(right_literal_value);
-
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(left_unary->expression());
+ VisitForTypeofValue(expr);
}
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
@@ -4082,7 +4042,7 @@
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r0, FIRST_FUNCTION_CLASS_TYPE);
+ __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
Split(ge, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
@@ -4090,10 +4050,10 @@
__ CompareRoot(r0, Heap::kNullValueRootIndex);
__ b(eq, if_true);
// Check for JS objects => true.
- __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
- __ b(lo, if_false);
- __ CompareInstanceType(r0, r1, FIRST_FUNCTION_CLASS_TYPE);
- __ b(hs, if_false);
+ __ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ b(lt, if_false);
+ __ CompareInstanceType(r0, r1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ b(gt, if_false);
// Check for undetectable objects => false.
__ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
@@ -4101,8 +4061,18 @@
} else {
if (if_false != fall_through) __ jmp(if_false);
}
+}
- return true;
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+ __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
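
For orientation: the typeof pattern comes from the deleted ARM-local TryLiteralCompare below (EQ and EQ_STRICT both qualify), and the dispatch presumably now lives in the shared full-codegen; comparison against undefined appears to be newly recognized there. The two emitters above serve:

    //   typeof <expr> == "<string literal>"  ->  EmitLiteralCompareTypeof
    //   <expr> == undefined                  ->  EmitLiteralCompareUndefined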
@@ -4122,14 +4092,12 @@
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
- if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
context()->Plug(if_true, if_false);
return;
}
+ Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
case Token::IN:
@@ -4204,7 +4172,8 @@
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ cmp(r0, Operand(0));
Split(cond, if_true, if_false, fall_through);
@@ -4237,8 +4206,7 @@
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ cmp(r0, r1);
__ b(eq, if_true);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, if_false);
+ __ JumpIfSmi(r0, if_false);
// It can be an undetectable object.
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
@@ -4266,70 +4234,6 @@
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- RelocInfo::Mode mode,
- unsigned ast_id) {
- ASSERT(mode == RelocInfo::CODE_TARGET ||
- mode == RelocInfo::CODE_TARGET_CONTEXT);
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1, r1, r2);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1, r1, r2);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1, r1, r2);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1, r1, r2);
- default:
- break;
- }
- if (ast_id == kNoASTId || mode == RelocInfo::CODE_TARGET_CONTEXT) {
- __ Call(ic, mode);
- } else {
- ASSERT(mode == RelocInfo::CODE_TARGET);
- mode = RelocInfo::CODE_TARGET_WITH_ID;
- __ CallWithAstId(ic, mode, ast_id);
- }
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- JumpPatchSite* patch_site,
- unsigned ast_id) {
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1, r1, r2);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1, r1, r2);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1, r1, r2);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1, r1, r2);
- default:
- break;
- }
-
- if (ast_id == kNoASTId) {
- __ Call(ic, RelocInfo::CODE_TARGET);
- } else {
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET_WITH_ID, ast_id);
- }
- if (patch_site != NULL && patch_site->is_bound()) {
- patch_site->EmitPatchInfo();
- } else {
- __ nop(); // Signals no inlined code.
- }
-}
-
-
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ str(value, MemOperand(fp, frame_offset));
@@ -4341,6 +4245,27 @@
}
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope()) {
+ // Contexts nested in the global context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ mov(ip, Operand(Smi::FromInt(0)));
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
+ } else {
+ ASSERT(declaration_scope->is_function_scope());
+ __ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ __ push(ip);
+}
+
+
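
The helper's decision table, restated compactly from the branches above:

    //   global scope    -> push Smi(0); the runtime substitutes the empty function
    //   eval scope      -> push the calling context's closure (cp[CLOSURE_INDEX])
    //   function scope  -> push the frame's own function (fp[kFunctionOffset])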
// ----------------------------------------------------------------------------
// Non-local control flow support.
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 2123163..6038153 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -79,15 +79,14 @@
// elements map.
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the receiver is a valid JS object.
- __ CompareObjectType(receiver, t0, t1, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(receiver, t0, t1, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, miss);
// If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
GenerateGlobalInstanceTypeCheck(masm, t1, miss);
@@ -213,101 +212,6 @@
}
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register key,
- Register result,
- Register t0,
- Register t1,
- Register t2) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'elements'.
- // Unchanged on bailout so 'key' or 'elements' can be used
- // in further computation.
- //
- // Scratch registers:
- //
- // t0 - holds the untagged key on entry and holds the hash once computed.
- //
- // t1 - used to hold the capacity mask of the dictionary
- //
- // t2 - used for the index into the dictionary.
- Label done;
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- __ mvn(t1, Operand(t0));
- __ add(t0, t1, Operand(t0, LSL, 15));
- // hash = hash ^ (hash >> 12);
- __ eor(t0, t0, Operand(t0, LSR, 12));
- // hash = hash + (hash << 2);
- __ add(t0, t0, Operand(t0, LSL, 2));
- // hash = hash ^ (hash >> 4);
- __ eor(t0, t0, Operand(t0, LSR, 4));
- // hash = hash * 2057;
- __ mov(t1, Operand(2057));
- __ mul(t0, t0, t1);
- // hash = hash ^ (hash >> 16);
- __ eor(t0, t0, Operand(t0, LSR, 16));
-
- // Compute the capacity mask.
- __ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
- __ mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
- __ sub(t1, t1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before giving up.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use t2 for index calculations and keep the hash intact in t0.
- __ mov(t2, t0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- __ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
- }
- __ and_(t2, t2, Operand(t1));
-
- // Scale the index by multiplying by the element size.
- ASSERT(NumberDictionary::kEntrySize == 3);
- __ add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
-
- // Check if the key is identical to the name.
- __ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
- __ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
- __ cmp(key, Operand(ip));
- if (i != kProbes - 1) {
- __ b(eq, &done);
- } else {
- __ b(ne, miss);
- }
- }
-
- __ bind(&done);
- // Check that the value is a normal property.
- // t2: elements + (index * kPointerSize)
- const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- __ ldr(t1, FieldMemOperand(t2, kDetailsOffset));
- __ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
- __ b(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
- __ ldr(result, FieldMemOperand(t2, kValueOffset));
-}
-
-
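
The removed probe duplicated logic now kept only in MacroAssembler::LoadFromNumberDictionary. For reference, the integer hash that the deleted comments spell out, as plain C++ (it must stay in sync with ComputeIntegerHash in utils.h; a sketch, not the shipped helper):

    #include <stdint.h>

    static uint32_t IntegerHash(uint32_t hash) {
      hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash;
    }

Probing then visits entry ((hash + i + i*i) & mask) for i = 0..3, each entry spanning three pointers (hence the kEntrySize == 3 assert).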
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
@@ -503,8 +407,7 @@
// to probe.
//
// Check for number.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &number);
+ __ JumpIfSmi(r1, &number);
__ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
__ b(ne, &non_number);
__ bind(&number);
@@ -548,8 +451,7 @@
// r1: function
// Check that the value isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(r1, miss);
// Check that the value is a JSFunction.
__ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
@@ -624,8 +526,7 @@
if (id == IC::kCallIC_Miss) {
Label invoke, global;
__ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &invoke);
+ __ JumpIfSmi(r2, &invoke);
__ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
__ b(eq, &global);
__ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
@@ -742,7 +643,7 @@
__ b(ne, &slow_load);
__ mov(r0, Operand(r2, ASR, kSmiTagSize));
// r0: untagged index
- GenerateNumberDictionaryLoad(masm, &slow_load, r4, r2, r1, r0, r3, r5);
+ __ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
__ jmp(&do_call);
@@ -812,8 +713,7 @@
// Check if the name is a string.
Label miss;
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r2, &miss);
__ IsObjectJSStringType(r2, r0, &miss);
GenerateCallNormal(masm, argc);
@@ -887,6 +787,174 @@
}
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the map check
+ // later, we do not need to check for interceptors or whether it
+ // requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
+ __ b(lt, slow_case);
+
+ // Check that the key is a positive smi.
+ __ tst(key, Operand(0x80000001));
+ __ b(ne, slow_case);
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+ __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
+ __ cmp(key, Operand(scratch2));
+ __ b(cs, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ const int kOffset =
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+ __ mov(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, key, scratch3);
+ __ add(scratch3, scratch3, Operand(kOffset));
+
+ __ ldr(scratch2, MemOperand(scratch1, scratch3));
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch2, scratch3);
+ __ b(eq, unmapped_case);
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ mov(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, scratch2, scratch3);
+ __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+ return MemOperand(scratch1, scratch3);
+}
+
+
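
The offsets above encode the assumed layout of a non-strict arguments object's parameter map (consistent with kOffset here and kBackingStoreOffset below):

    //   elements[0]      -> context holding the mapped parameters
    //   elements[1]      -> FixedArray backing store for unmapped values
    //   elements[2 + i]  -> context slot index for parameter i, or the hole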
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map. The parameter_map register
+ // must be loaded with the parameter map of the arguments object and is
+ // overwritten.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+ DONT_DO_SMI_CHECK);
+ __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmp(key, Operand(scratch));
+ __ b(cs, slow_case);
+ __ mov(scratch, Operand(kPointerSize >> 1));
+ __ mul(scratch, key, scratch);
+ __ add(scratch,
+ scratch,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ return MemOperand(backing_store, scratch);
+}
+
+
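
Both lookups scale the key by kPointerSize >> 1 rather than kPointerSize because the key is still a smi, i.e. the untagged value shifted left by one. The identity, assuming 32-bit smis with a one-bit tag:

    //   smi_key * (kPointerSize >> 1) == (smi_key >> 1) * kPointerSize
    //                                 == untagged_key * kPointerSize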
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ Label slow, notin;
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, r1, r0, r2, r3, r4, &notin, &slow);
+ __ ldr(r0, mapped_location);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in r2.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, r0, r2, r3, &slow);
+ __ ldr(r2, unmapped_location);
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ cmp(r2, r3);
+ __ b(eq, &slow);
+ __ mov(r0, r2);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+ Label slow, notin;
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
+ __ str(r0, mapped_location);
+ __ add(r6, r3, r5);
+ __ RecordWrite(r3, r6, r9);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in r3.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
+ __ str(r0, unmapped_location);
+ __ add(r6, r3, r4);
+ __ RecordWrite(r3, r6, r9);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label slow, notin;
+ // Load receiver.
+ __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, r1, r2, r3, r4, r5, &notin, &slow);
+ __ ldr(r1, mapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow, r3);
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in r3.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, r2, r3, r4, &slow);
+ __ ldr(r1, unmapped_location);
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ cmp(r1, r3);
+ __ b(eq, &slow);
+ GenerateFunctionTailCall(masm, argc, &slow, r3);
+ __ bind(&slow);
+ GenerateMiss(masm, argc);
+}
+
+
+Object* KeyedLoadIC_Miss(Arguments args);
+
+
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// ---------- S t a t e --------------
// -- lr : return address
@@ -944,11 +1012,8 @@
GenerateKeyedLoadReceiverCheck(
masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
- // Check the "has fast elements" bit in the receiver's map which is
- // now in r2.
- __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset));
- __ tst(r3, Operand(1 << Map::kHasFastElements));
- __ b(eq, &check_number_dictionary);
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(r2, r3, &check_number_dictionary);
GenerateFastArrayLoad(
masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
@@ -967,7 +1032,7 @@
__ cmp(r3, ip);
__ b(ne, &slow);
__ mov(r2, Operand(r0, ASR, kSmiTagSize));
- GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r0, r2, r3, r5);
+ __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
__ Ret();
// Slow case, key and receiver still in r0 and r1.
@@ -1214,11 +1279,9 @@
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
- __ tst(key, Operand(kSmiTagMask));
- __ b(ne, &slow);
+ __ JumpIfNotSmi(key, &slow);
// Check that the object isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, &slow);
+ __ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
@@ -1230,9 +1293,13 @@
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ cmp(r4, Operand(JS_ARRAY_TYPE));
__ b(eq, &array);
- // Check that the object is some kind of JS object.
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ // Check that the object is some kind of JSObject.
+ __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
__ b(lt, &slow);
+ __ cmp(r4, Operand(JS_PROXY_TYPE));
+ __ b(eq, &slow);
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ b(eq, &slow);
// Object case: Check key against length in the elements array.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index e32cd0c..b96805e 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -68,13 +68,13 @@
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); it.HasNext(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Next());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
ASSERT(operand->HasFixedPolicy() ||
operand->IsUsedAtStart());
}
- for (TempIterator it(this); it.HasNext(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Next());
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
}
}
@@ -111,21 +111,18 @@
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- inputs_.PrintOperandsTo(stream);
+ for (int i = 0; i < inputs_.length(); i++) {
+ if (i > 0) stream->Add(" ");
+ inputs_[i]->PrintTo(stream);
+ }
}
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
- results_.PrintOperandsTo(stream);
-}
-
-
-template<typename T, int N>
-void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
- for (int i = 0; i < N; i++) {
+ for (int i = 0; i < results_.length(); i++) {
if (i > 0) stream->Add(" ");
- elems_[i]->PrintTo(stream);
+ results_[i]->PrintTo(stream);
}
}
@@ -268,12 +265,6 @@
}
-void LTypeofIs::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
-}
-
-
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
InputAt(0)->PrintTo(stream);
@@ -343,13 +334,6 @@
}
-void LClassOfTest::PrintDataTo(StringStream* stream) {
- stream->Add("= class_of_test(");
- InputAt(0)->PrintTo(stream);
- stream->Add(", \"%o\")", *hydrogen()->class_name());
-}
-
-
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
@@ -388,6 +372,15 @@
}
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
@@ -436,8 +429,7 @@
LLabel* label = LLabel::cast(first_instr);
if (last_instr->IsGoto()) {
LGoto* goto_instr = LGoto::cast(last_instr);
- if (!goto_instr->include_stack_check() &&
- label->IsRedundant() &&
+ if (label->IsRedundant() &&
!label->is_loop_header()) {
bool can_eliminate = true;
for (int i = first + 1; i < last && can_eliminate; ++i) {
@@ -718,7 +710,9 @@
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- instr->set_environment(CreateEnvironment(hydrogen_env));
+ int argument_index_accumulator = 0;
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator));
return instr;
}
@@ -808,6 +802,11 @@
}
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+ return AssignEnvironment(new LDeoptimize);
+}
+
+
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
return AssignEnvironment(new LDeoptimize);
}
@@ -821,7 +820,7 @@
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
+ return DefineAsRegister(new LBitI(op, left, right));
} else {
ASSERT(instr->representation().IsTagged());
ASSERT(instr->left()->representation().IsTagged());
@@ -848,11 +847,11 @@
}
ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->OperandAt(0)->representation().IsInteger32());
- ASSERT(instr->OperandAt(1)->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_value = instr->OperandAt(1);
+ HValue* right_value = instr->right();
LOperand* right = NULL;
int constant_value = 0;
if (right_value->IsConstant()) {
@@ -860,7 +859,7 @@
right = chunk_->DefineConstantOperand(constant);
constant_value = constant->Integer32Value() & 0x1f;
} else {
- right = UseRegister(right_value);
+ right = UseRegisterAtStart(right_value);
}
// Shift operations can only deoptimize if we do a logical shift
@@ -877,7 +876,7 @@
}
LInstruction* result =
- DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+ DefineAsRegister(new LShiftI(op, left, right, does_deopt));
return does_deopt ? AssignEnvironment(result) : result;
}
@@ -891,7 +890,7 @@
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LArithmeticD* result = new LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ return DefineAsRegister(result);
}
@@ -989,28 +988,20 @@
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsTest() && !instr->IsGoto()) {
- ASSERT(instr->IsControl());
- HTest* test = HTest::cast(current);
- instr->set_hydrogen_value(test->value());
- HBasicBlock* first = test->FirstSuccessor();
- HBasicBlock* second = test->SecondSuccessor();
- ASSERT(first != NULL && second != NULL);
- instr->SetBranchTargets(first->block_id(), second->block_id());
- } else {
- instr->set_hydrogen_value(current);
- }
-
+ instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
}
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+ HEnvironment* hydrogen_env,
+ int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@@ -1020,7 +1011,6 @@
argument_count_,
value_count,
outer);
- int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1029,7 +1019,7 @@
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument(argument_index++);
+ op = new LArgument((*argument_index_accumulator)++);
} else {
op = UseAny(value);
}
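
Reading of the accumulator change, hedged: environments chain through outer() for inlined frames, and the pushed arguments of all frames share a single area, so LArgument indices must keep counting across the whole chain rather than restart at zero per environment. Schematically:

    //   before: outer frame args 0..m-1, inlined frame args 0..n-1  (indices clash)
    //   after:  outer frame args 0..m-1, inlined frame args m..m+n-1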
@@ -1041,102 +1031,19 @@
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- LInstruction* result = new LGoto(instr->FirstSuccessor()->block_id(),
- instr->include_stack_check());
- if (instr->include_stack_check()) result = AssignPointerMap(result);
- return result;
+ return new LGoto(instr->FirstSuccessor()->block_id());
}
-LInstruction* LChunkBuilder::DoTest(HTest* instr) {
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
- if (!v->EmitAtUses()) {
- return new LBranch(UseRegisterAtStart(v));
- } else if (v->IsClassOfTest()) {
- HClassOfTest* compare = HClassOfTest::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister());
- } else if (v->IsCompare()) {
- HCompare* compare = HCompare::cast(v);
- Token::Value op = compare->token();
- HValue* left = compare->left();
- HValue* right = compare->right();
- Representation r = compare->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else if (r.IsDouble()) {
- ASSERT(left->representation().IsDouble());
- ASSERT(right->representation().IsDouble());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else {
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- bool reversed = op == Token::GT || op == Token::LTE;
- LOperand* left_operand = UseFixed(left, reversed ? r0 : r1);
- LOperand* right_operand = UseFixed(right, reversed ? r1 : r0);
- LInstruction* result = new LCmpTAndBranch(left_operand, right_operand);
- return MarkAsCall(result, instr);
- }
- } else if (v->IsIsSmi()) {
- HIsSmi* compare = HIsSmi::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsSmiAndBranch(Use(compare->value()));
- } else if (v->IsIsUndetectable()) {
- HIsUndetectable* compare = HIsUndetectable::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
- TempRegister());
- } else if (v->IsHasInstanceType()) {
- HHasInstanceType* compare = HHasInstanceType::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsHasCachedArrayIndex()) {
- HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsIsNull()) {
- HIsNull* compare = HIsNull::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsIsObject()) {
- HIsObject* compare = HIsObject::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new LIsObjectAndBranch(UseRegister(compare->value()), temp);
- } else if (v->IsCompareJSObjectEq()) {
- HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
- return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsCompareSymbolEq()) {
- HCompareSymbolEq* compare = HCompareSymbolEq::cast(v);
- return new LCmpSymbolEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsInstanceOf()) {
- HInstanceOf* instance_of = HInstanceOf::cast(v);
- LInstruction* result =
- new LInstanceOfAndBranch(UseFixed(instance_of->left(), r0),
- UseFixed(instance_of->right(), r1));
- return MarkAsCall(result, instr);
- } else if (v->IsTypeofIs()) {
- HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
- } else if (v->IsIsConstructCall()) {
- return new LIsConstructCallAndBranch(TempRegister());
- } else if (v->IsConstant()) {
+ if (v->EmitAtUses()) {
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
- } else {
- Abort("Undefined compare before branch");
- return NULL;
}
+ return new LBranch(UseRegisterAtStart(v));
}
@@ -1170,7 +1077,7 @@
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0), FixedTemp(r4));
+ new LInstanceOfKnownGlobal(UseFixed(instr->left(), r0), FixedTemp(r4));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1195,6 +1102,11 @@
}
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
}
@@ -1245,15 +1157,15 @@
LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
switch (op) {
case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
case kMathFloor:
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
case kMathSqrt:
- return DefineSameAsFirst(result);
+ return DefineAsRegister(result);
case kMathRound:
return AssignEnvironment(DefineAsRegister(result));
case kMathPowHalf:
- return DefineSameAsFirst(result);
+ return DefineAsRegister(result);
default:
UNREACHABLE();
return NULL;
@@ -1331,7 +1243,7 @@
LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
ASSERT(instr->value()->representation().IsInteger32());
ASSERT(instr->representation().IsInteger32());
- return DefineSameAsFirst(new LBitNotI(UseRegisterAtStart(instr->value())));
+ return DefineAsRegister(new LBitNotI(UseRegisterAtStart(instr->value())));
}
@@ -1376,15 +1288,20 @@
mod = new LModI(value, UseOrConstant(instr->right()));
} else {
LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegisterAtStart(instr->right());
+ LOperand* divisor = UseRegister(instr->right());
mod = new LModI(dividend,
divisor,
TempRegister(),
- FixedTemp(d1),
- FixedTemp(d2));
+ FixedTemp(d10),
+ FixedTemp(d11));
}
- return AssignEnvironment(DefineSameAsFirst(mod));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanBeDivByZero)) {
+ return AssignEnvironment(DefineAsRegister(mod));
+ } else {
+ return DefineAsRegister(mod);
+ }
} else if (instr->representation().IsTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else {
@@ -1404,16 +1321,22 @@
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* left;
LOperand* right = UseOrConstant(instr->MostConstantOperand());
LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+ (instr->CheckFlag(HValue::kCanOverflow) ||
+ !right->IsConstantOperand())) {
+ left = UseRegister(instr->LeastConstantOperand());
temp = TempRegister();
+ } else {
+ left = UseRegisterAtStart(instr->LeastConstantOperand());
}
- LMulI* mul = new LMulI(left, right, temp);
- return AssignEnvironment(DefineSameAsFirst(mul));
+ return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp)));
+
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
+
} else {
return DoArithmeticT(Token::MUL, instr);
}
@@ -1427,7 +1350,7 @@
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new LSubI(left, right);
- LInstruction* result = DefineSameAsFirst(sub);
+ LInstruction* result = DefineAsRegister(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
@@ -1447,7 +1370,7 @@
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
LAddI* add = new LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
+ LInstruction* result = DefineAsRegister(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
@@ -1478,88 +1401,84 @@
}
-LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
Token::Value op = instr->token();
Representation r = instr->GetInputRepresentation();
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ bool reversed = (op == Token::GT || op == Token::LTE);
+ LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
+ LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
+ LCmpT* result = new LCmpT(left, right);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+ HCompareIDAndBranch* instr) {
+ Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else if (r.IsDouble()) {
+ return new LCmpIDAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
- LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
- LCmpT* result = new LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
+ return new LCmpIDAndBranch(left, right);
}
}
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
- HCompareJSObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
- return DefineAsRegister(result);
+ return new LCmpObjectEqAndBranch(left, right);
}
-LInstruction* LChunkBuilder::DoCompareSymbolEq(
- HCompareSymbolEq* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LCmpSymbolEq* result = new LCmpSymbolEq(left, right);
- return DefineAsRegister(result);
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+ HCompareConstantEqAndBranch* instr) {
+ return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsNull(value));
+ return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsObject(value));
+ LOperand* temp = TempRegister();
+ return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp);
}
-LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseAtStart(instr->value());
-
- return DefineAsRegister(new LIsSmi(value));
+ return new LIsSmiAndBranch(Use(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsUndetectable(value));
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+ TempRegister());
}
-LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LHasInstanceType(value));
+ return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
}
@@ -1572,19 +1491,19 @@
}
-LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
- HHasCachedArrayIndex* instr) {
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
-
- return DefineAsRegister(new LHasCachedArrayIndex(value));
+ return new LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseTempRegister(instr->value());
- return DefineSameAsFirst(new LClassOfTest(value));
+ return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ TempRegister());
}
@@ -1607,10 +1526,16 @@
}
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+ LOperand* object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LElementsKind(object));
+}
+
+
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new LValueOf(object, TempRegister());
- return AssignEnvironment(DefineSameAsFirst(result));
+ return AssignEnvironment(DefineAsRegister(result));
}
@@ -1633,6 +1558,11 @@
}
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
// All HForceRepresentation instructions should be eliminated in the
// representation change phase of Hydrogen.
@@ -1660,7 +1590,7 @@
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
: NULL;
- LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d3)
+ LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d11)
: NULL;
res = DefineSameAsFirst(new LTaggedToI(value, temp1, temp2, temp3));
res = AssignEnvironment(res);
@@ -1754,19 +1684,44 @@
Representation input_rep = value->representation();
LOperand* reg = UseRegister(value);
if (input_rep.IsDouble()) {
- return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(d1)));
+ return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(d11)));
} else if (input_rep.IsInteger32()) {
return DefineAsRegister(new LClampIToUint8(reg));
} else {
ASSERT(input_rep.IsTagged());
// Register allocator doesn't (yet) support allocation of double
- // temps. Reserve d1 explicitly.
- LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(d1));
+ // temps. Reserve d11 explicitly.
+ LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(d11));
return AssignEnvironment(DefineAsRegister(result));
}
}
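+// HToInt32 truncates its input to an int32: double inputs go through
+// LDoubleToI, tagged inputs through LTaggedToI, and untagged int32 inputs
+// are a no-op that canonicalization has already removed.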
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LDoubleToI* res = new LDoubleToI(reg, temp1, temp2);
+ return AssignEnvironment(DefineAsRegister(res));
+ } else if (input_rep.IsInteger32()) {
+ // Canonicalization should already have removed the hydrogen instruction in
+ // this case, since it is a noop.
+ UNREACHABLE();
+ return NULL;
+ } else {
+ ASSERT(input_rep.IsTagged());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = FixedTemp(d11);
+ LTaggedToI* res = new LTaggedToI(reg, temp1, temp2, temp3);
+ return AssignEnvironment(DefineSameAsFirst(res));
+ }
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
return new LReturn(UseFixed(instr->value(), r0));
}
@@ -1898,19 +1853,33 @@
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
- return AssignEnvironment(DefineSameAsFirst(result));
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+ HLoadKeyedFastDoubleElement* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* elements = UseTempRegister(instr->elements());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedFastDoubleElement* result =
+ new LLoadKeyedFastDoubleElement(elements, key);
+ return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
- ExternalArrayType array_type = instr->array_type();
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
Representation representation(instr->representation());
ASSERT(
- (representation.IsInteger32() && (array_type != kExternalFloatArray &&
- array_type != kExternalDoubleArray)) ||
- (representation.IsDouble() && (array_type == kExternalFloatArray ||
- array_type == kExternalDoubleArray)));
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
@@ -1919,7 +1888,7 @@
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
- return (array_type == kExternalUnsignedIntArray) ?
+ return (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
AssignEnvironment(load_instr) : load_instr;
}
@@ -1953,21 +1922,38 @@
}
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+ HStoreKeyedFastDoubleElement* instr) {
+ ASSERT(instr->value()->representation().IsDouble());
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* val = UseTempRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+ return new LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
Representation representation(instr->value()->representation());
- ExternalArrayType array_type = instr->array_type();
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
ASSERT(
- (representation.IsInteger32() && (array_type != kExternalFloatArray &&
- array_type != kExternalDoubleArray)) ||
- (representation.IsDouble() && (array_type == kExternalFloatArray ||
- array_type == kExternalDoubleArray)));
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
- bool val_is_temp_register = array_type == kExternalPixelArray ||
- array_type == kExternalFloatArray;
+ bool val_is_temp_register =
+ elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS;
LOperand* val = val_is_temp_register
? UseTempRegister(instr->value())
: UseRegister(instr->value());
@@ -2132,13 +2118,14 @@
}
-LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
- return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
- return DefineAsRegister(new LIsConstructCall());
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new LIsConstructCallAndBranch(TempRegister());
}
@@ -2174,7 +2161,12 @@
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- return MarkAsCall(new LStackCheck, instr);
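+ // A stack check at function entry is emitted as a call, while a check on
+ // a backward branch only carries a pointer map and an environment so the
+ // loop can be interrupted (and, if necessary, deoptimized) at that point.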
+ if (instr->is_function_entry()) {
+ return MarkAsCall(new LStackCheck, instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ return AssignEnvironment(AssignPointerMap(new LStackCheck));
+ }
}
@@ -2183,7 +2175,6 @@
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->function(),
- HEnvironment::LITHIUM,
undefined,
instr->call_kind());
current_block_->UpdateEnvironment(inner);
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 73c4a87..b477e99 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -32,6 +32,7 @@
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
+#include "utils.h"
namespace v8 {
namespace internal {
@@ -76,17 +77,12 @@
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
- V(ClassOfTest) \
V(ClassOfTestAndBranch) \
- V(CmpID) \
+ V(CmpConstantEqAndBranch) \
V(CmpIDAndBranch) \
- V(CmpJSObjectEq) \
- V(CmpJSObjectEqAndBranch) \
+ V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
- V(CmpSymbolEq) \
- V(CmpSymbolEqAndBranch) \
V(CmpT) \
- V(CmpTAndBranch) \
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
@@ -95,6 +91,7 @@
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(ElementsKind) \
V(ExternalArrayLength) \
V(FixedArrayLength) \
V(FunctionLiteral) \
@@ -102,26 +99,18 @@
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
- V(HasCachedArrayIndex) \
V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
- V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCall) \
V(IsConstructCallAndBranch) \
- V(IsNull) \
V(IsNullAndBranch) \
- V(IsObject) \
V(IsObjectAndBranch) \
- V(IsSmi) \
V(IsSmiAndBranch) \
- V(IsUndetectable) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
@@ -132,6 +121,7 @@
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
+ V(LoadKeyedFastDoubleElement) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadKeyedSpecializedArrayElement) \
@@ -158,6 +148,7 @@
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
+ V(StoreKeyedFastDoubleElement) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreKeyedSpecializedArrayElement) \
@@ -169,10 +160,10 @@
V(StringLength) \
V(SubI) \
V(TaggedToI) \
+ V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(Typeof) \
- V(TypeofIs) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
@@ -231,7 +222,6 @@
virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
- virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
@@ -286,37 +276,6 @@
};
-template<typename ElementType, int NumElements>
-class OperandContainer {
- public:
- OperandContainer() {
- for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
- }
- int length() { return NumElements; }
- ElementType& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
- }
- void PrintOperandsTo(StringStream* stream);
-
- private:
- ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class OperandContainer<ElementType, 0> {
- public:
- int length() { return 0; }
- void PrintOperandsTo(StringStream* stream) { }
- ElementType& operator[](int i) {
- UNREACHABLE();
- static ElementType t = 0;
- return t;
- }
-};
-
-
// R = number of result operands (0 or 1).
// I = number of input operands.
// T = number of temporary operands.
@@ -339,9 +298,9 @@
virtual void PrintOutputOperandTo(StringStream* stream);
protected:
- OperandContainer<LOperand*, R> results_;
- OperandContainer<LOperand*, I> inputs_;
- OperandContainer<LOperand*, T> temps_;
+ EmbeddedContainer<LOperand*, R> results_;
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
};
@@ -401,19 +360,16 @@
class LGoto: public LTemplateInstruction<0, 0, 0> {
public:
- LGoto(int block_id, bool include_stack_check = false)
- : block_id_(block_id), include_stack_check_(include_stack_check) { }
+ explicit LGoto(int block_id) : block_id_(block_id) { }
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int block_id() const { return block_id_; }
- bool include_stack_check() const { return include_stack_check_; }
private:
int block_id_;
- bool include_stack_check_;
};
@@ -489,16 +445,15 @@
public:
virtual bool IsControl() const { return true; }
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
- void SetBranchTargets(int true_block_id, int false_block_id) {
- true_block_id_ = true_block_id;
- false_block_id_ = false_block_id;
- }
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+ int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+ int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
private:
- int true_block_id_;
- int false_block_id_;
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
};
@@ -614,23 +569,6 @@
};
-class LCmpID: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpID(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
- }
-};
-
-
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -639,7 +577,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@@ -665,63 +603,31 @@
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+ "cmp-object-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
public:
- LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ explicit LCmpConstantEqAndBranch(LOperand* left) {
inputs_[0] = left;
- inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
- "cmp-jsobject-eq-and-branch")
+ DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+ "cmp-constant-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
};
-class LCmpSymbolEq: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpSymbolEq(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEq, "cmp-symbol-eq")
-};
-
-
-class LCmpSymbolEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpSymbolEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEqAndBranch, "cmp-symbol-eq-and-branch")
-};
-
-
-class LIsNull: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsNull(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
- bool is_strict() const { return hydrogen()->is_strict(); }
-};
-
class LIsNullAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsNullAndBranch(LOperand* value) {
@@ -729,7 +635,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
+ DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
bool is_strict() const { return hydrogen()->is_strict(); }
@@ -737,17 +643,7 @@
};
-class LIsObject: public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LIsObject(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 2> {
+class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -755,22 +651,12 @@
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LIsSmi: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
- DECLARE_HYDROGEN_ACCESSOR(IsSmi)
-};
-
-
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@@ -778,22 +664,12 @@
}
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsUndetectable(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
-};
-
-
class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
@@ -803,22 +679,12 @@
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-};
-
-
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -827,7 +693,7 @@
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -844,17 +710,6 @@
};
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
-};
-
-
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -863,18 +718,7 @@
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTest: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClassOfTest(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -889,7 +733,7 @@
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -903,21 +747,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LCmpTAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpTAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Token::Value op() const { return hydrogen()->token(); }
};
@@ -934,17 +764,6 @@
};
-class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
- public:
- LInstanceOfAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-};
-
-
class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
@@ -1057,7 +876,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Value)
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -1118,6 +937,17 @@
};
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LElementsKind(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
+};
+
+
class LValueOf: public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
@@ -1309,6 +1139,22 @@
};
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+ "load-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
@@ -1323,8 +1169,8 @@
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
};
@@ -1440,6 +1286,11 @@
};
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+};
+
+
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
@@ -1639,7 +1490,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1659,7 +1510,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1764,6 +1615,28 @@
};
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedFastDoubleElement(LOperand* elements,
+ LOperand* key,
+ LOperand* val) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+ "store-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+};
+
+
class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
@@ -1800,8 +1673,8 @@
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
};
@@ -2020,21 +1893,6 @@
};
-class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeofIs(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
@@ -2042,7 +1900,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
@@ -2050,13 +1908,6 @@
};
-class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
- DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
-};
-
-
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
@@ -2108,6 +1959,12 @@
class LStackCheck: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
};
@@ -2313,7 +2170,8 @@
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+ int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index d25ca49..ad8091b 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -146,11 +146,11 @@
// fp: Caller's frame pointer.
// lr: Caller's pc.
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). r5 is zero for method calls and non-zero for function
- // calls.
- if (info_->is_strict_mode()) {
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). r5 is zero for method calls and non-zero for
+ // function calls.
+ if (info_->is_strict_mode() || info_->is_native()) {
Label ok;
__ cmp(r5, Operand(0));
__ b(eq, &ok);
@@ -189,7 +189,7 @@
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
// Context is returned in both r0 and cp. It replaces the context
@@ -257,11 +257,20 @@
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- code->Generate();
- __ jmp(code->exit());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+ LDeferredCode* code = deferred_[i];
+ __ bind(code->entry());
+ code->Generate();
+ __ jmp(code->exit());
+ }
+
+ // Pad code to ensure that the last piece of deferred code has
+ // room for lazy bailout.
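+ // Deoptimizer::patch_size() bytes must be available after the last
+ // safepoint for the lazy bailout to be patched in; each iteration
+ // below emits one nop.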
+ while ((masm()->pc_offset() - LastSafepointEnd())
+ < Deoptimizer::patch_size()) {
+ __ nop();
+ }
}
// Force constant pool emission at the end of the deferred code to make
@@ -777,7 +786,7 @@
void LCodeGen::RecordPosition(int position) {
- if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+ if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
}
@@ -880,6 +889,7 @@
void LCodeGen::DoModI(LModI* instr) {
if (instr->hydrogen()->HasPowerOf2Divisor()) {
Register dividend = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
int32_t divisor =
HConstant::cast(instr->hydrogen()->right())->Integer32Value();
@@ -889,17 +899,15 @@
Label positive_dividend, done;
__ cmp(dividend, Operand(0));
__ b(pl, &positive_dividend);
- __ rsb(dividend, dividend, Operand(0));
- __ and_(dividend, dividend, Operand(divisor - 1));
- __ rsb(dividend, dividend, Operand(0), SetCC);
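+ // For a negative dividend, compute -((-dividend) & (divisor - 1)); the
+ // flags set by the and_ below catch the case where the remainder is
+ // zero and the correct JavaScript result would be -0.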
+ __ rsb(result, dividend, Operand(0));
+ __ and_(result, result, Operand(divisor - 1), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ b(ne, &done);
- DeoptimizeIf(al, instr->environment());
- } else {
- __ b(&done);
+ DeoptimizeIf(eq, instr->environment());
}
+ __ rsb(result, result, Operand(0));
+ __ b(&done);
__ bind(&positive_dividend);
- __ and_(dividend, dividend, Operand(divisor - 1));
+ __ and_(result, dividend, Operand(divisor - 1));
__ bind(&done);
return;
}
@@ -915,8 +923,6 @@
DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2));
DwVfpRegister quotient = double_scratch0();
- ASSERT(result.is(left));
-
ASSERT(!dividend.is(divisor));
ASSERT(!dividend.is(quotient));
ASSERT(!divisor.is(quotient));
@@ -932,6 +938,8 @@
DeoptimizeIf(eq, instr->environment());
}
+ __ Move(result, left);
+
// (0 % x) must yield 0 (if x is finite, which is the case here).
__ cmp(left, Operand(0));
__ b(eq, &done);
@@ -1128,68 +1136,125 @@
void LCodeGen::DoMulI(LMulI* instr) {
Register scratch = scratch0();
+ Register result = ToRegister(instr->result());
+ // Note that result may alias left.
Register left = ToRegister(instr->InputAt(0));
- Register right = EmitLoadRegister(instr->InputAt(1), scratch);
+ LOperand* right_op = instr->InputAt(1);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) &&
- !instr->InputAt(1)->IsConstantOperand()) {
- __ orr(ToRegister(instr->TempAt(0)), left, right);
- }
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- // scratch:left = left * right.
- __ smull(left, scratch, left, right);
- __ mov(ip, Operand(left, ASR, 31));
- __ cmp(ip, Operand(scratch));
- DeoptimizeIf(ne, instr->environment());
+ if (right_op->IsConstantOperand() && !can_overflow) {
+ // Use optimized code for specific constants.
+ int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+ if (bailout_on_minus_zero && (constant < 0)) {
+ // The case of a zero constant will be handled separately.
+ // If the constant is negative and left is zero, the result should be -0.
+ __ cmp(left, Operand(0));
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ switch (constant) {
+ case -1:
+ __ rsb(result, left, Operand(0));
+ break;
+ case 0:
+ if (bailout_on_minus_zero) {
+ // If left is strictly negative and the constant is zero, the
+ // result is -0. Deoptimize if required, otherwise return 0.
+ __ cmp(left, Operand(0));
+ DeoptimizeIf(mi, instr->environment());
+ }
+ __ mov(result, Operand(0));
+ break;
+ case 1:
+ __ Move(result, left);
+ break;
+ default:
+ // Multiplying by powers of two and powers of two plus or minus
+ // one can be done faster with shifted operands.
+ // For other constants we emit standard code.
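+ // For example, left * 8 is left << 3, left * 9 is left + (left << 3),
+ // and left * 7 is (left << 3) - left. The mask trick below computes
+ // the absolute value of the constant without a branch: mask is ~0 for
+ // a negative constant and 0 otherwise, e.g. (-6 + -1) ^ -1 == 6.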
+ int32_t mask = constant >> 31;
+ uint32_t constant_abs = (constant + mask) ^ mask;
+
+ if (IsPowerOf2(constant_abs) ||
+ IsPowerOf2(constant_abs - 1) ||
+ IsPowerOf2(constant_abs + 1)) {
+ if (IsPowerOf2(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ mov(result, Operand(left, LSL, shift));
+ } else if (IsPowerOf2(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ add(result, left, Operand(left, LSL, shift));
+ } else if (IsPowerOf2(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ rsb(result, left, Operand(left, LSL, shift));
+ }
+
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ rsb(result, result, Operand(0));
+
+ } else {
+ // Generate standard code.
+ __ mov(ip, Operand(constant));
+ __ mul(result, left, ip);
+ }
+ }
+
} else {
- __ mul(left, left, right);
- }
+ Register right = EmitLoadRegister(right_op, scratch);
+ if (bailout_on_minus_zero) {
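+ // The sign bit of left | right is set iff either operand is negative,
+ // which is exactly what the minus-zero check below needs when the
+ // product turns out to be zero.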
+ __ orr(ToRegister(instr->TempAt(0)), left, right);
+ }
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bail out if the result is supposed to be negative zero.
- Label done;
- __ cmp(left, Operand(0));
- __ b(ne, &done);
- if (instr->InputAt(1)->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) <= 0) {
- DeoptimizeIf(al, instr->environment());
- }
+ if (can_overflow) {
+ // scratch:result = left * right.
+ __ smull(result, scratch, left, right);
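+ // The 64-bit product fits in 32 bits iff the high word (scratch) equals
+ // the sign extension of the low word (result).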
+ __ cmp(scratch, Operand(result, ASR, 31));
+ DeoptimizeIf(ne, instr->environment());
} else {
- // Test the non-zero operand for negative sign.
+ __ mul(result, left, right);
+ }
+
+ if (bailout_on_minus_zero) {
+ // Bail out if the result is supposed to be negative zero.
+ Label done;
+ __ cmp(result, Operand(0));
+ __ b(ne, &done);
__ cmp(ToRegister(instr->TempAt(0)), Operand(0));
DeoptimizeIf(mi, instr->environment());
+ __ bind(&done);
}
- __ bind(&done);
}
}
void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
- Register result = ToRegister(left);
- Operand right_operand(no_reg);
+ LOperand* left_op = instr->InputAt(0);
+ LOperand* right_op = instr->InputAt(1);
+ ASSERT(left_op->IsRegister());
+ Register left = ToRegister(left_op);
+ Register result = ToRegister(instr->result());
+ Operand right(no_reg);
- if (right->IsStackSlot() || right->IsArgument()) {
- Register right_reg = EmitLoadRegister(right, ip);
- right_operand = Operand(right_reg);
+ if (right_op->IsStackSlot() || right_op->IsArgument()) {
+ right = Operand(EmitLoadRegister(right_op, ip));
} else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
- right_operand = ToOperand(right);
+ ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
+ right = ToOperand(right_op);
}
switch (instr->op()) {
case Token::BIT_AND:
- __ and_(result, ToRegister(left), right_operand);
+ __ and_(result, left, right);
break;
case Token::BIT_OR:
- __ orr(result, ToRegister(left), right_operand);
+ __ orr(result, left, right);
break;
case Token::BIT_XOR:
- __ eor(result, ToRegister(left), right_operand);
+ __ eor(result, left, right);
break;
default:
UNREACHABLE();
@@ -1199,54 +1264,62 @@
void LCodeGen::DoShiftI(LShiftI* instr) {
+ // Both 'left' and 'right' are "used at start" (see LChunkBuilder::DoShift), so
+ // result may alias either of them.
+ LOperand* right_op = instr->InputAt(1);
+ Register left = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
Register scratch = scratch0();
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
- Register result = ToRegister(left);
- if (right->IsRegister()) {
- // Mask the right operand.
- __ and_(scratch, ToRegister(right), Operand(0x1F));
+ if (right_op->IsRegister()) {
+ // Mask the shift count held in right_op to its low five bits.
+ __ and_(scratch, ToRegister(right_op), Operand(0x1F));
switch (instr->op()) {
case Token::SAR:
- __ mov(result, Operand(result, ASR, scratch));
+ __ mov(result, Operand(left, ASR, scratch));
break;
case Token::SHR:
if (instr->can_deopt()) {
- __ mov(result, Operand(result, LSR, scratch), SetCC);
+ __ mov(result, Operand(left, LSR, scratch), SetCC);
DeoptimizeIf(mi, instr->environment());
} else {
- __ mov(result, Operand(result, LSR, scratch));
+ __ mov(result, Operand(left, LSR, scratch));
}
break;
case Token::SHL:
- __ mov(result, Operand(result, LSL, scratch));
+ __ mov(result, Operand(left, LSL, scratch));
break;
default:
UNREACHABLE();
break;
}
} else {
- int value = ToInteger32(LConstantOperand::cast(right));
+ // Mask the constant shift count; JavaScript shift counts are taken
+ // modulo 32.
+ int value = ToInteger32(LConstantOperand::cast(right_op));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
case Token::SAR:
if (shift_count != 0) {
- __ mov(result, Operand(result, ASR, shift_count));
+ __ mov(result, Operand(left, ASR, shift_count));
+ } else {
+ __ Move(result, left);
}
break;
case Token::SHR:
- if (shift_count == 0 && instr->can_deopt()) {
- __ tst(result, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment());
+ if (shift_count != 0) {
+ __ mov(result, Operand(left, LSR, shift_count));
} else {
- __ mov(result, Operand(result, LSR, shift_count));
+ if (instr->can_deopt()) {
+ __ tst(left, Operand(0x80000000));
+ DeoptimizeIf(ne, instr->environment());
+ }
+ __ Move(result, left);
}
break;
case Token::SHL:
if (shift_count != 0) {
- __ mov(result, Operand(result, LSL, shift_count));
+ __ mov(result, Operand(left, LSL, shift_count));
+ } else {
+ __ Move(result, left);
}
break;
default:
@@ -1260,16 +1333,16 @@
void LCodeGen::DoSubI(LSubI* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
+ LOperand* result = instr->result();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
if (right->IsStackSlot() || right->IsArgument()) {
Register right_reg = EmitLoadRegister(right, ip);
- __ sub(ToRegister(left), ToRegister(left), Operand(right_reg), set_cond);
+ __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
ASSERT(right->IsRegister() || right->IsConstantOperand());
- __ sub(ToRegister(left), ToRegister(left), ToOperand(right), set_cond);
+ __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
}
if (can_overflow) {
@@ -1288,7 +1361,7 @@
ASSERT(instr->result()->IsDoubleRegister());
DwVfpRegister result = ToDoubleRegister(instr->result());
double v = instr->value();
- __ vmov(result, v);
+ __ Vmov(result, v);
}
@@ -1319,19 +1392,34 @@
}
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+
+ // Load map into |result|.
+ __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
+}
+
+
void LCodeGen::DoValueOf(LValueOf* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register map = ToRegister(instr->TempAt(0));
- ASSERT(input.is(result));
Label done;
// If the object is a smi return the object.
__ tst(input, Operand(kSmiTagMask));
+ __ Move(result, input, eq);
__ b(eq, &done);
// If the object is not a value type, return the object.
__ CompareObjectType(input, map, map, JS_VALUE_TYPE);
+ __ Move(result, input, ne);
__ b(ne, &done);
__ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
@@ -1340,9 +1428,9 @@
void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->Equals(instr->result()));
- __ mvn(ToRegister(input), Operand(ToRegister(input)));
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ __ mvn(result, Operand(input));
}
@@ -1360,16 +1448,16 @@
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
+ LOperand* result = instr->result();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
if (right->IsStackSlot() || right->IsArgument()) {
Register right_reg = EmitLoadRegister(right, ip);
- __ add(ToRegister(left), ToRegister(left), Operand(right_reg), set_cond);
+ __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
ASSERT(right->IsRegister() || right->IsConstantOperand());
- __ add(ToRegister(left), ToRegister(left), ToOperand(right), set_cond);
+ __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
}
if (can_overflow) {
@@ -1381,18 +1469,19 @@
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
+ DoubleRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
case Token::ADD:
- __ vadd(left, left, right);
+ __ vadd(result, left, right);
break;
case Token::SUB:
- __ vsub(left, left, right);
+ __ vsub(result, left, right);
break;
case Token::MUL:
- __ vmul(left, left, right);
+ __ vmul(result, left, right);
break;
case Token::DIV:
- __ vdiv(left, left, right);
+ __ vdiv(result, left, right);
break;
case Token::MOD: {
// Save r0-r3 on the stack.
@@ -1404,7 +1493,7 @@
ExternalReference::double_fp_operation(Token::MOD, isolate()),
0, 2);
// Move the result in the double result register.
- __ GetCFunctionDoubleResult(ToDoubleRegister(instr->result()));
+ __ GetCFunctionDoubleResult(result);
// Restore r0-r3.
__ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
@@ -1459,7 +1548,7 @@
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Representation r = instr->hydrogen()->representation();
+ Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, Operand(0));
@@ -1475,7 +1564,7 @@
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
- if (instr->hydrogen()->type().IsBoolean()) {
+ if (instr->hydrogen()->value()->type().IsBoolean()) {
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(reg, ip);
EmitBranch(true_block, false_block, eq);
@@ -1494,12 +1583,11 @@
__ b(eq, false_label);
__ cmp(reg, Operand(0));
__ b(eq, false_label);
- __ tst(reg, Operand(kSmiTagMask));
- __ b(eq, true_label);
+ __ JumpIfSmi(reg, true_label);
// Test double values. Zero and NaN are false.
Label call_stub;
- DoubleRegister dbl_scratch = d0;
+ DoubleRegister dbl_scratch = double_scratch0();
Register scratch = scratch0();
__ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
@@ -1527,45 +1615,17 @@
}
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+void LCodeGen::EmitGoto(int block) {
block = chunk_->LookupDestination(block);
int next_block = GetNextEmittedBlock(current_block_);
if (block != next_block) {
- // Perform stack overflow check if this goto needs it before jumping.
- if (deferred_stack_check != NULL) {
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, chunk_->GetAssemblyLabel(block));
- __ jmp(deferred_stack_check->entry());
- deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
- } else {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
+ __ jmp(chunk_->GetAssemblyLabel(block));
}
}
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
-}
-
-
void LCodeGen::DoGoto(LGoto* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- private:
- LGoto* instr_;
- };
-
- DeferredStackCheck* deferred = NULL;
- if (instr->include_stack_check()) {
- deferred = new DeferredStackCheck(this, instr);
- }
- EmitGoto(instr->block_id(), deferred);
+ EmitGoto(instr->block_id());
}
@@ -1602,34 +1662,6 @@
}
-void LCodeGen::DoCmpID(LCmpID* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- LOperand* result = instr->result();
- Register scratch = scratch0();
-
- Label unordered, done;
- if (instr->is_double()) {
- // Compare left and right as doubles and load the
- // resulting flags into the normal status register.
- __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
- // If a NaN is involved, i.e. the result is unordered (V set),
- // jump to unordered to return false.
- __ b(vs, &unordered);
- } else {
- EmitCmpI(left, right);
- }
-
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
- __ b(cc, &done);
-
- __ bind(&unordered);
- __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
@@ -1652,18 +1684,7 @@
}
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- __ cmp(left, Operand(right));
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-}
-
-
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1674,62 +1695,16 @@
}
-void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- __ cmp(left, Operand(right));
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-}
-
-
-void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
- __ cmp(left, Operand(right));
+ __ cmp(left, Operand(instr->hydrogen()->right()));
EmitBranch(true_block, false_block, eq);
}
-void LCodeGen::DoIsNull(LIsNull* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(reg, ip);
- if (instr->is_strict()) {
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
- } else {
- Label true_value, false_value, done;
- __ b(eq, &true_value);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(ip, reg);
- __ b(eq, &true_value);
- __ tst(reg, Operand(kSmiTagMask));
- __ b(eq, &false_value);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = result;
- __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- __ b(ne, &true_value);
- __ bind(&false_value);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&true_value);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register scratch = scratch0();
Register reg = ToRegister(instr->InputAt(0));
@@ -1751,8 +1726,7 @@
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(reg, ip);
__ b(eq, true_label);
- __ tst(reg, Operand(kSmiTagMask));
- __ b(eq, false_label);
+ __ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
__ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
@@ -1765,13 +1739,13 @@
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object) {
+ Register temp2 = scratch0();
__ JumpIfSmi(input, is_not_object);
- __ LoadRoot(temp1, Heap::kNullValueRootIndex);
- __ cmp(input, temp1);
+ __ LoadRoot(temp2, Heap::kNullValueRootIndex);
+ __ cmp(input, temp2);
__ b(eq, is_object);
// Load map.
@@ -1783,33 +1757,13 @@
// Load instance type and check that it is in object type range.
__ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
- __ cmp(temp2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ b(lt, is_not_object);
- __ cmp(temp2, Operand(LAST_JS_OBJECT_TYPE));
+ __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
return le;
}
-void LCodeGen::DoIsObject(LIsObject* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register temp = scratch0();
- Label is_false, is_true, done;
-
- Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
- __ b(true_cond, &is_true);
-
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ b(&done);
-
- __ bind(&is_true);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register temp1 = ToRegister(instr->TempAt(0));
@@ -1821,25 +1775,12 @@
Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition true_cond =
- EmitIsObject(reg, temp1, temp2, false_label, true_label);
+ EmitIsObject(reg, temp1, false_label, true_label);
EmitBranch(true_block, false_block, true_cond);
}
-void LCodeGen::DoIsSmi(LIsSmi* instr) {
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Register result = ToRegister(instr->result());
- Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
- __ tst(input_reg, Operand(kSmiTagMask));
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- Label done;
- __ b(eq, &done);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1850,25 +1791,6 @@
}
-void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Label false_label, done;
- __ JumpIfSmi(input, &false_label);
- __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kBitFieldOffset));
- __ tst(result, Operand(1 << Map::kIsUndetectable));
- __ b(eq, &false_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(&false_label);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1884,7 +1806,7 @@
}
-static InstanceType TestType(HHasInstanceType* instr) {
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
@@ -1893,7 +1815,7 @@
}
-static Condition BranchCondition(HHasInstanceType* instr) {
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == to) return eq;
@@ -1904,23 +1826,6 @@
}
-void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Label done;
- __ tst(input, Operand(kSmiTagMask));
- __ LoadRoot(result, Heap::kFalseValueRootIndex, eq);
- __ b(eq, &done);
- __ CompareObjectType(input, result, result, TestType(instr->hydrogen()));
- Condition cond = BranchCondition(instr->hydrogen());
- __ LoadRoot(result, Heap::kTrueValueRootIndex, cond);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, NegateCondition(cond));
- __ bind(&done);
-}
-
-
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
Register input = ToRegister(instr->InputAt(0));
@@ -1930,8 +1835,7 @@
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ tst(input, Operand(kSmiTagMask));
- __ b(eq, false_label);
+ __ JumpIfSmi(input, false_label);
__ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
@@ -1951,20 +1855,6 @@
}
-void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ ldr(scratch,
- FieldMemOperand(input, String::kHashFieldOffset));
- __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-}
-
-
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
@@ -1990,28 +1880,28 @@
Register temp2) {
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
- __ tst(input, Operand(kSmiTagMask));
- __ b(eq, is_false);
- __ CompareObjectType(input, temp, temp2, FIRST_JS_OBJECT_TYPE);
+ __ JumpIfSmi(input, is_false);
+ __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, is_false);
// Map is now in temp.
// Functions have class 'Function'.
- __ CompareInstanceType(temp, temp2, JS_FUNCTION_TYPE);
+ __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ b(eq, is_true);
+ __ b(ge, is_true);
} else {
- __ b(eq, is_false);
+ __ b(ge, is_false);
}
// Check if the constructor in the map is a function.
__ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
@@ -2037,27 +1927,6 @@
}
-void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- Label done, is_true, is_false;
-
- EmitClassOfTest(&is_true, &is_false, class_name, input, scratch0(), input);
- __ b(ne, &is_false);
-
- __ bind(&is_true);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
-
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = scratch0();
@@ -2101,20 +1970,6 @@
}
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
- ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ cmp(r0, Operand(0));
- EmitBranch(true_block, false_block, eq);
-}
-
-
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal: public LDeferredCode {
public:
@@ -2269,25 +2124,6 @@
}
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- // The compare stub expects compare condition and the input operands
- // reversed for GT and LTE.
- Condition condition = ComputeCompareCondition(op);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
- __ cmp(r0, Operand(0));
- EmitBranch(true_block, false_block, condition);
-}
-
-
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace) {
// Push the return value on the stack as the parameter.
@@ -2526,7 +2362,7 @@
__ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
- Label done;
+ Label done, fail;
__ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(scratch, ip);
@@ -2534,11 +2370,18 @@
__ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
__ cmp(scratch, ip);
__ b(eq, &done);
- __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ sub(scratch, scratch, Operand(FIRST_EXTERNAL_ARRAY_TYPE));
- __ cmp(scratch, Operand(kExternalArrayTypeCount));
- __ Check(cc, "Check for fast elements failed.");
+ // |scratch| still contains |input|'s map.
+ __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
+ __ ubfx(scratch, scratch, Map::kElementsKindShift,
+ Map::kElementsKindBitCount);
+ __ cmp(scratch, Operand(JSObject::FAST_ELEMENTS));
+ __ b(eq, &done);
+ __ cmp(scratch, Operand(JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ b(lt, &fail);
+ __ cmp(scratch, Operand(JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ b(le, &done);
+ __ bind(&fail);
+ __ Abort("Check for fast or external elements failed.");
__ bind(&done);
}
}
@@ -2576,7 +2419,6 @@
Register key = EmitLoadRegister(instr->key(), scratch0());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
- ASSERT(result.is(elements));
// Load the result.
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
@@ -2591,11 +2433,53 @@
}
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+ LLoadKeyedFastDoubleElement* instr) {
+ Register elements = ToRegister(instr->elements());
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
+
+ int shift_size =
+ ElementsKindToShiftSize(JSObject::FAST_DOUBLE_ELEMENTS);
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+
+ Operand operand = key_is_constant
+ ? Operand(constant_key * (1 << shift_size) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag)
+ : Operand(key, LSL, shift_size);
+ __ add(elements, elements, operand);
+ if (!key_is_constant) {
+ __ add(elements, elements,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ }
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ // TODO(danno): If no hole check is required, there is no need to allocate
+ // elements into a temporary register, instead scratch can be used.
+ __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ __ vldr(result, elements, 0);
+}
+
+
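A note on the arithmetic above: a fast-double element sits at elements + (key << 3) plus the FixedDoubleArray header, minus the heap-object tag, and the hole check reads only the upper word of the IEEE 754 pattern (hence the sizeof(kHoleNanLower32) displacement). A minimal C++ sketch of the offset computation, with the header size assumed from the 32-bit layout:

  #include <cstdint>
  // Sketch only: kHeaderSize is an assumption (map word + length word).
  static int32_t FastDoubleElementOffset(int32_t key) {
    const int kShiftSize = 3;      // log2(sizeof(double))
    const int kHeaderSize = 8;     // assumed: two 32-bit header words
    const int kHeapObjectTag = 1;  // heap pointers carry a 1-bit tag
    return (key << kShiftSize) + kHeaderSize - kHeapObjectTag;
  }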
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
Register external_pointer = ToRegister(instr->external_pointer());
Register key = no_reg;
- ExternalArrayType array_type = instr->array_type();
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
int constant_key = 0;
if (key_is_constant) {
@@ -2606,43 +2490,45 @@
} else {
key = ToRegister(instr->key());
}
- int shift_size = ExternalArrayTypeToShiftSize(array_type);
+ int shift_size = ElementsKindToShiftSize(elements_kind);
- if (array_type == kExternalFloatArray || array_type == kExternalDoubleArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
- DwVfpRegister result(ToDoubleRegister(instr->result()));
- Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
- : Operand(key, LSL, shift_size));
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ Operand operand = key_is_constant
+ ? Operand(constant_key * (1 << shift_size))
+ : Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
- if (array_type == kExternalFloatArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
__ vldr(result.low(), scratch0(), 0);
__ vcvt_f64_f32(result, result.low());
- } else { // i.e. array_type == kExternalDoubleArray
+ } else { // i.e. elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), 0);
}
} else {
- Register result(ToRegister(instr->result()));
+ Register result = ToRegister(instr->result());
MemOperand mem_operand(key_is_constant
? MemOperand(external_pointer, constant_key * (1 << shift_size))
: MemOperand(external_pointer, key, LSL, shift_size));
- switch (array_type) {
- case kExternalByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(result, mem_operand);
break;
- case kExternalUnsignedByteArray:
- case kExternalPixelArray:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ ldrb(result, mem_operand);
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
__ ldrsh(result, mem_operand);
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ ldrh(result, mem_operand);
break;
- case kExternalIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
__ ldr(result, mem_operand);
break;
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ ldr(result, mem_operand);
__ cmp(result, Operand(0x80000000));
// TODO(danno): we could be more clever here, perhaps having a special
@@ -2650,8 +2536,12 @@
// happens, and generate code that returns a double rather than int.
DeoptimizeIf(cs, instr->environment());
break;
- case kExternalFloatArray:
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -2717,12 +2607,26 @@
ASSERT(function.is(r1)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(r0));
- // TODO(1412): This is not correct if the called function is a
- // strict mode function or a native.
- //
- // If the receiver is null or undefined, we have to pass the global object
- // as a receiver.
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
Label global_object, receiver_ok;
+
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ ldr(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(scratch,
+ Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
+ __ b(ne, &receiver_ok);
+
+ // Do not transform the receiver to object for builtins.
+ __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, &receiver_ok);
+
+ // Normal function. Replace undefined or null with global receiver.
__ LoadRoot(scratch, Heap::kNullValueRootIndex);
__ cmp(receiver, scratch);
__ b(eq, &global_object);
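The tst instructions above probe bits of a Smi-encoded compiler-hints word, which is why each bit index is offset by kSmiTagSize. A hedged sketch of that encoding on a 32-bit target:

  #include <cstdint>
  // Illustrative only: logical bit N of a Smi-stored value lives at
  // bit N + kSmiTagSize of the in-memory word.
  static bool TestSmiEncodedBit(int32_t smi_word, int logical_bit) {
    const int kSmiTagSize = 1;
    return (smi_word & (1 << (logical_bit + kSmiTagSize))) != 0;
  }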
@@ -2733,8 +2637,8 @@
// Deoptimize if the receiver is not a JS object.
__ tst(receiver, Operand(kSmiTagMask));
DeoptimizeIf(eq, instr->environment());
- __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_OBJECT_TYPE);
- DeoptimizeIf(lo, instr->environment());
+ __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
+ DeoptimizeIf(lt, instr->environment());
__ jmp(&receiver_ok);
__ bind(&global_object);
@@ -2797,6 +2701,12 @@
}
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
__ mov(result, cp);
@@ -2807,8 +2717,7 @@
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ ldr(result,
- MemOperand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ ldr(result, FieldMemOperand(result, JSFunction::kContextOffset));
+ MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
@@ -2872,8 +2781,8 @@
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
Register scratch = scratch0();
// Deoptimize if not a heap number.
@@ -2887,10 +2796,10 @@
scratch = no_reg;
__ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive, just
- // return it. We do not need to patch the stack since |input| and
- // |result| are the same register and |input| would be restored
- // unchanged by popping safepoint registers.
+ // return it.
__ tst(exponent, Operand(HeapNumber::kSignMask));
+ // Move the input to the result if necessary.
+ __ Move(result, input);
__ b(eq, &done);
// Input is negative. Reverse its sign.
@@ -2930,7 +2839,7 @@
__ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
__ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
- __ StoreToSafepointRegisterSlot(tmp1, input);
+ __ StoreToSafepointRegisterSlot(tmp1, result);
}
__ bind(&done);
@@ -2939,11 +2848,13 @@
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
__ cmp(input, Operand(0));
+ __ Move(result, input, pl);
// We can make rsb conditional because the previous cmp instruction
// will clear the V (overflow) flag and rsb won't set this flag
// if input is positive.
- __ rsb(input, input, Operand(0), SetCC, mi);
+ __ rsb(result, input, Operand(0), SetCC, mi);
// Deoptimize on overflow.
DeoptimizeIf(vs, instr->environment());
}
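The cmp/rsb pair above is a branchless absolute value: the conditional rsb runs only for negative inputs, and the sole input that overflows is INT32_MIN, whose negation does not fit in 32 bits; that is what the vs deopt catches. The same logic as a C++ sketch (hypothetical helper, not a V8 API):

  #include <cstdint>
  static bool IntegerMathAbsSketch(int32_t input, int32_t* result) {
    if (input >= 0) { *result = input; return true; }  // Move(result, input, pl)
    if (input == INT32_MIN) return false;              // rsb overflow -> deopt
    *result = -input;                                  // rsb(result, input, 0)
    return true;
  }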
@@ -2963,11 +2874,11 @@
LUnaryMathOperation* instr_;
};
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
- __ vabs(input, input);
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ __ vabs(result, input);
} else if (r.IsInteger32()) {
EmitIntegerMathAbs(instr);
} else {
@@ -3045,7 +2956,7 @@
// Save the original sign for later comparison.
__ and_(scratch2, scratch1, Operand(HeapNumber::kSignMask));
- __ vmov(double_scratch0(), 0.5);
+ __ Vmov(double_scratch0(), 0.5);
__ vadd(input, input, double_scratch0());
// Check sign of the result: if the sign changed, the input
@@ -3082,24 +2993,17 @@
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
- ASSERT(ToDoubleRegister(instr->result()).is(input));
- __ vsqrt(input, input);
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ vsqrt(result, input);
}
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
- Register scratch = scratch0();
- SwVfpRegister single_scratch = double_scratch0().low();
- DoubleRegister double_scratch = double_scratch0();
- ASSERT(ToDoubleRegister(instr->result()).is(input));
-
+ DoubleRegister result = ToDoubleRegister(instr->result());
// Add +0 to convert -0 to +0.
- __ mov(scratch, Operand(0));
- __ vmov(single_scratch, scratch);
- __ vcvt_f64_s32(double_scratch, single_scratch);
- __ vadd(input, input, double_scratch);
- __ vsqrt(input, input);
+ __ vadd(result, input, kDoubleRegZero);
+ __ vsqrt(result, result);
}
@@ -3382,12 +3286,54 @@
}
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+ LStoreKeyedFastDoubleElement* instr) {
+ DwVfpRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch = scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ Label not_nan;
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int shift_size = ElementsKindToShiftSize(JSObject::FAST_DOUBLE_ELEMENTS);
+ Operand operand = key_is_constant
+ ? Operand(constant_key * (1 << shift_size) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag)
+ : Operand(key, LSL, shift_size);
+ __ add(scratch, elements, operand);
+ if (!key_is_constant) {
+ __ add(scratch, scratch,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ }
+
+ // Check for NaN. All NaNs must be canonicalized.
+ __ VFPCompareAndSetFlags(value, value);
+
+ // Only load canonical NaN if the comparison above set the overflow.
+ __ Vmov(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double(), vs);
+
+ __ bind(&not_nan);
+ __ vstr(value, scratch, 0);
+}
+
+
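DoStoreKeyedFastDoubleElement relies on NaN being the only value that compares unordered with itself: VFPCompareAndSetFlags(value, value) sets the overflow flag exactly for NaNs, and the conditional Vmov then substitutes the canonical NaN. The same check in portable C++, as a sketch:

  static double CanonicalizeNaNSketch(double value, double canonical_nan) {
    // value != value holds only for NaNs, mirroring the vs condition above.
    return (value != value) ? canonical_nan : value;
  }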
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
LStoreKeyedSpecializedArrayElement* instr) {
Register external_pointer = ToRegister(instr->external_pointer());
Register key = no_reg;
- ExternalArrayType array_type = instr->array_type();
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
int constant_key = 0;
if (key_is_constant) {
@@ -3398,18 +3344,19 @@
} else {
key = ToRegister(instr->key());
}
- int shift_size = ExternalArrayTypeToShiftSize(array_type);
+ int shift_size = ElementsKindToShiftSize(elements_kind);
- if (array_type == kExternalFloatArray || array_type == kExternalDoubleArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister value(ToDoubleRegister(instr->value()));
Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
: Operand(key, LSL, shift_size));
__ add(scratch0(), external_pointer, operand);
- if (array_type == kExternalFloatArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
__ vstr(double_scratch0().low(), scratch0(), 0);
- } else { // i.e. array_type == kExternalDoubleArray
+ } else { // i.e. elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS
__ vstr(value, scratch0(), 0);
}
} else {
@@ -3417,22 +3364,26 @@
MemOperand mem_operand(key_is_constant
? MemOperand(external_pointer, constant_key * (1 << shift_size))
: MemOperand(external_pointer, key, LSL, shift_size));
- switch (array_type) {
- case kExternalPixelArray:
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(value, mem_operand);
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(value, mem_operand);
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(value, mem_operand);
break;
- case kExternalFloatArray:
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3693,8 +3644,8 @@
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
Label slow;
Register reg = ToRegister(instr->InputAt(0));
- DoubleRegister dbl_scratch = d0;
- SwVfpRegister flt_scratch = s0;
+ DoubleRegister dbl_scratch = double_scratch0();
+ SwVfpRegister flt_scratch = dbl_scratch.low();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
@@ -3803,14 +3754,13 @@
bool deoptimize_on_undefined,
LEnvironment* env) {
Register scratch = scratch0();
- SwVfpRegister flt_scratch = s0;
- ASSERT(!result_reg.is(d0));
+ SwVfpRegister flt_scratch = double_scratch0().low();
+ ASSERT(!result_reg.is(double_scratch0()));
Label load_smi, heap_number, done;
// Smi check.
- __ tst(input_reg, Operand(kSmiTagMask));
- __ b(eq, &load_smi);
+ __ JumpIfSmi(input_reg, &load_smi);
// Heap number map check.
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -4316,29 +4266,6 @@
}
-void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label true_label;
- Label false_label;
- Label done;
-
- Condition final_branch_condition = EmitTypeofIs(&true_label,
- &false_label,
- input,
- instr->type_literal());
- __ b(final_branch_condition, &true_label);
- __ bind(&false_label);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ b(&done);
-
- __ bind(&true_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -4394,17 +4321,19 @@
} else if (type_name->Equals(heap()->function_symbol())) {
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, input, scratch, FIRST_FUNCTION_CLASS_TYPE);
+ __ CompareObjectType(input, input, scratch,
+ FIRST_CALLABLE_SPEC_OBJECT_TYPE);
final_branch_condition = ge;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ b(eq, true_label);
- __ CompareObjectType(input, input, scratch, FIRST_JS_OBJECT_TYPE);
- __ b(lo, false_label);
- __ CompareInstanceType(input, scratch, FIRST_FUNCTION_CLASS_TYPE);
- __ b(hs, false_label);
+ __ CompareObjectType(input, input, scratch,
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ b(lt, false_label);
+ __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ b(gt, false_label);
// Check for undetectable objects => false.
__ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsUndetectable));
@@ -4420,26 +4349,6 @@
}
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
- Register result = ToRegister(instr->result());
- Label true_label;
- Label false_label;
- Label done;
-
- EmitIsConstructCall(result, scratch0());
- __ b(eq, &true_label);
-
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ b(&done);
-
-
- __ bind(&true_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp1 = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -4514,15 +4423,50 @@
}
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RegisterLazyDeoptimization(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ }
+
+ // The gap code includes the restoring of the safepoint registers.
+ int pc = masm()->pc_offset();
+ safepoints_.SetPcAfterGap(pc);
+}
+
+
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- // Perform stack overflow check.
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&ok);
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ private:
+ LStackCheck* instr_;
+ };
+
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &done);
+ StackCheckStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new DeferredStackCheck(this, instr);
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(lo, deferred_stack_check->entry());
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ }
}
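The rewritten DoStackCheck distinguishes two flavours: function-entry checks call StackCheckStub inline and fall through, while backwards-branch checks jump to deferred code that calls Runtime::kStackGuard and resumes at done_label. A control-flow sketch under assumed helper names:

  static bool StackBelowLimit() { return false; }  // placeholder for sp < limit
  static void CallStackCheckStub() {}              // inline path (entry)
  static void CallRuntimeStackGuard() {}           // deferred path (back edge)
  static void StackCheckSketch(bool is_function_entry) {
    if (!StackBelowLimit()) return;  // b(hs, &done) / no b(lo, deferred)
    if (is_function_entry) {
      CallStackCheckStub();          // emitted inline, falls through
    } else {
      CallRuntimeStackGuard();       // emitted out of line, returns to done_label
    }
  }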
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 8253c17..ead8489 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -108,7 +108,7 @@
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LGoto* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
@@ -148,7 +148,7 @@
HGraph* graph() const { return chunk_->graph(); }
Register scratch0() { return r9; }
- DwVfpRegister double_scratch0() { return d0; }
+ DwVfpRegister double_scratch0() { return d15; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@@ -261,7 +261,7 @@
}
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+ void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
@@ -280,7 +280,6 @@
// true and false label should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index c227b13..c34a579 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -91,7 +91,7 @@
}
-void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
ASSERT(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
@@ -118,10 +118,8 @@
void MacroAssembler::Call(Register target, Condition cond) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
+ Label start;
+ bind(&start);
#if USE_BLX
blx(target, cond);
#else
@@ -129,34 +127,29 @@
mov(lr, Operand(pc), LeaveCC, cond);
mov(pc, Operand(target), LeaveCC, cond);
#endif
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(target, cond), post_position);
-#endif
+ ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}
int MacroAssembler::CallSize(
- intptr_t target, RelocInfo::Mode rmode, Condition cond) {
+ Address target, RelocInfo::Mode rmode, Condition cond) {
int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
- if (!Operand(target, rmode).is_single_instruction(mov_instr)) {
+ intptr_t immediate = reinterpret_cast<intptr_t>(target);
+ if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
size += kInstrSize;
}
return size;
}
-void MacroAssembler::Call(intptr_t target,
+void MacroAssembler::Call(Address target,
RelocInfo::Mode rmode,
Condition cond) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
+ Label start;
+ bind(&start);
#if USE_BLX
// On ARMv5 and after the recommended call sequence is:
// ldr ip, [pc, #...]
@@ -168,7 +161,7 @@
// we have to do it explicitly.
positions_recorder()->WriteRecordedPositions();
- mov(ip, Operand(target, rmode));
+ mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
@@ -176,82 +169,36 @@
// Set lr for return at current pc + 8.
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
- mov(pc, Operand(target, rmode), LeaveCC, cond);
+ mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
-#endif
+ ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
}
-int MacroAssembler::CallSize(
- byte* target, RelocInfo::Mode rmode, Condition cond) {
- return CallSize(reinterpret_cast<intptr_t>(target), rmode);
-}
-
-
-void MacroAssembler::Call(
- byte* target, RelocInfo::Mode rmode, Condition cond) {
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Call(reinterpret_cast<intptr_t>(target), rmode, cond);
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
-#endif
-}
-
-
-int MacroAssembler::CallSize(
- Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
- return CallSize(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-}
-
-
-void MacroAssembler::CallWithAstId(Handle<Code> code,
- RelocInfo::Mode rmode,
- unsigned ast_id,
- Condition cond) {
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
- ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
- ASSERT(ast_id != kNoASTId);
- ASSERT(ast_id_for_reloc_info_ == kNoASTId);
- ast_id_for_reloc_info_ = ast_id;
- // 'code' is always generated ARM code, never THUMB code
- Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
-#endif
+int MacroAssembler::CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id,
+ Condition cond) {
+ return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
+ unsigned ast_id,
Condition cond) {
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
+ Label start;
+ bind(&start);
ASSERT(RelocInfo::IsCodeTarget(rmode));
+ if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+ SetRecordedAstId(ast_id);
+ rmode = RelocInfo::CODE_TARGET_WITH_ID;
+ }
// 'code' is always generated ARM code, never THUMB code
- Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
-#endif
+ Call(reinterpret_cast<Address>(code.location()), rmode, cond);
+ ASSERT_EQ(CallSize(code, rmode, ast_id, cond),
+ SizeOfCodeGeneratedSince(&start));
}
@@ -298,14 +245,20 @@
}
+void MacroAssembler::Push(Handle<Object> handle) {
+ mov(ip, Operand(handle));
+ push(ip);
+}
+
+
void MacroAssembler::Move(Register dst, Handle<Object> value) {
mov(dst, Operand(value));
}
-void MacroAssembler::Move(Register dst, Register src) {
+void MacroAssembler::Move(Register dst, Register src, Condition cond) {
if (!dst.is(src)) {
- mov(dst, src);
+ mov(dst, src, LeaveCC, cond);
}
}
@@ -330,7 +283,8 @@
!src2.must_use_constant_pool() &&
CpuFeatures::IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) {
- ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond);
+ ubfx(dst, src1, 0,
+ WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
} else {
and_(dst, src1, src2, LeaveCC, cond);
@@ -438,20 +392,6 @@
}
-void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
- // Empty the const pool.
- CheckConstPool(true, true);
- add(pc, pc, Operand(index,
- LSL,
- Instruction::kInstrSizeLog2 - kSmiTagSize));
- BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
- nop(); // Jump table alignment.
- for (int i = 0; i < targets.length(); i++) {
- b(targets[i]);
- }
-}
-
-
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
@@ -654,19 +594,36 @@
ASSERT_EQ(0, dst1.code() % 2);
ASSERT_EQ(dst1.code() + 1, dst2.code());
+ // V8 does not use this addressing mode, so the fallback code
+ // below doesn't support it yet.
+ ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
+
// Generate two ldr instructions if ldrd is not available.
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
ldrd(dst1, dst2, src, cond);
} else {
- MemOperand src2(src);
- src2.set_offset(src2.offset() + 4);
- if (dst1.is(src.rn())) {
- ldr(dst2, src2, cond);
- ldr(dst1, src, cond);
- } else {
- ldr(dst1, src, cond);
- ldr(dst2, src2, cond);
+ if ((src.am() == Offset) || (src.am() == NegOffset)) {
+ MemOperand src2(src);
+ src2.set_offset(src2.offset() + 4);
+ if (dst1.is(src.rn())) {
+ ldr(dst2, src2, cond);
+ ldr(dst1, src, cond);
+ } else {
+ ldr(dst1, src, cond);
+ ldr(dst2, src2, cond);
+ }
+ } else { // PostIndex or NegPostIndex.
+ ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
+ if (dst1.is(src.rn())) {
+ ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
+ ldr(dst1, src, cond);
+ } else {
+ MemOperand src2(src);
+ src2.set_offset(src2.offset() - 4);
+ ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
+ ldr(dst2, src2, cond);
+ }
}
}
}
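The ldrd fallback above splits one 64-bit load into two 32-bit loads and must order them so the base register survives until its last use: when dst1 aliases the base, the non-aliasing word is loaded first. The hazard in plain C++, as a sketch:

  #include <cstdint>
  static uint64_t LoadPairSketch(const uint32_t* base) {
    // Load the word whose destination does not alias 'base' first; if the
    // aliasing load ran first, the second address would be computed from a
    // clobbered base (the situation the dst1.is(src.rn()) checks avoid).
    uint32_t hi = base[1];
    uint32_t lo = base[0];
    return (static_cast<uint64_t>(hi) << 32) | lo;
  }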
@@ -679,15 +636,26 @@
ASSERT_EQ(0, src1.code() % 2);
ASSERT_EQ(src1.code() + 1, src2.code());
+ // V8 does not use this addressing mode, so the fallback code
+ // below doesn't support it yet.
+ ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
+
// Generate two str instructions if strd is not available.
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
strd(src1, src2, dst, cond);
} else {
MemOperand dst2(dst);
- dst2.set_offset(dst2.offset() + 4);
- str(src1, dst, cond);
- str(src2, dst2, cond);
+ if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
+ dst2.set_offset(dst2.offset() + 4);
+ str(src1, dst, cond);
+ str(src2, dst2, cond);
+ } else { // PostIndex or NegPostIndex.
+ ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
+ dst2.set_offset(dst2.offset() - 4);
+ str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
+ str(src2, dst2, cond);
+ }
}
}
@@ -734,6 +702,23 @@
vmrs(fpscr_flags, cond);
}
+void MacroAssembler::Vmov(const DwVfpRegister dst,
+ const double imm,
+ const Condition cond) {
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ static const DoubleRepresentation minus_zero(-0.0);
+ static const DoubleRepresentation zero(0.0);
+ DoubleRepresentation value(imm);
+ // Handle special values first.
+ if (value.bits == zero.bits) {
+ vmov(dst, kDoubleRegZero, cond);
+ } else if (value.bits == minus_zero.bits) {
+ vneg(dst, kDoubleRegZero, cond);
+ } else {
+ vmov(dst, imm, cond);
+ }
+}
+
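Vmov special-cases plus and minus zero by comparing raw bit patterns, since 0.0 == -0.0 as doubles and an ordinary comparison would conflate them; -0.0 is then produced by negating kDoubleRegZero rather than materializing an immediate. A portable sketch of the bit comparison:

  #include <cstdint>
  #include <cstring>
  static bool SameDoubleBits(double a, double b) {
    uint64_t ba, bb;
    std::memcpy(&ba, &a, sizeof(ba));
    std::memcpy(&bb, &b, sizeof(bb));
    return ba == bb;  // distinguishes 0.0 from -0.0, unlike a == b
  }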
void MacroAssembler::EnterFrame(StackFrame::Type type) {
// r0-r3: preserved
@@ -956,9 +941,9 @@
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+ call_wrapper.BeforeCall(CallSize(adaptor));
SetCallKind(r5, call_kind);
- Call(adaptor, RelocInfo::CODE_TARGET);
+ Call(adaptor);
call_wrapper.AfterCall();
b(done);
} else {
@@ -1084,9 +1069,9 @@
Register scratch,
Label* fail) {
ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
+ cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
b(lt, fail);
- cmp(scratch, Operand(LAST_JS_OBJECT_TYPE));
+ cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
b(gt, fail);
}
@@ -1358,6 +1343,100 @@
}
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register t0,
+ Register t1,
+ Register t2) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'key' or 'elements'.
+ // Unchanged on bailout so 'key' or 'elements' can be used
+ // in further computation.
+ //
+ // Scratch registers:
+ //
+ // t0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // t1 - used to hold the capacity mask of the dictionary.
+ //
+ // t2 - used for the index into the dictionary.
+ Label done;
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ mvn(t1, Operand(t0));
+ add(t0, t1, Operand(t0, LSL, 15));
+ // hash = hash ^ (hash >> 12);
+ eor(t0, t0, Operand(t0, LSR, 12));
+ // hash = hash + (hash << 2);
+ add(t0, t0, Operand(t0, LSL, 2));
+ // hash = hash ^ (hash >> 4);
+ eor(t0, t0, Operand(t0, LSR, 4));
+ // hash = hash * 2057;
+ mov(t1, Operand(2057));
+ mul(t0, t0, t1);
+ // hash = hash ^ (hash >> 16);
+ eor(t0, t0, Operand(t0, LSR, 16));
+
+ // Compute the capacity mask.
+ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
+ mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
+ sub(t1, t1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ static const int kProbes = 4;
+ for (int i = 0; i < kProbes; i++) {
+ // Use t2 for index calculations and keep the hash intact in t0.
+ mov(t2, t0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
+ }
+ and_(t2, t2, Operand(t1));
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(NumberDictionary::kEntrySize == 3);
+ add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
+
+ // Check if the key is identical to the name.
+ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
+ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
+ cmp(key, Operand(ip));
+ if (i != kProbes - 1) {
+ b(eq, &done);
+ } else {
+ b(ne, miss);
+ }
+ }
+
+ bind(&done);
+ // Check that the value is a normal property.
+ // t2: elements + (index * kPointerSize)
+ const int kDetailsOffset =
+ NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ ldr(t1, FieldMemOperand(t2, kDetailsOffset));
+ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
+ b(ne, miss);
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ NumberDictionary::kElementsStartOffset + kPointerSize;
+ ldr(result, FieldMemOperand(t2, kValueOffset));
+}
+
+
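The hash above is spelled out instruction by instruction; transcribed back into C++ it is the function the comment says must stay in sync with ComputeIntegerHash in utils.h, and probe i then inspects entry (hash + i + i*i) & (capacity - 1), assuming GetProbeOffset(i) is i + i*i as the masked-index comment states:

  #include <cstdint>
  static uint32_t ComputeIntegerHashSketch(uint32_t hash) {
    hash = ~hash + (hash << 15);  // mvn; add
    hash = hash ^ (hash >> 12);   // eor with LSR 12
    hash = hash + (hash << 2);    // add with LSL 2
    hash = hash ^ (hash >> 4);    // eor with LSR 4
    hash = hash * 2057;           // mov; mul
    hash = hash ^ (hash >> 16);   // eor with LSR 16
    return hash;
  }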
void MacroAssembler::AllocateInNewSpace(int object_size,
Register result,
Register scratch1,
@@ -1677,6 +1756,16 @@
}
+void MacroAssembler::CheckFastElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
+ ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
+ b(hi, fail);
+}
+
+
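CheckFastElements gets away with a single unsigned comparison because FAST_ELEMENTS is elements kind 0, so every fast-elements map sorts below the cutoff within bit_field2. Sketch, with the cutoff value left as an assumption:

  #include <cstdint>
  static bool HasFastElementsSketch(uint8_t bit_field2,
                                    uint8_t max_fast_value /* assumed */) {
    // Valid only because FAST_ELEMENTS == 0 (see the STATIC_ASSERT above):
    // one unsigned compare covers the whole fast range.
    return bit_field2 <= max_fast_value;
  }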
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
@@ -1773,7 +1862,7 @@
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
}
@@ -1783,7 +1872,8 @@
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
+ Handle<Code> code(Code::cast(result));
+ Call(code, RelocInfo::CODE_TARGET, kNoASTId, cond);
return result;
}
@@ -2459,6 +2549,9 @@
LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
cmp(elements, ip);
b(eq, &ok);
+ LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
+ cmp(elements, ip);
+ b(eq, &ok);
LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
cmp(elements, ip);
b(eq, &ok);
@@ -2521,12 +2614,9 @@
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
- ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
} else {
// Slot is in the current function context. Move it into the
@@ -2534,17 +2624,6 @@
// cannot be allowed to destroy the context in esi).
mov(dst, cp);
}
-
- // We should not have found a 'with' context by walking the context chain
- // (i.e., the static scope chain and runtime context chain do not agree).
- // A variable occurring in such a scope should have slot type LOOKUP and
- // not CONTEXT.
- if (emit_debug_code()) {
- ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- cmp(dst, ip);
- Check(eq, "Yo dawg, I heard you liked function contexts "
- "so I put function contexts in all your contexts");
- }
}
@@ -2692,8 +2771,7 @@
// Check that neither is a smi.
STATIC_ASSERT(kSmiTag == 0);
and_(scratch1, first, Operand(second));
- tst(scratch1, Operand(kSmiTagMask));
- b(eq, failure);
+ JumpIfSmi(scratch1, failure);
JumpIfNonSmisNotBothSequentialAsciiStrings(first,
second,
scratch1,
@@ -3085,7 +3163,7 @@
Label done;
Label in_bounds;
- vmov(temp_double_reg, 0.0);
+ Vmov(temp_double_reg, 0.0);
VFPCompareAndSetFlags(input_reg, temp_double_reg);
b(gt, &above_zero);
@@ -3095,7 +3173,7 @@
// Double value is >= 255, return 255.
bind(&above_zero);
- vmov(temp_double_reg, 255.0);
+ Vmov(temp_double_reg, 255.0);
VFPCompareAndSetFlags(input_reg, temp_double_reg);
b(le, &in_bounds);
mov(result_reg, Operand(255));
@@ -3103,7 +3181,7 @@
// In 0-255 range, round and truncate.
bind(&in_bounds);
- vmov(temp_double_reg, 0.5);
+ Vmov(temp_double_reg, 0.5);
vadd(temp_double_reg, input_reg, temp_double_reg);
vcvt_u32_f64(s0, temp_double_reg);
vmov(result_reg, s0);
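The three Vmov constants above (0.0, 255.0, 0.5) implement a clamp of a double into a byte: NaN, zero and negatives become 0, values at or above 255 become 255, and in-range values round half up because 0.5 is added before the truncating vcvt_u32_f64. A hedged C++ equivalent:

  #include <cstdint>
  static uint8_t ClampDoubleToUint8Sketch(double value) {
    if (!(value > 0.0)) return 0;  // NaN compares false, so it lands here too
    if (value >= 255.0) return 255;
    return static_cast<uint8_t>(value + 0.5);  // vcvt truncates toward zero
  }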
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 1e2c9f4..9c653ad 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -90,21 +90,21 @@
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
- void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
+ void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
static int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
- static int CallSize(byte* target, RelocInfo::Mode rmode, Condition cond = al);
- void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSize(Handle<Code> code,
+ static int CallSize(Address target,
RelocInfo::Mode rmode,
Condition cond = al);
+ void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ static int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ unsigned ast_id = kNoASTId,
+ Condition cond = al);
void Call(Handle<Code> code,
- RelocInfo::Mode rmode,
- Condition cond = al);
- void CallWithAstId(Handle<Code> code,
- RelocInfo::Mode rmode,
- unsigned ast_id,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ unsigned ast_id = kNoASTId,
Condition cond = al);
void Ret(Condition cond = al);
@@ -145,11 +145,9 @@
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Handle<Object> value);
- void Move(Register dst, Register src);
+ void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
- // Jumps to the label at the index given by the Smi in "index".
- void SmiJumpTable(Register index, Vector<Label*> targets);
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index,
@@ -194,6 +192,9 @@
Register address,
Register scratch);
+ // Push a handle.
+ void Push(Handle<Object> handle);
+
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
ASSERT(!src1.is(src2));
@@ -313,6 +314,10 @@
const Register fpscr_flags,
const Condition cond = al);
+ void Vmov(const DwVfpRegister dst,
+ const double imm,
+ const Condition cond = al);
+
// ---------------------------------------------------------------------------
// Activation frames
@@ -430,6 +435,16 @@
Register scratch,
Label* miss);
+
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register t0,
+ Register t1,
+ Register t2);
+
+
inline void MarkCode(NopMarkerTypes type) {
nop(type);
}
@@ -579,6 +594,12 @@
InstanceType type);
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Register scratch,
+ Label* fail);
+
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
@@ -1027,12 +1048,6 @@
int num_double_arguments);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSize(intptr_t target,
- RelocInfo::Mode rmode,
- Condition cond = al);
- void Call(intptr_t target,
- RelocInfo::Mode rmode,
- Condition cond = al);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 1c59823..983a528 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -899,13 +899,12 @@
constant_offset - offset_of_pc_register_read;
ASSERT(pc_offset_of_constant < 0);
if (is_valid_memory_offset(pc_offset_of_constant)) {
- masm_->BlockConstPoolBefore(masm_->pc_offset() + Assembler::kInstrSize);
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
__ ldr(r0, MemOperand(pc, pc_offset_of_constant));
} else {
// Not a 12-bit offset, so it needs to be loaded from the constant
// pool.
- masm_->BlockConstPoolBefore(
- masm_->pc_offset() + 2 * Assembler::kInstrSize);
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
__ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
__ ldr(r0, MemOperand(pc, r0));
}
@@ -1185,8 +1184,7 @@
void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
__ CheckConstPool(false, false);
- __ BlockConstPoolBefore(
- masm_->pc_offset() + kBacktrackConstantPoolSize * Assembler::kInstrSize);
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
backtrack_constant_pool_offset_ = masm_->pc_offset();
for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
__ emit(0);
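All three regexp hunks replace the byte-counting BlockConstPoolBefore calls with an RAII scope that blocks constant-pool emission for its lifetime. A minimal sketch of that shape (the method names are assumptions, not the real Assembler API):

  struct AssemblerLike {  // stand-in for the real assembler
    void StartBlockConstPool() {}
    void EndBlockConstPool() {}
  };
  class BlockConstPoolScopeSketch {
   public:
    explicit BlockConstPoolScopeSketch(AssemblerLike* assm) : assm_(assm) {
      assm_->StartBlockConstPool();  // no pool flush while the scope is alive
    }
    ~BlockConstPoolScopeSketch() { assm_->EndBlockConstPool(); }
   private:
    AssemblerLike* assm_;
  };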
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index d771e40..0e65386 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -28,6 +28,9 @@
#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
+#include "arm/assembler-arm.h"
+#include "arm/assembler-arm-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index be8b7d6..c2665f8 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -121,7 +121,7 @@
// Check that receiver is a JSObject.
__ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(lt, miss_label);
// Load properties array.
@@ -189,8 +189,7 @@
ASSERT(!extra2.is(no_reg));
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
__ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
@@ -282,8 +281,7 @@
Register scratch,
Label* miss_label) {
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss_label);
+ __ JumpIfSmi(receiver, miss_label);
// Check that the object is a JS array.
__ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
@@ -305,8 +303,7 @@
Label* smi,
Label* non_string_object) {
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, smi);
+ __ JumpIfSmi(receiver, smi);
// Check that the object is a string.
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
@@ -381,8 +378,7 @@
Label exit;
// Check that the receiver isn't a smi.
- __ tst(receiver_reg, Operand(kSmiTagMask));
- __ b(eq, miss_label);
+ __ JumpIfSmi(receiver_reg, miss_label);
// Check that the map of the receiver hasn't changed.
__ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
@@ -431,8 +427,7 @@
__ str(r0, FieldMemOperand(receiver_reg, offset));
// Skip updating write barrier if storing a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &exit);
+ __ JumpIfSmi(r0, &exit);
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
@@ -445,8 +440,7 @@
__ str(r0, FieldMemOperand(scratch, offset));
// Skip updating write barrier if storing a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &exit);
+ __ JumpIfSmi(r0, &exit);
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
@@ -1165,8 +1159,7 @@
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
@@ -1187,8 +1180,7 @@
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
@@ -1212,8 +1204,7 @@
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
@@ -1426,8 +1417,7 @@
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(r0, miss);
}
// Check that the maps haven't changed.
@@ -1449,8 +1439,7 @@
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(r1, miss);
__ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
__ b(ne, miss);
@@ -1495,8 +1484,7 @@
// Get the receiver of the function from the stack into r0.
__ ldr(r0, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r0, &miss);
// Do the right check and compute the holder register.
Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
@@ -1967,8 +1955,7 @@
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r1, &miss);
CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
&miss);
@@ -1985,8 +1972,7 @@
// Check the code is a smi.
Label slow;
STATIC_ASSERT(kSmiTag == 0);
- __ tst(code, Operand(kSmiTagMask));
- __ b(ne, &slow);
+ __ JumpIfNotSmi(code, &slow);
// Convert the smi code to uint16.
__ and_(code, code, Operand(Smi::FromInt(0xffff)));
@@ -2188,8 +2174,7 @@
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r1, &miss);
CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
&miss);
@@ -2292,8 +2277,7 @@
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss_before_stack_reserved);
+ __ JumpIfSmi(r1, &miss_before_stack_reserved);
__ IncrementCounter(counters->call_const(), 1, r0, r3);
__ IncrementCounter(counters->call_const_fast_api(), 1, r0, r3);
@@ -2347,8 +2331,7 @@
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r1, &miss);
}
// Make sure that it's okay not to patch the on stack receiver
@@ -2381,7 +2364,7 @@
} else {
// Check that the object is a two-byte string or a symbol.
__ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(hs, &miss);
+ __ b(ge, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
@@ -2398,8 +2381,7 @@
} else {
Label fast;
// Check that the object is a smi or a heap number.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &fast);
+ __ JumpIfSmi(r1, &fast);
__ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
__ b(ne, &miss);
__ bind(&fast);
@@ -2619,8 +2601,7 @@
Label miss;
// Check that the object isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r1, &miss);
// Check that the map of the object hasn't changed.
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -2667,8 +2648,7 @@
Label miss;
// Check that the object isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r1, &miss);
// Check that the map of the object hasn't changed.
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -2759,8 +2739,7 @@
Label miss;
// Check that receiver is not a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r0, &miss);
// Check the maps of the full prototype chain.
CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss);
@@ -2904,8 +2883,7 @@
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r0, &miss);
}
// Check that the map of the global has not changed.
@@ -3115,14 +3093,15 @@
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFastElement(Map* receiver_map) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
- MaybeObject* maybe_stub = KeyedLoadFastElementStub().TryGetCode();
Code* stub;
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(r1,
r2,
@@ -3206,8 +3185,7 @@
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreFastElement(
- Map* receiver_map) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -3215,10 +3193,11 @@
// -- lr : return address
// -- r3 : scratch
// -----------------------------------
+ Code* stub;
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
MaybeObject* maybe_stub =
- KeyedStoreFastElementStub(is_js_array).TryGetCode();
- Code* stub;
+ KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(r2,
r3,
@@ -3292,8 +3271,7 @@
// r1: constructor function
// r7: undefined
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &generic_stub_call);
+ __ JumpIfSmi(r2, &generic_stub_call);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
__ b(ne, &generic_stub_call);
@@ -3410,82 +3388,86 @@
}
-MaybeObject* ExternalArrayLoadStubCompiler::CompileLoad(
- JSObject* receiver, ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- MaybeObject* maybe_stub =
- KeyedLoadExternalArrayStub(array_type).TryGetCode();
- Code* stub;
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(r1,
- r2,
- Handle<Map>(receiver->map()),
- Handle<Code>(stub),
- DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
-MaybeObject* ExternalArrayStoreStubCompiler::CompileStore(
- JSObject* receiver, ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : name
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- MaybeObject* maybe_stub =
- KeyedStoreExternalArrayStub(array_type).TryGetCode();
- Code* stub;
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(r2,
- r3,
- Handle<Map>(receiver->map()),
- Handle<Code>(stub),
- DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- return GetCode();
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
-static bool IsElementTypeSigned(ExternalArrayType array_type) {
- switch (array_type) {
- case kExternalByteArray:
- case kExternalShortArray:
- case kExternalIntArray:
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ Label slow, miss_force_generic;
+
+ Register key = r0;
+ Register receiver = r1;
+
+ __ JumpIfNotSmi(key, &miss_force_generic);
+ __ mov(r2, Operand(key, ASR, kSmiTagSize));
+ __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
+ __ Ret();
+
+ __ bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(),
+ 1, r2, r3);
+
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+ // Miss case, call the runtime.
+ __ bind(&miss_force_generic);
+
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
+static bool IsElementTypeSigned(JSObject::ElementsKind elements_kind) {
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
return true;
- case kExternalUnsignedByteArray:
- case kExternalUnsignedShortArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
return false;
- default:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
return false;
}
+ return false;
}
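
Throughout this file the old `default:` arms are replaced by an explicit
list of the remaining JSObject::ElementsKind values. With every enumerator
spelled out, adding a new kind makes the compiler flag each switch that
forgot to handle it, where a `default:` would have swallowed it silently.
The same pattern on a minimal enum:

    enum ElementsKind { FAST, FAST_DOUBLE, DICTIONARY, EXTERNAL_BYTE };

    bool IsFastKind(ElementsKind kind) {
      switch (kind) {
        case FAST:
        case FAST_DOUBLE:
          return true;
        case DICTIONARY:     // every remaining value listed, no default:,
        case EXTERNAL_BYTE:  // so -Wswitch catches a newly added kind
          return false;
      }
      return false;  // unreachable; keeps the compiler happy
    }
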
void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm,
- ExternalArrayType array_type) {
+ JSObject::ElementsKind elements_kind) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
@@ -3519,25 +3501,25 @@
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
Register value = r2;
- switch (array_type) {
- case kExternalByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(value, MemOperand(r3, key, LSR, 1));
break;
- case kExternalPixelArray:
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ ldrb(value, MemOperand(r3, key, LSR, 1));
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
__ ldrsh(value, MemOperand(r3, key, LSL, 0));
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ ldrh(value, MemOperand(r3, key, LSL, 0));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ add(r2, r3, Operand(key, LSL, 1));
@@ -3546,7 +3528,7 @@
__ ldr(value, MemOperand(r3, key, LSL, 1));
}
break;
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ add(r2, r3, Operand(key, LSL, 2));
@@ -3558,7 +3540,10 @@
__ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
}
break;
- default:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3572,7 +3557,7 @@
// d0: value (if VFP3 is supported)
// r2/r3: value (if VFP3 is not supported)
- if (array_type == kExternalIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
@@ -3616,7 +3601,7 @@
__ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ Ret();
}
- } else if (array_type == kExternalUnsignedIntArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) {
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
@@ -3681,7 +3666,7 @@
__ mov(r0, r4);
__ Ret();
}
- } else if (array_type == kExternalFloatArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
if (CpuFeatures::IsSupported(VFP3)) {
@@ -3751,7 +3736,7 @@
__ mov(r0, r3);
__ Ret();
}
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
@@ -3808,7 +3793,7 @@
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
- ExternalArrayType array_type) {
+ JSObject::ElementsKind elements_kind) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -3842,7 +3827,7 @@
// runtime for all other kinds of values.
// r3: external array.
// r4: key (integer).
- if (array_type == kExternalPixelArray) {
+ if (elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) {
// Double to pixel conversion is only implemented in the runtime for now.
__ JumpIfNotSmi(value, &slow);
} else {
@@ -3854,29 +3839,29 @@
// r3: base pointer of external storage.
// r4: key (integer).
// r5: value (integer).
- switch (array_type) {
- case kExternalPixelArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
// Clamp the value to [0..255].
__ Usat(r5, 8, Operand(r5));
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
// Perform int-to-float conversion and store to memory.
StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
break;
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
__ add(r3, r3, Operand(r4, LSL, 3));
// r3: effective address of the double element
FloatingPointHelper::Destination destination;
@@ -3897,7 +3882,10 @@
__ str(r7, MemOperand(r3, Register::kSizeInBytes));
}
break;
- default:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3905,7 +3893,7 @@
// Entry registers are intact, r0 holds the value which is the return value.
__ Ret();
- if (array_type != kExternalPixelArray) {
+ if (elements_kind != JSObject::EXTERNAL_PIXEL_ELEMENTS) {
// r3: external array.
// r4: index (integer).
__ bind(&check_heap_number);
@@ -3923,7 +3911,7 @@
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
- if (array_type == kExternalFloatArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
@@ -3931,48 +3919,38 @@
__ add(r5, r3, Operand(r4, LSL, 2));
__ vcvt_f32_f64(s0, d0);
__ vstr(s0, r5, 0);
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(r4, LSL, 3));
__ vstr(d0, r5, 0);
} else {
- // Need to perform float-to-int conversion.
- // Test for NaN or infinity (both give zero).
- __ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset));
-
// Hoisted load. vldr requires offset to be a multiple of 4 so we can
// not include -kHeapObjectTag into it.
__ sub(r5, value, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
+ __ EmitECMATruncate(r5, d0, s2, r6, r7, r9);
- __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs and Infinities have all-one exponents so they sign extend to -1.
- __ cmp(r6, Operand(-1));
- __ mov(r5, Operand(0), LeaveCC, eq);
-
- // Not infinity or NaN simply convert to int.
- if (IsElementTypeSigned(array_type)) {
- __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne);
- } else {
- __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne);
- }
- __ vmov(r5, s0, ne);
-
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
- default:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
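
The hand-rolled exponent test and conditional vcvt are collapsed into one
EmitECMATruncate call, which (judging by its name and the code it replaces)
truncates a double to an integer with ECMA-262 ToInt32-style semantics:
NaN and the infinities become 0, everything else is truncated toward zero
and wrapped modulo 2^32. A C++ model of that semantics, not of the emitted
instruction sequence:

    #include <cmath>
    #include <cstdint>

    int32_t ECMAToInt32(double d) {
      if (std::isnan(d) || std::isinf(d)) return 0;  // NaN/Inf -> 0
      double t = std::trunc(d);                      // round toward zero
      double m = std::fmod(t, 4294967296.0);         // wrap modulo 2^32
      if (m < 0) m += 4294967296.0;
      // Reinterpret the low 32 bits as signed (two's complement wrap).
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }
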
@@ -3986,7 +3964,7 @@
__ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
__ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
- if (array_type == kExternalFloatArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
Label done, nan_or_infinity_or_zero;
static const int kMantissaInHiWordShift =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
@@ -4038,14 +4016,14 @@
__ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
__ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
__ b(&done);
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
__ add(r7, r3, Operand(r4, LSL, 3));
// r7: effective address of destination element.
__ str(r6, MemOperand(r7, 0));
__ str(r5, MemOperand(r7, Register::kSizeInBytes));
__ Ret();
} else {
- bool is_signed_type = IsElementTypeSigned(array_type);
+ bool is_signed_type = IsElementTypeSigned(elements_kind);
int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
@@ -4092,20 +4070,26 @@
__ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ bind(&done);
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
- default:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4184,6 +4168,77 @@
}
+void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ Label miss_force_generic, slow_allocate_heapnumber;
+
+ Register key_reg = r0;
+ Register receiver_reg = r1;
+ Register elements_reg = r2;
+ Register heap_number_reg = r2;
+ Register indexed_double_offset = r3;
+ Register scratch = r4;
+ Register scratch2 = r5;
+ Register scratch3 = r6;
+ Register heap_number_map = r7;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+ // Get the elements array.
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+ // Check that the key is within bounds.
+ __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ __ cmp(key_reg, Operand(scratch));
+ __ b(hs, &miss_force_generic);
+
+  // Load the upper word of the double in the fixed array and test for
+  // the hole NaN.
+ __ add(indexed_double_offset, elements_reg,
+ Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+ uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ __ b(&miss_force_generic, eq);
+
+  // Not the hole; allocate a new heap number and copy the double value
+  // into it.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
+ heap_number_map, &slow_allocate_heapnumber);
+
+  // No need to reload the upper 32 bits of the double; they are already
+  // in scratch.
+ __ str(scratch, FieldMemOperand(heap_number_reg,
+ HeapNumber::kExponentOffset));
+ __ ldr(scratch, FieldMemOperand(indexed_double_offset,
+ FixedArray::kHeaderSize));
+ __ str(scratch, FieldMemOperand(heap_number_reg,
+ HeapNumber::kMantissaOffset));
+
+ __ mov(r0, heap_number_reg);
+ __ Ret();
+
+ __ bind(&slow_allocate_heapnumber);
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&miss_force_generic);
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
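
Background for the kHoleNanUpper32 check: a FixedDoubleArray marks a
missing element ("the hole") with one specific NaN bit pattern, so the
fast-double load only needs to compare the upper 32 bits of the slot
against the sentinel. Everything else, ordinary NaNs included, is a real
value and gets boxed into a fresh HeapNumber. A hedged sketch of the test
(the constant's value lives in v8globals.h; the store stub below keeps the
invariant that no user-visible NaN aliases it):

    #include <cstdint>
    #include <cstring>

    extern const uint64_t kHoleNanInt64;  // defined in v8globals.h

    bool IsTheHole(double slot) {
      uint64_t bits;
      std::memcpy(&bits, &slot, sizeof(bits));  // BitCast<uint64_t>(slot)
      return (bits >> 32) == (kHoleNanInt64 >> 32);  // kHoleNanUpper32
    }
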
+
+
void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
@@ -4206,7 +4261,7 @@
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
- __ JumpIfNotSmi(r0, &miss_force_generic);
+ __ JumpIfNotSmi(key_reg, &miss_force_generic);
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ ldr(elements_reg,
@@ -4247,6 +4302,125 @@
}
+void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
+ MacroAssembler* masm,
+ bool is_js_array) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -- r3 : scratch
+ // -- r4 : scratch
+ // -- r5 : scratch
+ // -----------------------------------
+ Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+
+ Register value_reg = r0;
+ Register key_reg = r1;
+ Register receiver_reg = r2;
+ Register scratch = r3;
+ Register elements_reg = r4;
+ Register mantissa_reg = r5;
+ Register exponent_reg = r6;
+ Register scratch4 = r7;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+ __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+ // Check that the key is within bounds.
+ if (is_js_array) {
+ __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ } else {
+ __ ldr(scratch,
+ FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ }
+  // Compare smis; an unsigned compare catches both negative and
+  // out-of-bound indexes.
+ __ cmp(key_reg, scratch);
+ __ b(hs, &miss_force_generic);
+
+ // Handle smi values specially.
+ __ JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+ __ CheckMap(value_reg,
+ scratch,
+ masm->isolate()->factory()->heap_number_map(),
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+  // Check for NaN: all NaN values have an exponent word greater (signed)
+  // than 0x7ff00000.
+ __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+ __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+ __ cmp(exponent_reg, scratch);
+ __ b(ge, &maybe_nan);
+
+ __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+ __ bind(&have_double_value);
+ __ add(scratch, elements_reg,
+ Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+ __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ str(exponent_reg, FieldMemOperand(scratch, offset));
+ __ Ret();
+
+ __ bind(&maybe_nan);
+  // Could be NaN or Infinity. If the fraction is non-zero, it's NaN;
+  // otherwise it's an Infinity and the non-NaN code path applies.
+ __ b(gt, &is_nan);
+ __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+ __ cmp(mantissa_reg, Operand(0));
+ __ b(eq, &have_double_value);
+ __ bind(&is_nan);
+ // Load canonical NaN for storing into the double array.
+ uint64_t nan_int64 = BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+ __ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+ __ jmp(&have_double_value);
+
+ __ bind(&smi_value);
+ __ add(scratch, elements_reg,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ add(scratch, scratch,
+ Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  // scratch is now the effective address of the double element.
+
+ FloatingPointHelper::Destination destination;
+ if (CpuFeatures::IsSupported(VFP3)) {
+ destination = FloatingPointHelper::kVFPRegisters;
+ } else {
+ destination = FloatingPointHelper::kCoreRegisters;
+ }
+ __ SmiUntag(value_reg, value_reg);
+ FloatingPointHelper::ConvertIntToDouble(
+ masm, value_reg, destination,
+ d0, mantissa_reg, exponent_reg, // These are: double_dst, dst1, dst2.
+ scratch4, s2); // These are: scratch2, single_scratch.
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vstr(d0, scratch, 0);
+ } else {
+ __ str(mantissa_reg, MemOperand(scratch, 0));
+ __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
+ }
+ __ Ret();
+
+ // Handle store cache miss, replacing the ic with the generic stub.
+ __ bind(&miss_force_generic);
+ Handle<Code> ic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+}
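
This store stub is the other half of the hole invariant: any value whose
exponent word lands in the NaN range is replaced by
FixedDoubleArray::canonical_not_the_hole_nan_as_double() before being
written, so a user-constructed NaN can never collide with the hole
sentinel. Reduced to a hedged C++ sketch (constant name assumed):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    extern const uint64_t kCanonicalNonHoleNanInt64;  // assumed name

    void StoreDoubleElement(double* slot, double value) {
      if (std::isnan(value)) {
        // Canonicalize: store the one blessed non-hole NaN pattern.
        std::memcpy(&value, &kCanonicalNonHoleNanInt64, sizeof(value));
      }
      *slot = value;
    }
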
+
+
#undef __
} } // namespace v8::internal
diff --git a/src/array.js b/src/array.js
index df080a7..e6c13d9 100644
--- a/src/array.js
+++ b/src/array.js
@@ -631,7 +631,9 @@
if (end_i < start_i) return result;
- if (IS_ARRAY(this)) {
+ if (IS_ARRAY(this) &&
+ (end_i > 1000) &&
+ (%EstimateNumberOfElements(this) < end_i)) {
SmartSlice(this, start_i, end_i - start_i, len, result);
} else {
SimpleSlice(this, start_i, end_i - start_i, len, result);
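
The slice fast path is now gated on sparseness: SmartSlice, which walks
only the elements that actually exist, is worth its bookkeeping only when
the requested range is large and the array is mostly holes; small or dense
arrays are cheaper to copy with SimpleSlice's plain loop. The predicate,
restated as a hedged C++ sketch (names mirror the JavaScript above):

    bool UseSmartSlice(bool is_array, double end_i,
                       double estimated_elements) {
      return is_array && end_i > 1000 && estimated_elements < end_i;
    }
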
@@ -740,14 +742,15 @@
else return x < y ? -1 : 1;
};
}
- var global_receiver = %GetGlobalReceiver();
+ var receiver =
+ %_IsNativeOrStrictMode(comparefn) ? void 0 : %GetGlobalReceiver();
function InsertionSort(a, from, to) {
for (var i = from + 1; i < to; i++) {
var element = a[i];
for (var j = i - 1; j >= from; j--) {
var tmp = a[j];
- var order = %_CallFunction(global_receiver, tmp, element, comparefn);
+ var order = %_CallFunction(receiver, tmp, element, comparefn);
if (order > 0) {
a[j + 1] = tmp;
} else {
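
The comparator receiver is now computed once, up front: strict-mode and
native comparators must observe `this === undefined` per ES5, while
classic sloppy-mode comparators keep the historical global receiver. All
%_CallFunction sites below switch from global_receiver to this cached
value. The rule in miniature (illustrative only):

    enum Receiver { kUndefined, kGlobalReceiver };

    Receiver ComparatorReceiver(bool is_native_or_strict_mode) {
      return is_native_or_strict_mode ? kUndefined : kGlobalReceiver;
    }
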
@@ -769,14 +772,14 @@
var v1 = a[to - 1];
var middle_index = from + ((to - from) >> 1);
var v2 = a[middle_index];
- var c01 = %_CallFunction(global_receiver, v0, v1, comparefn);
+ var c01 = %_CallFunction(receiver, v0, v1, comparefn);
if (c01 > 0) {
// v1 < v0, so swap them.
var tmp = v0;
v0 = v1;
v1 = tmp;
} // v0 <= v1.
- var c02 = %_CallFunction(global_receiver, v0, v2, comparefn);
+ var c02 = %_CallFunction(receiver, v0, v2, comparefn);
if (c02 >= 0) {
// v2 <= v0 <= v1.
var tmp = v0;
@@ -785,7 +788,7 @@
v1 = tmp;
} else {
// v0 <= v1 && v0 < v2
- var c12 = %_CallFunction(global_receiver, v1, v2, comparefn);
+ var c12 = %_CallFunction(receiver, v1, v2, comparefn);
if (c12 > 0) {
// v0 <= v2 < v1
var tmp = v1;
@@ -806,7 +809,7 @@
// From i to high_start are elements that haven't been compared yet.
partition: for (var i = low_end + 1; i < high_start; i++) {
var element = a[i];
- var order = %_CallFunction(global_receiver, element, pivot, comparefn);
+ var order = %_CallFunction(receiver, element, pivot, comparefn);
if (order < 0) {
%_SwapElements(a, i, low_end);
low_end++;
@@ -815,7 +818,7 @@
high_start--;
if (high_start == i) break partition;
var top_elem = a[high_start];
- order = %_CallFunction(global_receiver, top_elem, pivot, comparefn);
+ order = %_CallFunction(receiver, top_elem, pivot, comparefn);
} while (order > 0);
%_SwapElements(a, i, high_start);
if (order < 0) {
@@ -996,7 +999,7 @@
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
- var length = this.length;
+ var length = ToUint32(this.length);
var result = [];
var result_length = 0;
for (var i = 0; i < length; i++) {
@@ -1233,7 +1236,7 @@
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
- var length = this.length;
+ var length = ToUint32(this.length);
var i = 0;
find_initial: if (%_ArgumentsLength() < 2) {
@@ -1250,7 +1253,7 @@
for (; i < length; i++) {
var element = this[i];
if (!IS_UNDEFINED(element) || i in this) {
- current = callback.call(null, current, element, i, this);
+ current = callback.call(void 0, current, element, i, this);
}
}
return current;
@@ -1265,7 +1268,7 @@
if (!IS_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
- var i = this.length - 1;
+ var i = ToUint32(this.length) - 1;
find_initial: if (%_ArgumentsLength() < 2) {
for (; i >= 0; i--) {
@@ -1281,7 +1284,7 @@
for (; i >= 0; i--) {
var element = this[i];
if (!IS_UNDEFINED(element) || i in this) {
- current = callback.call(null, current, element, i, this);
+ current = callback.call(void 0, current, element, i, this);
}
}
return current;
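
Two generic-method fixes recur in the filter/reduce/reduceRight hunks
above: the length is coerced through ToUint32, matching the spec's length
handling for array-likes with fractional or otherwise odd lengths, and the
callback receiver becomes undefined (void 0) rather than null, which is
what ES5 prescribes and what a strict-mode callback can observe. ToUint32
is the unsigned sibling of the truncation sketched earlier:

    #include <cmath>
    #include <cstdint>

    uint32_t ECMAToUint32(double d) {
      if (std::isnan(d) || std::isinf(d)) return 0;
      double m = std::fmod(std::trunc(d), 4294967296.0);  // 2^32
      if (m < 0) m += 4294967296.0;
      return static_cast<uint32_t>(m);
    }
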
diff --git a/src/assembler.cc b/src/assembler.cc
index 3c7fc1c..fbd8089 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// Copyright (c) 2011 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -71,7 +71,8 @@
const double DoubleConstant::minus_zero = -0.0;
const double DoubleConstant::uint8_max_value = 255;
const double DoubleConstant::zero = 0.0;
-const double DoubleConstant::nan = OS::nan_value();
+const double DoubleConstant::canonical_non_hole_nan = OS::nan_value();
+const double DoubleConstant::the_hole_nan = BitCast<double>(kHoleNanInt64);
const double DoubleConstant::negative_infinity = -V8_INFINITY;
const char* RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
@@ -921,9 +922,15 @@
}
-ExternalReference ExternalReference::address_of_nan() {
+ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
return ExternalReference(reinterpret_cast<void*>(
- const_cast<double*>(&DoubleConstant::nan)));
+ const_cast<double*>(&DoubleConstant::canonical_non_hole_nan)));
+}
+
+
+ExternalReference ExternalReference::address_of_the_hole_nan() {
+ return ExternalReference(reinterpret_cast<void*>(
+ const_cast<double*>(&DoubleConstant::the_hole_nan)));
}
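
With this split, generated code can load either constant by address: the
canonical NaN used for ordinary results and the hole NaN used as the
fast-double-array sentinel. The hole pattern is materialized with
BitCast<double>(kHoleNanInt64); V8's BitCast is essentially the classic
memcpy type-pun, sketched here rather than quoted from utils.h:

    #include <cstring>

    template <class Dest, class Source>
    Dest BitCast(const Source& source) {
      static_assert(sizeof(Dest) == sizeof(Source), "size mismatch");
      Dest dest;
      std::memcpy(&dest, &source, sizeof(dest));  // no aliasing UB
      return dest;
    }
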
diff --git a/src/assembler.h b/src/assembler.h
index 29f1ea9..2d14f06 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -70,7 +70,8 @@
static const double zero;
static const double uint8_max_value;
static const double negative_infinity;
- static const double nan;
+ static const double canonical_non_hole_nan;
+ static const double the_hole_nan;
};
@@ -90,14 +91,18 @@
Unuse();
UnuseNear();
}
- INLINE(~Label()) { ASSERT(!is_linked()); }
- INLINE(void Unuse()) { pos_ = 0; }
- INLINE(void UnuseNear()) { near_link_pos_ = 0; }
+ INLINE(~Label()) {
+ ASSERT(!is_linked());
+ ASSERT(!is_near_linked());
+ }
- INLINE(bool is_bound() const) { return pos_ < 0; }
- INLINE(bool is_unused() const) { return pos_ == 0 && near_link_pos_ == 0; }
- INLINE(bool is_linked() const) { return pos_ > 0; }
+ INLINE(void Unuse()) { pos_ = 0; }
+ INLINE(void UnuseNear()) { near_link_pos_ = 0; }
+
+ INLINE(bool is_bound() const) { return pos_ < 0; }
+ INLINE(bool is_unused() const) { return pos_ == 0 && near_link_pos_ == 0; }
+ INLINE(bool is_linked() const) { return pos_ > 0; }
INLINE(bool is_near_linked() const) { return near_link_pos_ > 0; }
// Returns the position of bound or linked labels. Cannot be used
@@ -625,7 +630,8 @@
static ExternalReference address_of_zero();
static ExternalReference address_of_uint8_max_value();
static ExternalReference address_of_negative_infinity();
- static ExternalReference address_of_nan();
+ static ExternalReference address_of_canonical_non_hole_nan();
+ static ExternalReference address_of_the_hole_nan();
static ExternalReference math_sin_double_function(Isolate* isolate);
static ExternalReference math_cos_double_function(Isolate* isolate);
diff --git a/src/ast-inl.h b/src/ast-inl.h
index c2bd613..c750e6b 100644
--- a/src/ast-inl.h
+++ b/src/ast-inl.h
@@ -37,68 +37,76 @@
namespace internal {
-SwitchStatement::SwitchStatement(ZoneStringList* labels)
- : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
+SwitchStatement::SwitchStatement(Isolate* isolate,
+ ZoneStringList* labels)
+ : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
tag_(NULL), cases_(NULL) {
}
-Block::Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
- : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
+Block::Block(Isolate* isolate,
+ ZoneStringList* labels,
+ int capacity,
+ bool is_initializer_block)
+ : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
statements_(capacity),
is_initializer_block_(is_initializer_block) {
}
-BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
+BreakableStatement::BreakableStatement(Isolate* isolate,
+ ZoneStringList* labels,
+ Type type)
: labels_(labels),
type_(type),
- entry_id_(GetNextId()),
- exit_id_(GetNextId()) {
+ entry_id_(GetNextId(isolate)),
+ exit_id_(GetNextId(isolate)) {
ASSERT(labels == NULL || labels->length() > 0);
}
-IterationStatement::IterationStatement(ZoneStringList* labels)
- : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
+IterationStatement::IterationStatement(Isolate* isolate, ZoneStringList* labels)
+ : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
body_(NULL),
continue_target_(),
- osr_entry_id_(GetNextId()) {
+ osr_entry_id_(GetNextId(isolate)) {
}
-DoWhileStatement::DoWhileStatement(ZoneStringList* labels)
- : IterationStatement(labels),
+DoWhileStatement::DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
+ : IterationStatement(isolate, labels),
cond_(NULL),
condition_position_(-1),
- continue_id_(GetNextId()),
- back_edge_id_(GetNextId()) {
+ continue_id_(GetNextId(isolate)),
+ back_edge_id_(GetNextId(isolate)) {
}
-WhileStatement::WhileStatement(ZoneStringList* labels)
- : IterationStatement(labels),
+WhileStatement::WhileStatement(Isolate* isolate, ZoneStringList* labels)
+ : IterationStatement(isolate, labels),
cond_(NULL),
may_have_function_literal_(true),
- body_id_(GetNextId()) {
+ body_id_(GetNextId(isolate)) {
}
-ForStatement::ForStatement(ZoneStringList* labels)
- : IterationStatement(labels),
+ForStatement::ForStatement(Isolate* isolate, ZoneStringList* labels)
+ : IterationStatement(isolate, labels),
init_(NULL),
cond_(NULL),
next_(NULL),
may_have_function_literal_(true),
loop_variable_(NULL),
- continue_id_(GetNextId()),
- body_id_(GetNextId()) {
+ continue_id_(GetNextId(isolate)),
+ body_id_(GetNextId(isolate)) {
}
-ForInStatement::ForInStatement(ZoneStringList* labels)
- : IterationStatement(labels), each_(NULL), enumerable_(NULL),
- assignment_id_(GetNextId()) {
+ForInStatement::ForInStatement(Isolate* isolate, ZoneStringList* labels)
+ : IterationStatement(isolate, labels),
+ each_(NULL),
+ enumerable_(NULL),
+ assignment_id_(GetNextId(isolate)) {
}
diff --git a/src/ast.cc b/src/ast.cc
index b4abf54..2df62ee 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -37,11 +37,11 @@
namespace internal {
AstSentinels::AstSentinels()
- : this_proxy_(true),
- identifier_proxy_(false),
- valid_left_hand_side_sentinel_(),
- this_property_(&this_proxy_, NULL, 0),
- call_sentinel_(NULL, NULL, 0) {
+ : this_proxy_(Isolate::Current(), true),
+ identifier_proxy_(Isolate::Current(), false),
+ valid_left_hand_side_sentinel_(Isolate::Current()),
+ this_property_(Isolate::Current(), &this_proxy_, NULL, 0),
+ call_sentinel_(Isolate::Current(), NULL, NULL, 0) {
}
@@ -72,8 +72,9 @@
}
-VariableProxy::VariableProxy(Variable* var)
- : name_(var->name()),
+VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
+ : Expression(isolate),
+ name_(var->name()),
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
inside_with_(false),
@@ -83,26 +84,29 @@
}
-VariableProxy::VariableProxy(Handle<String> name,
+VariableProxy::VariableProxy(Isolate* isolate,
+ Handle<String> name,
bool is_this,
bool inside_with,
int position)
- : name_(name),
- var_(NULL),
- is_this_(is_this),
- inside_with_(inside_with),
- is_trivial_(false),
- position_(position) {
+ : Expression(isolate),
+ name_(name),
+ var_(NULL),
+ is_this_(is_this),
+ inside_with_(inside_with),
+ is_trivial_(false),
+ position_(position) {
// Names must be canonicalized for fast equality checks.
ASSERT(name->IsSymbol());
}
-VariableProxy::VariableProxy(bool is_this)
- : var_(NULL),
- is_this_(is_this),
- inside_with_(false),
- is_trivial_(false) {
+VariableProxy::VariableProxy(Isolate* isolate, bool is_this)
+ : Expression(isolate),
+ var_(NULL),
+ is_this_(is_this),
+ inside_with_(false),
+ is_trivial_(false) {
}
@@ -120,17 +124,19 @@
}
-Assignment::Assignment(Token::Value op,
+Assignment::Assignment(Isolate* isolate,
+ Token::Value op,
Expression* target,
Expression* value,
int pos)
- : op_(op),
+ : Expression(isolate),
+ op_(op),
target_(target),
value_(value),
pos_(pos),
binary_operation_(NULL),
compound_load_id_(kNoNumber),
- assignment_id_(GetNextId()),
+ assignment_id_(GetNextId(isolate)),
block_start_(false),
block_end_(false),
is_monomorphic_(false),
@@ -138,8 +144,12 @@
ASSERT(Token::IsAssignmentOp(op));
if (is_compound()) {
binary_operation_ =
- new BinaryOperation(binary_op(), target, value, pos + 1);
- compound_load_id_ = GetNextId();
+ new(isolate->zone()) BinaryOperation(isolate,
+ binary_op(),
+ target,
+ value,
+ pos + 1);
+ compound_load_id_ = GetNextId(isolate);
}
}
@@ -186,8 +196,9 @@
ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
+ Isolate* isolate = Isolate::Current();
emit_store_ = true;
- key_ = new Literal(value->name());
+ key_ = new(isolate->zone()) Literal(isolate, value->name());
value_ = value;
kind_ = is_getter ? GETTER : SETTER;
}
@@ -293,11 +304,11 @@
void TargetCollector::AddTarget(Label* target) {
// Add the label to the collector, but discard duplicates.
- int length = targets_->length();
+ int length = targets_.length();
for (int i = 0; i < length; i++) {
- if (targets_->at(i) == target) return;
+ if (targets_[i] == target) return;
}
- targets_->Add(target);
+ targets_.Add(target);
}
@@ -337,12 +348,64 @@
}
+bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
+ Handle<String>* check) {
+ if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false;
+
+ UnaryOperation* left_unary = left_->AsUnaryOperation();
+ UnaryOperation* right_unary = right_->AsUnaryOperation();
+ Literal* left_literal = left_->AsLiteral();
+ Literal* right_literal = right_->AsLiteral();
+
+ // Check for the pattern: typeof <expression> == <string literal>.
+ if (left_unary != NULL && left_unary->op() == Token::TYPEOF &&
+ right_literal != NULL && right_literal->handle()->IsString()) {
+ *expr = left_unary->expression();
+ *check = Handle<String>::cast(right_literal->handle());
+ return true;
+ }
+
+ // Check for the pattern: <string literal> == typeof <expression>.
+ if (right_unary != NULL && right_unary->op() == Token::TYPEOF &&
+ left_literal != NULL && left_literal->handle()->IsString()) {
+ *expr = right_unary->expression();
+ *check = Handle<String>::cast(left_literal->handle());
+ return true;
+ }
+
+ return false;
+}
+
+
+bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
+ if (op_ != Token::EQ_STRICT) return false;
+
+ UnaryOperation* left_unary = left_->AsUnaryOperation();
+ UnaryOperation* right_unary = right_->AsUnaryOperation();
+
+ // Check for the pattern: <expression> === void <literal>.
+ if (right_unary != NULL && right_unary->op() == Token::VOID &&
+ right_unary->expression()->AsLiteral() != NULL) {
+ *expr = left_;
+ return true;
+ }
+
+ // Check for the pattern: void <literal> === <expression>.
+ if (left_unary != NULL && left_unary->op() == Token::VOID &&
+ left_unary->expression()->AsLiteral() != NULL) {
+ *expr = right_;
+ return true;
+ }
+
+ return false;
+}
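
These two matchers recognize, in either operand order, the source patterns
`typeof <expr> == <string literal>` and `<expr> === void <literal>`, so
later pipeline stages can test a type tag or compare against undefined
directly instead of materializing both operands. A hedged caller fragment
(not compilable in isolation; `cmp` is a CompareOperation*):

    Expression* sub_expr = NULL;
    Handle<String> check;
    if (cmp->IsLiteralCompareTypeof(&sub_expr, &check)) {
      // matched e.g.  typeof x == "function"   (sub_expr = x)
    } else if (cmp->IsLiteralCompareUndefined(&sub_expr)) {
      // matched e.g.  x === void 0
    }
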
+
+
// ----------------------------------------------------------------------------
// Inlining support
bool Declaration::IsInlineable() const {
- UNREACHABLE();
- return false;
+ return proxy()->var()->IsStackAllocated() && fun() == NULL;
}
@@ -363,12 +426,12 @@
}
-bool WithEnterStatement::IsInlineable() const {
+bool EnterWithContextStatement::IsInlineable() const {
return false;
}
-bool WithExitStatement::IsInlineable() const {
+bool ExitContextStatement::IsInlineable() const {
return false;
}
@@ -393,11 +456,6 @@
}
-bool CatchExtensionObject::IsInlineable() const {
- return false;
-}
-
-
bool DebuggerStatement::IsInlineable() const {
return false;
}
@@ -593,7 +651,7 @@
void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
// Record type feedback from the oracle in the AST.
- is_monomorphic_ = oracle->LoadIsMonomorphic(this);
+ is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
if (key()->IsPropertyName()) {
if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_ArrayLength)) {
is_array_length_ = true;
@@ -613,9 +671,9 @@
is_string_access_ = true;
} else if (is_monomorphic_) {
monomorphic_receiver_type_ = oracle->LoadMonomorphicReceiverType(this);
- if (monomorphic_receiver_type_->has_external_array_elements()) {
- set_external_array_type(oracle->GetKeyedLoadExternalArrayType(this));
- }
+ } else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) {
+ receiver_types_ = new ZoneMapList(kMaxKeyedPolymorphism);
+ oracle->CollectKeyedReceiverTypes(this->id(), receiver_types_);
}
}
@@ -623,7 +681,7 @@
void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
Property* prop = target()->AsProperty();
ASSERT(prop != NULL);
- is_monomorphic_ = oracle->StoreIsMonomorphic(this);
+ is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
ASSERT(lit_key != NULL && lit_key->handle()->IsString());
@@ -631,23 +689,23 @@
ZoneMapList* types = oracle->StoreReceiverTypes(this, name);
receiver_types_ = types;
} else if (is_monomorphic_) {
- // Record receiver type for monomorphic keyed loads.
+ // Record receiver type for monomorphic keyed stores.
monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
- if (monomorphic_receiver_type_->has_external_array_elements()) {
- set_external_array_type(oracle->GetKeyedStoreExternalArrayType(this));
- }
+ } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
+ receiver_types_ = new ZoneMapList(kMaxKeyedPolymorphism);
+ oracle->CollectKeyedReceiverTypes(this->id(), receiver_types_);
}
}
void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- is_monomorphic_ = oracle->StoreIsMonomorphic(this);
+ is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
if (is_monomorphic_) {
- // Record receiver type for monomorphic keyed loads.
+ // Record receiver type for monomorphic keyed stores.
monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
- if (monomorphic_receiver_type_->has_external_array_elements()) {
- set_external_array_type(oracle->GetKeyedStoreExternalArrayType(this));
- }
+ } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
+ receiver_types_ = new ZoneMapList(kMaxKeyedPolymorphism);
+ oracle->CollectKeyedReceiverTypes(this->id(), receiver_types_);
}
}
@@ -1143,15 +1201,16 @@
}
-CaseClause::CaseClause(Expression* label,
+CaseClause::CaseClause(Isolate* isolate,
+ Expression* label,
ZoneList<Statement*>* statements,
int pos)
: label_(label),
statements_(statements),
position_(pos),
compare_type_(NONE),
- compare_id_(AstNode::GetNextId()),
- entry_id_(AstNode::GetNextId()) {
+ compare_id_(AstNode::GetNextId(isolate)),
+ entry_id_(AstNode::GetNextId(isolate)) {
}
} } // namespace v8::internal
diff --git a/src/ast.h b/src/ast.h
index 0ac1644..b4705f6 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -60,8 +60,8 @@
V(ContinueStatement) \
V(BreakStatement) \
V(ReturnStatement) \
- V(WithEnterStatement) \
- V(WithExitStatement) \
+ V(EnterWithContextStatement) \
+ V(ExitContextStatement) \
V(SwitchStatement) \
V(DoWhileStatement) \
V(WhileStatement) \
@@ -80,7 +80,6 @@
V(RegExpLiteral) \
V(ObjectLiteral) \
V(ArrayLiteral) \
- V(CatchExtensionObject) \
V(Assignment) \
V(Throw) \
V(Property) \
@@ -135,11 +134,15 @@
static const int kNoNumber = -1;
static const int kFunctionEntryId = 2; // Using 0 could disguise errors.
- AstNode() : id_(GetNextId()) {
- Isolate* isolate = Isolate::Current();
+ // Override ZoneObject's new to count allocated AST nodes.
+ void* operator new(size_t size, Zone* zone) {
+ Isolate* isolate = zone->isolate();
isolate->set_ast_node_count(isolate->ast_node_count() + 1);
+ return zone->New(static_cast<int>(size));
}
+ AstNode() {}
+
virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0;
@@ -164,24 +167,22 @@
static int Count() { return Isolate::Current()->ast_node_count(); }
static void ResetIds() { Isolate::Current()->set_ast_node_id(0); }
- unsigned id() const { return id_; }
protected:
- static unsigned GetNextId() {
- Isolate* isolate = Isolate::Current();
- unsigned tmp = isolate->ast_node_id();
- isolate->set_ast_node_id(tmp + 1);
- return tmp;
+ static unsigned GetNextId(Isolate* isolate) {
+ return ReserveIdRange(isolate, 1);
}
- static unsigned ReserveIdRange(int n) {
- Isolate* isolate = Isolate::Current();
+
+ static unsigned ReserveIdRange(Isolate* isolate, int n) {
unsigned tmp = isolate->ast_node_id();
isolate->set_ast_node_id(tmp + n);
return tmp;
}
private:
- unsigned id_;
+ // Hidden to prevent accidental usage. It would have to load the
+ // current zone from the TLS.
+ void* operator new(size_t size);
friend class CaseClause; // Generates AST IDs.
};
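
Two allocation changes land together here: AST ids now come from an
explicit Isolate instead of a TLS Isolate::Current() lookup, and node
counting moves from the AstNode constructor into a placement operator new
that also allocates from the zone. Hiding the plain operator new forces
every call site into the `new (isolate->zone()) Node(...)` form seen in
ast.cc above. The pattern in miniature, with toy types standing in:

    #include <cstddef>
    #include <cstdlib>

    class Zone {  // toy arena standing in for v8::internal::Zone
     public:
      void* New(int size) { return std::malloc(size); }  // real zones bump-allocate
    };

    class ZoneObject {
     public:
      void* operator new(std::size_t size, Zone* zone) {
        return zone->New(static_cast<int>(size));  // count/allocate hook
      }
     private:
      void* operator new(std::size_t);  // unusable: forces the zone form
    };
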
@@ -220,7 +221,9 @@
kTest
};
- Expression() {}
+ explicit Expression(Isolate* isolate)
+ : id_(GetNextId(isolate)),
+ test_id_(GetNextId(isolate)) {}
virtual int position() const {
UNREACHABLE();
@@ -271,15 +274,12 @@
return Handle<Map>();
}
- ExternalArrayType external_array_type() const {
- return external_array_type_;
- }
- void set_external_array_type(ExternalArrayType array_type) {
- external_array_type_ = array_type;
- }
+ unsigned id() const { return id_; }
+ unsigned test_id() const { return test_id_; }
private:
- ExternalArrayType external_array_type_;
+ unsigned id_;
+ unsigned test_id_;
};
@@ -290,6 +290,7 @@
*/
class ValidLeftHandSideSentinel: public Expression {
public:
+ explicit ValidLeftHandSideSentinel(Isolate* isolate) : Expression(isolate) {}
virtual bool IsValidLeftHandSide() { return true; }
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
virtual bool IsInlineable() const;
@@ -321,7 +322,7 @@
int ExitId() const { return exit_id_; }
protected:
- inline BreakableStatement(ZoneStringList* labels, Type type);
+ BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type);
private:
ZoneStringList* labels_;
@@ -334,7 +335,10 @@
class Block: public BreakableStatement {
public:
- inline Block(ZoneStringList* labels, int capacity, bool is_initializer_block);
+ inline Block(Isolate* isolate,
+ ZoneStringList* labels,
+ int capacity,
+ bool is_initializer_block);
DECLARE_NODE_TYPE(Block)
@@ -396,12 +400,13 @@
// Bailout support.
int OsrEntryId() const { return osr_entry_id_; }
virtual int ContinueId() const = 0;
+ virtual int StackCheckId() const = 0;
// Code generation
Label* continue_target() { return &continue_target_; }
protected:
- explicit inline IterationStatement(ZoneStringList* labels);
+ inline IterationStatement(Isolate* isolate, ZoneStringList* labels);
void Initialize(Statement* body) {
body_ = body;
@@ -416,7 +421,7 @@
class DoWhileStatement: public IterationStatement {
public:
- explicit inline DoWhileStatement(ZoneStringList* labels);
+ inline DoWhileStatement(Isolate* isolate, ZoneStringList* labels);
DECLARE_NODE_TYPE(DoWhileStatement)
@@ -434,6 +439,7 @@
// Bailout support.
virtual int ContinueId() const { return continue_id_; }
+ virtual int StackCheckId() const { return back_edge_id_; }
int BackEdgeId() const { return back_edge_id_; }
virtual bool IsInlineable() const;
@@ -448,7 +454,7 @@
class WhileStatement: public IterationStatement {
public:
- explicit inline WhileStatement(ZoneStringList* labels);
+ inline WhileStatement(Isolate* isolate, ZoneStringList* labels);
DECLARE_NODE_TYPE(WhileStatement)
@@ -468,6 +474,7 @@
// Bailout support.
virtual int ContinueId() const { return EntryId(); }
+ virtual int StackCheckId() const { return body_id_; }
int BodyId() const { return body_id_; }
private:
@@ -480,7 +487,7 @@
class ForStatement: public IterationStatement {
public:
- explicit inline ForStatement(ZoneStringList* labels);
+ inline ForStatement(Isolate* isolate, ZoneStringList* labels);
DECLARE_NODE_TYPE(ForStatement)
@@ -507,6 +514,7 @@
// Bailout support.
virtual int ContinueId() const { return continue_id_; }
+ virtual int StackCheckId() const { return body_id_; }
int BodyId() const { return body_id_; }
bool is_fast_smi_loop() { return loop_variable_ != NULL; }
@@ -528,7 +536,7 @@
class ForInStatement: public IterationStatement {
public:
- explicit inline ForInStatement(ZoneStringList* labels);
+ inline ForInStatement(Isolate* isolate, ZoneStringList* labels);
DECLARE_NODE_TYPE(ForInStatement)
@@ -545,6 +553,7 @@
// Bailout support.
int AssignmentId() const { return assignment_id_; }
virtual int ContinueId() const { return EntryId(); }
+ virtual int StackCheckId() const { return EntryId(); }
private:
Expression* each_;
@@ -618,37 +627,36 @@
};
-class WithEnterStatement: public Statement {
+class EnterWithContextStatement: public Statement {
public:
- explicit WithEnterStatement(Expression* expression, bool is_catch_block)
- : expression_(expression), is_catch_block_(is_catch_block) { }
+ explicit EnterWithContextStatement(Expression* expression)
+ : expression_(expression) { }
- DECLARE_NODE_TYPE(WithEnterStatement)
+ DECLARE_NODE_TYPE(EnterWithContextStatement)
Expression* expression() const { return expression_; }
- bool is_catch_block() const { return is_catch_block_; }
virtual bool IsInlineable() const;
private:
Expression* expression_;
- bool is_catch_block_;
};
-class WithExitStatement: public Statement {
+class ExitContextStatement: public Statement {
public:
- WithExitStatement() { }
-
virtual bool IsInlineable() const;
- DECLARE_NODE_TYPE(WithExitStatement)
+ DECLARE_NODE_TYPE(ExitContextStatement)
};
class CaseClause: public ZoneObject {
public:
- CaseClause(Expression* label, ZoneList<Statement*>* statements, int pos);
+ CaseClause(Isolate* isolate,
+ Expression* label,
+ ZoneList<Statement*>* statements,
+ int pos);
bool is_default() const { return label_ == NULL; }
Expression* label() const {
@@ -683,7 +691,7 @@
class SwitchStatement: public BreakableStatement {
public:
- explicit inline SwitchStatement(ZoneStringList* labels);
+ inline SwitchStatement(Isolate* isolate, ZoneStringList* labels);
DECLARE_NODE_TYPE(SwitchStatement)
@@ -709,14 +717,16 @@
// given if-statement has a then- or an else-part containing code.
class IfStatement: public Statement {
public:
- IfStatement(Expression* condition,
+ IfStatement(Isolate* isolate,
+ Expression* condition,
Statement* then_statement,
Statement* else_statement)
: condition_(condition),
then_statement_(then_statement),
else_statement_(else_statement),
- then_id_(GetNextId()),
- else_id_(GetNextId()) {
+ if_id_(GetNextId(isolate)),
+ then_id_(GetNextId(isolate)),
+ else_id_(GetNextId(isolate)) {
}
DECLARE_NODE_TYPE(IfStatement)
@@ -730,6 +740,7 @@
Statement* then_statement() const { return then_statement_; }
Statement* else_statement() const { return else_statement_; }
+ int IfId() const { return if_id_; }
int ThenId() const { return then_id_; }
int ElseId() const { return else_id_; }
@@ -737,6 +748,7 @@
Expression* condition_;
Statement* then_statement_;
Statement* else_statement_;
+ int if_id_;
int then_id_;
int else_id_;
};
@@ -746,9 +758,7 @@
// stack in the compiler; this should probably be reworked.
class TargetCollector: public AstNode {
public:
- explicit TargetCollector(ZoneList<Label*>* targets)
- : targets_(targets) {
- }
+ TargetCollector(): targets_(0) { }
// Adds a jump target to the collector. The collector stores a pointer not
// a copy of the target to make binding work, so make sure not to pass in
@@ -759,11 +769,11 @@
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
virtual TargetCollector* AsTargetCollector() { return this; }
- ZoneList<Label*>* targets() { return targets_; }
+ ZoneList<Label*>* targets() { return &targets_; }
virtual bool IsInlineable() const;
private:
- ZoneList<Label*>* targets_;
+ ZoneList<Label*> targets_;
};
@@ -789,21 +799,25 @@
class TryCatchStatement: public TryStatement {
public:
TryCatchStatement(Block* try_block,
- VariableProxy* catch_var,
+ Scope* scope,
+ Variable* variable,
Block* catch_block)
: TryStatement(try_block),
- catch_var_(catch_var),
+ scope_(scope),
+ variable_(variable),
catch_block_(catch_block) {
}
DECLARE_NODE_TYPE(TryCatchStatement)
- VariableProxy* catch_var() const { return catch_var_; }
+ Scope* scope() { return scope_; }
+ Variable* variable() { return variable_; }
Block* catch_block() const { return catch_block_; }
virtual bool IsInlineable() const;
private:
- VariableProxy* catch_var_;
+ Scope* scope_;
+ Variable* variable_;
Block* catch_block_;
};
@@ -841,7 +855,8 @@
class Literal: public Expression {
public:
- explicit Literal(Handle<Object> handle) : handle_(handle) { }
+ Literal(Isolate* isolate, Handle<Object> handle)
+ : Expression(isolate), handle_(handle) { }
DECLARE_NODE_TYPE(Literal)
@@ -894,8 +909,14 @@
// Base class for literals that needs space in the corresponding JSFunction.
class MaterializedLiteral: public Expression {
public:
- explicit MaterializedLiteral(int literal_index, bool is_simple, int depth)
- : literal_index_(literal_index), is_simple_(is_simple), depth_(depth) {}
+ MaterializedLiteral(Isolate* isolate,
+ int literal_index,
+ bool is_simple,
+ int depth)
+ : Expression(isolate),
+ literal_index_(literal_index),
+ is_simple_(is_simple),
+ depth_(depth) {}
virtual MaterializedLiteral* AsMaterializedLiteral() { return this; }
@@ -951,14 +972,15 @@
bool emit_store_;
};
- ObjectLiteral(Handle<FixedArray> constant_properties,
+ ObjectLiteral(Isolate* isolate,
+ Handle<FixedArray> constant_properties,
ZoneList<Property*>* properties,
int literal_index,
bool is_simple,
bool fast_elements,
int depth,
bool has_function)
- : MaterializedLiteral(literal_index, is_simple, depth),
+ : MaterializedLiteral(isolate, literal_index, is_simple, depth),
constant_properties_(constant_properties),
properties_(properties),
fast_elements_(fast_elements),
@@ -997,10 +1019,11 @@
// Node for capturing a regexp literal.
class RegExpLiteral: public MaterializedLiteral {
public:
- RegExpLiteral(Handle<String> pattern,
+ RegExpLiteral(Isolate* isolate,
+ Handle<String> pattern,
Handle<String> flags,
int literal_index)
- : MaterializedLiteral(literal_index, false, 1),
+ : MaterializedLiteral(isolate, literal_index, false, 1),
pattern_(pattern),
flags_(flags) {}
@@ -1018,15 +1041,16 @@
// for minimizing the work when constructing it at runtime.
class ArrayLiteral: public MaterializedLiteral {
public:
- ArrayLiteral(Handle<FixedArray> constant_elements,
+ ArrayLiteral(Isolate* isolate,
+ Handle<FixedArray> constant_elements,
ZoneList<Expression*>* values,
int literal_index,
bool is_simple,
int depth)
- : MaterializedLiteral(literal_index, is_simple, depth),
+ : MaterializedLiteral(isolate, literal_index, is_simple, depth),
constant_elements_(constant_elements),
values_(values),
- first_element_id_(ReserveIdRange(values->length())) {}
+ first_element_id_(ReserveIdRange(isolate, values->length())) {}
DECLARE_NODE_TYPE(ArrayLiteral)
@@ -1043,44 +1067,14 @@
};
-// Node for constructing a context extension object for a catch block.
-// The catch context extension object has one property, the catch
-// variable, which should be DontDelete.
-class CatchExtensionObject: public Expression {
- public:
- CatchExtensionObject(Literal* key, VariableProxy* value)
- : key_(key), value_(value) {
- }
-
- DECLARE_NODE_TYPE(CatchExtensionObject)
-
- Literal* key() const { return key_; }
- VariableProxy* value() const { return value_; }
- virtual bool IsInlineable() const;
-
- private:
- Literal* key_;
- VariableProxy* value_;
-};
-
-
class VariableProxy: public Expression {
public:
- explicit VariableProxy(Variable* var);
+ VariableProxy(Isolate* isolate, Variable* var);
DECLARE_NODE_TYPE(VariableProxy)
// Type testing & conversion
- virtual Property* AsProperty() {
- return var_ == NULL ? NULL : var_->AsProperty();
- }
-
- Variable* AsVariable() {
- if (this == NULL || var_ == NULL) return NULL;
- Expression* rewrite = var_->rewrite();
- if (rewrite == NULL || rewrite->AsSlot() != NULL) return var_;
- return NULL;
- }
+ Variable* AsVariable() { return (this == NULL) ? NULL : var_; }
virtual bool IsValidLeftHandSide() {
return var_ == NULL ? true : var_->IsValidLeftHandSide();
@@ -1122,11 +1116,12 @@
bool is_trivial_;
int position_;
- VariableProxy(Handle<String> name,
+ VariableProxy(Isolate* isolate,
+ Handle<String> name,
bool is_this,
bool inside_with,
int position = RelocInfo::kNoPosition);
- explicit VariableProxy(bool is_this);
+ VariableProxy(Isolate* isolate, bool is_this);
friend class Scope;
};
@@ -1137,7 +1132,8 @@
virtual bool IsValidLeftHandSide() { return !is_this(); }
private:
- explicit VariableProxySentinel(bool is_this) : VariableProxy(is_this) { }
+ VariableProxySentinel(Isolate* isolate, bool is_this)
+ : VariableProxy(isolate, is_this) { }
friend class AstSentinels;
};
@@ -1167,8 +1163,8 @@
LOOKUP
};
- Slot(Variable* var, Type type, int index)
- : var_(var), type_(type), index_(index) {
+ Slot(Isolate* isolate, Variable* var, Type type, int index)
+ : Expression(isolate), var_(var), type_(type), index_(index) {
ASSERT(var != NULL);
}
@@ -1199,8 +1195,13 @@
// properties should use the global object as receiver, not the base object
// of the resolved Reference.
enum Type { NORMAL, SYNTHETIC };
- Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
- : obj_(obj),
+ Property(Isolate* isolate,
+ Expression* obj,
+ Expression* key,
+ int pos,
+ Type type = NORMAL)
+ : Expression(isolate),
+ obj_(obj),
key_(key),
pos_(pos),
type_(type),
@@ -1209,8 +1210,7 @@
is_array_length_(false),
is_string_length_(false),
is_string_access_(false),
- is_function_prototype_(false),
- is_arguments_access_(false) { }
+ is_function_prototype_(false) { }
DECLARE_NODE_TYPE(Property)
@@ -1226,13 +1226,6 @@
bool IsStringAccess() const { return is_string_access_; }
bool IsFunctionPrototype() const { return is_function_prototype_; }
- // Marks that this is actually an argument rewritten to a keyed property
- // accessing the argument through the arguments shadow object.
- void set_is_arguments_access(bool is_arguments_access) {
- is_arguments_access_ = is_arguments_access;
- }
- bool is_arguments_access() const { return is_arguments_access_; }
-
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() { return is_monomorphic_; }
@@ -1254,21 +1247,24 @@
bool is_string_length_ : 1;
bool is_string_access_ : 1;
bool is_function_prototype_ : 1;
- bool is_arguments_access_ : 1;
Handle<Map> monomorphic_receiver_type_;
};
class Call: public Expression {
public:
- Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
- : expression_(expression),
+ Call(Isolate* isolate,
+ Expression* expression,
+ ZoneList<Expression*>* arguments,
+ int pos)
+ : Expression(isolate),
+ expression_(expression),
arguments_(arguments),
pos_(pos),
is_monomorphic_(false),
check_type_(RECEIVER_MAP_CHECK),
receiver_types_(NULL),
- return_id_(GetNextId()) {
+ return_id_(GetNextId(isolate)) {
}
DECLARE_NODE_TYPE(Call)
@@ -1347,8 +1343,14 @@
class CallNew: public Expression {
public:
- CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
- : expression_(expression), arguments_(arguments), pos_(pos) { }
+ CallNew(Isolate* isolate,
+ Expression* expression,
+ ZoneList<Expression*>* arguments,
+ int pos)
+ : Expression(isolate),
+ expression_(expression),
+ arguments_(arguments),
+ pos_(pos) { }
DECLARE_NODE_TYPE(CallNew)
@@ -1371,10 +1373,14 @@
// implemented in JavaScript (see "v8natives.js").
class CallRuntime: public Expression {
public:
- CallRuntime(Handle<String> name,
+ CallRuntime(Isolate* isolate,
+ Handle<String> name,
const Runtime::Function* function,
ZoneList<Expression*>* arguments)
- : name_(name), function_(function), arguments_(arguments) { }
+ : Expression(isolate),
+ name_(name),
+ function_(function),
+ arguments_(arguments) { }
DECLARE_NODE_TYPE(CallRuntime)
@@ -1394,8 +1400,11 @@
class UnaryOperation: public Expression {
public:
- UnaryOperation(Token::Value op, Expression* expression, int pos)
- : op_(op), expression_(expression), pos_(pos) {
+ UnaryOperation(Isolate* isolate,
+ Token::Value op,
+ Expression* expression,
+ int pos)
+ : Expression(isolate), op_(op), expression_(expression), pos_(pos) {
ASSERT(Token::IsUnaryOp(op));
}
@@ -1418,14 +1427,15 @@
class BinaryOperation: public Expression {
public:
- BinaryOperation(Token::Value op,
+ BinaryOperation(Isolate* isolate,
+ Token::Value op,
Expression* left,
Expression* right,
int pos)
- : op_(op), left_(left), right_(right), pos_(pos) {
+ : Expression(isolate), op_(op), left_(left), right_(right), pos_(pos) {
ASSERT(Token::IsBinaryOp(op));
right_id_ = (op == Token::AND || op == Token::OR)
- ? static_cast<int>(GetNextId())
+ ? static_cast<int>(GetNextId(isolate))
: AstNode::kNoNumber;
}
@@ -1456,13 +1466,19 @@
class CountOperation: public Expression {
public:
- CountOperation(Token::Value op, bool is_prefix, Expression* expr, int pos)
- : op_(op),
+ CountOperation(Isolate* isolate,
+ Token::Value op,
+ bool is_prefix,
+ Expression* expr,
+ int pos)
+ : Expression(isolate),
+ op_(op),
is_prefix_(is_prefix),
expression_(expr),
pos_(pos),
- assignment_id_(GetNextId()),
- count_id_(GetNextId()) { }
+ assignment_id_(GetNextId(isolate)),
+ count_id_(GetNextId(isolate)),
+ receiver_types_(NULL) { }
DECLARE_NODE_TYPE(CountOperation)
@@ -1486,6 +1502,7 @@
virtual Handle<Map> GetMonomorphicReceiverType() {
return monomorphic_receiver_type_;
}
+ virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
// Bailout support.
int AssignmentId() const { return assignment_id_; }
@@ -1500,16 +1517,23 @@
int assignment_id_;
int count_id_;
Handle<Map> monomorphic_receiver_type_;
+ ZoneMapList* receiver_types_;
};
class CompareOperation: public Expression {
public:
- CompareOperation(Token::Value op,
+ CompareOperation(Isolate* isolate,
+ Token::Value op,
Expression* left,
Expression* right,
int pos)
- : op_(op), left_(left), right_(right), pos_(pos), compare_type_(NONE) {
+ : Expression(isolate),
+ op_(op),
+ left_(left),
+ right_(right),
+ pos_(pos),
+ compare_type_(NONE) {
ASSERT(Token::IsCompareOp(op));
}
@@ -1527,6 +1551,10 @@
bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
+ // Match special cases.
+ bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
+ bool IsLiteralCompareUndefined(Expression** expr);
+
private:
Token::Value op_;
Expression* left_;
@@ -1540,8 +1568,8 @@
class CompareToNull: public Expression {
public:
- CompareToNull(bool is_strict, Expression* expression)
- : is_strict_(is_strict), expression_(expression) { }
+ CompareToNull(Isolate* isolate, bool is_strict, Expression* expression)
+ : Expression(isolate), is_strict_(is_strict), expression_(expression) { }
DECLARE_NODE_TYPE(CompareToNull)
@@ -1559,18 +1587,20 @@
class Conditional: public Expression {
public:
- Conditional(Expression* condition,
+ Conditional(Isolate* isolate,
+ Expression* condition,
Expression* then_expression,
Expression* else_expression,
int then_expression_position,
int else_expression_position)
- : condition_(condition),
+ : Expression(isolate),
+ condition_(condition),
then_expression_(then_expression),
else_expression_(else_expression),
then_expression_position_(then_expression_position),
else_expression_position_(else_expression_position),
- then_id_(GetNextId()),
- else_id_(GetNextId()) {
+ then_id_(GetNextId(isolate)),
+ else_id_(GetNextId(isolate)) {
}
DECLARE_NODE_TYPE(Conditional)
@@ -1600,7 +1630,11 @@
class Assignment: public Expression {
public:
- Assignment(Token::Value op, Expression* target, Expression* value, int pos);
+ Assignment(Isolate* isolate,
+ Token::Value op,
+ Expression* target,
+ Expression* value,
+ int pos);
DECLARE_NODE_TYPE(Assignment)
@@ -1660,8 +1694,8 @@
class Throw: public Expression {
public:
- Throw(Expression* exception, int pos)
- : exception_(exception), pos_(pos) {}
+ Throw(Isolate* isolate, Expression* exception, int pos)
+ : Expression(isolate), exception_(exception), pos_(pos) {}
DECLARE_NODE_TYPE(Throw)
@@ -1677,7 +1711,14 @@
class FunctionLiteral: public Expression {
public:
- FunctionLiteral(Handle<String> name,
+ enum Type {
+ ANONYMOUS_EXPRESSION,
+ NAMED_EXPRESSION,
+ DECLARATION
+ };
+
+ FunctionLiteral(Isolate* isolate,
+ Handle<String> name,
Scope* scope,
ZoneList<Statement*>* body,
int materialized_literal_count,
@@ -1687,8 +1728,10 @@
int num_parameters,
int start_position,
int end_position,
- bool is_expression)
- : name_(name),
+ Type type,
+ bool has_duplicate_parameters)
+ : Expression(isolate),
+ name_(name),
scope_(scope),
body_(body),
materialized_literal_count_(materialized_literal_count),
@@ -1699,10 +1742,13 @@
num_parameters_(num_parameters),
start_position_(start_position),
end_position_(end_position),
- is_expression_(is_expression),
function_token_position_(RelocInfo::kNoPosition),
inferred_name_(HEAP->empty_string()),
- pretenure_(false) { }
+ is_expression_(type != DECLARATION),
+ is_anonymous_(type == ANONYMOUS_EXPRESSION),
+ pretenure_(false),
+ has_duplicate_parameters_(has_duplicate_parameters) {
+ }
DECLARE_NODE_TYPE(FunctionLiteral)
@@ -1714,6 +1760,7 @@
int start_position() const { return start_position_; }
int end_position() const { return end_position_; }
bool is_expression() const { return is_expression_; }
+ bool is_anonymous() const { return is_anonymous_; }
bool strict_mode() const;
int materialized_literal_count() { return materialized_literal_count_; }
@@ -1742,6 +1789,8 @@
void set_pretenure(bool value) { pretenure_ = value; }
virtual bool IsInlineable() const;
+ bool has_duplicate_parameters() { return has_duplicate_parameters_; }
+
private:
Handle<String> name_;
Scope* scope_;
@@ -1753,18 +1802,21 @@
int num_parameters_;
int start_position_;
int end_position_;
- bool is_expression_;
int function_token_position_;
Handle<String> inferred_name_;
+ bool is_expression_;
+ bool is_anonymous_;
bool pretenure_;
+ bool has_duplicate_parameters_;
};
class SharedFunctionInfoLiteral: public Expression {
public:
- explicit SharedFunctionInfoLiteral(
+ SharedFunctionInfoLiteral(
+ Isolate* isolate,
Handle<SharedFunctionInfo> shared_function_info)
- : shared_function_info_(shared_function_info) { }
+ : Expression(isolate), shared_function_info_(shared_function_info) { }
DECLARE_NODE_TYPE(SharedFunctionInfoLiteral)
@@ -1780,6 +1832,7 @@
class ThisFunction: public Expression {
public:
+ explicit ThisFunction(Isolate* isolate) : Expression(isolate) {}
DECLARE_NODE_TYPE(ThisFunction)
virtual bool IsInlineable() const;
};
@@ -1950,6 +2003,7 @@
uc16 standard_type() { return set_.standard_set_type(); }
ZoneList<CharacterRange>* ranges() { return set_.ranges(); }
bool is_negated() { return is_negated_; }
+
private:
CharacterSet set_;
bool is_negated_;
@@ -2034,6 +2088,7 @@
bool is_non_greedy() { return type_ == NON_GREEDY; }
bool is_greedy() { return type_ == GREEDY; }
RegExpTree* body() { return body_; }
+
private:
RegExpTree* body_;
int min_;
@@ -2066,6 +2121,7 @@
int index() { return index_; }
static int StartRegister(int index) { return index * 2; }
static int EndRegister(int index) { return index * 2 + 1; }
+
private:
RegExpTree* body_;
int index_;
@@ -2096,6 +2152,7 @@
bool is_positive() { return is_positive_; }
int capture_count() { return capture_count_; }
int capture_from() { return capture_from_; }
+
private:
RegExpTree* body_;
bool is_positive_;
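
The ast.h hunks above all follow one pattern: each Expression subclass now takes the Isolate explicitly and forwards it to the Expression base constructor, and id allocation goes through GetNextId(isolate) instead of an implicitly reached current isolate. A minimal standalone sketch of that pattern (Isolate, AstNodeBase, and NextId are simplified stand-ins here, not the real V8 types):

    #include <cassert>

    struct Isolate {
      int ast_node_id_counter = 0;  // per-isolate id source
    };

    class AstNodeBase {
     protected:
      // Ids come from the isolate handed to the constructor, so no
      // thread-local Isolate::Current() lookup is needed.
      static int NextId(Isolate* isolate) {
        return isolate->ast_node_id_counter++;
      }
    };

    class ExpressionBase : public AstNodeBase {
     public:
      explicit ExpressionBase(Isolate* isolate) : id_(NextId(isolate)) {}
      int id() const { return id_; }
     private:
      int id_;
    };

    int main() {
      Isolate isolate;
      ExpressionBase a(&isolate), b(&isolate);
      assert(a.id() == 0 && b.id() == 1);  // ids are per-isolate, in order
      return 0;
    }
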
diff --git a/src/bignum.cc b/src/bignum.cc
index a973974..9436322 100644
--- a/src/bignum.cc
+++ b/src/bignum.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,10 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
-
-#include "bignum.h"
+#include "../include/v8stdint.h"
#include "utils.h"
+#include "bignum.h"
namespace v8 {
namespace internal {
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index d32ac80..5375cde 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -47,8 +47,9 @@
NativesExternalStringResource::NativesExternalStringResource(
Bootstrapper* bootstrapper,
- const char* source)
- : data_(source), length_(StrLength(source)) {
+ const char* source,
+ size_t length)
+ : data_(source), length_(length) {
if (bootstrapper->delete_these_non_arrays_on_tear_down_ == NULL) {
bootstrapper->delete_these_non_arrays_on_tear_down_ = new List<char*>(2);
}
@@ -75,16 +76,18 @@
if (heap->natives_source_cache()->get(index)->IsUndefined()) {
if (!Snapshot::IsEnabled() || FLAG_new_snapshot) {
// We can use external strings for the natives.
+ Vector<const char> source = Natives::GetRawScriptSource(index);
NativesExternalStringResource* resource =
new NativesExternalStringResource(this,
- Natives::GetScriptSource(index).start());
+ source.start(),
+ source.length());
Handle<String> source_code =
factory->NewExternalStringFromAscii(resource);
heap->natives_source_cache()->set(index, *source_code);
} else {
// Old snapshot code can't cope with external strings at all.
Handle<String> source_code =
- factory->NewStringFromAscii(Natives::GetScriptSource(index));
+ factory->NewStringFromAscii(Natives::GetRawScriptSource(index));
heap->natives_source_cache()->set(index, *source_code);
}
}
@@ -811,7 +814,6 @@
// --- G l o b a l C o n t e x t ---
// Use the empty function as closure (no scope info).
global_context()->set_closure(*empty_function);
- global_context()->set_fcontext(*global_context());
global_context()->set_previous(NULL);
// Set extension and global object.
global_context()->set_extension(*inner_global);
@@ -1052,6 +1054,24 @@
#endif
}
+ { // --- aliased_arguments_boilerplate_
+ Handle<Map> old_map(global_context()->arguments_boilerplate()->map());
+ Handle<Map> new_map = factory->CopyMapDropTransitions(old_map);
+ new_map->set_pre_allocated_property_fields(2);
+ Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
+ new_map->set_elements_kind(JSObject::NON_STRICT_ARGUMENTS_ELEMENTS);
+ // Set up a well-formed parameter map to make assertions happy.
+ Handle<FixedArray> elements = factory->NewFixedArray(2);
+ elements->set_map(heap->non_strict_arguments_elements_map());
+ Handle<FixedArray> array;
+ array = factory->NewFixedArray(0);
+ elements->set(0, *array);
+ array = factory->NewFixedArray(0);
+ elements->set(1, *array);
+ result->set_elements(*elements);
+ global_context()->set_aliased_arguments_boilerplate(*result);
+ }
+
{ // --- strict mode arguments boilerplate
const PropertyAttributes attributes =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -1182,7 +1202,8 @@
Vector<const char> name = ExperimentalNatives::GetScriptName(index);
Factory* factory = isolate->factory();
Handle<String> source_code =
- factory->NewStringFromAscii(ExperimentalNatives::GetScriptSource(index));
+ factory->NewStringFromAscii(
+ ExperimentalNatives::GetRawScriptSource(index));
return CompileNative(name, source_code);
}
@@ -1288,7 +1309,9 @@
void Genesis::InstallExperimentalNativeFunctions() {
if (FLAG_harmony_proxies) {
+ INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
+ INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
}
}
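
The aliased_arguments_boilerplate block above pre-builds an arguments object whose elements use the non-strict-arguments map, so indexed loads first consult a parameter map (entries that alias formal parameters living in the context) and fall back to a plain backing store. A simplified model of that aliasing lookup; the slot layout here is illustrative, not the exact V8 parameter-map layout:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct AliasedArguments {
      std::vector<double> context_slots;  // where aliased parameters live
      std::vector<int> parameter_map;     // context slot index, or -1
      std::vector<double> backing_store;  // unaliased elements

      double Get(std::size_t i) const {
        if (i < parameter_map.size() && parameter_map[i] >= 0) {
          return context_slots[parameter_map[i]];  // aliases a parameter
        }
        return backing_store[i];  // plain element
      }
    };

    int main() {
      AliasedArguments args;
      args.context_slots = {42.0};
      args.parameter_map = {0, -1};     // arguments[0] aliases slot 0
      args.backing_store = {0.0, 7.0};  // arguments[1] is a plain element
      std::printf("%g %g\n", args.Get(0), args.Get(1));  // 42 7
      return 0;
    }
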
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 2e05452..abf61b9 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -168,8 +168,9 @@
class NativesExternalStringResource
: public v8::String::ExternalAsciiStringResource {
public:
- explicit NativesExternalStringResource(Bootstrapper* bootstrapper,
- const char* source);
+ NativesExternalStringResource(Bootstrapper* bootstrapper,
+ const char* source,
+ size_t length);
const char* data() const {
return data_;
diff --git a/src/builtins.cc b/src/builtins.cc
index c34b074..d403a95 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1202,10 +1202,10 @@
ASSERT(!CalledAsConstructor(isolate));
Heap* heap = isolate->heap();
- Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> receiver = args.receiver();
// Get the object called.
- JSObject* obj = JSObject::cast(*args.receiver());
+ JSObject* obj = JSObject::cast(*receiver);
// Get the invocation callback from the function descriptor that was
// used to create the called object.
@@ -1351,6 +1351,9 @@
KeyedLoadIC::GenerateIndexedInterceptor(masm);
}
+static void Generate_KeyedLoadIC_NonStrictArguments(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateNonStrictArguments(masm);
+}
static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
StoreIC::GenerateInitialize(masm);
@@ -1441,6 +1444,9 @@
KeyedStoreIC::GenerateInitialize(masm);
}
+static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateNonStrictArguments(masm);
+}
#ifdef ENABLE_DEBUGGER_SUPPORT
static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
diff --git a/src/builtins.h b/src/builtins.h
index eca998c..f9a5a13 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -132,6 +132,8 @@
Code::kNoExtraICState) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
+ V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -163,6 +165,8 @@
kStrictMode) \
V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
kStrictMode) \
+ V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MEGAMORPHIC, \
+ Code::kNoExtraICState) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED, \
diff --git a/src/cached-powers.cc b/src/cached-powers.cc
index 43dbc78..30a67a6 100644
--- a/src/cached-powers.cc
+++ b/src/cached-powers.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -26,10 +26,12 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdarg.h>
+#include <math.h>
#include <limits.h>
-#include "v8.h"
-
+#include "../include/v8stdint.h"
+#include "globals.h"
+#include "checks.h"
#include "cached-powers.h"
namespace v8 {
@@ -147,7 +149,9 @@
DiyFp* power,
int* decimal_exponent) {
int kQ = DiyFp::kSignificandSize;
- double k = ceiling((min_exponent + kQ - 1) * kD_1_LOG2_10);
+ // Some platforms return incorrect sign on 0 result. We can ignore that here,
+ // which means we can avoid depending on platform.h.
+ double k = ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);
int foo = kCachedPowersOffset;
int index =
(foo + static_cast<int>(k) - 1) / kDecimalExponentDistance + 1;
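
Replacing the platform-specific ceiling wrapper with plain ceil is what lets cached-powers.cc drop platform.h; a wrong-signed zero from ceil is harmless because k immediately feeds an integer index. A worked instance of the index computation in the hunk (the constants 348 and 8 are assumptions borrowed from the double-conversion library this code derives from, not quoted from the diff):

    #include <math.h>
    #include <stdio.h>

    int main() {
      const int kQ = 64;  // DiyFp::kSignificandSize
      const double kD_1_LOG2_10 = 0.30102999566398114;  // 1 / lg(10)
      const int kCachedPowersOffset = 348;     // assumed value
      const int kDecimalExponentDistance = 8;  // assumed value

      int min_exponent = -60;  // example input
      double k = ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);
      int index = (kCachedPowersOffset + static_cast<int>(k) - 1) /
                      kDecimalExponentDistance + 1;
      printf("k = %.0f, index = %d\n", k, index);  // k = 1, index = 44
      return 0;
    }
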
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index d12def8..1d1128f 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -61,21 +61,29 @@
}
+SmartPointer<const char> CodeStub::GetName() {
+ char buffer[100];
+ NoAllocationStringAllocator allocator(buffer,
+ static_cast<unsigned>(sizeof(buffer)));
+ StringStream stream(&allocator);
+ PrintName(&stream);
+ return stream.ToCString();
+}
+
+
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
code->set_major_key(MajorKey());
Isolate* isolate = masm->isolate();
- PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
- GDBJIT(AddCode(GDBJITInterface::STUB, GetName(), code));
+ SmartPointer<const char> name = GetName();
+ PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
+ GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
Counters* counters = isolate->counters();
counters->total_stubs_code_size()->Increment(code->instruction_size());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
-#ifdef DEBUG
- Print();
-#endif
- code->Disassemble(GetName());
+ code->Disassemble(*name);
PrintF("\n");
}
#endif
@@ -170,7 +178,7 @@
const char* CodeStub::MajorName(CodeStub::Major major_key,
bool allow_unknown_keys) {
switch (major_key) {
-#define DEF_CASE(name) case name: return #name;
+#define DEF_CASE(name) case name: return #name "Stub";
CODE_STUB_LIST(DEF_CASE)
#undef DEF_CASE
default:
@@ -213,13 +221,7 @@
}
-const char* InstanceofStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
+void InstanceofStub::PrintName(StringStream* stream) {
const char* args = "";
if (HasArgsInRegisters()) {
args = "_REGS";
@@ -235,33 +237,96 @@
return_true_false_object = "_TRUEFALSE";
}
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "InstanceofStub%s%s%s",
- args,
- inline_check,
- return_true_false_object);
- return name_;
+ stream->Add("InstanceofStub%s%s%s",
+ args,
+ inline_check,
+ return_true_false_object);
}
-void KeyedLoadFastElementStub::Generate(MacroAssembler* masm) {
- KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
+void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
+ switch (elements_kind_) {
+ case JSObject::FAST_ELEMENTS:
+ KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
+ break;
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
+ break;
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
+ break;
+ case JSObject::DICTIONARY_ELEMENTS:
+ KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
+ break;
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
}
-void KeyedStoreFastElementStub::Generate(MacroAssembler* masm) {
- KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
+void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
+ switch (elements_kind_) {
+ case JSObject::FAST_ELEMENTS:
+ KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
+ break;
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
+ is_js_array_);
+ break;
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_);
+ break;
+ case JSObject::DICTIONARY_ELEMENTS:
+ KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm);
+ break;
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
}
-void KeyedLoadExternalArrayStub::Generate(MacroAssembler* masm) {
- KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, array_type_);
+void ArgumentsAccessStub::PrintName(StringStream* stream) {
+ const char* type_name = NULL; // Make g++ happy.
+ switch (type_) {
+ case READ_ELEMENT: type_name = "ReadElement"; break;
+ case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break;
+ case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break;
+ case NEW_STRICT: type_name = "NewStrict"; break;
+ }
+ stream->Add("ArgumentsAccessStub_%s", type_name);
}
-void KeyedStoreExternalArrayStub::Generate(MacroAssembler* masm) {
- KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, array_type_);
+void CallFunctionStub::PrintName(StringStream* stream) {
+ const char* in_loop_name = NULL; // Make g++ happy.
+ switch (in_loop_) {
+ case NOT_IN_LOOP: in_loop_name = ""; break;
+ case IN_LOOP: in_loop_name = "_InLoop"; break;
+ }
+ const char* flags_name = NULL; // Make g++ happy.
+ switch (flags_) {
+ case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break;
+ case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break;
+ }
+ stream->Add("CallFunctionStub_Args%d%s%s", argc_, in_loop_name, flags_name);
}
-
} } // namespace v8::internal
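
The GetName/PrintName rework above replaces per-stub heap-allocated name_ buffers with a StringStream that renders into a stack buffer on demand, and the PrintName overrides compose names such as CallFunctionStub_Args2_InLoop_Implicit. A sketch of the composition scheme, with snprintf standing in for V8's StringStream:

    #include <cstddef>
    #include <cstdio>

    enum InLoopFlag { NOT_IN_LOOP, IN_LOOP };
    enum CallFunctionFlags { NO_CALL_FUNCTION_FLAGS,
                             RECEIVER_MIGHT_BE_IMPLICIT };

    void PrintCallFunctionStubName(char* buf, std::size_t n, int argc,
                                   InLoopFlag in_loop,
                                   CallFunctionFlags flags) {
      const char* in_loop_name = (in_loop == IN_LOOP) ? "_InLoop" : "";
      const char* flags_name =
          (flags == RECEIVER_MIGHT_BE_IMPLICIT) ? "_Implicit" : "";
      std::snprintf(buf, n, "CallFunctionStub_Args%d%s%s",
                    argc, in_loop_name, flags_name);
    }

    int main() {
      char name[100];
      PrintCallFunctionStubName(name, sizeof(name), 2, IN_LOOP,
                                RECEIVER_MIGHT_BE_IMPLICIT);
      std::printf("%s\n", name);  // CallFunctionStub_Args2_InLoop_Implicit
      return 0;
    }
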
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 7ab0b7c..17c245c 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -34,8 +34,7 @@
namespace v8 {
namespace internal {
-// List of code stubs used on all platforms. The order in this list is important
-// as only the stubs up to and including Instanceof allows nested stub calls.
+// List of code stubs used on all platforms.
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
V(CallFunction) \
V(UnaryOp) \
@@ -43,12 +42,18 @@
V(StringAdd) \
V(SubString) \
V(StringCompare) \
- V(SmiOp) \
V(Compare) \
V(CompareIC) \
V(MathPow) \
V(TranscendentalCache) \
V(Instanceof) \
+ /* All stubs above this line only exist in a few versions, which are */ \
+ /* generated ahead of time. Therefore compiling a call to one of */ \
+ /* them can't cause a new stub to be compiled, so compiling a call to */ \
+ /* them is GC safe. The ones below this line exist in many variants */ \
+ /* so code compiling a call to one can cause a GC. This means they */ \
+ /* can't be called from other stubs, since stub generation code is */ \
+ /* not GC safe. */ \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
V(StackCheck) \
@@ -65,10 +70,8 @@
V(NumberToString) \
V(CEntry) \
V(JSEntry) \
- V(KeyedLoadFastElement) \
- V(KeyedStoreFastElement) \
- V(KeyedLoadExternalArray) \
- V(KeyedStoreExternalArray) \
+ V(KeyedLoadElement) \
+ V(KeyedStoreElement) \
V(DebuggerStatement) \
V(StringDictionaryNegativeLookup)
@@ -178,16 +181,15 @@
}
// Returns a name for logging/debugging purposes.
- virtual const char* GetName() { return MajorName(MajorKey(), false); }
+ SmartPointer<const char> GetName();
+ virtual void PrintName(StringStream* stream) {
+ stream->Add("%s", MajorName(MajorKey(), false));
+ }
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
virtual bool NeedsImmovableCode() { return false; }
- #ifdef DEBUG
- virtual void Print() { PrintF("%s\n", GetName()); }
-#endif
-
// Computes the key based on major and minor.
uint32_t GetKey() {
ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
@@ -195,6 +197,7 @@
MajorKeyBits::encode(MajorKey());
}
+ // See comment above, where Instanceof is defined.
bool AllowsStubCalls() { return MajorKey() <= Instanceof; }
class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
@@ -268,9 +271,6 @@
void Generate(MacroAssembler* masm);
private:
-
- const char* GetName() { return "StackCheckStub"; }
-
Major MajorKey() { return StackCheck; }
int MinorKey() { return 0; }
};
@@ -285,7 +285,6 @@
private:
Major MajorKey() { return ToNumber; }
int MinorKey() { return 0; }
- const char* GetName() { return "ToNumberStub"; }
};
@@ -297,7 +296,6 @@
void Generate(MacroAssembler* masm);
private:
- const char* GetName() { return "FastNewClosureStub"; }
Major MajorKey() { return FastNewClosure; }
int MinorKey() { return strict_mode_; }
@@ -318,7 +316,6 @@
private:
int slots_;
- const char* GetName() { return "FastNewContextStub"; }
Major MajorKey() { return FastNewContext; }
int MinorKey() { return slots_; }
};
@@ -347,7 +344,6 @@
Mode mode_;
int length_;
- const char* GetName() { return "FastCloneShallowArrayStub"; }
Major MajorKey() { return FastCloneShallowArray; }
int MinorKey() {
ASSERT(mode_ == 0 || mode_ == 1);
@@ -365,7 +361,7 @@
kReturnTrueFalseObject = 1 << 2
};
- explicit InstanceofStub(Flags flags) : flags_(flags), name_(NULL) { }
+ explicit InstanceofStub(Flags flags) : flags_(flags) { }
static Register left();
static Register right();
@@ -388,10 +384,9 @@
return (flags_ & kReturnTrueFalseObject) != 0;
}
- const char* GetName();
+ virtual void PrintName(StringStream* stream);
Flags flags_;
- char* name_;
};
@@ -403,8 +398,6 @@
private:
virtual CodeStub::Major MajorKey() { return MathPow; }
virtual int MinorKey() { return 0; }
-
- const char* GetName() { return "MathPowStub"; }
};
@@ -471,8 +464,7 @@
include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
lhs_(lhs),
- rhs_(rhs),
- name_(NULL) { }
+ rhs_(rhs) { }
CompareStub(Condition cc,
bool strict,
@@ -483,8 +475,7 @@
include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
lhs_(no_reg),
- rhs_(no_reg),
- name_(NULL) { }
+ rhs_(no_reg) { }
void Generate(MacroAssembler* masm);
@@ -538,26 +529,7 @@
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
- char* name_;
- const char* GetName();
-#ifdef DEBUG
- void Print() {
- PrintF("CompareStub (minor %d) (cc %d), (strict %s), "
- "(never_nan_nan %s), (smi_compare %s) (number_compare %s) ",
- MinorKey(),
- static_cast<int>(cc_),
- strict_ ? "true" : "false",
- never_nan_nan_ ? "true" : "false",
- include_smi_compare_ ? "inluded" : "not included",
- include_number_compare_ ? "included" : "not included");
-
- if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) {
- PrintF("(lhs r%d), (rhs r%d)\n", lhs_.code(), rhs_.code());
- } else {
- PrintF("\n");
- }
- }
-#endif
+ virtual void PrintName(StringStream* stream);
};
@@ -588,8 +560,6 @@
int MinorKey();
bool NeedsImmovableCode();
-
- const char* GetName() { return "CEntryStub"; }
};
@@ -605,8 +575,6 @@
private:
Major MajorKey() { return JSEntry; }
int MinorKey() { return 0; }
-
- const char* GetName() { return "JSEntryStub"; }
};
@@ -619,7 +587,9 @@
private:
int MinorKey() { return 1; }
- const char* GetName() { return "JSConstructEntryStub"; }
+ virtual void PrintName(StringStream* stream) {
+ stream->Add("JSConstructEntryStub");
+ }
};
@@ -627,7 +597,8 @@
public:
enum Type {
READ_ELEMENT,
- NEW_NON_STRICT,
+ NEW_NON_STRICT_FAST,
+ NEW_NON_STRICT_SLOW,
NEW_STRICT
};
@@ -641,28 +612,11 @@
void Generate(MacroAssembler* masm);
void GenerateReadElement(MacroAssembler* masm);
- void GenerateNewObject(MacroAssembler* masm);
+ void GenerateNewStrict(MacroAssembler* masm);
+ void GenerateNewNonStrictFast(MacroAssembler* masm);
+ void GenerateNewNonStrictSlow(MacroAssembler* masm);
- int GetArgumentsBoilerplateIndex() const {
- return (type_ == NEW_STRICT)
- ? Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX
- : Context::ARGUMENTS_BOILERPLATE_INDEX;
- }
-
- int GetArgumentsObjectSize() const {
- if (type_ == NEW_STRICT)
- return Heap::kArgumentsObjectSizeStrict;
- else
- return Heap::kArgumentsObjectSize;
- }
-
- const char* GetName() { return "ArgumentsAccessStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("ArgumentsAccessStub (type %d)\n", type_);
- }
-#endif
+ virtual void PrintName(StringStream* stream);
};
@@ -675,14 +629,6 @@
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "RegExpExecStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("RegExpExecStub\n");
- }
-#endif
};
@@ -695,14 +641,6 @@
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "RegExpConstructResultStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("RegExpConstructResultStub\n");
- }
-#endif
};
@@ -722,14 +660,7 @@
InLoopFlag in_loop_;
CallFunctionFlags flags_;
-#ifdef DEBUG
- void Print() {
- PrintF("CallFunctionStub (args %d, in_loop %d, flags %d)\n",
- argc_,
- static_cast<int>(in_loop_),
- static_cast<int>(flags_));
- }
-#endif
+ virtual void PrintName(StringStream* stream);
// Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
class InLoopBits: public BitField<InLoopFlag, 0, 1> {};
@@ -926,86 +857,59 @@
DISALLOW_COPY_AND_ASSIGN(AllowStubCallsScope);
};
-#ifdef DEBUG
-#define DECLARE_ARRAY_STUB_PRINT(name) void Print() { PrintF(#name); }
-#else
-#define DECLARE_ARRAY_STUB_PRINT(name)
-#endif
-
-class KeyedLoadFastElementStub : public CodeStub {
+class KeyedLoadElementStub : public CodeStub {
public:
- explicit KeyedLoadFastElementStub() {
- }
+ explicit KeyedLoadElementStub(JSObject::ElementsKind elements_kind)
+ : elements_kind_(elements_kind)
+ { }
- Major MajorKey() { return KeyedLoadFastElement; }
- int MinorKey() { return 0; }
+ Major MajorKey() { return KeyedLoadElement; }
+ int MinorKey() { return elements_kind_; }
void Generate(MacroAssembler* masm);
- const char* GetName() { return "KeyedLoadFastElementStub"; }
+ private:
+ JSObject::ElementsKind elements_kind_;
- DECLARE_ARRAY_STUB_PRINT(KeyedLoadFastElementStub)
+ DISALLOW_COPY_AND_ASSIGN(KeyedLoadElementStub);
};
-class KeyedStoreFastElementStub : public CodeStub {
+class KeyedStoreElementStub : public CodeStub {
public:
- explicit KeyedStoreFastElementStub(bool is_js_array)
- : is_js_array_(is_js_array) { }
+ KeyedStoreElementStub(bool is_js_array,
+ JSObject::ElementsKind elements_kind)
+ : is_js_array_(is_js_array),
+ elements_kind_(elements_kind) { }
- Major MajorKey() { return KeyedStoreFastElement; }
- int MinorKey() { return is_js_array_ ? 1 : 0; }
+ Major MajorKey() { return KeyedStoreElement; }
+ int MinorKey() {
+ return (is_js_array_ ? 0 : JSObject::kElementsKindCount) + elements_kind_;
+ }
void Generate(MacroAssembler* masm);
- const char* GetName() { return "KeyedStoreFastElementStub"; }
-
- DECLARE_ARRAY_STUB_PRINT(KeyedStoreFastElementStub)
-
private:
bool is_js_array_;
+ JSObject::ElementsKind elements_kind_;
+
+ DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub);
};
-class KeyedLoadExternalArrayStub : public CodeStub {
+class ToBooleanStub: public CodeStub {
public:
- explicit KeyedLoadExternalArrayStub(ExternalArrayType array_type)
- : array_type_(array_type) { }
-
- Major MajorKey() { return KeyedLoadExternalArray; }
- int MinorKey() { return array_type_; }
+ explicit ToBooleanStub(Register tos) : tos_(tos) { }
void Generate(MacroAssembler* masm);
- const char* GetName() { return "KeyedLoadExternalArrayStub"; }
-
- DECLARE_ARRAY_STUB_PRINT(KeyedLoadExternalArrayStub)
-
- protected:
- ExternalArrayType array_type_;
+ private:
+ Register tos_;
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return tos_.code(); }
};
-
-class KeyedStoreExternalArrayStub : public CodeStub {
- public:
- explicit KeyedStoreExternalArrayStub(ExternalArrayType array_type)
- : array_type_(array_type) { }
-
- Major MajorKey() { return KeyedStoreExternalArray; }
- int MinorKey() { return array_type_; }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "KeyedStoreExternalArrayStub"; }
-
- DECLARE_ARRAY_STUB_PRINT(KeyedStoreExternalArrayStub)
-
- protected:
- ExternalArrayType array_type_;
-};
-
-
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_
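
KeyedStoreElementStub's MinorKey packs both stub dimensions into one integer: is_js_array_ selects one of two bands of kElementsKindCount values and elements_kind_ picks the slot inside the band, so every (array-ness, kind) combination yields a distinct stub key. A worked check (kElementsKindCount = 12 is assumed here purely for illustration):

    #include <cassert>

    const int kElementsKindCount = 12;  // assumed; real value lives in JSObject

    int StoreMinorKey(bool is_js_array, int elements_kind) {
      return (is_js_array ? 0 : kElementsKindCount) + elements_kind;
    }

    int main() {
      // JS-array stubs occupy [0, 12), non-array stubs [12, 24).
      assert(StoreMinorKey(true, 3) == 3);
      assert(StoreMinorKey(false, 3) == 15);
      assert(StoreMinorKey(true, 3) != StoreMinorKey(false, 3));
      return 0;
    }
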
diff --git a/src/codegen.cc b/src/codegen.cc
index ad3cf1b..fb723a3 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -169,8 +169,6 @@
#endif // ENABLE_DISASSEMBLER
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
static Vector<const char> kRegexp = CStrVector("regexp");
bool CodeGenerator::ShouldGenerateLog(Expression* type) {
@@ -187,8 +185,6 @@
return false;
}
-#endif
-
bool CodeGenerator::RecordPositions(MacroAssembler* masm,
int pos,
@@ -209,9 +205,14 @@
case READ_ELEMENT:
GenerateReadElement(masm);
break;
- case NEW_NON_STRICT:
+ case NEW_NON_STRICT_FAST:
+ GenerateNewNonStrictFast(masm);
+ break;
+ case NEW_NON_STRICT_SLOW:
+ GenerateNewNonStrictSlow(masm);
+ break;
case NEW_STRICT:
- GenerateNewObject(masm);
+ GenerateNewStrict(masm);
break;
}
}
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 5bd8bf3..28e833a 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -52,8 +52,7 @@
eval_global_(isolate, kEvalGlobalGenerations),
eval_contextual_(isolate, kEvalContextualGenerations),
reg_exp_(isolate, kRegExpGenerations),
- enabled_(true),
- eager_optimizing_set_(NULL) {
+ enabled_(true) {
CompilationSubCache* subcaches[kSubCacheCount] =
{&script_, &eval_global_, &eval_contextual_, &reg_exp_};
for (int i = 0; i < kSubCacheCount; ++i) {
@@ -62,10 +61,7 @@
}
-CompilationCache::~CompilationCache() {
- delete eager_optimizing_set_;
- eager_optimizing_set_ = NULL;
-}
+CompilationCache::~CompilationCache() {}
static Handle<CompilationCacheTable> AllocateTable(Isolate* isolate, int size) {
@@ -457,47 +453,6 @@
}
-static bool SourceHashCompare(void* key1, void* key2) {
- return key1 == key2;
-}
-
-
-HashMap* CompilationCache::EagerOptimizingSet() {
- if (eager_optimizing_set_ == NULL) {
- eager_optimizing_set_ = new HashMap(&SourceHashCompare);
- }
- return eager_optimizing_set_;
-}
-
-
-bool CompilationCache::ShouldOptimizeEagerly(Handle<JSFunction> function) {
- if (FLAG_opt_eagerly) return true;
- uint32_t hash = function->SourceHash();
- void* key = reinterpret_cast<void*>(hash);
- return EagerOptimizingSet()->Lookup(key, hash, false) != NULL;
-}
-
-
-void CompilationCache::MarkForEagerOptimizing(Handle<JSFunction> function) {
- uint32_t hash = function->SourceHash();
- void* key = reinterpret_cast<void*>(hash);
- EagerOptimizingSet()->Lookup(key, hash, true);
-}
-
-
-void CompilationCache::MarkForLazyOptimizing(Handle<JSFunction> function) {
- uint32_t hash = function->SourceHash();
- void* key = reinterpret_cast<void*>(hash);
- EagerOptimizingSet()->Remove(key, hash);
-}
-
-
-void CompilationCache::ResetEagerOptimizingData() {
- HashMap* set = EagerOptimizingSet();
- if (set->occupancy() > 0) set->Clear();
-}
-
-
void CompilationCache::Clear() {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches_[i]->Clear();
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 887d4e8..1fcf753 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -223,14 +223,6 @@
JSRegExp::Flags flags,
Handle<FixedArray> data);
- // Support for eager optimization tracking.
- bool ShouldOptimizeEagerly(Handle<JSFunction> function);
- void MarkForEagerOptimizing(Handle<JSFunction> function);
- void MarkForLazyOptimizing(Handle<JSFunction> function);
-
- // Reset the eager optimization tracking data.
- void ResetEagerOptimizingData();
-
// Clear the cache - also used to initialize the cache at startup.
void Clear();
@@ -274,8 +266,6 @@
// Current enable state of the compilation cache.
bool enabled_;
- HashMap* eager_optimizing_set_;
-
friend class Isolate;
DISALLOW_COPY_AND_ASSIGN(CompilationCache);
diff --git a/src/compiler.cc b/src/compiler.cc
index d82bcd0..abff8b6 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -109,8 +109,6 @@
void CompilationInfo::AbortOptimization() {
Handle<Code> code(shared_info()->code());
SetCode(code);
- Isolate* isolate = code->GetIsolate();
- isolate->compilation_cache()->MarkForLazyOptimizing(closure());
}
@@ -340,7 +338,7 @@
static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
Isolate* isolate = info->isolate();
- CompilationZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+ ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
PostponeInterruptsScope postpone(isolate);
ASSERT(!isolate->global_context().is_null());
@@ -413,7 +411,8 @@
String::cast(script->name())));
GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
script,
- info->code()));
+ info->code(),
+ info));
} else {
PROFILE(isolate, CodeCreateEvent(
info->is_eval()
@@ -422,7 +421,7 @@
*info->code(),
*result,
isolate->heap()->empty_string()));
- GDBJIT(AddCode(Handle<String>(), script, info->code()));
+ GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
}
// Hint to the runtime system used when allocating space for initial
@@ -573,7 +572,7 @@
bool Compiler::CompileLazy(CompilationInfo* info) {
Isolate* isolate = info->isolate();
- CompilationZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+ ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
// The VM is in the COMPILER state until exiting this function.
VMState state(isolate, COMPILER);
@@ -614,6 +613,7 @@
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
if (info->IsOptimizing()) {
+ ASSERT(shared->scope_info() != SerializedScopeInfo::Empty());
function->ReplaceCode(*code);
} else {
// Update the shared function info with the compiled code and the
@@ -655,9 +655,6 @@
CompilationInfo optimized(function);
optimized.SetOptimizing(AstNode::kNoNumber);
return CompileLazy(&optimized);
- } else if (isolate->compilation_cache()->ShouldOptimizeEagerly(
- function)) {
- isolate->runtime_profiler()->OptimizeSoon(*function);
}
}
}
@@ -736,6 +733,7 @@
function_info->set_start_position(lit->start_position());
function_info->set_end_position(lit->end_position());
function_info->set_is_expression(lit->is_expression());
+ function_info->set_is_anonymous(lit->is_anonymous());
function_info->set_is_toplevel(is_toplevel);
function_info->set_inferred_name(*lit->inferred_name());
function_info->SetThisPropertyAssignmentsInfo(
@@ -743,6 +741,8 @@
*lit->this_property_assignments());
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
function_info->set_strict_mode(lit->strict_mode());
+ function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
+ function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
}
@@ -781,7 +781,8 @@
GDBJIT(AddCode(Handle<String>(shared->DebugName()),
Handle<Script>(info->script()),
- Handle<Code>(info->code())));
+ Handle<Code>(info->code()),
+ info));
}
} } // namespace v8::internal
diff --git a/src/compiler.h b/src/compiler.h
index ea74d60..181446b 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -167,6 +167,7 @@
void Initialize(Mode mode) {
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
+ ASSERT(!script_.is_null());
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
}
@@ -195,6 +196,7 @@
// Is this a function from our natives.
class IsNative: public BitField<bool, 6, 1> {};
+
unsigned flags_;
// Fields filled in by the compilation pipeline.
@@ -292,24 +294,6 @@
};
-// During compilation we need a global list of handles to constants
-// for frame elements. When the zone gets deleted, we make sure to
-// clear this list of handles as well.
-class CompilationZoneScope : public ZoneScope {
- public:
- CompilationZoneScope(Isolate* isolate, ZoneScopeMode mode)
- : ZoneScope(isolate, mode) {}
-
- virtual ~CompilationZoneScope() {
- if (ShouldDeleteOnExit()) {
- Isolate* isolate = Isolate::Current();
- isolate->frame_element_constant_list()->Clear();
- isolate->result_constant_list()->Clear();
- }
- }
-};
-
-
} } // namespace v8::internal
#endif // V8_COMPILER_H_
diff --git a/src/contexts.cc b/src/contexts.cc
index f6031f1..d066d34 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -34,6 +34,16 @@
namespace v8 {
namespace internal {
+Context* Context::declaration_context() {
+ Context* current = this;
+ while (!current->IsFunctionContext() && !current->IsGlobalContext()) {
+ current = current->previous();
+ ASSERT(current->closure() == closure());
+ }
+ return current;
+}
+
+
JSBuiltinsObject* Context::builtins() {
GlobalObject* object = global();
if (object->IsJSGlobalObject()) {
@@ -74,8 +84,10 @@
}
-Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
- int* index_, PropertyAttributes* attributes) {
+Handle<Object> Context::Lookup(Handle<String> name,
+ ContextLookupFlags flags,
+ int* index_,
+ PropertyAttributes* attributes) {
Isolate* isolate = GetIsolate();
Handle<Context> context(this, isolate);
@@ -96,40 +108,52 @@
PrintF("\n");
}
- // check extension/with object
+ // Check extension/with/global object.
if (context->has_extension()) {
- Handle<JSObject> extension = Handle<JSObject>(context->extension(),
- isolate);
- // Context extension objects needs to behave as if they have no
- // prototype. So even if we want to follow prototype chains, we
- // need to only do a local lookup for context extension objects.
- if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
- extension->IsJSContextExtensionObject()) {
- *attributes = extension->GetLocalPropertyAttribute(*name);
- } else {
- *attributes = extension->GetPropertyAttribute(*name);
- }
- if (*attributes != ABSENT) {
- // property found
- if (FLAG_trace_contexts) {
- PrintF("=> found property in context object %p\n",
- reinterpret_cast<void*>(*extension));
+ if (context->IsCatchContext()) {
+ // Catch contexts have the variable name in the extension slot.
+ if (name->Equals(String::cast(context->extension()))) {
+ if (FLAG_trace_contexts) {
+ PrintF("=> found in catch context\n");
+ }
+ *index_ = Context::THROWN_OBJECT_INDEX;
+ *attributes = NONE;
+ return context;
}
- return extension;
+ } else {
+ // Global, function, and with contexts may have an object in the
+ // extension slot.
+ Handle<JSObject> extension(JSObject::cast(context->extension()),
+ isolate);
+ // Context extension objects need to behave as if they have no
+ // prototype. So even if we want to follow prototype chains, we
+ // need to only do a local lookup for context extension objects.
+ if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
+ extension->IsJSContextExtensionObject()) {
+ *attributes = extension->GetLocalPropertyAttribute(*name);
+ } else {
+ *attributes = extension->GetPropertyAttribute(*name);
+ }
+ if (*attributes != ABSENT) {
+ // property found
+ if (FLAG_trace_contexts) {
+ PrintF("=> found property in context object %p\n",
+ reinterpret_cast<void*>(*extension));
+ }
+ return extension;
+ }
}
}
- if (context->is_function_context()) {
- // we have context-local slots
-
- // check non-parameter locals in context
+ // Only functions can have locals, parameters, and a function name.
+ if (context->IsFunctionContext()) {
+ // We may have context-local slots. Check locals in the context.
Handle<SerializedScopeInfo> scope_info(
context->closure()->shared()->scope_info(), isolate);
Variable::Mode mode;
int index = scope_info->ContextSlotIndex(*name, &mode);
ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
if (index >= 0) {
- // slot found
if (FLAG_trace_contexts) {
PrintF("=> found local in context slot %d (mode = %d)\n",
index, mode);
@@ -142,39 +166,28 @@
// declared variables that were introduced through declaration nodes)
// must not appear here.
switch (mode) {
- case Variable::INTERNAL: // fall through
- case Variable::VAR: *attributes = NONE; break;
- case Variable::CONST: *attributes = READ_ONLY; break;
- case Variable::DYNAMIC: UNREACHABLE(); break;
- case Variable::DYNAMIC_GLOBAL: UNREACHABLE(); break;
- case Variable::DYNAMIC_LOCAL: UNREACHABLE(); break;
- case Variable::TEMPORARY: UNREACHABLE(); break;
+ case Variable::INTERNAL: // Fall through.
+ case Variable::VAR:
+ *attributes = NONE;
+ break;
+ case Variable::CONST:
+ *attributes = READ_ONLY;
+ break;
+ case Variable::DYNAMIC:
+ case Variable::DYNAMIC_GLOBAL:
+ case Variable::DYNAMIC_LOCAL:
+ case Variable::TEMPORARY:
+ UNREACHABLE();
+ break;
}
return context;
}
- // check parameter locals in context
- int param_index = scope_info->ParameterIndex(*name);
- if (param_index >= 0) {
- // slot found.
- int index = scope_info->ContextSlotIndex(
- isolate->heap()->arguments_shadow_symbol(), NULL);
- ASSERT(index >= 0); // arguments must exist and be in the heap context
- Handle<JSObject> arguments(JSObject::cast(context->get(index)),
- isolate);
- if (FLAG_trace_contexts) {
- PrintF("=> found parameter %d in arguments object\n", param_index);
- }
- *index_ = param_index;
- *attributes = NONE;
- return arguments;
- }
-
- // check intermediate context (holding only the function name variable)
+ // Check the slot corresponding to the intermediate context holding
+ // only the function name variable.
if (follow_context_chain) {
int index = scope_info->FunctionContextSlotIndex(*name);
if (index >= 0) {
- // slot found
if (FLAG_trace_contexts) {
PrintF("=> found intermediate function in context slot %d\n",
index);
@@ -186,18 +199,14 @@
}
}
- // proceed with enclosing context
+ // Proceed with the previous context.
if (context->IsGlobalContext()) {
follow_context_chain = false;
- } else if (context->is_function_context()) {
- context = Handle<Context>(Context::cast(context->closure()->context()),
- isolate);
} else {
context = Handle<Context>(context->previous(), isolate);
}
} while (follow_context_chain);
- // slot not found
if (FLAG_trace_contexts) {
PrintF("=> no property/slot found\n");
}
@@ -212,11 +221,12 @@
// before the global context and check that there are no context
// extension objects (conservative check for with statements).
while (!context->IsGlobalContext()) {
- // Check if the context is a potentially a with context.
+ // Check if the context is a catch or with context, or has introduced
+ // bindings by calling non-strict eval.
if (context->has_extension()) return false;
// Not a with context so it must be a function context.
- ASSERT(context->is_function_context());
+ ASSERT(context->IsFunctionContext());
// Check non-parameter locals.
Handle<SerializedScopeInfo> scope_info(
@@ -233,7 +243,7 @@
// Check context only holding the function name variable.
index = scope_info->FunctionContextSlotIndex(*name);
if (index >= 0) return false;
- context = Context::cast(context->closure()->context());
+ context = context->previous();
}
// No local or potential with statement found so the variable is
@@ -244,21 +254,24 @@
void Context::ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
bool* outer_scope_calls_non_strict_eval) {
+ // Skip up the context chain checking all the function contexts to see
+ // whether they call eval.
Context* context = this;
- while (true) {
- Handle<SerializedScopeInfo> scope_info(
- context->closure()->shared()->scope_info());
- if (scope_info->CallsEval()) {
- *outer_scope_calls_eval = true;
- if (!scope_info->IsStrictMode()) {
- // No need to go further since the answers will not change
- // from here.
- *outer_scope_calls_non_strict_eval = true;
- return;
+ while (!context->IsGlobalContext()) {
+ if (context->IsFunctionContext()) {
+ Handle<SerializedScopeInfo> scope_info(
+ context->closure()->shared()->scope_info());
+ if (scope_info->CallsEval()) {
+ *outer_scope_calls_eval = true;
+ if (!scope_info->IsStrictMode()) {
+ // No need to go further since the answers will not change from
+ // here.
+ *outer_scope_calls_non_strict_eval = true;
+ return;
+ }
}
}
- if (context->IsGlobalContext()) break;
- context = Context::cast(context->closure()->context());
+ context = context->previous();
}
}
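
The Lookup changes above teach the context walk about catch contexts: the catch variable's name sits in the extension slot and the thrown value in THROWN_OBJECT_INDEX, so no extension object needs to be allocated. A simplified model of that branch, with std::string standing in for V8 handles:

    #include <cassert>
    #include <string>

    struct CatchContext {
      std::string extension;  // the catch variable's name
      double thrown_object;   // the value at THROWN_OBJECT_INDEX

      // Returns true and the slot value if name is the catch variable.
      bool Lookup(const std::string& name, double* value) const {
        if (name == extension) {  // a catch context binds exactly one name
          *value = thrown_object;
          return true;
        }
        return false;  // caller proceeds to the previous context
      }
    };

    int main() {
      CatchContext ctx{"e", 3.14};
      double v = 0;
      assert(ctx.Lookup("e", &v) && v == 3.14);
      assert(!ctx.Lookup("x", &v));
      return 0;
    }
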
diff --git a/src/contexts.h b/src/contexts.h
index b0d3ae4..53b40f1 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -88,6 +88,8 @@
V(JS_ARRAY_MAP_INDEX, Map, js_array_map)\
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
+ V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
+ aliased_arguments_boilerplate) \
V(STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
strict_mode_arguments_boilerplate) \
V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
@@ -108,7 +110,9 @@
V(MAP_CACHE_INDEX, Object, map_cache) \
V(CONTEXT_DATA_INDEX, Object, data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
- V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap)
+ V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
+ V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
+ V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)
// JSFunctions are pairs (context, function code), sometimes also called
// closures. A Context object is used to represent function contexts and
@@ -127,13 +131,6 @@
// statically allocated context slots. The names are needed
// for dynamic lookups in the presence of 'with' or 'eval'.
//
-// [ fcontext ] A pointer to the innermost enclosing function context.
-// It is the same for all contexts *allocated* inside a
-// function, and the function context's fcontext points
-// to itself. It is only needed for fast access of the
-// function context (used for declarations, and static
-// context slot access).
-//
// [ previous ] A pointer to the previous context. It is NULL for
// function contexts, and non-NULL for 'with' contexts.
// Used to implement the 'with' statement.
@@ -155,19 +152,6 @@
// (via static context addresses) or through 'eval' (dynamic context lookups).
// Finally, the global context contains additional slots for fast access to
// global properties.
-//
-// We may be able to simplify the implementation:
-//
-// - We may be able to get rid of 'fcontext': We can always use the fact that
-// previous == NULL for function contexts and so we can search for them. They
-// are only needed when doing dynamic declarations, and the context chains
-// tend to be very very short (depth of nesting of 'with' statements). At
-// the moment we also use it in generated code for context slot accesses -
-// and there we don't want a loop because of code bloat - but we may not
-// need it there after all (see comment in codegen_*.cc).
-//
-// - If we cannot get rid of fcontext, consider making 'previous' never NULL
-// except for the global context. This could simplify Context::Lookup.
class Context: public FixedArray {
public:
@@ -181,16 +165,22 @@
enum {
// These slots are in all contexts.
CLOSURE_INDEX,
- FCONTEXT_INDEX,
PREVIOUS_INDEX,
+ // The extension slot is used for either the global object (in global
+ // contexts), eval extension object (function contexts), subject of with
+ // (with contexts), or the variable name (catch contexts).
EXTENSION_INDEX,
GLOBAL_INDEX,
MIN_CONTEXT_SLOTS,
+ // This slot holds the thrown value in catch contexts.
+ THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
+
// These slots are only in global contexts.
GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
SECURITY_TOKEN_INDEX,
ARGUMENTS_BOILERPLATE_INDEX,
+ ALIASED_ARGUMENTS_BOILERPLATE_INDEX,
STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX,
JS_ARRAY_MAP_INDEX,
REGEXP_RESULT_MAP_INDEX,
@@ -236,14 +226,16 @@
OPAQUE_REFERENCE_FUNCTION_INDEX,
CONTEXT_EXTENSION_FUNCTION_INDEX,
OUT_OF_MEMORY_INDEX,
- MAP_CACHE_INDEX,
CONTEXT_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
+ DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
+ DERIVED_SET_TRAP_INDEX,
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
OPTIMIZED_FUNCTIONS_LIST, // Weak.
+ MAP_CACHE_INDEX, // Weak.
NEXT_CONTEXT_LINK, // Weak.
// Total number of slots.
@@ -256,9 +248,6 @@
JSFunction* closure() { return JSFunction::cast(get(CLOSURE_INDEX)); }
void set_closure(JSFunction* closure) { set(CLOSURE_INDEX, closure); }
- Context* fcontext() { return Context::cast(get(FCONTEXT_INDEX)); }
- void set_fcontext(Context* context) { set(FCONTEXT_INDEX, context); }
-
Context* previous() {
Object* result = unchecked_previous();
ASSERT(IsBootstrappingOrContext(result));
@@ -266,9 +255,13 @@
}
void set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
- bool has_extension() { return unchecked_extension() != NULL; }
- JSObject* extension() { return JSObject::cast(unchecked_extension()); }
- void set_extension(JSObject* object) { set(EXTENSION_INDEX, object); }
+ bool has_extension() { return extension() != NULL; }
+ Object* extension() { return get(EXTENSION_INDEX); }
+ void set_extension(Object* object) { set(EXTENSION_INDEX, object); }
+
+ // Get the context where var declarations will be hoisted to, which
+ // may be the context itself.
+ Context* declaration_context();
GlobalObject* global() {
Object* result = get(GLOBAL_INDEX);
@@ -287,8 +280,21 @@
// Compute the global context by traversing the context chain.
Context* global_context();
- // Tells if this is a function context (as opposed to a 'with' context).
- bool is_function_context() { return unchecked_previous() == NULL; }
+ // Predicates for context types. IsGlobalContext is defined on Object
+ // because we frequently have to know if arbitrary objects are global
+ // contexts.
+ bool IsFunctionContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->function_context_map();
+ }
+ bool IsCatchContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->catch_context_map();
+ }
+ bool IsWithContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->with_context_map();
+ }
// Tells whether the global context is marked with out of memory.
inline bool has_out_of_memory();
@@ -377,7 +383,6 @@
private:
// Unchecked access to the slots.
Object* unchecked_previous() { return get(PREVIOUS_INDEX); }
- Object* unchecked_extension() { return get(EXTENSION_INDEX); }
#ifdef DEBUG
// Bootstrapping-aware type checks.
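
With the fcontext slot gone, a context reaches its enclosing function (or global) context by walking previous(), and the new map-based predicates classify each hop; declaration_context() in contexts.cc above is exactly that walk. A standalone model, with enum tags standing in for the map checks:

    #include <cassert>

    enum ContextType { GLOBAL, FUNCTION, CATCH, WITH };

    struct Ctx {
      ContextType type;
      Ctx* previous;
    };

    Ctx* DeclarationContext(Ctx* c) {
      // 'var' declarations hoist past catch and with contexts.
      while (c->type != FUNCTION && c->type != GLOBAL) c = c->previous;
      return c;
    }

    int main() {
      Ctx global{GLOBAL, nullptr};
      Ctx fn{FUNCTION, &global};
      Ctx katch{CATCH, &fn};
      Ctx with{WITH, &katch};
      assert(DeclarationContext(&with) == &fn);  // past with and catch
      assert(DeclarationContext(&fn) == &fn);    // already a function ctx
      return 0;
    }
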
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index cb7dbf8..1a20645 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,19 +28,26 @@
#ifndef V8_CONVERSIONS_INL_H_
#define V8_CONVERSIONS_INL_H_
+#include <limits.h> // Required for INT_MAX etc.
#include <math.h>
-#include <float.h> // required for DBL_MAX and on Win32 for finite()
+#include <float.h> // Required for DBL_MAX and on Win32 for finite()
#include <stdarg.h>
// ----------------------------------------------------------------------------
// Extra POSIX/ANSI functions for Win32/MSVC.
#include "conversions.h"
+#include "strtod.h"
#include "platform.h"
namespace v8 {
namespace internal {
+static inline double JunkStringValue() {
+ return BitCast<double, uint64_t>(kQuietNaNMask);
+}
+
+
// The fast double-to-unsigned-int conversion routine does not guarantee
// rounding towards zero, or any reasonable value if the argument is larger
// than what fits in an unsigned 32-bit integer.
@@ -77,18 +84,6 @@
}
-int32_t NumberToInt32(Object* number) {
- if (number->IsSmi()) return Smi::cast(number)->value();
- return DoubleToInt32(number->Number());
-}
-
-
-uint32_t NumberToUint32(Object* number) {
- if (number->IsSmi()) return Smi::cast(number)->value();
- return DoubleToUint32(number->Number());
-}
-
-
int32_t DoubleToInt32(double x) {
int32_t i = FastD2I(x);
if (FastI2D(i) == x) return i;
@@ -101,6 +96,569 @@
}
+template <class Iterator, class EndMark>
+static bool SubStringEquals(Iterator* current,
+ EndMark end,
+ const char* substring) {
+ ASSERT(**current == *substring);
+ for (substring++; *substring != '\0'; substring++) {
+ ++*current;
+ if (*current == end || **current != *substring) return false;
+ }
+ ++*current;
+ return true;
+}
+
+
+// Returns true if a nonspace character has been found and false if the
+// end was reached before finding a nonspace character.
+template <class Iterator, class EndMark>
+static inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
+ Iterator* current,
+ EndMark end) {
+ while (*current != end) {
+ if (!unicode_cache->IsWhiteSpace(**current)) return true;
+ ++*current;
+ }
+ return false;
+}
+
+
+// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
+template <int radix_log_2, class Iterator, class EndMark>
+static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
+ Iterator current,
+ EndMark end,
+ bool negative,
+ bool allow_trailing_junk) {
+ ASSERT(current != end);
+
+ // Skip leading 0s.
+ while (*current == '0') {
+ ++current;
+ if (current == end) return SignedZero(negative);
+ }
+
+ int64_t number = 0;
+ int exponent = 0;
+ const int radix = (1 << radix_log_2);
+
+ do {
+ int digit;
+ if (*current >= '0' && *current <= '9' && *current < '0' + radix) {
+ digit = static_cast<char>(*current) - '0';
+ } else if (radix > 10 && *current >= 'a' && *current < 'a' + radix - 10) {
+ digit = static_cast<char>(*current) - 'a' + 10;
+ } else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
+ digit = static_cast<char>(*current) - 'A' + 10;
+ } else {
+ if (allow_trailing_junk ||
+ !AdvanceToNonspace(unicode_cache, &current, end)) {
+ break;
+ } else {
+ return JunkStringValue();
+ }
+ }
+
+ number = number * radix + digit;
+ int overflow = static_cast<int>(number >> 53);
+ if (overflow != 0) {
+ // Overflow occurred. Need to determine which direction to round the
+ // result.
+ int overflow_bits_count = 1;
+ while (overflow > 1) {
+ overflow_bits_count++;
+ overflow >>= 1;
+ }
+
+ int dropped_bits_mask = ((1 << overflow_bits_count) - 1);
+ int dropped_bits = static_cast<int>(number) & dropped_bits_mask;
+ number >>= overflow_bits_count;
+ exponent = overflow_bits_count;
+
+ bool zero_tail = true;
+ while (true) {
+ ++current;
+ if (current == end || !isDigit(*current, radix)) break;
+ zero_tail = zero_tail && *current == '0';
+ exponent += radix_log_2;
+ }
+
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(unicode_cache, &current, end)) {
+ return JunkStringValue();
+ }
+
+ int middle_value = (1 << (overflow_bits_count - 1));
+ if (dropped_bits > middle_value) {
+ number++; // Rounding up.
+ } else if (dropped_bits == middle_value) {
+ // Rounding to even for consistency with decimals: half-way case rounds
+ // up if significant part is odd and down otherwise.
+ if ((number & 1) != 0 || !zero_tail) {
+ number++; // Rounding up.
+ }
+ }
+
+ // Rounding up may cause overflow.
+ if ((number & ((int64_t)1 << 53)) != 0) {
+ exponent++;
+ number >>= 1;
+ }
+ break;
+ }
+ ++current;
+ } while (current != end);
+
+ ASSERT(number < ((int64_t)1 << 53));
+ ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
+
+ if (exponent == 0) {
+ if (negative) {
+ if (number == 0) return -0.0;
+ number = -number;
+ }
+ return static_cast<double>(number);
+ }
+
+ ASSERT(number != 0);
+ // The double could be constructed faster from number (mantissa), exponent
+ // and sign. Assuming it's a rare case, simpler code is used.
+ return static_cast<double>(negative ? -number : number) * pow(2.0, exponent);
+}
+
+
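
The overflow branch above implements round-to-nearest-even: when the dropped bits are exactly half of the dropped range, the mantissa is bumped only if it is odd or a nonzero tail follows, which matches IEEE double semantics. A quick worked check using the compiler's own uint64-to-double rounding (assuming the usual round-to-nearest FP environment):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // 2^53 + 1 is exactly halfway between two representable doubles and
      // rounds to the even neighbour 2^53; 2^53 + 3 rounds up to 2^53 + 4.
      uint64_t tie_even = 9007199254740993ULL;  // 2^53 + 1
      uint64_t tie_odd = 9007199254740995ULL;   // 2^53 + 3
      std::printf("%.0f\n", static_cast<double>(tie_even));  // ...740992
      std::printf("%.0f\n", static_cast<double>(tie_odd));   // ...740996
      return 0;
    }
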
+template <class Iterator, class EndMark>
+static double InternalStringToInt(UnicodeCache* unicode_cache,
+ Iterator current,
+ EndMark end,
+ int radix) {
+ const bool allow_trailing_junk = true;
+ const double empty_string_val = JunkStringValue();
+
+ if (!AdvanceToNonspace(unicode_cache, &current, end)) {
+ return empty_string_val;
+ }
+
+ bool negative = false;
+ bool leading_zero = false;
+
+ if (*current == '+') {
+ // Ignore leading sign; skip following spaces.
+ ++current;
+ if (current == end) {
+ return JunkStringValue();
+ }
+ } else if (*current == '-') {
+ ++current;
+ if (current == end) {
+ return JunkStringValue();
+ }
+ negative = true;
+ }
+
+ if (radix == 0) {
+ // Radix detection.
+ if (*current == '0') {
+ ++current;
+ if (current == end) return SignedZero(negative);
+ if (*current == 'x' || *current == 'X') {
+ radix = 16;
+ ++current;
+ if (current == end) return JunkStringValue();
+ } else {
+ radix = 8;
+ leading_zero = true;
+ }
+ } else {
+ radix = 10;
+ }
+ } else if (radix == 16) {
+ if (*current == '0') {
+ // Allow "0x" prefix.
+ ++current;
+ if (current == end) return SignedZero(negative);
+ if (*current == 'x' || *current == 'X') {
+ ++current;
+ if (current == end) return JunkStringValue();
+ } else {
+ leading_zero = true;
+ }
+ }
+ }
+
+ if (radix < 2 || radix > 36) return JunkStringValue();
+
+ // Skip leading zeros.
+ while (*current == '0') {
+ leading_zero = true;
+ ++current;
+ if (current == end) return SignedZero(negative);
+ }
+
+ if (!leading_zero && !isDigit(*current, radix)) {
+ return JunkStringValue();
+ }
+
+ if (IsPowerOf2(radix)) {
+ switch (radix) {
+ case 2:
+ return InternalStringToIntDouble<1>(
+ unicode_cache, current, end, negative, allow_trailing_junk);
+ case 4:
+ return InternalStringToIntDouble<2>(
+ unicode_cache, current, end, negative, allow_trailing_junk);
+ case 8:
+ return InternalStringToIntDouble<3>(
+ unicode_cache, current, end, negative, allow_trailing_junk);
+
+ case 16:
+ return InternalStringToIntDouble<4>(
+ unicode_cache, current, end, negative, allow_trailing_junk);
+
+ case 32:
+ return InternalStringToIntDouble<5>(
+ unicode_cache, current, end, negative, allow_trailing_junk);
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ if (radix == 10) {
+ // Parsing with strtod.
+ const int kMaxSignificantDigits = 309; // Doubles are less than 1.8e308.
+ // The buffer may contain up to kMaxSignificantDigits + 1 digits and a
+ // terminating '\0'.
+ const int kBufferSize = kMaxSignificantDigits + 2;
+ char buffer[kBufferSize];
+ int buffer_pos = 0;
+ while (*current >= '0' && *current <= '9') {
+ if (buffer_pos <= kMaxSignificantDigits) {
+ // If the number has more than kMaxSignificantDigits it will be parsed
+ // as infinity.
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ }
+ ++current;
+ if (current == end) break;
+ }
+
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(unicode_cache, &current, end)) {
+ return JunkStringValue();
+ }
+
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos] = '\0';
+ Vector<const char> buffer_vector(buffer, buffer_pos);
+ return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
+ }
+
+ // The following code causes accumulating rounding error for numbers greater
+ // than ~2^56. It's explicitly allowed in the spec: "if R is not 2, 4, 8, 10,
+ // 16, or 32, then mathInt may be an implementation-dependent approximation to
+ // the mathematical integer value" (15.1.2.2).
+
+ int lim_0 = '0' + (radix < 10 ? radix : 10);
+ int lim_a = 'a' + (radix - 10);
+ int lim_A = 'A' + (radix - 10);
+
+ // NOTE: The code for computing the value may seem a bit complex at
+ // first glance. It is structured to use 32-bit multiply-and-add
+ // loops as long as possible to avoid losing precision.
+
+ double v = 0.0;
+ bool done = false;
+ do {
+ // Parse the longest part of the string starting at index j
+ // possible while keeping the multiplier, and thus the part
+ // itself, within 32 bits.
+ unsigned int part = 0, multiplier = 1;
+ while (true) {
+ int d;
+ if (*current >= '0' && *current < lim_0) {
+ d = *current - '0';
+ } else if (*current >= 'a' && *current < lim_a) {
+ d = *current - 'a' + 10;
+ } else if (*current >= 'A' && *current < lim_A) {
+ d = *current - 'A' + 10;
+ } else {
+ done = true;
+ break;
+ }
+
+ // Update the value of the part as long as the multiplier fits
+ // in 32 bits. When we can't guarantee that the next iteration
+ // will not overflow the multiplier, we stop parsing the part
+ // by leaving the loop.
+ const unsigned int kMaximumMultiplier = 0xffffffffU / 36;
+ uint32_t m = multiplier * radix;
+ if (m > kMaximumMultiplier) break;
+ part = part * radix + d;
+ multiplier = m;
+ ASSERT(multiplier > part);
+
+ ++current;
+ if (current == end) {
+ done = true;
+ break;
+ }
+ }
+
+ // Update the value and skip the part in the string.
+ v = v * multiplier + part;
+ } while (!done);
+
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(unicode_cache, &current, end)) {
+ return JunkStringValue();
+ }
+
+ return negative ? -v : v;
+}
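A minimal model (not V8 code) of the chunked accumulation used above for radixes that are not powers of two: digits are folded into an exact 32-bit part until the next multiplier step could overflow, and only then is the chunk merged into the double accumulator. Junk handling and letter digits are omitted for brevity.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static double ParseRadix(const char* s, int radix) {
  const unsigned kMaxMultiplier = 0xffffffffU / 36;
  double v = 0.0;
  size_t i = 0, n = strlen(s);
  while (i < n) {
    unsigned part = 0, multiplier = 1;
    while (i < n) {
      int d = s[i] - '0';               // Assumes digits '0'..'9' only.
      uint32_t m = multiplier * radix;
      if (m > kMaxMultiplier) break;    // Next step could overflow: flush.
      part = part * radix + d;          // Exact 32-bit arithmetic so far.
      multiplier = m;
      ++i;
    }
    v = v * multiplier + part;          // Merge the finished chunk.
  }
  return v;
}

int main() {
  printf("%f\n", ParseRadix("123456", 7));  // 123456 in base 7 == 22875.
  return 0;
}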
+
+
+// Converts a string to a double value. Assumes the Iterator supports
+// the following operations:
+// 1. current == end and current != end (no other comparisons are allowed).
+// 2. *current - gets the current character in the sequence.
+// 3. ++current (advances the position).
+template <class Iterator, class EndMark>
+static double InternalStringToDouble(UnicodeCache* unicode_cache,
+ Iterator current,
+ EndMark end,
+ int flags,
+ double empty_string_val) {
+ // To make sure that iterator dereferencing is valid the following
+ // convention is used:
+ // 1. Each '++current' statement is followed by a check for equality to 'end'.
+ // 2. If AdvanceToNonspace returned false then current == end.
+ // 3. If 'current' becomes equal to 'end' the function returns or goes to
+ // 'parsing_done'.
+ // 4. 'current' is not dereferenced after the 'parsing_done' label.
+ // 5. Code before 'parsing_done' may rely on 'current != end'.
+ if (!AdvanceToNonspace(unicode_cache, &current, end)) {
+ return empty_string_val;
+ }
+
+ const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0;
+
+ // The longest form of simplified number is: "-<significant digits>.1eXXX\0".
+ const int kBufferSize = kMaxSignificantDigits + 10;
+ char buffer[kBufferSize]; // NOLINT: size is known at compile time.
+ int buffer_pos = 0;
+
+ // Exponent will be adjusted if insignificant digits of the integer part
+ // or insignificant leading zeros of the fractional part are dropped.
+ int exponent = 0;
+ int significant_digits = 0;
+ int insignificant_digits = 0;
+ bool nonzero_digit_dropped = false;
+
+ bool negative = false;
+
+ if (*current == '+') {
+ // Ignore leading sign.
+ ++current;
+ if (current == end) return JunkStringValue();
+ } else if (*current == '-') {
+ ++current;
+ if (current == end) return JunkStringValue();
+ negative = true;
+ }
+
+ static const char kInfinitySymbol[] = "Infinity";
+ if (*current == kInfinitySymbol[0]) {
+ if (!SubStringEquals(&current, end, kInfinitySymbol)) {
+ return JunkStringValue();
+ }
+
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(unicode_cache, &current, end)) {
+ return JunkStringValue();
+ }
+
+ ASSERT(buffer_pos == 0);
+ return negative ? -V8_INFINITY : V8_INFINITY;
+ }
+
+ bool leading_zero = false;
+ if (*current == '0') {
+ ++current;
+ if (current == end) return SignedZero(negative);
+
+ leading_zero = true;
+
+ // It could be a hexadecimal value.
+ if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
+ ++current;
+ if (current == end || !isDigit(*current, 16)) {
+ return JunkStringValue(); // "0x".
+ }
+
+ return InternalStringToIntDouble<4>(unicode_cache,
+ current,
+ end,
+ negative,
+ allow_trailing_junk);
+ }
+
+ // Ignore leading zeros in the integer part.
+ while (*current == '0') {
+ ++current;
+ if (current == end) return SignedZero(negative);
+ }
+ }
+
+ bool octal = leading_zero && (flags & ALLOW_OCTALS) != 0;
+
+ // Copy significant digits of the integer part (if any) to the buffer.
+ while (*current >= '0' && *current <= '9') {
+ if (significant_digits < kMaxSignificantDigits) {
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ significant_digits++;
+ // Will later check if it's an octal in the buffer.
+ } else {
+ insignificant_digits++; // Move the digit into the exponential part.
+ nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+ }
+ octal = octal && *current < '8';
+ ++current;
+ if (current == end) goto parsing_done;
+ }
+
+ if (significant_digits == 0) {
+ octal = false;
+ }
+
+ if (*current == '.') {
+ if (octal && !allow_trailing_junk) return JunkStringValue();
+ if (octal) goto parsing_done;
+
+ ++current;
+ if (current == end) {
+ if (significant_digits == 0 && !leading_zero) {
+ return JunkStringValue();
+ } else {
+ goto parsing_done;
+ }
+ }
+
+ if (significant_digits == 0) {
+ // octal = false;
+ // Integer part consists of 0 or is absent. Significant digits start after
+ // leading zeros (if any).
+ while (*current == '0') {
+ ++current;
+ if (current == end) return SignedZero(negative);
+ exponent--; // Move this 0 into the exponent.
+ }
+ }
+
+ // There is a fractional part. We don't emit a '.', but adjust the exponent
+ // instead.
+ while (*current >= '0' && *current <= '9') {
+ if (significant_digits < kMaxSignificantDigits) {
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ significant_digits++;
+ exponent--;
+ } else {
+ // Ignore insignificant digits in the fractional part.
+ nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+ }
+ ++current;
+ if (current == end) goto parsing_done;
+ }
+ }
+
+ if (!leading_zero && exponent == 0 && significant_digits == 0) {
+ // If leading_zero is true then the string contains zeros.
+ // If exponent < 0 then string was [+-]\.0*...
+ // If significant_digits != 0 the string is not equal to 0.
+ // Otherwise there are no digits in the string.
+ return JunkStringValue();
+ }
+
+ // Parse exponential part.
+ if (*current == 'e' || *current == 'E') {
+ if (octal) return JunkStringValue();
+ ++current;
+ if (current == end) {
+ if (allow_trailing_junk) {
+ goto parsing_done;
+ } else {
+ return JunkStringValue();
+ }
+ }
+ char sign = '+';
+ if (*current == '+' || *current == '-') {
+ sign = static_cast<char>(*current);
+ ++current;
+ if (current == end) {
+ if (allow_trailing_junk) {
+ goto parsing_done;
+ } else {
+ return JunkStringValue();
+ }
+ }
+ }
+
+ if (current == end || *current < '0' || *current > '9') {
+ if (allow_trailing_junk) {
+ goto parsing_done;
+ } else {
+ return JunkStringValue();
+ }
+ }
+
+ const int max_exponent = INT_MAX / 2;
+ ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
+ int num = 0;
+ do {
+ // Check overflow.
+ int digit = *current - '0';
+ if (num >= max_exponent / 10
+ && !(num == max_exponent / 10 && digit <= max_exponent % 10)) {
+ num = max_exponent;
+ } else {
+ num = num * 10 + digit;
+ }
+ ++current;
+ } while (current != end && *current >= '0' && *current <= '9');
+
+ exponent += (sign == '-' ? -num : num);
+ }
+
+ if (!allow_trailing_junk &&
+ AdvanceToNonspace(unicode_cache, &current, end)) {
+ return JunkStringValue();
+ }
+
+ parsing_done:
+ exponent += insignificant_digits;
+
+ if (octal) {
+ return InternalStringToIntDouble<3>(unicode_cache,
+ buffer,
+ buffer + buffer_pos,
+ negative,
+ allow_trailing_junk);
+ }
+
+ if (nonzero_digit_dropped) {
+ buffer[buffer_pos++] = '1';
+ exponent--;
+ }
+
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos] = '\0';
+
+ double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
+ return negative ? -converted : converted;
+}
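The iterator contract documented above is deliberately narrow. A toy adaptor that satisfies it, echoing the StringInputBuffer adaptor this patch removes from conversions.cc below, might look like this (illustrative only):

// End-of-sequence is a distinct type, so '==' against the end marker is
// the only comparison the parsing templates ever perform.
class CStringIterator {
 public:
  class EndMarker {};
  explicit CStringIterator(const char* s) : p_(s) {}
  int operator*() const { return *p_; }
  void operator++() { ++p_; }
  bool operator==(const EndMarker&) const { return *p_ == '\0'; }
  bool operator!=(const EndMarker&) const { return *p_ != '\0'; }
 private:
  const char* p_;
};
// Hypothetical usage: InternalStringToDouble(cache,
//     CStringIterator("3.14"), CStringIterator::EndMarker(), flags, 0.0);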
+
} } // namespace v8::internal
#endif // V8_CONVERSIONS_INL_H_
diff --git a/src/conversions.cc b/src/conversions.cc
index 353b681..c34fe51 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -28,693 +28,15 @@
#include <stdarg.h>
#include <limits.h>
-#include "v8.h"
-
#include "conversions-inl.h"
#include "dtoa.h"
-#include "factory.h"
#include "scanner-base.h"
#include "strtod.h"
+#include "utils.h"
namespace v8 {
namespace internal {
-namespace {
-
-// C++-style iterator adaptor for StringInputBuffer
-// (unlike C++ iterators the end-marker has different type).
-class StringInputBufferIterator {
- public:
- class EndMarker {};
-
- explicit StringInputBufferIterator(StringInputBuffer* buffer);
-
- int operator*() const;
- void operator++();
- bool operator==(EndMarker const&) const { return end_; }
- bool operator!=(EndMarker const& m) const { return !end_; }
-
- private:
- StringInputBuffer* const buffer_;
- int current_;
- bool end_;
-};
-
-
-StringInputBufferIterator::StringInputBufferIterator(
- StringInputBuffer* buffer) : buffer_(buffer) {
- ++(*this);
-}
-
-int StringInputBufferIterator::operator*() const {
- return current_;
-}
-
-
-void StringInputBufferIterator::operator++() {
- end_ = !buffer_->has_more();
- if (!end_) {
- current_ = buffer_->GetNext();
- }
-}
-}
-
-
-template <class Iterator, class EndMark>
-static bool SubStringEquals(Iterator* current,
- EndMark end,
- const char* substring) {
- ASSERT(**current == *substring);
- for (substring++; *substring != '\0'; substring++) {
- ++*current;
- if (*current == end || **current != *substring) return false;
- }
- ++*current;
- return true;
-}
-
-
-// Maximum number of significant digits in decimal representation.
-// The longest possible double in decimal representation is
-// (2^53 - 1) * 2 ^ -1074 that is (2 ^ 53 - 1) * 5 ^ 1074 / 10 ^ 1074
-// (768 digits). If we parse a number whose first digits are equal to a
-// mean of 2 adjacent doubles (that could have up to 769 digits) the result
-// must be rounded to the bigger one unless the tail consists of zeros, so
-// we don't need to preserve all the digits.
-const int kMaxSignificantDigits = 772;
-
-
-static const double JUNK_STRING_VALUE = OS::nan_value();
-
-
-// Returns true if a nonspace found and false if the end has reached.
-template <class Iterator, class EndMark>
-static inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
- Iterator* current,
- EndMark end) {
- while (*current != end) {
- if (!unicode_cache->IsWhiteSpace(**current)) return true;
- ++*current;
- }
- return false;
-}
-
-
-static bool isDigit(int x, int radix) {
- return (x >= '0' && x <= '9' && x < '0' + radix)
- || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
- || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
-}
-
-
-static double SignedZero(bool negative) {
- return negative ? -0.0 : 0.0;
-}
-
-
-// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
-template <int radix_log_2, class Iterator, class EndMark>
-static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
- Iterator current,
- EndMark end,
- bool negative,
- bool allow_trailing_junk) {
- ASSERT(current != end);
-
- // Skip leading 0s.
- while (*current == '0') {
- ++current;
- if (current == end) return SignedZero(negative);
- }
-
- int64_t number = 0;
- int exponent = 0;
- const int radix = (1 << radix_log_2);
-
- do {
- int digit;
- if (*current >= '0' && *current <= '9' && *current < '0' + radix) {
- digit = static_cast<char>(*current) - '0';
- } else if (radix > 10 && *current >= 'a' && *current < 'a' + radix - 10) {
- digit = static_cast<char>(*current) - 'a' + 10;
- } else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
- digit = static_cast<char>(*current) - 'A' + 10;
- } else {
- if (allow_trailing_junk ||
- !AdvanceToNonspace(unicode_cache, &current, end)) {
- break;
- } else {
- return JUNK_STRING_VALUE;
- }
- }
-
- number = number * radix + digit;
- int overflow = static_cast<int>(number >> 53);
- if (overflow != 0) {
- // Overflow occurred. Need to determine which direction to round the
- // result.
- int overflow_bits_count = 1;
- while (overflow > 1) {
- overflow_bits_count++;
- overflow >>= 1;
- }
-
- int dropped_bits_mask = ((1 << overflow_bits_count) - 1);
- int dropped_bits = static_cast<int>(number) & dropped_bits_mask;
- number >>= overflow_bits_count;
- exponent = overflow_bits_count;
-
- bool zero_tail = true;
- while (true) {
- ++current;
- if (current == end || !isDigit(*current, radix)) break;
- zero_tail = zero_tail && *current == '0';
- exponent += radix_log_2;
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
- return JUNK_STRING_VALUE;
- }
-
- int middle_value = (1 << (overflow_bits_count - 1));
- if (dropped_bits > middle_value) {
- number++; // Rounding up.
- } else if (dropped_bits == middle_value) {
- // Rounding to even to consistency with decimals: half-way case rounds
- // up if significant part is odd and down otherwise.
- if ((number & 1) != 0 || !zero_tail) {
- number++; // Rounding up.
- }
- }
-
- // Rounding up may cause overflow.
- if ((number & ((int64_t)1 << 53)) != 0) {
- exponent++;
- number >>= 1;
- }
- break;
- }
- ++current;
- } while (current != end);
-
- ASSERT(number < ((int64_t)1 << 53));
- ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
-
- if (exponent == 0) {
- if (negative) {
- if (number == 0) return -0.0;
- number = -number;
- }
- return static_cast<double>(number);
- }
-
- ASSERT(number != 0);
- // The double could be constructed faster from number (mantissa), exponent
- // and sign. Assuming it's a rare case more simple code is used.
- return static_cast<double>(negative ? -number : number) * pow(2.0, exponent);
-}
-
-
-template <class Iterator, class EndMark>
-static double InternalStringToInt(UnicodeCache* unicode_cache,
- Iterator current,
- EndMark end,
- int radix) {
- const bool allow_trailing_junk = true;
- const double empty_string_val = JUNK_STRING_VALUE;
-
- if (!AdvanceToNonspace(unicode_cache, &current, end)) {
- return empty_string_val;
- }
-
- bool negative = false;
- bool leading_zero = false;
-
- if (*current == '+') {
- // Ignore leading sign; skip following spaces.
- ++current;
- if (current == end) {
- return JUNK_STRING_VALUE;
- }
- } else if (*current == '-') {
- ++current;
- if (current == end) {
- return JUNK_STRING_VALUE;
- }
- negative = true;
- }
-
- if (radix == 0) {
- // Radix detection.
- if (*current == '0') {
- ++current;
- if (current == end) return SignedZero(negative);
- if (*current == 'x' || *current == 'X') {
- radix = 16;
- ++current;
- if (current == end) return JUNK_STRING_VALUE;
- } else {
- radix = 8;
- leading_zero = true;
- }
- } else {
- radix = 10;
- }
- } else if (radix == 16) {
- if (*current == '0') {
- // Allow "0x" prefix.
- ++current;
- if (current == end) return SignedZero(negative);
- if (*current == 'x' || *current == 'X') {
- ++current;
- if (current == end) return JUNK_STRING_VALUE;
- } else {
- leading_zero = true;
- }
- }
- }
-
- if (radix < 2 || radix > 36) return JUNK_STRING_VALUE;
-
- // Skip leading zeros.
- while (*current == '0') {
- leading_zero = true;
- ++current;
- if (current == end) return SignedZero(negative);
- }
-
- if (!leading_zero && !isDigit(*current, radix)) {
- return JUNK_STRING_VALUE;
- }
-
- if (IsPowerOf2(radix)) {
- switch (radix) {
- case 2:
- return InternalStringToIntDouble<1>(
- unicode_cache, current, end, negative, allow_trailing_junk);
- case 4:
- return InternalStringToIntDouble<2>(
- unicode_cache, current, end, negative, allow_trailing_junk);
- case 8:
- return InternalStringToIntDouble<3>(
- unicode_cache, current, end, negative, allow_trailing_junk);
-
- case 16:
- return InternalStringToIntDouble<4>(
- unicode_cache, current, end, negative, allow_trailing_junk);
-
- case 32:
- return InternalStringToIntDouble<5>(
- unicode_cache, current, end, negative, allow_trailing_junk);
- default:
- UNREACHABLE();
- }
- }
-
- if (radix == 10) {
- // Parsing with strtod.
- const int kMaxSignificantDigits = 309; // Doubles are less than 1.8e308.
- // The buffer may contain up to kMaxSignificantDigits + 1 digits and a zero
- // end.
- const int kBufferSize = kMaxSignificantDigits + 2;
- char buffer[kBufferSize];
- int buffer_pos = 0;
- while (*current >= '0' && *current <= '9') {
- if (buffer_pos <= kMaxSignificantDigits) {
- // If the number has more than kMaxSignificantDigits it will be parsed
- // as infinity.
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos++] = static_cast<char>(*current);
- }
- ++current;
- if (current == end) break;
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
- return JUNK_STRING_VALUE;
- }
-
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos] = '\0';
- Vector<const char> buffer_vector(buffer, buffer_pos);
- return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
- }
-
- // The following code causes accumulating rounding error for numbers greater
- // than ~2^56. It's explicitly allowed in the spec: "if R is not 2, 4, 8, 10,
- // 16, or 32, then mathInt may be an implementation-dependent approximation to
- // the mathematical integer value" (15.1.2.2).
-
- int lim_0 = '0' + (radix < 10 ? radix : 10);
- int lim_a = 'a' + (radix - 10);
- int lim_A = 'A' + (radix - 10);
-
- // NOTE: The code for computing the value may seem a bit complex at
- // first glance. It is structured to use 32-bit multiply-and-add
- // loops as long as possible to avoid loosing precision.
-
- double v = 0.0;
- bool done = false;
- do {
- // Parse the longest part of the string starting at index j
- // possible while keeping the multiplier, and thus the part
- // itself, within 32 bits.
- unsigned int part = 0, multiplier = 1;
- while (true) {
- int d;
- if (*current >= '0' && *current < lim_0) {
- d = *current - '0';
- } else if (*current >= 'a' && *current < lim_a) {
- d = *current - 'a' + 10;
- } else if (*current >= 'A' && *current < lim_A) {
- d = *current - 'A' + 10;
- } else {
- done = true;
- break;
- }
-
- // Update the value of the part as long as the multiplier fits
- // in 32 bits. When we can't guarantee that the next iteration
- // will not overflow the multiplier, we stop parsing the part
- // by leaving the loop.
- const unsigned int kMaximumMultiplier = 0xffffffffU / 36;
- uint32_t m = multiplier * radix;
- if (m > kMaximumMultiplier) break;
- part = part * radix + d;
- multiplier = m;
- ASSERT(multiplier > part);
-
- ++current;
- if (current == end) {
- done = true;
- break;
- }
- }
-
- // Update the value and skip the part in the string.
- v = v * multiplier + part;
- } while (!done);
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
- return JUNK_STRING_VALUE;
- }
-
- return negative ? -v : v;
-}
-
-
-// Converts a string to a double value. Assumes the Iterator supports
-// the following operations:
-// 1. current == end (other ops are not allowed), current != end.
-// 2. *current - gets the current character in the sequence.
-// 3. ++current (advances the position).
-template <class Iterator, class EndMark>
-static double InternalStringToDouble(UnicodeCache* unicode_cache,
- Iterator current,
- EndMark end,
- int flags,
- double empty_string_val) {
- // To make sure that iterator dereferencing is valid the following
- // convention is used:
- // 1. Each '++current' statement is followed by check for equality to 'end'.
- // 2. If AdvanceToNonspace returned false then current == end.
- // 3. If 'current' becomes be equal to 'end' the function returns or goes to
- // 'parsing_done'.
- // 4. 'current' is not dereferenced after the 'parsing_done' label.
- // 5. Code before 'parsing_done' may rely on 'current != end'.
- if (!AdvanceToNonspace(unicode_cache, &current, end)) {
- return empty_string_val;
- }
-
- const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0;
-
- // The longest form of simplified number is: "-<significant digits>'.1eXXX\0".
- const int kBufferSize = kMaxSignificantDigits + 10;
- char buffer[kBufferSize]; // NOLINT: size is known at compile time.
- int buffer_pos = 0;
-
- // Exponent will be adjusted if insignificant digits of the integer part
- // or insignificant leading zeros of the fractional part are dropped.
- int exponent = 0;
- int significant_digits = 0;
- int insignificant_digits = 0;
- bool nonzero_digit_dropped = false;
- bool fractional_part = false;
-
- bool negative = false;
-
- if (*current == '+') {
- // Ignore leading sign.
- ++current;
- if (current == end) return JUNK_STRING_VALUE;
- } else if (*current == '-') {
- ++current;
- if (current == end) return JUNK_STRING_VALUE;
- negative = true;
- }
-
- static const char kInfinitySymbol[] = "Infinity";
- if (*current == kInfinitySymbol[0]) {
- if (!SubStringEquals(¤t, end, kInfinitySymbol)) {
- return JUNK_STRING_VALUE;
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
- return JUNK_STRING_VALUE;
- }
-
- ASSERT(buffer_pos == 0);
- return negative ? -V8_INFINITY : V8_INFINITY;
- }
-
- bool leading_zero = false;
- if (*current == '0') {
- ++current;
- if (current == end) return SignedZero(negative);
-
- leading_zero = true;
-
- // It could be hexadecimal value.
- if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
- ++current;
- if (current == end || !isDigit(*current, 16)) {
- return JUNK_STRING_VALUE; // "0x".
- }
-
- return InternalStringToIntDouble<4>(unicode_cache,
- current,
- end,
- negative,
- allow_trailing_junk);
- }
-
- // Ignore leading zeros in the integer part.
- while (*current == '0') {
- ++current;
- if (current == end) return SignedZero(negative);
- }
- }
-
- bool octal = leading_zero && (flags & ALLOW_OCTALS) != 0;
-
- // Copy significant digits of the integer part (if any) to the buffer.
- while (*current >= '0' && *current <= '9') {
- if (significant_digits < kMaxSignificantDigits) {
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos++] = static_cast<char>(*current);
- significant_digits++;
- // Will later check if it's an octal in the buffer.
- } else {
- insignificant_digits++; // Move the digit into the exponential part.
- nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
- }
- octal = octal && *current < '8';
- ++current;
- if (current == end) goto parsing_done;
- }
-
- if (significant_digits == 0) {
- octal = false;
- }
-
- if (*current == '.') {
- if (octal && !allow_trailing_junk) return JUNK_STRING_VALUE;
- if (octal) goto parsing_done;
-
- ++current;
- if (current == end) {
- if (significant_digits == 0 && !leading_zero) {
- return JUNK_STRING_VALUE;
- } else {
- goto parsing_done;
- }
- }
-
- if (significant_digits == 0) {
- // octal = false;
- // Integer part consists of 0 or is absent. Significant digits start after
- // leading zeros (if any).
- while (*current == '0') {
- ++current;
- if (current == end) return SignedZero(negative);
- exponent--; // Move this 0 into the exponent.
- }
- }
-
- // We don't emit a '.', but adjust the exponent instead.
- fractional_part = true;
-
- // There is a fractional part.
- while (*current >= '0' && *current <= '9') {
- if (significant_digits < kMaxSignificantDigits) {
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos++] = static_cast<char>(*current);
- significant_digits++;
- exponent--;
- } else {
- // Ignore insignificant digits in the fractional part.
- nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
- }
- ++current;
- if (current == end) goto parsing_done;
- }
- }
-
- if (!leading_zero && exponent == 0 && significant_digits == 0) {
- // If leading_zeros is true then the string contains zeros.
- // If exponent < 0 then string was [+-]\.0*...
- // If significant_digits != 0 the string is not equal to 0.
- // Otherwise there are no digits in the string.
- return JUNK_STRING_VALUE;
- }
-
- // Parse exponential part.
- if (*current == 'e' || *current == 'E') {
- if (octal) return JUNK_STRING_VALUE;
- ++current;
- if (current == end) {
- if (allow_trailing_junk) {
- goto parsing_done;
- } else {
- return JUNK_STRING_VALUE;
- }
- }
- char sign = '+';
- if (*current == '+' || *current == '-') {
- sign = static_cast<char>(*current);
- ++current;
- if (current == end) {
- if (allow_trailing_junk) {
- goto parsing_done;
- } else {
- return JUNK_STRING_VALUE;
- }
- }
- }
-
- if (current == end || *current < '0' || *current > '9') {
- if (allow_trailing_junk) {
- goto parsing_done;
- } else {
- return JUNK_STRING_VALUE;
- }
- }
-
- const int max_exponent = INT_MAX / 2;
- ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
- int num = 0;
- do {
- // Check overflow.
- int digit = *current - '0';
- if (num >= max_exponent / 10
- && !(num == max_exponent / 10 && digit <= max_exponent % 10)) {
- num = max_exponent;
- } else {
- num = num * 10 + digit;
- }
- ++current;
- } while (current != end && *current >= '0' && *current <= '9');
-
- exponent += (sign == '-' ? -num : num);
- }
-
- if (!allow_trailing_junk &&
- AdvanceToNonspace(unicode_cache, &current, end)) {
- return JUNK_STRING_VALUE;
- }
-
- parsing_done:
- exponent += insignificant_digits;
-
- if (octal) {
- return InternalStringToIntDouble<3>(unicode_cache,
- buffer,
- buffer + buffer_pos,
- negative,
- allow_trailing_junk);
- }
-
- if (nonzero_digit_dropped) {
- buffer[buffer_pos++] = '1';
- exponent--;
- }
-
- ASSERT(buffer_pos < kBufferSize);
- buffer[buffer_pos] = '\0';
-
- double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
- return negative ? -converted : converted;
-}
-
-
-double StringToDouble(UnicodeCache* unicode_cache,
- String* str, int flags, double empty_string_val) {
- StringShape shape(str);
- if (shape.IsSequentialAscii()) {
- const char* begin = SeqAsciiString::cast(str)->GetChars();
- const char* end = begin + str->length();
- return InternalStringToDouble(unicode_cache, begin, end, flags,
- empty_string_val);
- } else if (shape.IsSequentialTwoByte()) {
- const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
- const uc16* end = begin + str->length();
- return InternalStringToDouble(unicode_cache, begin, end, flags,
- empty_string_val);
- } else {
- StringInputBuffer buffer(str);
- return InternalStringToDouble(unicode_cache,
- StringInputBufferIterator(&buffer),
- StringInputBufferIterator::EndMarker(),
- flags,
- empty_string_val);
- }
-}
-
-
-double StringToInt(UnicodeCache* unicode_cache,
- String* str,
- int radix) {
- StringShape shape(str);
- if (shape.IsSequentialAscii()) {
- const char* begin = SeqAsciiString::cast(str)->GetChars();
- const char* end = begin + str->length();
- return InternalStringToInt(unicode_cache, begin, end, radix);
- } else if (shape.IsSequentialTwoByte()) {
- const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
- const uc16* end = begin + str->length();
- return InternalStringToInt(unicode_cache, begin, end, radix);
- } else {
- StringInputBuffer buffer(str);
- return InternalStringToInt(unicode_cache,
- StringInputBufferIterator(&buffer),
- StringInputBufferIterator::EndMarker(),
- radix);
- }
-}
double StringToDouble(UnicodeCache* unicode_cache,
@@ -750,7 +72,7 @@
case FP_INFINITE: return (v < 0.0 ? "-Infinity" : "Infinity");
case FP_ZERO: return "0";
default: {
- StringBuilder builder(buffer.start(), buffer.length());
+ SimpleStringBuilder builder(buffer.start(), buffer.length());
int decimal_point;
int sign;
const int kV8DtoaBufferCapacity = kBase10MaximalLength + 1;
@@ -791,7 +113,7 @@
builder.AddCharacter((decimal_point >= 0) ? '+' : '-');
int exponent = decimal_point - 1;
if (exponent < 0) exponent = -exponent;
- builder.AddFormatted("%d", exponent);
+ builder.AddDecimalInteger(exponent);
}
return builder.Finalize();
}
@@ -869,7 +191,7 @@
unsigned rep_length =
zero_prefix_length + decimal_rep_length + zero_postfix_length;
- StringBuilder rep_builder(rep_length + 1);
+ SimpleStringBuilder rep_builder(rep_length + 1);
rep_builder.AddPadding('0', zero_prefix_length);
rep_builder.AddString(decimal_rep);
rep_builder.AddPadding('0', zero_postfix_length);
@@ -878,7 +200,7 @@
// Create the result string by appending a minus and putting in a
// decimal point if needed.
unsigned result_size = decimal_point + f + 2;
- StringBuilder builder(result_size + 1);
+ SimpleStringBuilder builder(result_size + 1);
if (negative) builder.AddCharacter('-');
builder.AddSubstring(rep, decimal_point);
if (f > 0) {
@@ -904,7 +226,7 @@
// letter 'e', a minus or a plus depending on the exponent, and a
// three digit exponent.
unsigned result_size = significant_digits + 7;
- StringBuilder builder(result_size + 1);
+ SimpleStringBuilder builder(result_size + 1);
if (negative) builder.AddCharacter('-');
builder.AddCharacter(decimal_rep[0]);
@@ -917,7 +239,7 @@
builder.AddCharacter('e');
builder.AddCharacter(negative_exponent ? '-' : '+');
- builder.AddFormatted("%d", exponent);
+ builder.AddDecimalInteger(exponent);
return builder.Finalize();
}
@@ -1009,7 +331,7 @@
unsigned result_size = (decimal_point <= 0)
? -decimal_point + p + 3
: p + 2;
- StringBuilder builder(result_size + 1);
+ SimpleStringBuilder builder(result_size + 1);
if (negative) builder.AddCharacter('-');
if (decimal_point <= 0) {
builder.AddString("0.");
@@ -1101,31 +423,11 @@
// If the number has a decimal part, leave room for the period.
if (decimal_pos > 0) result_size++;
// Allocate result and fill in the parts.
- StringBuilder builder(result_size + 1);
+ SimpleStringBuilder builder(result_size + 1);
builder.AddSubstring(integer_buffer + integer_pos + 1, integer_part_size);
if (decimal_pos > 0) builder.AddCharacter('.');
builder.AddSubstring(decimal_buffer, decimal_pos);
return builder.Finalize();
}
-
-static Mutex* dtoa_lock_one = OS::CreateMutex();
-static Mutex* dtoa_lock_zero = OS::CreateMutex();
-
-
} } // namespace v8::internal
-
-
-extern "C" {
-void ACQUIRE_DTOA_LOCK(int n) {
- ASSERT(n == 0 || n == 1);
- (n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->Lock();
-}
-
-
-void FREE_DTOA_LOCK(int n) {
- ASSERT(n == 0 || n == 1);
- (n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->
- Unlock();
-}
-}
diff --git a/src/conversions.h b/src/conversions.h
index 4cbeeca..9c0b8f3 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -29,10 +29,32 @@
#define V8_CONVERSIONS_H_
#include "scanner-base.h"
+#include "utils.h"
namespace v8 {
namespace internal {
+// Maximum number of significant digits in decimal representation.
+// The longest possible double in decimal representation is
+// (2^53 - 1) * 2 ^ -1074 that is (2 ^ 53 - 1) * 5 ^ 1074 / 10 ^ 1074
+// (768 digits). If we parse a number whose first digits are equal to a
+// mean of 2 adjacent doubles (that could have up to 769 digits) the result
+// must be rounded to the bigger one unless the tail consists of zeros, so
+// we don't need to preserve all the digits.
+const int kMaxSignificantDigits = 772;
+
+
+static bool isDigit(int x, int radix) {
+ return (x >= '0' && x <= '9' && x < '0' + radix)
+ || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
+ || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
+}
+
+
+static double SignedZero(bool negative) {
+ return negative ? -0.0 : 0.0;
+}
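A quick sanity-check sketch for the two helpers now exposed by this header (illustrative only; assumes isDigit() and SignedZero() from above are in scope):

#include <assert.h>

int main() {
  assert(isDigit('7', 8));
  assert(!isDigit('8', 8));              // '8' is not an octal digit.
  assert(isDigit('f', 16) && isDigit('F', 16));
  assert(!isDigit('g', 16));
  assert(1.0 / SignedZero(true) < 0.0);  // -0.0 keeps its sign.
  return 0;
}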
+
// The fast double-to-(unsigned-)int conversion routine does not guarantee
// rounding towards zero.
@@ -87,17 +109,8 @@
};
-// Convert from Number object to C integer.
-static inline int32_t NumberToInt32(Object* number);
-static inline uint32_t NumberToUint32(Object* number);
-
-
// Converts a string into a double value according to ECMA-262 9.3.1
double StringToDouble(UnicodeCache* unicode_cache,
- String* str,
- int flags,
- double empty_string_val = 0);
-double StringToDouble(UnicodeCache* unicode_cache,
Vector<const char> str,
int flags,
double empty_string_val = 0);
@@ -111,9 +124,6 @@
int flags,
double empty_string_val = 0);
-// Converts a string into an integer.
-double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
-
// Converts a double to a string value according to ECMA-262 9.8.1.
// The buffer should be large enough for any floating point number.
// 100 characters is enough.
diff --git a/src/cpu-profiler-inl.h b/src/cpu-profiler-inl.h
index b704417..938b632 100644
--- a/src/cpu-profiler-inl.h
+++ b/src/cpu-profiler-inl.h
@@ -30,8 +30,7 @@
#include "cpu-profiler.h"
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
+#include <new>
#include "circular-queue-inl.h"
#include "profile-generator-inl.h"
#include "unbound-queue-inl.h"
@@ -62,24 +61,10 @@
}
-TickSampleEventRecord* TickSampleEventRecord::init(void* value) {
- TickSampleEventRecord* result =
- reinterpret_cast<TickSampleEventRecord*>(value);
- result->filler = 1;
- ASSERT(result->filler != SamplingCircularQueue::kClear);
- // Init the required fields only.
- result->sample.pc = NULL;
- result->sample.frames_count = 0;
- result->sample.has_external_callback = false;
- return result;
-}
-
-
TickSample* ProfilerEventsProcessor::TickSampleEvent() {
generator_->Tick();
TickSampleEventRecord* evt =
- TickSampleEventRecord::init(ticks_buffer_.Enqueue());
- evt->order = enqueue_order_; // No increment!
+ new(ticks_buffer_.Enqueue()) TickSampleEventRecord(enqueue_order_);
return &evt->sample;
}
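The hand-rolled init() is replaced by placement new into the slot returned by Enqueue(). A self-contained sketch of that pattern (the Record type here is made up for illustration):

#include <new>       // Placement operator new.
#include <stdlib.h>

struct Record {
  explicit Record(unsigned order) : order(order) {}
  unsigned order;
};

int main() {
  void* slot = malloc(sizeof(Record));   // Stands in for Enqueue().
  Record* rec = new (slot) Record(42u);  // Construct in place, no allocation.
  rec->~Record();                        // Destroy explicitly before reuse.
  free(slot);
  return 0;
}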
@@ -96,6 +81,4 @@
} } // namespace v8::internal
-#endif // ENABLE_LOGGING_AND_PROFILING
-
#endif // V8_CPU_PROFILER_INL_H_
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index f54e3e8..bb480fc 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -29,8 +29,6 @@
#include "cpu-profiler-inl.h"
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
#include "frames-inl.h"
#include "hashmap.h"
#include "log-inl.h"
@@ -46,9 +44,8 @@
static const int kTickSamplesBufferChunksCount = 16;
-ProfilerEventsProcessor::ProfilerEventsProcessor(Isolate* isolate,
- ProfileGenerator* generator)
- : Thread(isolate, "v8:ProfEvntProc"),
+ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
+ : Thread("v8:ProfEvntProc"),
generator_(generator),
running_(true),
ticks_buffer_(sizeof(TickSampleEventRecord),
@@ -182,20 +179,16 @@
void ProfilerEventsProcessor::AddCurrentStack() {
- TickSampleEventRecord record;
+ TickSampleEventRecord record(enqueue_order_);
TickSample* sample = &record.sample;
Isolate* isolate = Isolate::Current();
sample->state = isolate->current_vm_state();
sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
- sample->tos = NULL;
- sample->has_external_callback = false;
- sample->frames_count = 0;
for (StackTraceFrameIterator it(isolate);
!it.done() && sample->frames_count < TickSample::kMaxFramesCount;
it.Advance()) {
sample->stack[sample->frames_count++] = it.frame()->pc();
}
- record.order = enqueue_order_;
ticks_from_vm_buffer_.Enqueue(record);
}
@@ -507,7 +500,7 @@
saved_logging_nesting_ = isolate->logger()->logging_nesting_;
isolate->logger()->logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(isolate, generator_);
+ processor_ = new ProfilerEventsProcessor(generator_);
NoBarrier_Store(&is_profiling_, true);
processor_->Start();
// Enumerate stuff we already have in the heap.
@@ -579,31 +572,21 @@
logger->logging_nesting_ = saved_logging_nesting_;
}
-} } // namespace v8::internal
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-namespace v8 {
-namespace internal {
void CpuProfiler::Setup() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
Isolate* isolate = Isolate::Current();
if (isolate->cpu_profiler() == NULL) {
isolate->set_cpu_profiler(new CpuProfiler());
}
-#endif
}
void CpuProfiler::TearDown() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
Isolate* isolate = Isolate::Current();
if (isolate->cpu_profiler() != NULL) {
delete isolate->cpu_profiler();
}
isolate->set_cpu_profiler(NULL);
-#endif
}
} } // namespace v8::internal
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index f9f6167..4175e8f 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -28,8 +28,6 @@
#ifndef V8_CPU_PROFILER_H_
#define V8_CPU_PROFILER_H_
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
@@ -106,10 +104,14 @@
};
-class TickSampleEventRecord BASE_EMBEDDED {
+class TickSampleEventRecord {
public:
- TickSampleEventRecord()
- : filler(1) {
+ // The parameterless constructor is used when we dequeue data from
+ // the ticks buffer.
+ TickSampleEventRecord() { }
+ explicit TickSampleEventRecord(unsigned order)
+ : filler(1),
+ order(order) {
ASSERT(filler != SamplingCircularQueue::kClear);
}
@@ -125,8 +127,6 @@
static TickSampleEventRecord* cast(void* value) {
return reinterpret_cast<TickSampleEventRecord*>(value);
}
-
- INLINE(static TickSampleEventRecord* init(void* value));
};
@@ -134,8 +134,7 @@
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
public:
- ProfilerEventsProcessor(Isolate* isolate,
- ProfileGenerator* generator);
+ explicit ProfilerEventsProcessor(ProfileGenerator* generator);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
@@ -205,9 +204,6 @@
v8::internal::CpuProfiler::Call; \
} \
} while (false)
-#else
-#define PROFILE(isolate, Call) LOG(isolate, Call)
-#endif // ENABLE_LOGGING_AND_PROFILING
namespace v8 {
@@ -220,7 +216,6 @@
static void Setup();
static void TearDown();
-#ifdef ENABLE_LOGGING_AND_PROFILING
static void StartProfiling(const char* title);
static void StartProfiling(String* title);
static CpuProfile* StopProfiling(const char* title);
@@ -288,10 +283,6 @@
bool need_to_stop_sampler_;
Atomic32 is_profiling_;
-#else
- static INLINE(bool is_profiling(Isolate* isolate)) { return false; }
-#endif // ENABLE_LOGGING_AND_PROFILING
-
private:
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};
diff --git a/src/d8-debug.cc b/src/d8-debug.cc
index 3df8693..0662205 100644
--- a/src/d8-debug.cc
+++ b/src/d8-debug.cc
@@ -159,7 +159,7 @@
void RunRemoteDebugger(int port) {
- RemoteDebugger debugger(i::Isolate::Current(), port);
+ RemoteDebugger debugger(port);
debugger.Run();
}
@@ -186,11 +186,11 @@
}
// Start the receiver thread.
- ReceiverThread receiver(isolate_, this);
+ ReceiverThread receiver(this);
receiver.Start();
// Start the keyboard thread.
- KeyboardThread keyboard(isolate_, this);
+ KeyboardThread keyboard(this);
keyboard.Start();
PrintPrompt();
@@ -272,6 +272,7 @@
void RemoteDebugger::HandleMessageReceived(char* message) {
+ Locker lock;
HandleScope scope;
// Print the event details.
@@ -300,6 +301,7 @@
void RemoteDebugger::HandleKeyboardCommand(char* command) {
+ Locker lock;
HandleScope scope;
// Convert the debugger command to a JSON debugger request.
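Both handlers run on the receiver and keyboard threads, so they now take the global V8 lock before touching any handles. A sketch of the general pattern, using the single-isolate Locker API of this V8 era:

#include <v8.h>

void OnOtherThread() {
  v8::Locker lock;        // Acquire V8 for this thread; blocks if contended.
  v8::HandleScope scope;  // Local handles created below live in this scope.
  v8::Handle<v8::String> msg = v8::String::New("handled off-thread");
  // ... safe to call into the V8 API here ...
}                         // Scope, then lock, released on return.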
diff --git a/src/d8-debug.h b/src/d8-debug.h
index ceb9e36..4e33e6f 100644
--- a/src/d8-debug.h
+++ b/src/d8-debug.h
@@ -53,11 +53,11 @@
// Remote debugging class.
class RemoteDebugger {
public:
- RemoteDebugger(i::Isolate* isolate, int port)
+ explicit RemoteDebugger(int port)
: port_(port),
event_access_(i::OS::CreateMutex()),
event_available_(i::OS::CreateSemaphore(0)),
- head_(NULL), tail_(NULL), isolate_(isolate) {}
+ head_(NULL), tail_(NULL) {}
void Run();
// Handle events from the subordinate threads.
@@ -89,7 +89,6 @@
i::Semaphore* event_available_;
RemoteDebuggerEvent* head_;
RemoteDebuggerEvent* tail_;
- i::Isolate* isolate_;
friend class ReceiverThread;
};
@@ -98,8 +97,8 @@
// Thread reading from debugged V8 instance.
class ReceiverThread: public i::Thread {
public:
- ReceiverThread(i::Isolate* isolate, RemoteDebugger* remote_debugger)
- : Thread(isolate, "d8:ReceiverThrd"),
+ explicit ReceiverThread(RemoteDebugger* remote_debugger)
+ : Thread("d8:ReceiverThrd"),
remote_debugger_(remote_debugger) {}
~ReceiverThread() {}
@@ -113,8 +112,8 @@
// Thread reading keyboard input.
class KeyboardThread: public i::Thread {
public:
- explicit KeyboardThread(i::Isolate* isolate, RemoteDebugger* remote_debugger)
- : Thread(isolate, "d8:KeyboardThrd"),
+ explicit KeyboardThread(RemoteDebugger* remote_debugger)
+ : Thread("d8:KeyboardThrd"),
remote_debugger_(remote_debugger) {}
~KeyboardThread() {}
diff --git a/src/d8-posix.cc b/src/d8-posix.cc
index a7a4049..658fd4f 100644
--- a/src/d8-posix.cc
+++ b/src/d8-posix.cc
@@ -311,10 +311,6 @@
int read_timeout,
int total_timeout) {
Handle<String> accumulator = String::Empty();
- const char* source = "(function(a, b) { return a + b; })";
- Handle<Value> cons_as_obj(Script::Compile(String::New(source))->Run());
- Handle<Function> cons_function(Function::Cast(*cons_as_obj));
- Handle<Value> cons_args[2];
int fullness = 0;
static const int kStdoutReadBufferSize = 4096;
@@ -350,12 +346,7 @@
bytes_read + fullness :
LengthWithoutIncompleteUtf8(buffer, bytes_read + fullness);
Handle<String> addition = String::New(buffer, length);
- cons_args[0] = accumulator;
- cons_args[1] = addition;
- accumulator = Handle<String>::Cast(cons_function->Call(
- Shell::utility_context()->Global(),
- 2,
- cons_args));
+ accumulator = String::Concat(accumulator, addition);
fullness = bytes_read + fullness - length;
memcpy(buffer, buffer + length, fullness);
}
diff --git a/src/d8.cc b/src/d8.cc
index f1068cb..5fd9d27 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -26,27 +26,49 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <stdlib.h>
-#include <errno.h>
+#ifdef V8_SHARED
+#define USING_V8_SHARED
+#endif
-#include "v8.h"
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+#include <bzlib.h>
+#endif
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef USING_V8_SHARED
+#include <assert.h>
+#include "../include/v8-testing.h"
+#endif // USING_V8_SHARED
#include "d8.h"
+
+#ifndef USING_V8_SHARED
+#include "api.h"
+#include "checks.h"
#include "d8-debug.h"
#include "debug.h"
-#include "api.h"
#include "natives.h"
#include "platform.h"
+#include "v8.h"
+#endif // USING_V8_SHARED
+#if !defined(_WIN32) && !defined(_WIN64)
+#include <unistd.h> // NOLINT
+#endif
+
+#ifdef USING_V8_SHARED
+#define ASSERT(condition) assert(condition)
+#endif // USING_V8_SHARED
namespace v8 {
-const char* Shell::kHistoryFileName = ".d8_history";
-const char* Shell::kPrompt = "d8> ";
-
-
+#ifndef USING_V8_SHARED
LineEditor *LineEditor::first_ = NULL;
+const char* Shell::kHistoryFileName = ".d8_history";
LineEditor::LineEditor(Type type, const char* name)
@@ -92,15 +114,22 @@
i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
+i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
Persistent<Context> Shell::utility_context_;
+#endif // USING_V8_SHARED
+
Persistent<Context> Shell::evaluation_context_;
+ShellOptions Shell::options;
+const char* Shell::kPrompt = "d8> ";
+#ifndef USING_V8_SHARED
bool CounterMap::Match(void* key1, void* key2) {
const char* name1 = reinterpret_cast<const char*>(key1);
const char* name2 = reinterpret_cast<const char*>(key2);
return strcmp(name1, name2) == 0;
}
+#endif // USING_V8_SHARED
// Converts a V8 value to a C string.
@@ -114,16 +143,22 @@
Handle<Value> name,
bool print_result,
bool report_exceptions) {
+#ifndef USING_V8_SHARED
+ bool FLAG_debugger = i::FLAG_debugger;
+#else
+ bool FLAG_debugger = false;
+#endif // USING_V8_SHARED
HandleScope handle_scope;
TryCatch try_catch;
- if (i::FLAG_debugger) {
+ options.script_executed = true;
+ if (FLAG_debugger) {
// When debugging make exceptions appear to be uncaught.
try_catch.SetVerbose(true);
}
Handle<Script> script = Script::Compile(source, name);
if (script.IsEmpty()) {
// Print errors that happened during compilation.
- if (report_exceptions && !i::FLAG_debugger)
+ if (report_exceptions && !FLAG_debugger)
ReportException(&try_catch);
return false;
} else {
@@ -131,7 +166,7 @@
if (result.IsEmpty()) {
ASSERT(try_catch.HasCaught());
// Print errors that happened during execution.
- if (report_exceptions && !i::FLAG_debugger)
+ if (report_exceptions && !FLAG_debugger)
ReportException(&try_catch);
return false;
} else {
@@ -152,6 +187,7 @@
Handle<Value> Shell::Print(const Arguments& args) {
Handle<Value> val = Write(args);
printf("\n");
+ fflush(stdout);
return val;
}
@@ -187,15 +223,20 @@
Handle<Value> Shell::ReadLine(const Arguments& args) {
- i::SmartPointer<char> line(i::ReadLine(""));
- if (*line == NULL) {
- return Null();
- }
- size_t len = strlen(*line);
- if (len > 0 && line[len - 1] == '\n') {
- --len;
- }
- return String::New(*line, len);
+ static const int kBufferSize = 256;
+ char buffer[kBufferSize];
+ Handle<String> accumulator = String::New("");
+ bool linebreak;
+ int length;
+ do { // Repeat if the line ends with an escape '\'.
+ // If fgets fails, just give up.
+ if (fgets(buffer, kBufferSize, stdin) == NULL) return Null();
+ length = strlen(buffer);
+ linebreak = (length > 1 && buffer[length-2] == '\\');
+ if (linebreak) buffer[length-2] = '\n';
+ accumulator = String::Concat(accumulator, String::New(buffer, length-1));
+ } while (linebreak);
+ return accumulator;
}
@@ -218,6 +259,107 @@
}
+Handle<Value> Shell::CreateExternalArray(const Arguments& args,
+ ExternalArrayType type,
+ size_t element_size) {
+ ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
+ element_size == 8);
+ if (args.Length() != 1) {
+ return ThrowException(
+ String::New("Array constructor needs one parameter."));
+ }
+ static const int kMaxLength = 0x3fffffff;
+#ifndef USING_V8_SHARED
+ ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
+#endif // USING_V8_SHARED
+ size_t length = 0;
+ if (args[0]->IsUint32()) {
+ length = args[0]->Uint32Value();
+ } else if (args[0]->IsNumber()) {
+ double raw_length = args[0]->NumberValue();
+ if (raw_length < 0) {
+ return ThrowException(String::New("Array length must not be negative."));
+ }
+ if (raw_length > kMaxLength) {
+ return ThrowException(
+ String::New("Array length exceeds maximum length."));
+ }
+ length = static_cast<size_t>(raw_length);
+ } else {
+ return ThrowException(String::New("Array length must be a number."));
+ }
+ if (length > static_cast<size_t>(kMaxLength)) {
+ return ThrowException(String::New("Array length exceeds maximum length."));
+ }
+ void* data = calloc(length, element_size);
+ if (data == NULL) {
+ return ThrowException(String::New("Memory allocation failed."));
+ }
+ Handle<Object> array = Object::New();
+ Persistent<Object> persistent_array = Persistent<Object>::New(array);
+ persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
+ persistent_array.MarkIndependent();
+ array->SetIndexedPropertiesToExternalArrayData(data, type, length);
+ array->Set(String::New("length"), Int32::New(length), ReadOnly);
+ array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
+ return array;
+}
+
+
+void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
+ free(data);
+ object.Dispose();
+}
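The backing store is owned by C++ and reclaimed through the weak callback once the wrapper object becomes unreachable. Condensed into a standalone sketch (helper names are made up; error handling elided):

static void FreeBacking(v8::Persistent<v8::Value> object, void* data) {
  free(data);        // Release the calloc'ed element storage.
  object.Dispose();  // Drop the weak handle itself.
}

v8::Handle<v8::Object> MakeInt32Backed(size_t length) {
  void* data = calloc(length, sizeof(int32_t));  // NULL check elided.
  v8::Handle<v8::Object> obj = v8::Object::New();
  v8::Persistent<v8::Object> weak = v8::Persistent<v8::Object>::New(obj);
  weak.MakeWeak(data, FreeBacking);  // Fires once 'obj' is only weakly held.
  obj->SetIndexedPropertiesToExternalArrayData(
      data, v8::kExternalIntArray, static_cast<int>(length));
  return obj;
}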
+
+
+Handle<Value> Shell::Int8Array(const Arguments& args) {
+ return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
+}
+
+
+Handle<Value> Shell::Uint8Array(const Arguments& args) {
+ return CreateExternalArray(args, kExternalUnsignedByteArray, sizeof(uint8_t));
+}
+
+
+Handle<Value> Shell::Int16Array(const Arguments& args) {
+ return CreateExternalArray(args, kExternalShortArray, sizeof(int16_t));
+}
+
+
+Handle<Value> Shell::Uint16Array(const Arguments& args) {
+ return CreateExternalArray(args, kExternalUnsignedShortArray,
+ sizeof(uint16_t));
+}
+
+
+Handle<Value> Shell::Int32Array(const Arguments& args) {
+ return CreateExternalArray(args, kExternalIntArray, sizeof(int32_t));
+}
+
+
+Handle<Value> Shell::Uint32Array(const Arguments& args) {
+ return CreateExternalArray(args, kExternalUnsignedIntArray, sizeof(uint32_t));
+}
+
+
+Handle<Value> Shell::Float32Array(const Arguments& args) {
+ return CreateExternalArray(args, kExternalFloatArray,
+ sizeof(float)); // NOLINT
+}
+
+
+Handle<Value> Shell::Float64Array(const Arguments& args) {
+ return CreateExternalArray(args, kExternalDoubleArray,
+ sizeof(double)); // NOLINT
+}
+
+
+Handle<Value> Shell::PixelArray(const Arguments& args) {
+ return CreateExternalArray(args, kExternalPixelArray, sizeof(uint8_t));
+}
+
+
Handle<Value> Shell::Yield(const Arguments& args) {
v8::Unlocker unlocker;
return Undefined();
@@ -226,7 +368,9 @@
Handle<Value> Shell::Quit(const Arguments& args) {
int exit_code = args[0]->Int32Value();
+#ifndef USING_V8_SHARED
OnExit();
+#endif // USING_V8_SHARED
exit(exit_code);
return Undefined();
}
@@ -275,6 +419,7 @@
}
+#ifndef USING_V8_SHARED
Handle<Array> Shell::GetCompletions(Handle<String> text, Handle<String> full) {
HandleScope handle_scope;
Context::Scope context_scope(utility_context_);
@@ -308,9 +453,11 @@
Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
return val;
}
-#endif
+#endif // ENABLE_DEBUGGER_SUPPORT
+#endif // USING_V8_SHARED
+#ifndef USING_V8_SHARED
int32_t* Counter::Bind(const char* name, bool is_histogram) {
int i;
for (i = 0; i < kMaxNameSize - 1 && name[i]; i++)
@@ -342,8 +489,8 @@
void Shell::MapCounters(const char* name) {
- counters_file_ = i::OS::MemoryMappedFile::create(name,
- sizeof(CounterCollection), &local_counters_);
+ counters_file_ = i::OS::MemoryMappedFile::create(
+ name, sizeof(CounterCollection), &local_counters_);
void* memory = (counters_file_ == NULL) ?
NULL : counters_file_->memory();
if (memory == NULL) {
@@ -409,56 +556,15 @@
}
-void Shell::Initialize() {
- Shell::counter_map_ = new CounterMap();
- // Set up counters
- if (i::StrLength(i::FLAG_map_counters) != 0)
- MapCounters(i::FLAG_map_counters);
- if (i::FLAG_dump_counters) {
- V8::SetCounterFunction(LookupCounter);
- V8::SetCreateHistogramFunction(CreateHistogram);
- V8::SetAddHistogramSampleFunction(AddHistogramSample);
- }
-
- // Initialize the global objects
+void Shell::InstallUtilityScript() {
+ Locker lock;
HandleScope scope;
- Handle<ObjectTemplate> global_template = ObjectTemplate::New();
- global_template->Set(String::New("print"), FunctionTemplate::New(Print));
- global_template->Set(String::New("write"), FunctionTemplate::New(Write));
- global_template->Set(String::New("read"), FunctionTemplate::New(Read));
- global_template->Set(String::New("readline"),
- FunctionTemplate::New(ReadLine));
- global_template->Set(String::New("load"), FunctionTemplate::New(Load));
- global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
- global_template->Set(String::New("version"), FunctionTemplate::New(Version));
-
-#ifdef LIVE_OBJECT_LIST
- global_template->Set(String::New("lol_is_enabled"), Boolean::New(true));
-#else
- global_template->Set(String::New("lol_is_enabled"), Boolean::New(false));
-#endif
-
- Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
- AddOSMethods(os_templ);
- global_template->Set(String::New("os"), os_templ);
-
- utility_context_ = Context::New(NULL, global_template);
+ // If we use the utility context, we have to set the security tokens so that
+ // utility, evaluation and debug context can all access each other.
utility_context_->SetSecurityToken(Undefined());
+ evaluation_context_->SetSecurityToken(Undefined());
Context::Scope utility_scope(utility_context_);
- i::JSArguments js_args = i::FLAG_js_arguments;
- i::Handle<i::FixedArray> arguments_array =
- FACTORY->NewFixedArray(js_args.argc());
- for (int j = 0; j < js_args.argc(); j++) {
- i::Handle<i::String> arg =
- FACTORY->NewStringFromUtf8(i::CStrVector(js_args[j]));
- arguments_array->set(j, *arg);
- }
- i::Handle<i::JSArray> arguments_jsarray =
- FACTORY->NewJSArrayWithElements(arguments_array);
- global_template->Set(String::New("arguments"),
- Utils::ToLocal(arguments_jsarray));
-
#ifdef ENABLE_DEBUGGER_SUPPORT
// Install the debugger object in the utility scope
i::Debug* debug = i::Isolate::Current()->debug();
@@ -467,21 +573,21 @@
= i::Handle<i::JSObject>(debug->debug_context()->global());
utility_context_->Global()->Set(String::New("$debug"),
Utils::ToLocal(js_debug));
-#endif
+ debug->debug_context()->set_security_token(HEAP->undefined_value());
+#endif // ENABLE_DEBUGGER_SUPPORT
// Run the d8 shell utility script in the utility context
int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
- i::Vector<const char> shell_source
- = i::NativesCollection<i::D8>::GetScriptSource(source_index);
- i::Vector<const char> shell_source_name
- = i::NativesCollection<i::D8>::GetScriptName(source_index);
+ i::Vector<const char> shell_source =
+ i::NativesCollection<i::D8>::GetRawScriptSource(source_index);
+ i::Vector<const char> shell_source_name =
+ i::NativesCollection<i::D8>::GetScriptName(source_index);
Handle<String> source = String::New(shell_source.start(),
- shell_source.length());
+ shell_source.length());
Handle<String> name = String::New(shell_source_name.start(),
- shell_source_name.length());
+ shell_source_name.length());
Handle<Script> script = Script::Compile(source, name);
script->Run();
-
// Mark the d8 shell script as native to avoid it showing up as normal source
// in the debugger.
i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script);
@@ -492,53 +598,189 @@
i::SharedFunctionInfo::cast(*compiled_script)->script()));
script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
- // Create the evaluation context
- evaluation_context_ = Context::New(NULL, global_template);
- evaluation_context_->SetSecurityToken(Undefined());
-
#ifdef ENABLE_DEBUGGER_SUPPORT
- // Set the security token of the debug context to allow access.
- debug->debug_context()->set_security_token(HEAP->undefined_value());
-
- // Start the debugger agent if requested.
- if (i::FLAG_debugger_agent) {
- v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
- }
-
// Start the in-process debugger if requested.
if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
v8::Debug::SetDebugEventListener(HandleDebugEvent);
}
+#endif // ENABLE_DEBUGGER_SUPPORT
+}
+#endif // USING_V8_SHARED
+
+
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+class BZip2Decompressor : public v8::StartupDataDecompressor {
+ public:
+ virtual ~BZip2Decompressor() { }
+
+ protected:
+ virtual int DecompressData(char* raw_data,
+ int* raw_data_size,
+ const char* compressed_data,
+ int compressed_data_size) {
+ ASSERT_EQ(v8::StartupData::kBZip2,
+ v8::V8::GetCompressedStartupDataAlgorithm());
+ unsigned int decompressed_size = *raw_data_size;
+ int result =
+ BZ2_bzBuffToBuffDecompress(raw_data,
+ &decompressed_size,
+ const_cast<char*>(compressed_data),
+ compressed_data_size,
+ 0, 1);
+ if (result == BZ_OK) {
+ *raw_data_size = decompressed_size;
+ }
+ return result;
+ }
+};
#endif
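A round-trip sketch of the bzip2 buffer-to-buffer API the decompressor relies on (illustrative; the patch itself only decompresses, buffer sizes are arbitrary):

#include <assert.h>
#include <bzlib.h>
#include <string.h>

int main() {
  char raw[] = "startup snapshot bytes";
  char packed[256];
  unsigned packed_size = sizeof(packed);
  int rc = BZ2_bzBuffToBuffCompress(packed, &packed_size, raw, sizeof(raw),
                                    1 /* 100k block size */, 0, 0);
  assert(rc == BZ_OK);

  char unpacked[64];
  unsigned unpacked_size = sizeof(unpacked);
  rc = BZ2_bzBuffToBuffDecompress(unpacked, &unpacked_size,
                                  packed, packed_size, 0, 0);
  assert(rc == BZ_OK && unpacked_size == sizeof(raw));
  assert(memcmp(raw, unpacked, unpacked_size) == 0);
  return 0;
}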
+
+Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
+ Handle<ObjectTemplate> global_template = ObjectTemplate::New();
+ global_template->Set(String::New("print"), FunctionTemplate::New(Print));
+ global_template->Set(String::New("write"), FunctionTemplate::New(Write));
+ global_template->Set(String::New("read"), FunctionTemplate::New(Read));
+ global_template->Set(String::New("readline"),
+ FunctionTemplate::New(ReadLine));
+ global_template->Set(String::New("load"), FunctionTemplate::New(Load));
+ global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
+ global_template->Set(String::New("version"), FunctionTemplate::New(Version));
+
+ // Bind the handlers for external arrays.
+ global_template->Set(String::New("Int8Array"),
+ FunctionTemplate::New(Int8Array));
+ global_template->Set(String::New("Uint8Array"),
+ FunctionTemplate::New(Uint8Array));
+ global_template->Set(String::New("Int16Array"),
+ FunctionTemplate::New(Int16Array));
+ global_template->Set(String::New("Uint16Array"),
+ FunctionTemplate::New(Uint16Array));
+ global_template->Set(String::New("Int32Array"),
+ FunctionTemplate::New(Int32Array));
+ global_template->Set(String::New("Uint32Array"),
+ FunctionTemplate::New(Uint32Array));
+ global_template->Set(String::New("Float32Array"),
+ FunctionTemplate::New(Float32Array));
+ global_template->Set(String::New("Float64Array"),
+ FunctionTemplate::New(Float64Array));
+ global_template->Set(String::New("PixelArray"),
+ FunctionTemplate::New(PixelArray));
+
+#ifdef LIVE_OBJECT_LIST
+ global_template->Set(String::New("lol_is_enabled"), Boolean::New(true));
+#else
+ global_template->Set(String::New("lol_is_enabled"), Boolean::New(false));
+#endif
+
+#ifndef USING_V8_SHARED
+ Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
+ AddOSMethods(os_templ);
+ global_template->Set(String::New("os"), os_templ);
+#endif // USING_V8_SHARED
+
+ return global_template;
}
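
Aside (not part of the diff): the bindings installed above are the entire global API a d8 script sees, so an illustrative script run under this shell could be ("test.js" is a hypothetical file):

  print(version());           // version() returns the V8 version string
  var src = read("test.js");  // read a file into a string
  load("test.js");            // compile and run the file in the current context
  var a = new Int8Array(8);   // one of the external array constructors bound above
  write(a.length + "\n");     // like print, but without the implicit newline
  quit(0);                    // exit the shell with the given status code
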
+void Shell::Initialize() {
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+ BZip2Decompressor startup_data_decompressor;
+ int bz2_result = startup_data_decompressor.Decompress();
+ if (bz2_result != BZ_OK) {
+ fprintf(stderr, "bzip error code: %d\n", bz2_result);
+ exit(1);
+ }
+#endif
+
+#ifndef USING_V8_SHARED
+ Shell::counter_map_ = new CounterMap();
+ // Set up counters
+ if (i::StrLength(i::FLAG_map_counters) != 0)
+ MapCounters(i::FLAG_map_counters);
+ if (i::FLAG_dump_counters) {
+ V8::SetCounterFunction(LookupCounter);
+ V8::SetCreateHistogramFunction(CreateHistogram);
+ V8::SetAddHistogramSampleFunction(AddHistogramSample);
+ }
+#endif // USING_V8_SHARED
+ if (options.test_shell) return;
+
+#ifndef USING_V8_SHARED
+ Locker lock;
+ HandleScope scope;
+ Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
+ utility_context_ = Context::New(NULL, global_template);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Start the debugger agent if requested.
+ if (i::FLAG_debugger_agent) {
+ v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
+ }
+#endif // ENABLE_DEBUGGER_SUPPORT
+#endif // USING_V8_SHARED
+}
+
+
+Persistent<Context> Shell::CreateEvaluationContext() {
+#ifndef USING_V8_SHARED
+  // This needs to be a critical section since creating a new context is
+  // not thread-safe.
+ i::ScopedLock lock(context_mutex_);
+#endif // USING_V8_SHARED
+ // Initialize the global objects
+ Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
+ Persistent<Context> context = Context::New(NULL, global_template);
+ Context::Scope scope(context);
+
+#ifndef USING_V8_SHARED
+ i::JSArguments js_args = i::FLAG_js_arguments;
+ i::Handle<i::FixedArray> arguments_array =
+ FACTORY->NewFixedArray(js_args.argc());
+ for (int j = 0; j < js_args.argc(); j++) {
+ i::Handle<i::String> arg =
+ FACTORY->NewStringFromUtf8(i::CStrVector(js_args[j]));
+ arguments_array->set(j, *arg);
+ }
+ i::Handle<i::JSArray> arguments_jsarray =
+ FACTORY->NewJSArrayWithElements(arguments_array);
+ context->Global()->Set(String::New("arguments"),
+ Utils::ToLocal(arguments_jsarray));
+#endif // USING_V8_SHARED
+ return context;
+}
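
Aside: the block above mirrors i::FLAG_js_arguments into a script-visible `arguments` array. Assuming V8's usual convention that everything after `--` on the command line lands in that flag, a static-build session could look like:

  $ d8 script.js -- alpha beta
  // and inside script.js:
  print(arguments.length);  // 2
  print(arguments[1]);      // "beta"
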
+
+
+#ifndef USING_V8_SHARED
void Shell::OnExit() {
if (i::FLAG_dump_counters) {
- ::printf("+----------------------------------------+-------------+\n");
- ::printf("| Name | Value |\n");
- ::printf("+----------------------------------------+-------------+\n");
+ printf("+----------------------------------------+-------------+\n");
+ printf("| Name | Value |\n");
+ printf("+----------------------------------------+-------------+\n");
for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
Counter* counter = i.CurrentValue();
if (counter->is_histogram()) {
- ::printf("| c:%-36s | %11i |\n", i.CurrentKey(), counter->count());
- ::printf("| t:%-36s | %11i |\n",
- i.CurrentKey(),
- counter->sample_total());
+ printf("| c:%-36s | %11i |\n", i.CurrentKey(), counter->count());
+ printf("| t:%-36s | %11i |\n", i.CurrentKey(), counter->sample_total());
} else {
- ::printf("| %-38s | %11i |\n", i.CurrentKey(), counter->count());
+ printf("| %-38s | %11i |\n", i.CurrentKey(), counter->count());
}
}
- ::printf("+----------------------------------------+-------------+\n");
+ printf("+----------------------------------------+-------------+\n");
}
if (counters_file_ != NULL)
delete counters_file_;
}
+#endif // USING_V8_SHARED
static char* ReadChars(const char* name, int* size_out) {
- v8::Unlocker unlocker; // Release the V8 lock while reading files.
+ // Release the V8 lock while reading files.
+ v8::Unlocker unlocker(Isolate::GetCurrent());
+#ifndef USING_V8_SHARED
FILE* file = i::OS::FOpen(name, "rb");
+#else
+ // TODO(yangguo@chromium.org): reading from a directory hangs!
+ FILE* file = fopen(name, "rb");
+#endif // USING_V8_SHARED
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
@@ -557,6 +799,7 @@
}
+#ifndef USING_V8_SHARED
static char* ReadToken(char* data, char token) {
char* next = i::OS::StrChr(data, token);
if (next != NULL) {
@@ -576,6 +819,7 @@
static char* ReadWord(char* data) {
return ReadToken(data, ' ');
}
+#endif // USING_V8_SHARED
// Reads a file into a v8 string.
@@ -590,6 +834,11 @@
void Shell::RunShell() {
+ Locker locker;
+ Context::Scope context_scope(evaluation_context_);
+ HandleScope handle_scope;
+ Handle<String> name = String::New("(d8)");
+#ifndef USING_V8_SHARED
LineEditor* editor = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), editor->name());
if (i::FLAG_debugger) {
@@ -597,26 +846,32 @@
}
editor->Open();
while (true) {
- Locker locker;
- HandleScope handle_scope;
- Context::Scope context_scope(evaluation_context_);
i::SmartPointer<char> input = editor->Prompt(Shell::kPrompt);
- if (input.is_empty())
- break;
+ if (input.is_empty()) break;
editor->AddHistory(*input);
- Handle<String> name = String::New("(d8)");
ExecuteString(String::New(*input), name, true, true);
}
editor->Close();
+#else
+ printf("V8 version %s [D8 light using shared library]\n", V8::GetVersion());
+ static const int kBufferSize = 256;
+ while (true) {
+ char buffer[kBufferSize];
+ printf("%s", Shell::kPrompt);
+ if (fgets(buffer, kBufferSize, stdin) == NULL) break;
+ ExecuteString(String::New(buffer), name, true, true);
+ }
+#endif // USING_V8_SHARED
printf("\n");
}
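
Aside: both branches of RunShell implement the same read-eval-print loop; only the line editing differs. A session might look like this (version string and console name illustrative):

  V8 version 3.4.14 [console: readline]
  d8> Math.pow(2, 10)
  1024
  d8> quit()
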
+#ifndef USING_V8_SHARED
class ShellThread : public i::Thread {
public:
- ShellThread(i::Isolate* isolate, int no, i::Vector<const char> files)
- : Thread(isolate, "d8:ShellThread"),
- no_(no), files_(files) { }
+ ShellThread(int no, i::Vector<const char> files)
+ : Thread("d8:ShellThread"),
+ no_(no), files_(files) { }
virtual void Run();
private:
int no_;
@@ -625,25 +880,6 @@
void ShellThread::Run() {
- // Prepare the context for this thread.
- Locker locker;
- HandleScope scope;
- Handle<ObjectTemplate> global_template = ObjectTemplate::New();
- global_template->Set(String::New("print"),
- FunctionTemplate::New(Shell::Print));
- global_template->Set(String::New("write"),
- FunctionTemplate::New(Shell::Write));
- global_template->Set(String::New("read"),
- FunctionTemplate::New(Shell::Read));
- global_template->Set(String::New("readline"),
- FunctionTemplate::New(Shell::ReadLine));
- global_template->Set(String::New("load"),
- FunctionTemplate::New(Shell::Load));
- global_template->Set(String::New("yield"),
- FunctionTemplate::New(Shell::Yield));
- global_template->Set(String::New("version"),
- FunctionTemplate::New(Shell::Version));
-
char* ptr = const_cast<char*>(files_.start());
while ((ptr != NULL) && (*ptr != '\0')) {
// For each newline-separated line.
@@ -655,8 +891,10 @@
continue;
}
- Persistent<Context> thread_context = Context::New(NULL, global_template);
- thread_context->SetSecurityToken(Undefined());
+ // Prepare the context for this thread.
+ Locker locker;
+ HandleScope scope;
+ Persistent<Context> thread_context = Shell::CreateEvaluationContext();
Context::Scope context_scope(thread_context);
while ((ptr != NULL) && (*ptr != '\0')) {
@@ -681,118 +919,377 @@
ptr = next_line;
}
}
+#endif // USING_V8_SHARED
-int Shell::Main(int argc, char* argv[]) {
- i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
- if (i::FLAG_help) {
- return 1;
- }
- Initialize();
- bool run_shell = (argc == 1);
+void SourceGroup::ExitShell(int exit_code) {
+ // Use _exit instead of exit to avoid races between isolate
+ // threads and static destructors.
+ fflush(stdout);
+ fflush(stderr);
+ _exit(exit_code);
+}
- // Default use preemption if threads are created.
- bool use_preemption = true;
- // Default to use lowest possible thread preemption interval to test as many
- // edgecases as possible.
- int preemption_interval = 1;
-
- i::List<i::Thread*> threads(1);
-
- {
- // Acquire the V8 lock once initialization has finished. Since the thread
- // below may spawn new threads accessing V8 holding the V8 lock here is
- // mandatory.
- Locker locker;
- Context::Scope context_scope(evaluation_context_);
- for (int i = 1; i < argc; i++) {
- char* str = argv[i];
- if (strcmp(str, "--shell") == 0) {
- run_shell = true;
- } else if (strcmp(str, "--preemption") == 0) {
- use_preemption = true;
- } else if (strcmp(str, "--no-preemption") == 0) {
- use_preemption = false;
- } else if (strcmp(str, "--preemption-interval") == 0) {
- if (i + 1 < argc) {
- char* end = NULL;
- preemption_interval = strtol(argv[++i], &end, 10); // NOLINT
- if (preemption_interval <= 0 || *end != '\0' || errno == ERANGE) {
- printf("Invalid value for --preemption-interval '%s'\n", argv[i]);
- return 1;
- }
- } else {
- printf("Missing value for --preemption-interval\n");
- return 1;
- }
- } else if (strcmp(str, "-f") == 0) {
- // Ignore any -f flags for compatibility with other stand-alone
- // JavaScript engines.
- continue;
- } else if (strncmp(str, "--", 2) == 0) {
- printf("Warning: unknown flag %s.\nTry --help for options\n", str);
- } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
- // Execute argument given to -e option directly.
- v8::HandleScope handle_scope;
- v8::Handle<v8::String> file_name = v8::String::New("unnamed");
- v8::Handle<v8::String> source = v8::String::New(argv[i + 1]);
- if (!ExecuteString(source, file_name, false, true)) {
- OnExit();
- return 1;
- }
- i++;
- } else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
- int size = 0;
- const char* files = ReadChars(argv[++i], &size);
- if (files == NULL) return 1;
- ShellThread* thread =
- new ShellThread(i::Isolate::Current(),
- threads.length(),
- i::Vector<const char>(files, size));
- thread->Start();
- threads.Add(thread);
- } else {
- // Use all other arguments as names of files to load and run.
- HandleScope handle_scope;
- Handle<String> file_name = v8::String::New(str);
- Handle<String> source = ReadFile(str);
- if (source.IsEmpty()) {
- printf("Error reading '%s'\n", str);
- return 1;
- }
- if (!ExecuteString(source, file_name, false, true)) {
- OnExit();
- return 1;
- }
+void SourceGroup::Execute() {
+ for (int i = begin_offset_; i < end_offset_; ++i) {
+ const char* arg = argv_[i];
+ if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
+ // Execute argument given to -e option directly.
+ HandleScope handle_scope;
+ Handle<String> file_name = String::New("unnamed");
+ Handle<String> source = String::New(argv_[i + 1]);
+ if (!Shell::ExecuteString(source, file_name, false, true)) {
+ ExitShell(1);
+ return;
+ }
+ ++i;
+ } else if (arg[0] == '-') {
+ // Ignore other options. They have been parsed already.
+ } else {
+ // Use all other arguments as names of files to load and run.
+ HandleScope handle_scope;
+ Handle<String> file_name = String::New(arg);
+ Handle<String> source = ReadFile(arg);
+ if (source.IsEmpty()) {
+ printf("Error reading '%s'\n", arg);
+ ExitShell(1);
+ return;
+ }
+ if (!Shell::ExecuteString(source, file_name, false, true)) {
+ ExitShell(1);
+ return;
}
}
+ }
+}
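
Aside: within one source group, `-e` snippets and script files execute in command-line order, all in the same evaluation context, and the first failure exits the shell. An illustrative invocation (main.js hypothetical):

  $ d8 -e 'print("setup")' main.js -e 'print("done")'
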
+
+Handle<String> SourceGroup::ReadFile(const char* name) {
+#ifndef USING_V8_SHARED
+ FILE* file = i::OS::FOpen(name, "rb");
+#else
+ // TODO(yangguo@chromium.org): reading from a directory hangs!
+ FILE* file = fopen(name, "rb");
+#endif // USING_V8_SHARED
+ if (file == NULL) return Handle<String>();
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+ rewind(file);
+
+ char* chars = new char[size + 1];
+ chars[size] = '\0';
+ for (int i = 0; i < size;) {
+ int read = fread(&chars[i], 1, size - i, file);
+ i += read;
+ }
+ fclose(file);
+ Handle<String> result = String::New(chars, size);
+ delete[] chars;
+ return result;
+}
+
+
+#ifndef USING_V8_SHARED
+i::Thread::Options SourceGroup::GetThreadOptions() {
+ i::Thread::Options options;
+ options.name = "IsolateThread";
+  // On some systems (OSX 10.6) the stack size default is 0.5 MB or less
+ // which is not enough to parse the big literal expressions used in tests.
+ // The stack size should be at least StackGuard::kLimitSize + some
+ // OS-specific padding for thread startup code.
+  options.stack_size = 2 << 20;  // 2 MB seems to be enough
+ return options;
+}
+
+
+void SourceGroup::ExecuteInThread() {
+ Isolate* isolate = Isolate::New();
+ do {
+ if (next_semaphore_ != NULL) next_semaphore_->Wait();
+ {
+ Isolate::Scope iscope(isolate);
+ Locker lock(isolate);
+ HandleScope scope;
+ Persistent<Context> context = Shell::CreateEvaluationContext();
+ {
+ Context::Scope cscope(context);
+ Execute();
+ }
+ context.Dispose();
+ }
+ if (done_semaphore_ != NULL) done_semaphore_->Signal();
+ } while (!Shell::options.last_run);
+ isolate->Dispose();
+}
+
+
+void SourceGroup::StartExecuteInThread() {
+ if (thread_ == NULL) {
+ thread_ = new IsolateThread(this);
+ thread_->Start();
+ }
+ next_semaphore_->Signal();
+}
+
+
+void SourceGroup::WaitForThread() {
+ if (thread_ == NULL) return;
+ if (Shell::options.last_run) {
+ thread_->Join();
+ thread_ = NULL;
+ } else {
+ done_semaphore_->Wait();
+ }
+}
+#endif // USING_V8_SHARED
+
+
+bool Shell::SetOptions(int argc, char* argv[]) {
+ for (int i = 0; i < argc; i++) {
+ if (strcmp(argv[i], "--stress-opt") == 0) {
+ options.stress_opt = true;
+ argv[i] = NULL;
+ } else if (strcmp(argv[i], "--stress-deopt") == 0) {
+ options.stress_deopt = true;
+ argv[i] = NULL;
+ } else if (strcmp(argv[i], "--noalways-opt") == 0) {
+ // No support for stressing if we can't use --always-opt.
+ options.stress_opt = false;
+ options.stress_deopt = false;
+ } else if (strcmp(argv[i], "--shell") == 0) {
+ options.interactive_shell = true;
+ argv[i] = NULL;
+ } else if (strcmp(argv[i], "--test") == 0) {
+ options.test_shell = true;
+ argv[i] = NULL;
+ } else if (strcmp(argv[i], "--preemption") == 0) {
+#ifdef USING_V8_SHARED
+ printf("D8 with shared library does not support multi-threading\n");
+ return false;
+#else
+ options.use_preemption = true;
+ argv[i] = NULL;
+#endif // USING_V8_SHARED
+ } else if (strcmp(argv[i], "--no-preemption") == 0) {
+#ifdef USING_V8_SHARED
+ printf("D8 with shared library does not support multi-threading\n");
+ return false;
+#else
+ options.use_preemption = false;
+ argv[i] = NULL;
+#endif // USING_V8_SHARED
+ } else if (strcmp(argv[i], "--preemption-interval") == 0) {
+#ifdef USING_V8_SHARED
+ printf("D8 with shared library does not support multi-threading\n");
+ return false;
+#else
+ if (++i < argc) {
+ argv[i-1] = NULL;
+ char* end = NULL;
+ options.preemption_interval = strtol(argv[i], &end, 10); // NOLINT
+ if (options.preemption_interval <= 0
+ || *end != '\0'
+ || errno == ERANGE) {
+ printf("Invalid value for --preemption-interval '%s'\n", argv[i]);
+ return false;
+ }
+ argv[i] = NULL;
+ } else {
+ printf("Missing value for --preemption-interval\n");
+ return false;
+ }
+#endif // USING_V8_SHARED
+ } else if (strcmp(argv[i], "-f") == 0) {
+ // Ignore any -f flags for compatibility with other stand-alone
+ // JavaScript engines.
+ continue;
+ } else if (strcmp(argv[i], "--isolate") == 0) {
+#ifdef USING_V8_SHARED
+ printf("D8 with shared library does not support multi-threading\n");
+ return false;
+#endif // USING_V8_SHARED
+ options.num_isolates++;
+ }
+#ifdef USING_V8_SHARED
+ else if (strcmp(argv[i], "--dump-counters") == 0) {
+ printf("D8 with shared library does not include counters\n");
+ return false;
+ } else if (strcmp(argv[i], "-p") == 0) {
+ printf("D8 with shared library does not support multi-threading\n");
+ return false;
+ } else if (strcmp(argv[i], "--debugger") == 0) {
+ printf("Javascript debugger not included\n");
+ return false;
+ }
+#endif // USING_V8_SHARED
+ }
+
+#ifndef USING_V8_SHARED
+ // Run parallel threads if we are not using --isolate
+ for (int i = 1; i < argc; i++) {
+ if (argv[i] == NULL) continue;
+ if (strcmp(argv[i], "-p") == 0 && i + 1 < argc) {
+ if (options.num_isolates > 1) {
+ printf("-p is not compatible with --isolate\n");
+ return false;
+ }
+ argv[i] = NULL;
+ if (options.parallel_files == NULL) {
+ options.parallel_files = new i::List<i::Vector<const char> >();
+ }
+ int size = 0;
+ const char* files = ReadChars(argv[++i], &size);
+ if (files == NULL) {
+ printf("-p option incomplete\n");
+ return false;
+ }
+ argv[i] = NULL;
+ options.parallel_files->Add(i::Vector<const char>(files, size));
+ }
+ }
+#endif // USING_V8_SHARED
+
+ v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+
+  // Set up isolated source groups.
+ options.isolate_sources = new SourceGroup[options.num_isolates];
+ SourceGroup* current = options.isolate_sources;
+ current->Begin(argv, 1);
+ for (int i = 1; i < argc; i++) {
+ const char* str = argv[i];
+ if (strcmp(str, "--isolate") == 0) {
+ current->End(i);
+ current++;
+ current->Begin(argv, i + 1);
+ } else if (strncmp(argv[i], "--", 2) == 0) {
+ printf("Warning: unknown flag %s.\nTry --help for options\n", argv[i]);
+ }
+ }
+ current->End(argc);
+
+ return true;
+}
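
Aside: SetOptions consumes d8's own flags (overwriting them with NULL in argv), hands what is left to V8's flag parser, and splits the remaining arguments into one SourceGroup per --isolate. Hypothetically:

  $ d8 main.js --isolate worker.js
  // main.js runs in the default isolate on the main thread, while
  // worker.js runs concurrently in a second isolate (static build only).
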
+
+
+int Shell::RunMain(int argc, char* argv[]) {
+#ifndef USING_V8_SHARED
+ i::List<i::Thread*> threads(1);
+ if (options.parallel_files != NULL)
+ for (int i = 0; i < options.parallel_files->length(); i++) {
+ i::Vector<const char> files = options.parallel_files->at(i);
+ ShellThread* thread = new ShellThread(threads.length(), files);
+ thread->Start();
+ threads.Add(thread);
+ }
+
+ for (int i = 1; i < options.num_isolates; ++i) {
+ options.isolate_sources[i].StartExecuteInThread();
+ }
+#endif // USING_V8_SHARED
+ { // NOLINT
+ Locker lock;
+ HandleScope scope;
+ Persistent<Context> context = CreateEvaluationContext();
+ if (options.last_run) {
+ // Keep using the same context in the interactive shell.
+ evaluation_context_ = context;
+#ifndef USING_V8_SHARED
+ // If the interactive debugger is enabled make sure to activate
+ // it before running the files passed on the command line.
+ if (i::FLAG_debugger) {
+ InstallUtilityScript();
+ }
+#endif  // USING_V8_SHARED
+ }
+ {
+ Context::Scope cscope(context);
+ options.isolate_sources[0].Execute();
+ }
+ if (!options.last_run) {
+ context.Dispose();
+ }
+
+#ifndef USING_V8_SHARED
// Start preemption if threads have been created and preemption is enabled.
- if (threads.length() > 0 && use_preemption) {
- Locker::StartPreemption(preemption_interval);
+ if (options.parallel_files != NULL
+ && threads.length() > 0
+ && options.use_preemption) {
+ Locker::StartPreemption(options.preemption_interval);
+ }
+#endif // USING_V8_SHARED
+ }
+
+#ifndef USING_V8_SHARED
+ for (int i = 1; i < options.num_isolates; ++i) {
+ options.isolate_sources[i].WaitForThread();
+ }
+
+ if (options.parallel_files != NULL)
+ for (int i = 0; i < threads.length(); i++) {
+ i::Thread* thread = threads[i];
+ thread->Join();
+ delete thread;
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Run the remote debugger if requested.
- if (i::FLAG_remote_debugger) {
- RunRemoteDebugger(i::FLAG_debugger_port);
- return 0;
- }
-#endif
- }
- if (run_shell)
- RunShell();
- for (int i = 0; i < threads.length(); i++) {
- i::Thread* thread = threads[i];
- thread->Join();
- delete thread;
- }
OnExit();
+#endif // USING_V8_SHARED
return 0;
}
+int Shell::Main(int argc, char* argv[]) {
+ if (!SetOptions(argc, argv)) return 1;
+ Initialize();
+
+ int result = 0;
+ if (options.stress_opt || options.stress_deopt) {
+ Testing::SetStressRunType(
+ options.stress_opt ? Testing::kStressTypeOpt
+ : Testing::kStressTypeDeopt);
+ int stress_runs = Testing::GetStressRuns();
+ for (int i = 0; i < stress_runs && result == 0; i++) {
+ printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
+ Testing::PrepareStressRun(i);
+ options.last_run = (i == stress_runs - 1);
+ result = RunMain(argc, argv);
+ }
+ printf("======== Full Deoptimization =======\n");
+ Testing::DeoptimizeAll();
+ } else {
+ result = RunMain(argc, argv);
+ }
+
+
+#if !defined(USING_V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+ // Run remote debugger if requested, but never on --test
+ if (i::FLAG_remote_debugger && !options.test_shell) {
+ InstallUtilityScript();
+ RunRemoteDebugger(i::FLAG_debugger_port);
+ return 0;
+ }
+#endif // !USING_V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+
+  // Run interactive shell if explicitly requested or if no script has been
+  // executed, but never on --test.
+  if ((options.interactive_shell || !options.script_executed) &&
+      !options.test_shell) {
+#ifndef USING_V8_SHARED
+ if (!i::FLAG_debugger) {
+ InstallUtilityScript();
+ }
+#endif // USING_V8_SHARED
+ RunShell();
+ }
+
+ V8::Dispose();
+
+ return result;
+}
+
} // namespace v8
diff --git a/src/d8.gyp b/src/d8.gyp
index 8b52ed9..48442b1 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -1,4 +1,4 @@
-# Copyright 2010 the V8 project authors. All rights reserved.
+# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -26,34 +26,42 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
+ 'variables': {
+ 'console%': '',
+ },
'targets': [
{
'target_name': 'd8',
'type': 'executable',
'dependencies': [
- 'd8_js2c#host',
'../tools/gyp/v8.gyp:v8',
],
'include_dirs+': [
'../src',
],
'defines': [
- 'ENABLE_LOGGING_AND_PROFILING',
'ENABLE_DEBUGGER_SUPPORT',
- 'ENABLE_VMSTATE_TRACKING',
- 'V8_FAST_TLS',
],
'sources': [
'd8.cc',
- 'd8-debug.cc',
- '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
],
'conditions': [
- [ 'OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
- 'sources': [ 'd8-posix.cc', ]
- }],
- [ 'OS=="win"', {
- 'sources': [ 'd8-windows.cc', ]
+ [ 'component!="shared_library"', {
+ 'dependencies': [ 'd8_js2c#host', ],
+ 'sources': [ 'd8-debug.cc', '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc', ],
+ 'conditions': [
+ [ 'console=="readline"', {
+ 'libraries': [ '-lreadline', ],
+ 'sources': [ 'd8-readline.cc' ],
+ }],
+ [ '(OS=="linux" or OS=="mac" or OS=="freebsd" \
+ or OS=="openbsd" or OS=="solaris")', {
+ 'sources': [ 'd8-posix.cc', ]
+ }],
+ [ 'OS=="win"', {
+ 'sources': [ 'd8-windows.cc', ]
+ }],
+ ],
}],
],
},
@@ -82,6 +90,7 @@
'../tools/js2c.py',
'<@(_outputs)',
'D8',
+ 'off', # compress startup data
'<@(js_files)'
],
},
diff --git a/src/d8.h b/src/d8.h
index dc02322..840ca1e 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -28,20 +28,23 @@
#ifndef V8_D8_H_
#define V8_D8_H_
-#include "allocation.h"
-#include "v8.h"
-#include "hashmap.h"
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-#error Using compressed startup data is not supported for D8
-#endif
+#ifndef USING_V8_SHARED
+#include "v8.h"
+#include "allocation.h"
+#include "hashmap.h"
+#else
+#include "../include/v8.h"
+#endif // USING_V8_SHARED
namespace v8 {
-
+#ifndef USING_V8_SHARED
namespace i = v8::internal;
+#endif // USING_V8_SHARED
+#ifndef USING_V8_SHARED
// A single counter in a counter collection.
class Counter {
public:
@@ -108,14 +111,108 @@
i::HashMap* map_;
i::HashMap::Entry* entry_;
};
+
private:
static int Hash(const char* name);
static bool Match(void* key1, void* key2);
i::HashMap hash_map_;
};
+#endif // USING_V8_SHARED
-class Shell: public i::AllStatic {
+class SourceGroup {
+ public:
+ SourceGroup() :
+#ifndef USING_V8_SHARED
+ next_semaphore_(v8::internal::OS::CreateSemaphore(0)),
+ done_semaphore_(v8::internal::OS::CreateSemaphore(0)),
+ thread_(NULL),
+#endif // USING_V8_SHARED
+ argv_(NULL),
+ begin_offset_(0),
+ end_offset_(0) { }
+
+ void Begin(char** argv, int offset) {
+ argv_ = const_cast<const char**>(argv);
+ begin_offset_ = offset;
+ }
+
+ void End(int offset) { end_offset_ = offset; }
+
+ void Execute();
+
+#ifndef USING_V8_SHARED
+ void StartExecuteInThread();
+ void WaitForThread();
+
+ private:
+ class IsolateThread : public i::Thread {
+ public:
+ explicit IsolateThread(SourceGroup* group)
+ : i::Thread(GetThreadOptions()), group_(group) {}
+
+ virtual void Run() {
+ group_->ExecuteInThread();
+ }
+
+ private:
+ SourceGroup* group_;
+ };
+
+ static i::Thread::Options GetThreadOptions();
+ void ExecuteInThread();
+
+ i::Semaphore* next_semaphore_;
+ i::Semaphore* done_semaphore_;
+ i::Thread* thread_;
+#endif // USING_V8_SHARED
+
+ void ExitShell(int exit_code);
+ Handle<String> ReadFile(const char* name);
+
+ const char** argv_;
+ int begin_offset_;
+ int end_offset_;
+};
+
+
+class ShellOptions {
+ public:
+ ShellOptions() :
+#ifndef USING_V8_SHARED
+ use_preemption(true),
+ preemption_interval(10),
+ parallel_files(NULL),
+#endif // USING_V8_SHARED
+ script_executed(false),
+ last_run(true),
+ stress_opt(false),
+ stress_deopt(false),
+ interactive_shell(false),
+ test_shell(false),
+ num_isolates(1),
+ isolate_sources(NULL) { }
+
+#ifndef USING_V8_SHARED
+ bool use_preemption;
+ int preemption_interval;
+ i::List< i::Vector<const char> >* parallel_files;
+#endif // USING_V8_SHARED
+ bool script_executed;
+ bool last_run;
+ bool stress_opt;
+ bool stress_deopt;
+ bool interactive_shell;
+ bool test_shell;
+ int num_isolates;
+ SourceGroup* isolate_sources;
+};
+
+#ifdef USING_V8_SHARED
+class Shell {
+#else
+class Shell : public i::AllStatic {
+#endif // USING_V8_SHARED
public:
static bool ExecuteString(Handle<String> source,
Handle<Value> name,
@@ -123,7 +220,14 @@
bool report_exceptions);
static const char* ToCString(const v8::String::Utf8Value& value);
static void ReportException(TryCatch* try_catch);
- static void Initialize();
+ static Handle<String> ReadFile(const char* name);
+ static Persistent<Context> CreateEvaluationContext();
+ static int RunMain(int argc, char* argv[]);
+ static int Main(int argc, char* argv[]);
+
+#ifndef USING_V8_SHARED
+ static Handle<Array> GetCompletions(Handle<String> text,
+ Handle<String> full);
static void OnExit();
static int* LookupCounter(const char* name);
static void* CreateHistogram(const char* name,
@@ -132,11 +236,8 @@
size_t buckets);
static void AddHistogramSample(void* histogram, int sample);
static void MapCounters(const char* name);
- static Handle<String> ReadFile(const char* name);
- static void RunShell();
- static int Main(int argc, char* argv[]);
- static Handle<Array> GetCompletions(Handle<String> text,
- Handle<String> full);
+#endif // USING_V8_SHARED
+
#ifdef ENABLE_DEBUGGER_SUPPORT
static Handle<Object> DebugMessageDetails(Handle<String> message);
static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
@@ -154,6 +255,15 @@
static Handle<Value> Read(const Arguments& args);
static Handle<Value> ReadLine(const Arguments& args);
static Handle<Value> Load(const Arguments& args);
+ static Handle<Value> Int8Array(const Arguments& args);
+ static Handle<Value> Uint8Array(const Arguments& args);
+ static Handle<Value> Int16Array(const Arguments& args);
+ static Handle<Value> Uint16Array(const Arguments& args);
+ static Handle<Value> Int32Array(const Arguments& args);
+ static Handle<Value> Uint32Array(const Arguments& args);
+ static Handle<Value> Float32Array(const Arguments& args);
+ static Handle<Value> Float64Array(const Arguments& args);
+ static Handle<Value> PixelArray(const Arguments& args);
// The OS object on the global object contains methods for performing
// operating system calls:
//
@@ -190,24 +300,39 @@
static Handle<Value> RemoveDirectory(const Arguments& args);
static void AddOSMethods(Handle<ObjectTemplate> os_template);
-
- static Handle<Context> utility_context() { return utility_context_; }
-
+#ifndef USING_V8_SHARED
static const char* kHistoryFileName;
+#endif // USING_V8_SHARED
static const char* kPrompt;
+ static ShellOptions options;
+
private:
- static Persistent<Context> utility_context_;
static Persistent<Context> evaluation_context_;
+#ifndef USING_V8_SHARED
+ static Persistent<Context> utility_context_;
static CounterMap* counter_map_;
// We statically allocate a set of local counters to be used if we
// don't want to store the stats in a memory-mapped file
static CounterCollection local_counters_;
static CounterCollection* counters_;
static i::OS::MemoryMappedFile* counters_file_;
+ static i::Mutex* context_mutex_;
+
static Counter* GetCounter(const char* name, bool is_histogram);
+ static void InstallUtilityScript();
+#endif // USING_V8_SHARED
+ static void Initialize();
+ static void RunShell();
+ static bool SetOptions(int argc, char* argv[]);
+ static Handle<ObjectTemplate> CreateGlobalTemplate();
+ static Handle<Value> CreateExternalArray(const Arguments& args,
+ ExternalArrayType type,
+ size_t element_size);
+ static void ExternalArrayWeakCallback(Persistent<Value> object, void* data);
};
+#ifndef USING_V8_SHARED
class LineEditor {
public:
enum Type { DUMB = 0, READLINE = 1 };
@@ -227,6 +352,7 @@
LineEditor* next_;
static LineEditor* first_;
};
+#endif // USING_V8_SHARED
} // namespace v8
diff --git a/src/d8.js b/src/d8.js
index 9798078..033455e 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -977,9 +977,14 @@
// specification it is considered a function break point.
pos = target.indexOf(':');
if (pos > 0) {
- type = 'script';
var tmp = target.substring(pos + 1, target.length);
target = target.substring(0, pos);
+ if (target[0] == '/' && target[target.length - 1] == '/') {
+ type = 'scriptRegExp';
+ target = target.substring(1, target.length - 1);
+ } else {
+ type = 'script';
+ }
// Check for both line and column.
pos = tmp.indexOf(':');
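
Aside: with this change, a break target of the form /regexp/:line selects scripts by name pattern instead of exact name. An illustrative d8 debug session (script names hypothetical):

  d8> break math.js:10         // exact script name, type 'script'
  d8> break /.*-test\.js/:10   // name pattern, new type 'scriptRegExp'
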
@@ -1984,6 +1989,9 @@
if (breakpoint.script_name) {
result += ' script_name=' + breakpoint.script_name;
}
+ if (breakpoint.script_regexp) {
+ result += ' script_regexp=' + breakpoint.script_regexp;
+ }
result += ' line=' + (breakpoint.line + 1);
if (breakpoint.column != null) {
result += ' column=' + (breakpoint.column + 1);
diff --git a/src/date.js b/src/date.js
index 5a2e9a2..79b846d 100644
--- a/src/date.js
+++ b/src/date.js
@@ -981,11 +981,22 @@
function DateToISOString() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return this.getUTCFullYear() +
+ var year = this.getUTCFullYear();
+ var year_string;
+ if (year >= 0 && year <= 9999) {
+ year_string = PadInt(year, 4);
+ } else {
+ if (year < 0) {
+ year_string = "-" + PadInt(-year, 6);
+ } else {
+ year_string = "+" + PadInt(year, 6);
+ }
+ }
+ return year_string +
'-' + PadInt(this.getUTCMonth() + 1, 2) +
- '-' + PadInt(this.getUTCDate(), 2) +
+ '-' + PadInt(this.getUTCDate(), 2) +
'T' + PadInt(this.getUTCHours(), 2) +
- ':' + PadInt(this.getUTCMinutes(), 2) +
+ ':' + PadInt(this.getUTCMinutes(), 2) +
':' + PadInt(this.getUTCSeconds(), 2) +
'.' + PadInt(this.getUTCMilliseconds(), 3) +
'Z';
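
Aside: the effect is that years outside 0000..9999 now serialize in the ES5 extended six-digit form, e.g.:

  new Date(Date.UTC(2011, 5, 29)).toISOString()  // "2011-06-29T00:00:00.000Z"
  new Date(8640000000000000).toISOString()       // "+275760-09-13T00:00:00.000Z"
  new Date(Date.UTC(-1, 0, 1)).toISOString()     // "-000001-01-01T00:00:00.000Z"
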
@@ -995,8 +1006,8 @@
function DateToJSON(key) {
var o = ToObject(this);
var tv = DefaultNumber(o);
- if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
- return null;
+ if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
+ return null;
}
return o.toISOString();
}
diff --git a/src/dateparser-inl.h b/src/dateparser-inl.h
index 7f8fac8..32f0f9e 100644
--- a/src/dateparser-inl.h
+++ b/src/dateparser-inl.h
@@ -39,16 +39,71 @@
UnicodeCache* unicode_cache) {
ASSERT(out->length() >= OUTPUT_SIZE);
InputReader<Char> in(unicode_cache, str);
+ DateStringTokenizer<Char> scanner(&in);
TimeZoneComposer tz;
TimeComposer time;
DayComposer day;
- while (!in.IsEnd()) {
- if (in.IsAsciiDigit()) {
- // Parse a number (possibly with 1 or 2 trailing colons).
- int n = in.ReadUnsignedNumber();
- if (in.Skip(':')) {
- if (in.Skip(':')) {
+ // Specification:
+ // Accept ES5 ISO 8601 date-time-strings or legacy dates compatible
+ // with Safari.
+ // ES5 ISO 8601 dates:
+ // [('-'|'+')yy]yyyy[-MM[-DD]][THH:mm[:ss[.sss]][Z|(+|-)hh:mm]]
+ // where yyyy is in the range 0000..9999 and
+  //       +/-yyyyyy is in the range -999999..+999999,
+  //       but -000000 is invalid (year zero must be positive),
+ // MM is in the range 01..12,
+ // DD is in the range 01..31,
+  //     MM and DD default to 01 if missing,
+ // HH is generally in the range 00..23, but can be 24 if mm, ss
+ // and sss are zero (or missing), representing midnight at the
+ // end of a day,
+ // mm and ss are in the range 00..59,
+ // sss is in the range 000..999,
+ // hh is in the range 00..23,
+ // mm, ss, and sss default to 00 if missing, and
+ // timezone defaults to Z if missing.
+ // Extensions:
+ // We also allow sss to have more or less than three digits (but at
+ // least one).
+ // We allow hh:mm to be specified as hhmm.
+ // Legacy dates:
+ // Any unrecognized word before the first number is ignored.
+ // Parenthesized text is ignored.
+ // An unsigned number followed by ':' is a time value, and is
+ // added to the TimeComposer. A number followed by '::' adds a second
+ // zero as well. A number followed by '.' is also a time and must be
+ // followed by milliseconds.
+ // Any other number is a date component and is added to DayComposer.
+ // A month name (or really: any word having the same first three letters
+ // as a month name) is recorded as a named month in the Day composer.
+ // A word recognizable as a time-zone is recorded as such, as is
+ // '(+|-)(hhmm|hh:)'.
+  //    Legacy dates don't allow extra signs ('+' or '-') or unmatched ')'
+ // after a number has been read (before the first number, any garbage
+ // is allowed).
+ // Intersection of the two:
+ // A string that matches both formats (e.g. 1970-01-01) will be
+ // parsed as an ES5 date-time string - which means it will default
+ // to UTC time-zone. That's unavoidable if following the ES5
+ // specification.
+ // After a valid "T" has been read while scanning an ES5 datetime string,
+ // the input can no longer be a valid legacy date, since the "T" is a
+ // garbage string after a number has been read.
+
+  // First try getting as far as possible with an ES5 Date Time String.
+ DateToken next_unhandled_token = ParseES5DateTime(&scanner, &day, &time, &tz);
+ if (next_unhandled_token.IsInvalid()) return false;
+ bool has_read_number = !day.IsEmpty();
+ // If there's anything left, continue with the legacy parser.
+ for (DateToken token = next_unhandled_token;
+ !token.IsEndOfInput();
+ token = scanner.Next()) {
+ if (token.IsNumber()) {
+ has_read_number = true;
+ int n = token.number();
+ if (scanner.SkipSymbol(':')) {
+ if (scanner.SkipSymbol(':')) {
// n + "::"
if (!time.IsEmpty()) return false;
time.Add(n);
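
Aside: in terms of observable behavior, the rules spelled out in the specification comment above mean that all of the following now parse (values per that spec):

  Date.parse("1970-01-02")                // 86400000; date-only ES5 strings are UTC
  Date.parse("1970-01-01T24:00:00Z")      // 86400000; 24:00 is midnight ending the day
  Date.parse("+002011-06-15T12:00-0700")  // six-digit year and hhmm offset extensions
  Date.parse("Jan 1 1970 GMT")            // 0; legacy formats are still accepted
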
@@ -56,12 +111,13 @@
} else {
// n + ":"
if (!time.Add(n)) return false;
- in.Skip('.');
+ if (scanner.Peek().IsSymbol('.')) scanner.Next();
}
- } else if (in.Skip('.') && time.IsExpecting(n)) {
+ } else if (scanner.SkipSymbol('.') && time.IsExpecting(n)) {
time.Add(n);
- if (!in.IsAsciiDigit()) return false;
- int n = in.ReadMilliseconds();
+ if (!scanner.Peek().IsNumber()) return false;
+ int n = ReadMilliseconds(scanner.Next());
+ if (n < 0) return false;
time.AddFinal(n);
} else if (tz.IsExpecting(n)) {
tz.SetAbsoluteMinute(n);
@@ -69,59 +125,206 @@
time.AddFinal(n);
// Require end, white space, "Z", "+" or "-" immediately after
// finalizing time.
- if (!in.IsEnd() && !in.SkipWhiteSpace() && !in.Is('Z') &&
- !in.IsAsciiSign()) return false;
+ DateToken peek = scanner.Peek();
+ if (!peek.IsEndOfInput() &&
+ !peek.IsWhiteSpace() &&
+ !peek.IsKeywordZ() &&
+ !peek.IsAsciiSign()) return false;
} else {
if (!day.Add(n)) return false;
- in.Skip('-'); // Ignore suffix '-' for year, month, or day.
- // Skip trailing 'T' for ECMAScript 5 date string format but make
- // sure that it is followed by a digit (for the time).
- if (in.Skip('T') && !in.IsAsciiDigit()) return false;
+ scanner.SkipSymbol('-');
}
- } else if (in.IsAsciiAlphaOrAbove()) {
+ } else if (token.IsKeyword()) {
// Parse a "word" (sequence of chars. >= 'A').
- uint32_t pre[KeywordTable::kPrefixLength];
- int len = in.ReadWord(pre, KeywordTable::kPrefixLength);
- int index = KeywordTable::Lookup(pre, len);
- KeywordType type = KeywordTable::GetType(index);
-
+ KeywordType type = token.keyword_type();
+ int value = token.keyword_value();
if (type == AM_PM && !time.IsEmpty()) {
- time.SetHourOffset(KeywordTable::GetValue(index));
+ time.SetHourOffset(value);
} else if (type == MONTH_NAME) {
- day.SetNamedMonth(KeywordTable::GetValue(index));
- in.Skip('-'); // Ignore suffix '-' for month names
- } else if (type == TIME_ZONE_NAME && in.HasReadNumber()) {
- tz.Set(KeywordTable::GetValue(index));
+ day.SetNamedMonth(value);
+ scanner.SkipSymbol('-');
+ } else if (type == TIME_ZONE_NAME && has_read_number) {
+ tz.Set(value);
} else {
// Garbage words are illegal if a number has been read.
- if (in.HasReadNumber()) return false;
+ if (has_read_number) return false;
}
- } else if (in.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
+ } else if (token.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
// Parse UTC offset (only after UTC or time).
- tz.SetSign(in.GetAsciiSignValue());
- in.Next();
- int n = in.ReadUnsignedNumber();
- if (in.Skip(':')) {
+ tz.SetSign(token.ascii_sign());
+ // The following number may be empty.
+ int n = 0;
+ if (scanner.Peek().IsNumber()) {
+ n = scanner.Next().number();
+ }
+ has_read_number = true;
+
+ if (scanner.Peek().IsSymbol(':')) {
tz.SetAbsoluteHour(n);
tz.SetAbsoluteMinute(kNone);
} else {
tz.SetAbsoluteHour(n / 100);
tz.SetAbsoluteMinute(n % 100);
}
- } else if (in.Is('(')) {
- // Ignore anything from '(' to a matching ')' or end of string.
- in.SkipParentheses();
- } else if ((in.IsAsciiSign() || in.Is(')')) && in.HasReadNumber()) {
+ } else if ((token.IsAsciiSign() || token.IsSymbol(')')) &&
+ has_read_number) {
// Extra sign or ')' is illegal if a number has been read.
return false;
} else {
- // Ignore other characters.
- in.Next();
+ // Ignore other characters and whitespace.
}
}
+
return day.Write(out) && time.Write(out) && tz.Write(out);
}
+
+template<typename CharType>
+DateParser::DateToken DateParser::DateStringTokenizer<CharType>::Scan() {
+ int pre_pos = in_->position();
+ if (in_->IsEnd()) return DateToken::EndOfInput();
+ if (in_->IsAsciiDigit()) {
+ int n = in_->ReadUnsignedNumeral();
+ int length = in_->position() - pre_pos;
+ return DateToken::Number(n, length);
+ }
+ if (in_->Skip(':')) return DateToken::Symbol(':');
+ if (in_->Skip('-')) return DateToken::Symbol('-');
+ if (in_->Skip('+')) return DateToken::Symbol('+');
+ if (in_->Skip('.')) return DateToken::Symbol('.');
+ if (in_->Skip(')')) return DateToken::Symbol(')');
+ if (in_->IsAsciiAlphaOrAbove()) {
+ ASSERT(KeywordTable::kPrefixLength == 3);
+ uint32_t buffer[3] = {0, 0, 0};
+ int length = in_->ReadWord(buffer, 3);
+ int index = KeywordTable::Lookup(buffer, length);
+ return DateToken::Keyword(KeywordTable::GetType(index),
+ KeywordTable::GetValue(index),
+ length);
+ }
+ if (in_->SkipWhiteSpace()) {
+ return DateToken::WhiteSpace(in_->position() - pre_pos);
+ }
+ if (in_->SkipParentheses()) {
+ return DateToken::Unknown();
+ }
+ in_->Next();
+ return DateToken::Unknown();
+}
+
+
+template <typename Char>
+DateParser::DateToken DateParser::ParseES5DateTime(
+ DateStringTokenizer<Char>* scanner,
+ DayComposer* day,
+ TimeComposer* time,
+ TimeZoneComposer* tz) {
+ ASSERT(day->IsEmpty());
+ ASSERT(time->IsEmpty());
+ ASSERT(tz->IsEmpty());
+
+  // Parse mandatory date string: [('-'|'+')yy]yyyy['-'MM['-'DD]]
+ if (scanner->Peek().IsAsciiSign()) {
+ // Keep the sign token, so we can pass it back to the legacy
+ // parser if we don't use it.
+ DateToken sign_token = scanner->Next();
+ if (!scanner->Peek().IsFixedLengthNumber(6)) return sign_token;
+ int sign = sign_token.ascii_sign();
+ int year = scanner->Next().number();
+ if (sign < 0 && year == 0) return sign_token;
+ day->Add(sign * year);
+ } else if (scanner->Peek().IsFixedLengthNumber(4)) {
+ day->Add(scanner->Next().number());
+ } else {
+ return scanner->Next();
+ }
+ if (scanner->SkipSymbol('-')) {
+ if (!scanner->Peek().IsFixedLengthNumber(2) ||
+ !DayComposer::IsMonth(scanner->Peek().number())) return scanner->Next();
+ day->Add(scanner->Next().number());
+ if (scanner->SkipSymbol('-')) {
+ if (!scanner->Peek().IsFixedLengthNumber(2) ||
+ !DayComposer::IsDay(scanner->Peek().number())) return scanner->Next();
+ day->Add(scanner->Next().number());
+ }
+ }
+  // Check for optional time string: 'T'HH':'mm[':'ss['.'sss]]
+ if (!scanner->Peek().IsKeywordType(TIME_SEPARATOR)) {
+ if (!scanner->Peek().IsEndOfInput()) return scanner->Next();
+ } else {
+ // ES5 Date Time String time part is present.
+ scanner->Next();
+ if (!scanner->Peek().IsFixedLengthNumber(2) ||
+ !Between(scanner->Peek().number(), 0, 24)) {
+ return DateToken::Invalid();
+ }
+ // Allow 24:00[:00[.000]], but no other time starting with 24.
+ bool hour_is_24 = (scanner->Peek().number() == 24);
+ time->Add(scanner->Next().number());
+ if (!scanner->SkipSymbol(':')) return DateToken::Invalid();
+ if (!scanner->Peek().IsFixedLengthNumber(2) ||
+ !TimeComposer::IsMinute(scanner->Peek().number()) ||
+ (hour_is_24 && scanner->Peek().number() > 0)) {
+ return DateToken::Invalid();
+ }
+ time->Add(scanner->Next().number());
+ if (scanner->SkipSymbol(':')) {
+ if (!scanner->Peek().IsFixedLengthNumber(2) ||
+ !TimeComposer::IsSecond(scanner->Peek().number()) ||
+ (hour_is_24 && scanner->Peek().number() > 0)) {
+ return DateToken::Invalid();
+ }
+ time->Add(scanner->Next().number());
+ if (scanner->SkipSymbol('.')) {
+ if (!scanner->Peek().IsNumber() ||
+ (hour_is_24 && scanner->Peek().number() > 0)) {
+ return DateToken::Invalid();
+ }
+ // Allow more or less than the mandated three digits.
+ time->Add(ReadMilliseconds(scanner->Next()));
+ }
+ }
+ // Check for optional timezone designation: 'Z' | ('+'|'-')hh':'mm
+ if (scanner->Peek().IsKeywordZ()) {
+ scanner->Next();
+ tz->Set(0);
+ } else if (scanner->Peek().IsSymbol('+') ||
+ scanner->Peek().IsSymbol('-')) {
+ tz->SetSign(scanner->Next().symbol() == '+' ? 1 : -1);
+ if (scanner->Peek().IsFixedLengthNumber(4)) {
+ // hhmm extension syntax.
+ int hourmin = scanner->Next().number();
+ int hour = hourmin / 100;
+ int min = hourmin % 100;
+ if (!TimeComposer::IsHour(hour) || !TimeComposer::IsMinute(min)) {
+ return DateToken::Invalid();
+ }
+ tz->SetAbsoluteHour(hour);
+ tz->SetAbsoluteMinute(min);
+ } else {
+ // hh:mm standard syntax.
+ if (!scanner->Peek().IsFixedLengthNumber(2) ||
+ !TimeComposer::IsHour(scanner->Peek().number())) {
+ return DateToken::Invalid();
+ }
+ tz->SetAbsoluteHour(scanner->Next().number());
+ if (!scanner->SkipSymbol(':')) return DateToken::Invalid();
+ if (!scanner->Peek().IsFixedLengthNumber(2) ||
+ !TimeComposer::IsMinute(scanner->Peek().number())) {
+ return DateToken::Invalid();
+ }
+ tz->SetAbsoluteMinute(scanner->Next().number());
+ }
+ }
+ if (!scanner->Peek().IsEndOfInput()) return DateToken::Invalid();
+ }
+ // Successfully parsed ES5 Date Time String. Default to UTC if no TZ given.
+ if (tz->IsEmpty()) tz->Set(0);
+ day->set_iso_date();
+ return DateToken::EndOfInput();
+}
+
+
} } // namespace v8::internal
#endif // V8_DATEPARSER_INL_H_
diff --git a/src/dateparser.cc b/src/dateparser.cc
index 6d80488..4a0721f 100644
--- a/src/dateparser.cc
+++ b/src/dateparser.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,7 +44,7 @@
int day = kNone;
if (named_month_ == kNone) {
- if (index_ == 3 && !IsDay(comp_[0])) {
+ if (is_iso_date_ || (index_ == 3 && !IsDay(comp_[0]))) {
// YMD
year = comp_[0];
month = comp_[1];
@@ -71,8 +71,10 @@
}
}
- if (Between(year, 0, 49)) year += 2000;
- else if (Between(year, 50, 99)) year += 1900;
+ if (!is_iso_date_) {
+ if (Between(year, 0, 49)) year += 2000;
+ else if (Between(year, 50, 99)) year += 1900;
+ }
if (!Smi::IsValid(year) || !IsMonth(month) || !IsDay(day)) return false;
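
Aside: the is_iso_date_ bit keeps ISO strings in strict year-month-day order and exempts them from the two-digit-year window, e.g.:

  new Date("01-02-03")    // legacy: month 01, day 02, year 03 -> 2003, local time
  new Date("2001-02-03")  // ISO: year 2001, month 02, day 03, UTC
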
@@ -151,6 +153,7 @@
{'m', 's', 't', DateParser::TIME_ZONE_NAME, -7},
{'p', 'd', 't', DateParser::TIME_ZONE_NAME, -7},
{'p', 's', 't', DateParser::TIME_ZONE_NAME, -8},
+ {'t', '\0', '\0', DateParser::TIME_SEPARATOR, 0},
{'\0', '\0', '\0', DateParser::INVALID, 0},
};
@@ -175,4 +178,35 @@
}
+int DateParser::ReadMilliseconds(DateToken token) {
+ // Read first three significant digits of the original numeral,
+ // as inferred from the value and the number of digits.
+ // I.e., use the number of digits to see if there were
+ // leading zeros.
+ int number = token.number();
+ int length = token.length();
+ if (length < 3) {
+ // Less than three digits. Multiply to put most significant digit
+ // in hundreds position.
+ if (length == 1) {
+ number *= 100;
+ } else if (length == 2) {
+ number *= 10;
+ }
+ } else if (length > 3) {
+ if (length > kMaxSignificantDigits) length = kMaxSignificantDigits;
+ // More than three digits. Divide by 10^(length - 3) to get three
+ // most significant digits.
+ int factor = 1;
+ do {
+ ASSERT(factor <= 100000000); // factor won't overflow.
+ factor *= 10;
+ length--;
+ } while (length > 3);
+ number /= factor;
+ }
+ return number;
+}
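
Aside: in JavaScript terms (a sketch of the effect of the logic above), the digit count of the fraction decides its scale:

  Date.parse("1970-01-01T00:00:00.5Z")      // 500; one digit scales by 100
  Date.parse("1970-01-01T00:00:00.05Z")     // 50; two digits scale by 10
  Date.parse("1970-01-01T00:00:00.12345Z")  // 123; digits beyond three are dropped
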
+
+
} } // namespace v8::internal
diff --git a/src/dateparser.h b/src/dateparser.h
index 6e87c34..4bd320e 100644
--- a/src/dateparser.h
+++ b/src/dateparser.h
@@ -61,9 +61,14 @@
static inline bool Between(int x, int lo, int hi) {
return static_cast<unsigned>(x - lo) <= static_cast<unsigned>(hi - lo);
}
+
// Indicates a missing value.
static const int kNone = kMaxInt;
+ // Maximal number of digits used to build the value of a numeral.
+ // Remaining digits are ignored.
+ static const int kMaxSignificantDigits = 9;
+
// InputReader provides basic string parsing and character classification.
template <typename Char>
class InputReader BASE_EMBEDDED {
@@ -71,32 +76,28 @@
InputReader(UnicodeCache* unicode_cache, Vector<Char> s)
: index_(0),
buffer_(s),
- has_read_number_(false),
unicode_cache_(unicode_cache) {
Next();
}
- // Advance to the next character of the string.
- void Next() { ch_ = (index_ < buffer_.length()) ? buffer_[index_++] : 0; }
+ int position() { return index_; }
- // Read a string of digits as an unsigned number (cap just below kMaxInt).
- int ReadUnsignedNumber() {
- has_read_number_ = true;
- int n;
- for (n = 0; IsAsciiDigit() && n < kMaxInt / 10 - 1; Next()) {
- n = n * 10 + ch_ - '0';
- }
- return n;
+ // Advance to the next character of the string.
+ void Next() {
+ ch_ = (index_ < buffer_.length()) ? buffer_[index_] : 0;
+ index_++;
}
- // Read a string of digits, take the first three or fewer as an unsigned
- // number of milliseconds, and ignore any digits after the first three.
- int ReadMilliseconds() {
- has_read_number_ = true;
+ // Read a string of digits as an unsigned number. Cap value at
+ // kMaxSignificantDigits, but skip remaining digits if the numeral
+ // is longer.
+ int ReadUnsignedNumeral() {
int n = 0;
- int power;
- for (power = 100; IsAsciiDigit(); Next(), power = power / 10) {
- n = n + power * (ch_ - '0');
+ int i = 0;
+ while (IsAsciiDigit()) {
+ if (i < kMaxSignificantDigits) n = n * 10 + ch_ - '0';
+ i++;
+ Next();
}
return n;
}
@@ -151,18 +152,138 @@
// Return 1 for '+' and -1 for '-'.
int GetAsciiSignValue() const { return 44 - static_cast<int>(ch_); }
- // Indicates whether any (possibly empty!) numbers have been read.
- bool HasReadNumber() const { return has_read_number_; }
-
private:
int index_;
Vector<Char> buffer_;
- bool has_read_number_;
uint32_t ch_;
UnicodeCache* unicode_cache_;
};
- enum KeywordType { INVALID, MONTH_NAME, TIME_ZONE_NAME, AM_PM };
+ enum KeywordType {
+ INVALID, MONTH_NAME, TIME_ZONE_NAME, TIME_SEPARATOR, AM_PM
+ };
+
+ struct DateToken {
+ public:
+ bool IsInvalid() { return tag_ == kInvalidTokenTag; }
+ bool IsUnknown() { return tag_ == kUnknownTokenTag; }
+ bool IsNumber() { return tag_ == kNumberTag; }
+ bool IsSymbol() { return tag_ == kSymbolTag; }
+ bool IsWhiteSpace() { return tag_ == kWhiteSpaceTag; }
+ bool IsEndOfInput() { return tag_ == kEndOfInputTag; }
+ bool IsKeyword() { return tag_ >= kKeywordTagStart; }
+
+ int length() { return length_; }
+
+ int number() {
+ ASSERT(IsNumber());
+ return value_;
+ }
+ KeywordType keyword_type() {
+ ASSERT(IsKeyword());
+ return static_cast<KeywordType>(tag_);
+ }
+ int keyword_value() {
+ ASSERT(IsKeyword());
+ return value_;
+ }
+ char symbol() {
+ ASSERT(IsSymbol());
+ return static_cast<char>(value_);
+ }
+ bool IsSymbol(char symbol) {
+ return IsSymbol() && this->symbol() == symbol;
+ }
+ bool IsKeywordType(KeywordType tag) {
+ return tag_ == tag;
+ }
+ bool IsFixedLengthNumber(int length) {
+ return IsNumber() && length_ == length;
+ }
+ bool IsAsciiSign() {
+ return tag_ == kSymbolTag && (value_ == '-' || value_ == '+');
+ }
+ int ascii_sign() {
+ ASSERT(IsAsciiSign());
+ return 44 - value_;
+ }
+ bool IsKeywordZ() {
+ return IsKeywordType(TIME_ZONE_NAME) && length_ == 1 && value_ == 0;
+ }
+ bool IsUnknown(int character) {
+ return IsUnknown() && value_ == character;
+ }
+ // Factory functions.
+ static DateToken Keyword(KeywordType tag, int value, int length) {
+ return DateToken(tag, length, value);
+ }
+ static DateToken Number(int value, int length) {
+ return DateToken(kNumberTag, length, value);
+ }
+ static DateToken Symbol(char symbol) {
+ return DateToken(kSymbolTag, 1, symbol);
+ }
+ static DateToken EndOfInput() {
+ return DateToken(kEndOfInputTag, 0, -1);
+ }
+ static DateToken WhiteSpace(int length) {
+ return DateToken(kWhiteSpaceTag, length, -1);
+ }
+ static DateToken Unknown() {
+ return DateToken(kUnknownTokenTag, 1, -1);
+ }
+ static DateToken Invalid() {
+ return DateToken(kInvalidTokenTag, 0, -1);
+ }
+ private:
+ enum TagType {
+ kInvalidTokenTag = -6,
+ kUnknownTokenTag = -5,
+ kWhiteSpaceTag = -4,
+ kNumberTag = -3,
+ kSymbolTag = -2,
+ kEndOfInputTag = -1,
+ kKeywordTagStart = 0
+ };
+ DateToken(int tag, int length, int value)
+ : tag_(tag),
+ length_(length),
+ value_(value) { }
+
+ int tag_;
+ int length_; // Number of characters.
+ int value_;
+ };
+
+ template <typename Char>
+ class DateStringTokenizer {
+ public:
+ explicit DateStringTokenizer(InputReader<Char>* in)
+ : in_(in), next_(Scan()) { }
+ DateToken Next() {
+ DateToken result = next_;
+ next_ = Scan();
+ return result;
+ }
+
+ DateToken Peek() {
+ return next_;
+ }
+ bool SkipSymbol(char symbol) {
+ if (next_.IsSymbol(symbol)) {
+ next_ = Scan();
+ return true;
+ }
+ return false;
+ }
+ private:
+ DateToken Scan();
+
+ InputReader<Char>* in_;
+ DateToken next_;
+ };
+
+ static int ReadMilliseconds(DateToken number);
// KeywordTable maps names of months, time zones, am/pm to numbers.
class KeywordTable : public AllStatic {
@@ -201,6 +322,7 @@
}
bool IsUTC() const { return hour_ == 0 && minute_ == 0; }
bool Write(FixedArray* output);
+ bool IsEmpty() { return hour_ == kNone; }
private:
int sign_;
int hour_;
@@ -228,10 +350,10 @@
bool Write(FixedArray* output);
static bool IsMinute(int x) { return Between(x, 0, 59); }
- private:
static bool IsHour(int x) { return Between(x, 0, 23); }
- static bool IsHour12(int x) { return Between(x, 0, 12); }
static bool IsSecond(int x) { return Between(x, 0, 59); }
+ private:
+ static bool IsHour12(int x) { return Between(x, 0, 12); }
static bool IsMillisecond(int x) { return Between(x, 0, 999); }
static const int kSize = 4;
@@ -242,22 +364,42 @@
class DayComposer BASE_EMBEDDED {
public:
- DayComposer() : index_(0), named_month_(kNone) {}
+ DayComposer() : index_(0), named_month_(kNone), is_iso_date_(false) {}
bool IsEmpty() const { return index_ == 0; }
bool Add(int n) {
- return index_ < kSize ? (comp_[index_++] = n, true) : false;
+ if (index_ < kSize) {
+ comp_[index_] = n;
+ index_++;
+ return true;
+ }
+ return false;
}
void SetNamedMonth(int n) { named_month_ = n; }
bool Write(FixedArray* output);
- private:
+ void set_iso_date() { is_iso_date_ = true; }
static bool IsMonth(int x) { return Between(x, 1, 12); }
static bool IsDay(int x) { return Between(x, 1, 31); }
+ private:
static const int kSize = 3;
int comp_[kSize];
int index_;
int named_month_;
+ // If set, ensures that data is always parsed in year-month-date order.
+ bool is_iso_date_;
};
+
+ // Tries to parse an ES5 Date Time String. Returns the next token
+ // to continue with in the legacy date string parser. If parsing is
+ // complete, returns DateToken::EndOfInput(). If terminally unsuccessful,
+ // returns DateToken::Invalid(). Otherwise parsing continues in the
+ // legacy parser.
+ template <typename Char>
+ static DateParser::DateToken ParseES5DateTime(
+ DateStringTokenizer<Char>* scanner,
+ DayComposer* day,
+ TimeComposer* time,
+ TimeZoneComposer* tz);
};
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
index 498b88a..520bc62 100644
--- a/src/debug-agent.cc
+++ b/src/debug-agent.cc
@@ -116,8 +116,8 @@
}
// Create a new session and hook up the debug message handler.
- session_ = new DebuggerAgentSession(isolate(), this, client);
- v8::Debug::SetMessageHandler2(DebuggerAgentMessageHandler);
+ session_ = new DebuggerAgentSession(this, client);
+ isolate_->debugger()->SetMessageHandler(DebuggerAgentMessageHandler);
session_->Start();
}
@@ -203,7 +203,9 @@
// Send the request received to the debugger.
v8::Debug::SendCommand(reinterpret_cast<const uint16_t *>(temp.start()),
- len);
+ len,
+ NULL,
+ reinterpret_cast<v8::Isolate*>(agent_->isolate()));
if (is_closing_session) {
// Session is closed.
diff --git a/src/debug-agent.h b/src/debug-agent.h
index a25002e..e167871 100644
--- a/src/debug-agent.h
+++ b/src/debug-agent.h
@@ -43,24 +43,27 @@
// handles connection from a remote debugger.
class DebuggerAgent: public Thread {
public:
- DebuggerAgent(Isolate* isolate, const char* name, int port)
- : Thread(isolate, name),
+ DebuggerAgent(const char* name, int port)
+ : Thread(name),
+ isolate_(Isolate::Current()),
name_(StrDup(name)), port_(port),
server_(OS::CreateSocket()), terminate_(false),
session_access_(OS::CreateMutex()), session_(NULL),
terminate_now_(OS::CreateSemaphore(0)),
listening_(OS::CreateSemaphore(0)) {
- ASSERT(Isolate::Current()->debugger_agent_instance() == NULL);
- Isolate::Current()->set_debugger_agent_instance(this);
+ ASSERT(isolate_->debugger_agent_instance() == NULL);
+ isolate_->set_debugger_agent_instance(this);
}
~DebuggerAgent() {
- Isolate::Current()->set_debugger_agent_instance(NULL);
+ isolate_->set_debugger_agent_instance(NULL);
delete server_;
}
void Shutdown();
void WaitUntilListening();
+ Isolate* isolate() { return isolate_; }
+
private:
void Run();
void CreateSession(Socket* socket);
@@ -68,6 +71,7 @@
void CloseSession();
void OnSessionClosed(DebuggerAgentSession* session);
+ Isolate* isolate_;
SmartPointer<const char> name_; // Name of the embedding application.
int port_; // Port to use for the agent.
Socket* server_; // Server socket for listen/accept.
@@ -88,8 +92,8 @@
// debugger and sends debugger events/responses to the remote debugger.
class DebuggerAgentSession: public Thread {
public:
- DebuggerAgentSession(Isolate* isolate, DebuggerAgent* agent, Socket* client)
- : Thread(isolate, "v8:DbgAgntSessn"),
+ DebuggerAgentSession(DebuggerAgent* agent, Socket* client)
+ : Thread("v8:DbgAgntSessn"),
agent_(agent), client_(client) {}
void DebuggerMessage(Vector<uint16_t> message);
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index 908fcd2..36b624e 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -68,7 +68,8 @@
// The different script break point types.
Debug.ScriptBreakPointType = { ScriptId: 0,
- ScriptName: 1 };
+ ScriptName: 1,
+ ScriptRegExp: 2 };
function ScriptTypeFlag(type) {
return (1 << type);
@@ -255,8 +256,12 @@
this.type_ = type;
if (type == Debug.ScriptBreakPointType.ScriptId) {
this.script_id_ = script_id_or_name;
- } else { // type == Debug.ScriptBreakPointType.ScriptName
+ } else if (type == Debug.ScriptBreakPointType.ScriptName) {
this.script_name_ = script_id_or_name;
+ } else if (type == Debug.ScriptBreakPointType.ScriptRegExp) {
+ this.script_regexp_object_ = new RegExp(script_id_or_name);
+ } else {
+ throw new Error("Unexpected breakpoint type " + type);
}
this.line_ = opt_line || 0;
this.column_ = opt_column;
@@ -309,6 +314,11 @@
};
+ScriptBreakPoint.prototype.script_regexp_object = function() {
+ return this.script_regexp_object_;
+};
+
+
ScriptBreakPoint.prototype.line = function() {
return this.line_;
};
@@ -384,10 +394,19 @@
ScriptBreakPoint.prototype.matchesScript = function(script) {
if (this.type_ == Debug.ScriptBreakPointType.ScriptId) {
return this.script_id_ == script.id;
- } else { // this.type_ == Debug.ScriptBreakPointType.ScriptName
- return this.script_name_ == script.nameOrSourceURL() &&
- script.line_offset <= this.line_ &&
- this.line_ < script.line_offset + script.lineCount();
+ } else {
+ // We might want to account for columns here as well.
+ if (!(script.line_offset <= this.line_ &&
+ this.line_ < script.line_offset + script.lineCount())) {
+ return false;
+ }
+ if (this.type_ == Debug.ScriptBreakPointType.ScriptName) {
+ return this.script_name_ == script.nameOrSourceURL();
+ } else if (this.type_ == Debug.ScriptBreakPointType.ScriptRegExp) {
+ return this.script_regexp_object_.test(script.nameOrSourceURL());
+ } else {
+ throw new Error("Unexpected breakpoint type " + this.type_);
+ }
}
};
@@ -431,7 +450,8 @@
}
var actual_location = script.locationFromPosition(actual_position, true);
break_point.actual_location = { line: actual_location.line,
- column: actual_location.column };
+ column: actual_location.column,
+ script_id: script.id };
this.break_points_.push(break_point);
return break_point;
};
@@ -644,7 +664,8 @@
actual_position += this.sourcePosition(func);
var actual_location = script.locationFromPosition(actual_position, true);
break_point.actual_location = { line: actual_location.line,
- column: actual_location.column };
+ column: actual_location.column,
+ script_id: script.id };
break_point.setCondition(opt_condition);
return break_point.number();
}
@@ -799,6 +820,15 @@
}
+Debug.setScriptBreakPointByRegExp = function(script_regexp,
+ opt_line, opt_column,
+ opt_condition, opt_groupId) {
+ return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptRegExp,
+ script_regexp, opt_line, opt_column,
+ opt_condition, opt_groupId);
+}
+
+
Debug.enableScriptBreakPoint = function(break_point_number) {
var script_break_point = this.findScriptBreakPoint(break_point_number, false);
script_break_point.enable();
@@ -1549,12 +1579,7 @@
response.failed('Missing argument "type" or "target"');
return;
}
- if (type != 'function' && type != 'handle' &&
- type != 'script' && type != 'scriptId') {
- response.failed('Illegal type "' + type + '"');
- return;
- }
-
+
// Either function or script break point.
var break_point_number;
if (type == 'function') {
@@ -1598,9 +1623,16 @@
break_point_number =
Debug.setScriptBreakPointByName(target, line, column, condition,
groupId);
- } else { // type == 'scriptId.
+ } else if (type == 'scriptId') {
break_point_number =
Debug.setScriptBreakPointById(target, line, column, condition, groupId);
+ } else if (type == 'scriptRegExp') {
+ break_point_number =
+ Debug.setScriptBreakPointByRegExp(target, line, column, condition,
+ groupId);
+ } else {
+ response.failed('Illegal type "' + type + '"');
+ return;
}
// Set additional break point properties.
@@ -1621,9 +1653,14 @@
if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
response.body.type = 'scriptId';
response.body.script_id = break_point.script_id();
- } else {
+ } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptName) {
response.body.type = 'scriptName';
response.body.script_name = break_point.script_name();
+ } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) {
+ response.body.type = 'scriptRegExp';
+ response.body.script_regexp = break_point.script_regexp_object().source;
+ } else {
+ throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
}
response.body.line = break_point.line();
response.body.column = break_point.column();
@@ -1753,9 +1790,14 @@
if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
description.type = 'scriptId';
description.script_id = break_point.script_id();
- } else {
+ } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptName) {
description.type = 'scriptName';
description.script_name = break_point.script_name();
+ } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) {
+ description.type = 'scriptRegExp';
+ description.script_regexp = break_point.script_regexp_object().source;
+ } else {
+ throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
}
array.push(description);
}
@@ -2269,21 +2311,10 @@
DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
- var modules = parseInt(request.arguments.modules);
- if (isNaN(modules)) {
- return response.failed('Modules is not an integer');
- }
- var tag = parseInt(request.arguments.tag);
- if (isNaN(tag)) {
- tag = 0;
- }
if (request.arguments.command == 'resume') {
- %ProfilerResume(modules, tag);
+ %ProfilerResume();
} else if (request.arguments.command == 'pause') {
- %ProfilerPause(modules, tag);
+ %ProfilerPause();
} else {
return response.failed('Unknown command');
}
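
With the new ScriptRegExp type, matchesScript checks the line window first for both name and regexp targets, then dispatches on the breakpoint type, where a scriptRegExp target is tested as an unanchored regular expression against the script name. A self-contained C++ sketch of that matching order (std::regex standing in for the JS RegExp; all type names hypothetical):

    #include <cassert>
    #include <regex>
    #include <string>

    enum class BreakPointType { kScriptId, kScriptName, kScriptRegExp };

    struct Script {
      int id;
      std::string name;
      int line_offset;
      int line_count;
    };

    struct ScriptBreakPoint {
      BreakPointType type;
      int script_id;
      std::string target;  // script name, or regexp source for kScriptRegExp
      int line;

      bool Matches(const Script& script) const {
        if (type == BreakPointType::kScriptId) return script_id == script.id;
        // Name and regexp targets both require the breakpoint line to fall
        // inside the script's line window.
        if (!(script.line_offset <= line &&
              line < script.line_offset + script.line_count)) {
          return false;
        }
        if (type == BreakPointType::kScriptName) return target == script.name;
        return std::regex_search(script.name, std::regex(target));
      }
    };

    int main() {
      Script script{42, "lib/util.js", 0, 100};
      ScriptBreakPoint bp{BreakPointType::kScriptRegExp, -1, "util\\.js$", 10};
      assert(bp.Matches(script));
    }
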
diff --git a/src/debug.cc b/src/debug.cc
index 85c4b5e..aecbb46 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -772,9 +772,9 @@
bool caught_exception = false;
Handle<JSFunction> function =
factory->NewFunctionFromSharedFunctionInfo(function_info, context);
- Handle<Object> result =
- Execution::TryCall(function, Handle<Object>(context->global()),
- 0, NULL, &caught_exception);
+
+ Execution::TryCall(function, Handle<Object>(context->global()),
+ 0, NULL, &caught_exception);
// Check for caught exceptions.
if (caught_exception) {
@@ -796,7 +796,6 @@
// Return if debugger is already loaded.
if (IsLoaded()) return true;
- ASSERT(Isolate::Current() == isolate_);
Debugger* debugger = isolate_->debugger();
// Bail out if we're already in the process of compiling the native
@@ -1048,7 +1047,6 @@
// Check whether a single break point object is triggered.
bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
- ASSERT(Isolate::Current() == isolate_);
Factory* factory = isolate_->factory();
HandleScope scope(isolate_);
@@ -1234,7 +1232,6 @@
void Debug::PrepareStep(StepAction step_action, int step_count) {
- ASSERT(Isolate::Current() == isolate_);
HandleScope scope(isolate_);
ASSERT(Debug::InDebugger());
@@ -1739,7 +1736,6 @@
void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
- ASSERT(Isolate::Current() == isolate_);
HandleScope scope(isolate_);
// Get the executing function in which the debug break occurred.
@@ -1825,6 +1821,13 @@
bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
HandleScope scope(isolate_);
+ // If there are no break points this cannot be a break at return, as
+ // the debugger statement and stack guard debug break cannot be at
+ // return.
+ if (!has_break_points_) {
+ return false;
+ }
+
// Get the executing function in which the debug break occurred.
Handle<SharedFunctionInfo> shared =
Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
@@ -1872,7 +1875,6 @@
void Debug::ClearMirrorCache() {
- ASSERT(Isolate::Current() == isolate_);
PostponeInterruptsScope postpone(isolate_);
HandleScope scope(isolate_);
ASSERT(isolate_->context() == *Debug::debug_context());
@@ -1884,15 +1886,13 @@
*function_name));
ASSERT(fun->IsJSFunction());
bool caught_exception;
- Handle<Object> js_object = Execution::TryCall(
- Handle<JSFunction>::cast(fun),
+ Execution::TryCall(Handle<JSFunction>::cast(fun),
Handle<JSObject>(Debug::debug_context()->global()),
0, NULL, &caught_exception);
}
void Debug::CreateScriptCache() {
- ASSERT(Isolate::Current() == isolate_);
Heap* heap = isolate_->heap();
HandleScope scope(isolate_);
@@ -1934,7 +1934,6 @@
Handle<FixedArray> Debug::GetLoadedScripts() {
- ASSERT(Isolate::Current() == isolate_);
// Create and fill the script cache when the loaded scripts is requested for
// the first time.
if (script_cache_ == NULL) {
@@ -1964,7 +1963,7 @@
}
-Debugger::Debugger()
+Debugger::Debugger(Isolate* isolate)
: debugger_access_(OS::CreateMutex()),
event_listener_(Handle<Object>()),
event_listener_data_(Handle<Object>()),
@@ -1979,9 +1978,10 @@
message_dispatch_helper_thread_(NULL),
host_dispatch_micros_(100 * 1000),
agent_(NULL),
- command_queue_(kQueueInitialSize),
+ command_queue_(isolate->logger(), kQueueInitialSize),
command_received_(OS::CreateSemaphore(0)),
- event_command_queue_(kQueueInitialSize) {
+ event_command_queue_(isolate->logger(), kQueueInitialSize),
+ isolate_(isolate) {
}
@@ -1998,7 +1998,6 @@
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
int argc, Object*** argv,
bool* caught_exception) {
- ASSERT(Isolate::Current() == isolate_);
ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
// Create the execution state object.
@@ -2020,7 +2019,6 @@
Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
- ASSERT(Isolate::Current() == isolate_);
// Create the execution state object.
Handle<Object> break_id = isolate_->factory()->NewNumberFromInt(
isolate_->debug()->break_id());
@@ -2034,7 +2032,6 @@
Handle<Object> Debugger::MakeBreakEvent(Handle<Object> exec_state,
Handle<Object> break_points_hit,
bool* caught_exception) {
- ASSERT(Isolate::Current() == isolate_);
// Create the new break event object.
const int argc = 2;
Object** argv[argc] = { exec_state.location(),
@@ -2050,7 +2047,6 @@
Handle<Object> exception,
bool uncaught,
bool* caught_exception) {
- ASSERT(Isolate::Current() == isolate_);
Factory* factory = isolate_->factory();
// Create the new exception event object.
const int argc = 3;
@@ -2065,7 +2061,6 @@
Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
bool* caught_exception) {
- ASSERT(Isolate::Current() == isolate_);
// Create the new function event object.
const int argc = 1;
Object** argv[argc] = { function.location() };
@@ -2077,7 +2072,6 @@
Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script,
bool before,
bool* caught_exception) {
- ASSERT(Isolate::Current() == isolate_);
Factory* factory = isolate_->factory();
// Create the compile event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
@@ -2097,7 +2091,6 @@
Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
bool* caught_exception) {
- ASSERT(Isolate::Current() == isolate_);
// Create the script collected event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
@@ -2112,7 +2105,6 @@
void Debugger::OnException(Handle<Object> exception, bool uncaught) {
- ASSERT(Isolate::Current() == isolate_);
HandleScope scope(isolate_);
Debug* debug = isolate_->debug();
@@ -2157,7 +2149,6 @@
void Debugger::OnDebugBreak(Handle<Object> break_points_hit,
bool auto_continue) {
- ASSERT(Isolate::Current() == isolate_);
HandleScope scope(isolate_);
// Debugger has already been entered by caller.
@@ -2167,7 +2158,7 @@
if (!Debugger::EventActive(v8::Break)) return;
// Debugger must be entered in advance.
- ASSERT(Isolate::Current()->context() == *isolate_->debug()->debug_context());
+ ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
// Create the event data object.
bool caught_exception = false;
@@ -2190,7 +2181,6 @@
void Debugger::OnBeforeCompile(Handle<Script> script) {
- ASSERT(Isolate::Current() == isolate_);
HandleScope scope(isolate_);
// Bail out based on state or if there is no listener for this event
@@ -2220,7 +2210,6 @@
// Handle debugger actions when a new script is compiled.
void Debugger::OnAfterCompile(Handle<Script> script,
AfterCompileFlags after_compile_flags) {
- ASSERT(Isolate::Current() == isolate_);
HandleScope scope(isolate_);
Debug* debug = isolate_->debug();
@@ -2262,8 +2251,7 @@
bool caught_exception = false;
const int argc = 1;
Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
- Handle<Object> result = Execution::TryCall(
- Handle<JSFunction>::cast(update_script_break_points),
+ Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
Isolate::Current()->js_builtins_object(), argc, argv,
&caught_exception);
if (caught_exception) {
@@ -2289,7 +2277,6 @@
void Debugger::OnScriptCollected(int id) {
- ASSERT(Isolate::Current() == isolate_);
HandleScope scope(isolate_);
// No more to do if not debugging.
@@ -2319,7 +2306,6 @@
void Debugger::ProcessDebugEvent(v8::DebugEvent event,
Handle<JSObject> event_data,
bool auto_continue) {
- ASSERT(Isolate::Current() == isolate_);
HandleScope scope(isolate_);
// Clear any pending debug break if this is a real break.
@@ -2395,7 +2381,6 @@
Handle<Object> exec_state,
Handle<Object> event_data) {
ASSERT(event_listener_->IsJSFunction());
- ASSERT(Isolate::Current() == isolate_);
Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
// Invoke the JavaScript debug event listener.
@@ -2411,7 +2396,6 @@
Handle<Context> Debugger::GetDebugContext() {
- ASSERT(Isolate::Current() == isolate_);
never_unload_debugger_ = true;
EnterDebugger debugger;
return isolate_->debug()->debug_context();
@@ -2419,7 +2403,6 @@
void Debugger::UnloadDebugger() {
- ASSERT(Isolate::Current() == isolate_);
Debug* debug = isolate_->debug();
// Make sure that there are no breakpoints left.
@@ -2439,7 +2422,6 @@
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
bool auto_continue) {
- ASSERT(Isolate::Current() == isolate_);
HandleScope scope(isolate_);
if (!isolate_->debug()->Load()) return;
@@ -2535,7 +2517,8 @@
// Get the command from the queue.
CommandMessage command = command_queue_.Get();
- LOGGER->DebugTag("Got request from command queue, in interactive loop.");
+ isolate_->logger()->DebugTag(
+ "Got request from command queue, in interactive loop.");
if (!Debugger::IsDebuggerActive()) {
// Delete command text and user data.
command.Dispose();
@@ -2609,7 +2592,6 @@
void Debugger::SetEventListener(Handle<Object> callback,
Handle<Object> data) {
- ASSERT(Isolate::Current() == isolate_);
HandleScope scope(isolate_);
GlobalHandles* global_handles = isolate_->global_handles();
@@ -2643,7 +2625,6 @@
void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
- ASSERT(Isolate::Current() == isolate_);
ScopedLock with(debugger_access_);
message_handler_ = handler;
@@ -2659,7 +2640,6 @@
void Debugger::ListenersChanged() {
- ASSERT(Isolate::Current() == isolate_);
if (IsDebuggerActive()) {
// Disable the compilation cache when the debugger is active.
isolate_->compilation_cache()->Disable();
@@ -2675,7 +2655,6 @@
void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
int period) {
- ASSERT(Isolate::Current() == isolate_);
host_dispatch_handler_ = handler;
host_dispatch_micros_ = period * 1000;
}
@@ -2683,7 +2662,6 @@
void Debugger::SetDebugMessageDispatchHandler(
v8::Debug::DebugMessageDispatchHandler handler, bool provide_locker) {
- ASSERT(Isolate::Current() == isolate_);
ScopedLock with(dispatch_handler_access_);
debug_message_dispatch_handler_ = handler;
@@ -2697,7 +2675,6 @@
// Calls the registered debug message handler. This callback is part of the
// public API.
void Debugger::InvokeMessageHandler(MessageImpl message) {
- ASSERT(Isolate::Current() == isolate_);
ScopedLock with(debugger_access_);
if (message_handler_ != NULL) {
@@ -2712,13 +2689,12 @@
// by the API client thread.
void Debugger::ProcessCommand(Vector<const uint16_t> command,
v8::Debug::ClientData* client_data) {
- ASSERT(Isolate::Current() == isolate_);
// Need to cast away const.
CommandMessage message = CommandMessage::New(
Vector<uint16_t>(const_cast<uint16_t*>(command.start()),
command.length()),
client_data);
- LOGGER->DebugTag("Put command on command_queue.");
+ isolate_->logger()->DebugTag("Put command on command_queue.");
command_queue_.Put(message);
command_received_->Signal();
@@ -2742,13 +2718,11 @@
bool Debugger::HasCommands() {
- ASSERT(Isolate::Current() == isolate_);
return !command_queue_.IsEmpty();
}
void Debugger::EnqueueDebugCommand(v8::Debug::ClientData* client_data) {
- ASSERT(Isolate::Current() == isolate_);
CommandMessage message = CommandMessage::New(Vector<uint16_t>(), client_data);
event_command_queue_.Put(message);
@@ -2760,7 +2734,6 @@
bool Debugger::IsDebuggerActive() {
- ASSERT(Isolate::Current() == isolate_);
ScopedLock with(debugger_access_);
return message_handler_ != NULL || !event_listener_.is_null();
@@ -2770,7 +2743,6 @@
Handle<Object> Debugger::Call(Handle<JSFunction> fun,
Handle<Object> data,
bool* pending_exception) {
- ASSERT(Isolate::Current() == isolate_);
// When calling functions in the debugger prevent it from being unloaded.
Debugger::never_unload_debugger_ = true;
@@ -2820,7 +2792,7 @@
if (Socket::Setup()) {
if (agent_ == NULL) {
- agent_ = new DebuggerAgent(isolate_, name, port);
+ agent_ = new DebuggerAgent(name, port);
agent_->Start();
}
return true;
@@ -2849,7 +2821,6 @@
void Debugger::CallMessageDispatchHandler() {
- ASSERT(Isolate::Current() == isolate_);
v8::Debug::DebugMessageDispatchHandler handler;
{
ScopedLock with(dispatch_handler_access_);
@@ -2957,7 +2928,7 @@
v8::Handle<v8::Context> context = GetDebugEventContext(isolate);
// Isolate::context() may be NULL when "script collected" event occurs.
ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected);
- return GetDebugEventContext(isolate);
+ return context;
}
@@ -3083,8 +3054,8 @@
}
-LockingCommandMessageQueue::LockingCommandMessageQueue(int size)
- : queue_(size) {
+LockingCommandMessageQueue::LockingCommandMessageQueue(Logger* logger, int size)
+ : logger_(logger), queue_(size) {
lock_ = OS::CreateMutex();
}
@@ -3103,7 +3074,7 @@
CommandMessage LockingCommandMessageQueue::Get() {
ScopedLock sl(lock_);
CommandMessage result = queue_.Get();
- LOGGER->DebugEvent("Get", result.text());
+ logger_->DebugEvent("Get", result.text());
return result;
}
@@ -3111,7 +3082,7 @@
void LockingCommandMessageQueue::Put(const CommandMessage& message) {
ScopedLock sl(lock_);
queue_.Put(message);
- LOGGER->DebugEvent("Put", message.text());
+ logger_->DebugEvent("Put", message.text());
}
@@ -3122,7 +3093,7 @@
MessageDispatchHelperThread::MessageDispatchHelperThread(Isolate* isolate)
- : Thread(isolate, "v8:MsgDispHelpr"),
+ : Thread("v8:MsgDispHelpr"),
sem_(OS::CreateSemaphore(0)), mutex_(OS::CreateMutex()),
already_signalled_(false) {
}
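
LockingCommandMessageQueue now logs every Put/Get through an injected per-isolate Logger instead of the process-global LOGGER macro. A reduced sketch of the same shape with standard-library types (the Logger and queue APIs here are illustrative, not V8's):

    #include <iostream>
    #include <mutex>
    #include <queue>
    #include <string>

    class Logger {
     public:
      void DebugEvent(const char* tag, const std::string& text) {
        std::cout << tag << ": " << text << "\n";
      }
    };

    class LockingQueue {
     public:
      explicit LockingQueue(Logger* logger) : logger_(logger) {}

      void Put(const std::string& message) {
        std::lock_guard<std::mutex> lock(mutex_);
        queue_.push(message);
        logger_->DebugEvent("Put", message);
      }

      std::string Get() {
        std::lock_guard<std::mutex> lock(mutex_);
        std::string result = queue_.front();
        queue_.pop();
        logger_->DebugEvent("Get", result);
        return result;
      }

     private:
      Logger* logger_;  // owned by the isolate, not by the queue
      std::queue<std::string> queue_;
      std::mutex mutex_;
    };

    int main() {
      Logger logger;
      LockingQueue queue(&logger);
      queue.Put("evaluate request");
      queue.Get();
    }
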
diff --git a/src/debug.h b/src/debug.h
index 95dca72..c614844 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -35,6 +35,7 @@
#include "execution.h"
#include "factory.h"
#include "flags.h"
+#include "frames-inl.h"
#include "hashmap.h"
#include "platform.h"
#include "string-stream.h"
@@ -678,13 +679,14 @@
// Mutex to CommandMessageQueue. Includes logging of all puts and gets.
class LockingCommandMessageQueue BASE_EMBEDDED {
public:
- explicit LockingCommandMessageQueue(int size);
+ LockingCommandMessageQueue(Logger* logger, int size);
~LockingCommandMessageQueue();
bool IsEmpty() const;
CommandMessage Get();
void Put(const CommandMessage& message);
void Clear();
private:
+ Logger* logger_;
CommandMessageQueue queue_;
Mutex* lock_;
DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
@@ -811,7 +813,7 @@
bool IsDebuggerActive();
private:
- Debugger();
+ explicit Debugger(Isolate* isolate);
void CallEventCallback(v8::DebugEvent event,
Handle<Object> exec_state,
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 2fc0e47..c90df45 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,6 +44,9 @@
lazy_deoptimization_entry_code_ = NULL;
current_ = NULL;
deoptimizing_code_list_ = NULL;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ deoptimized_frame_info_ = NULL;
+#endif
}
@@ -58,6 +61,16 @@
}
}
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void DeoptimizerData::Iterate(ObjectVisitor* v) {
+ if (deoptimized_frame_info_ != NULL) {
+ deoptimized_frame_info_->Iterate(v);
+ }
+}
+#endif
+
+
Deoptimizer* Deoptimizer::New(JSFunction* function,
BailoutType type,
unsigned bailout_id,
@@ -70,7 +83,8 @@
type,
bailout_id,
from,
- fp_to_sp_delta);
+ fp_to_sp_delta,
+ NULL);
ASSERT(isolate->deoptimizer_data()->current_ == NULL);
isolate->deoptimizer_data()->current_ = deoptimizer;
return deoptimizer;
@@ -86,6 +100,91 @@
return result;
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
+DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
+ JavaScriptFrame* frame,
+ int frame_index,
+ Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ ASSERT(frame->is_optimized());
+ ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
+
+ // Get the function and code from the frame.
+ JSFunction* function = JSFunction::cast(frame->function());
+ Code* code = frame->LookupCode();
+ Address code_start_address = code->instruction_start();
+
+ // Locate the deoptimization point in the code. As we are at a call the
+ // return address must be at a place in the code with deoptimization support.
+ int deoptimization_index = Safepoint::kNoDeoptimizationIndex;
+ // Scope this as the safe point constructor will disallow allocation.
+ {
+ SafepointTable table(code);
+ for (unsigned i = 0; i < table.length(); ++i) {
+ Address address = code_start_address + table.GetPcOffset(i);
+ if (address == frame->pc()) {
+ SafepointEntry safepoint_entry = table.GetEntry(i);
+ ASSERT(safepoint_entry.deoptimization_index() !=
+ Safepoint::kNoDeoptimizationIndex);
+ deoptimization_index = safepoint_entry.deoptimization_index();
+ break;
+ }
+ }
+ }
+ ASSERT(deoptimization_index != Safepoint::kNoDeoptimizationIndex);
+
+ // Always use the actual stack slots when calculating the fp to sp
+ // delta, adding two for the function and context.
+ unsigned stack_slots = code->stack_slots();
+ unsigned fp_to_sp_delta = ((stack_slots + 2) * kPointerSize);
+
+ Deoptimizer* deoptimizer = new Deoptimizer(isolate,
+ function,
+ Deoptimizer::DEBUGGER,
+ deoptimization_index,
+ frame->pc(),
+ fp_to_sp_delta,
+ code);
+ Address tos = frame->fp() - fp_to_sp_delta;
+ deoptimizer->FillInputFrame(tos, frame);
+
+ // Calculate the output frames.
+ Deoptimizer::ComputeOutputFrames(deoptimizer);
+
+ // Create the GC safe output frame information and register it for GC
+ // handling.
+ ASSERT_LT(frame_index, deoptimizer->output_count());
+ DeoptimizedFrameInfo* info =
+ new DeoptimizedFrameInfo(deoptimizer, frame_index);
+ isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
+
+ // Get the "simulated" top and size for the requested frame.
+ Address top =
+ reinterpret_cast<Address>(deoptimizer->output_[frame_index]->GetTop());
+ uint32_t size = deoptimizer->output_[frame_index]->GetFrameSize();
+
+ // Done with the GC-unsafe frame descriptions. This re-enables allocation.
+ deoptimizer->DeleteFrameDescriptions();
+
+ // Allocate a heap number for the doubles belonging to this frame.
+ deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
+ top, size, info);
+
+ // Finished using the deoptimizer instance.
+ delete deoptimizer;
+
+ return info;
+}
+
+
+void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
+ Isolate* isolate) {
+ ASSERT(isolate == Isolate::Current());
+ ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info);
+ delete info;
+ isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL;
+}
+#endif
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
int count,
@@ -209,18 +308,24 @@
BailoutType type,
unsigned bailout_id,
Address from,
- int fp_to_sp_delta)
+ int fp_to_sp_delta,
+ Code* optimized_code)
: isolate_(isolate),
function_(function),
bailout_id_(bailout_id),
bailout_type_(type),
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
+ input_(NULL),
output_count_(0),
output_(NULL),
deferred_heap_numbers_(0) {
if (FLAG_trace_deopt && type != OSR) {
- PrintF("**** DEOPT: ");
+ if (type == DEBUGGER) {
+ PrintF("**** DEOPT FOR DEBUGGER: ");
+ } else {
+ PrintF("**** DEOPT: ");
+ }
function->PrintName();
PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
bailout_id,
@@ -248,10 +353,16 @@
optimized_code_ = function_->code();
ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
ASSERT(!optimized_code_->contains(from));
+ } else if (type == DEBUGGER) {
+ optimized_code_ = optimized_code;
+ ASSERT(optimized_code_->contains(from));
}
ASSERT(HEAP->allow_allocation(false));
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
+#ifdef DEBUG
+ input_->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
}
@@ -417,6 +528,7 @@
void Deoptimizer::MaterializeHeapNumbers() {
+ ASSERT_NE(DEBUGGER, bailout_type_);
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
@@ -432,6 +544,47 @@
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
+ Address top, uint32_t size, DeoptimizedFrameInfo* info) {
+ ASSERT_EQ(DEBUGGER, bailout_type_);
+ for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
+ HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
+
+ // Check if the heap number to materialize actually belongs to the frame
+ // being extracted.
+ Address slot = d.slot_address();
+ if (top <= slot && slot < top + size) {
+ Handle<Object> num = isolate_->factory()->NewNumber(d.value());
+ // Calculate the index with the bottom of the expression stack
+ // at index 0, and the fixed part (including incoming arguments)
+ // at negative indexes.
+ int index = static_cast<int>(
+ info->expression_count_ - (slot - top) / kPointerSize - 1);
+ if (FLAG_trace_deopt) {
+ PrintF("Materializing a new heap number %p [%e] in slot %p"
+ "for stack index %d\n",
+ reinterpret_cast<void*>(*num),
+ d.value(),
+ d.slot_address(),
+ index);
+ }
+ if (index >= 0) {
+ info->SetExpression(index, *num);
+ } else {
+ // Calculate parameter index subtracting one for the receiver.
+ int parameter_index =
+ index +
+ static_cast<int>(size) / kPointerSize -
+ info->expression_count_ - 1;
+ info->SetParameter(parameter_index, *num);
+ }
+ }
+ }
+}
+#endif
+
+
void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset) {
@@ -460,11 +613,13 @@
intptr_t input_value = input_->GetRegister(input_reg);
if (FLAG_trace_deopt) {
PrintF(
- " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n",
+ " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
output_[frame_index]->GetTop() + output_offset,
output_offset,
input_value,
converter.NameOfCPURegister(input_reg));
+ reinterpret_cast<Object*>(input_value)->ShortPrint();
+ PrintF("\n");
}
output_[frame_index]->SetFrameSlot(output_offset, input_value);
return;
@@ -522,10 +677,12 @@
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
- PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
+ PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
output_offset,
input_value,
input_offset);
+ reinterpret_cast<Object*>(input_value)->ShortPrint();
+ PrintF("\n");
}
output_[frame_index]->SetFrameSlot(output_offset, input_value);
return;
@@ -901,6 +1058,9 @@
ASSERT(desc.reloc_size == 0);
LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
+ if (chunk == NULL) {
+ V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
+ }
memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
return chunk;
@@ -969,18 +1129,48 @@
if (slot_index >= 0) {
// Local or spill slots. Skip the fixed part of the frame
// including all arguments.
- unsigned base = static_cast<unsigned>(
- GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction()));
+ unsigned base =
+ GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
return base - ((slot_index + 1) * kPointerSize);
} else {
// Incoming parameter.
- unsigned base = static_cast<unsigned>(GetFrameSize() -
- deoptimizer->ComputeIncomingArgumentSize(GetFunction()));
+ unsigned base = GetFrameSize() -
+ deoptimizer->ComputeIncomingArgumentSize(GetFunction());
return base - ((slot_index + 1) * kPointerSize);
}
}
+int FrameDescription::ComputeParametersCount() {
+ return function_->shared()->formal_parameter_count();
+}
+
+
+Object* FrameDescription::GetParameter(Deoptimizer* deoptimizer, int index) {
+ ASSERT_EQ(Code::FUNCTION, kind_);
+ ASSERT(index >= 0);
+ ASSERT(index < ComputeParametersCount());
+ // The slot indexes for incoming arguments are negative.
+ unsigned offset = GetOffsetFromSlotIndex(deoptimizer,
+ index - ComputeParametersCount());
+ return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
+}
+
+
+unsigned FrameDescription::GetExpressionCount(Deoptimizer* deoptimizer) {
+ ASSERT_EQ(Code::FUNCTION, kind_);
+ unsigned size = GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
+ return size / kPointerSize;
+}
+
+
+Object* FrameDescription::GetExpression(Deoptimizer* deoptimizer, int index) {
+ ASSERT_EQ(Code::FUNCTION, kind_);
+ unsigned offset = GetOffsetFromSlotIndex(deoptimizer, index);
+ return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
+}
+
+
void TranslationBuffer::Add(int32_t value) {
// Encode the sign bit in the least significant bit.
bool is_negative = (value < 0);
@@ -1104,7 +1294,7 @@
}
-#ifdef OBJECT_PRINT
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
const char* Translation::StringFor(Opcode opcode) {
switch (opcode) {
@@ -1253,4 +1443,33 @@
}
+DeoptimizedFrameInfo::DeoptimizedFrameInfo(
+ Deoptimizer* deoptimizer, int frame_index) {
+ FrameDescription* output_frame = deoptimizer->output_[frame_index];
+ SetFunction(output_frame->GetFunction());
+ expression_count_ = output_frame->GetExpressionCount(deoptimizer);
+ parameters_count_ = output_frame->ComputeParametersCount();
+ parameters_ = new Object*[parameters_count_];
+ for (int i = 0; i < parameters_count_; i++) {
+ SetParameter(i, output_frame->GetParameter(deoptimizer, i));
+ }
+ expression_stack_ = new Object*[expression_count_];
+ for (int i = 0; i < expression_count_; i++) {
+ SetExpression(i, output_frame->GetExpression(deoptimizer, i));
+ }
+}
+
+
+DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
+ delete[] expression_stack_;
+ delete[] parameters_;
+}
+
+void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
+ v->VisitPointer(BitCast<Object**>(&function_));
+ v->VisitPointers(parameters_, parameters_ + parameters_count_);
+ v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
+}
+
+
} } // namespace v8::internal
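
DeoptimizedFrameInfo exists because the FrameDescription objects used during deoptimization are not GC safe: the frame contents are copied once into plain heap arrays, and Iterate() lets the collector visit (and, on a moving GC, update) those copies. A stripped-down sketch of the snapshot-plus-visitor idea, with stand-in Object and ObjectVisitor types:

    struct Object {};

    class ObjectVisitor {
     public:
      virtual ~ObjectVisitor() {}
      virtual void VisitPointers(Object** start, Object** end) = 0;
    };

    class FrameSnapshot {
     public:
      FrameSnapshot(Object** slots, int count)
          : count_(count), slots_(new Object*[count]) {
        for (int i = 0; i < count; i++) slots_[i] = slots[i];  // copy once
      }
      ~FrameSnapshot() { delete[] slots_; }

      // GC support: the collector may rewrite the copied slots in place.
      void Iterate(ObjectVisitor* v) {
        v->VisitPointers(slots_, slots_ + count_);
      }

     private:
      int count_;
      Object** slots_;
    };

    class CountingVisitor : public ObjectVisitor {
     public:
      int visited = 0;
      void VisitPointers(Object** start, Object** end) {
        visited += static_cast<int>(end - start);
      }
    };

    int main() {
      Object a, b;
      Object* slots[] = { &a, &b };
      FrameSnapshot snapshot(slots, 2);
      CountingVisitor visitor;
      snapshot.Iterate(&visitor);
      return visitor.visited == 2 ? 0 : 1;
    }
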
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 7c5dfb8..9265905 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -41,7 +41,7 @@
class FrameDescription;
class TranslationIterator;
class DeoptimizingCodeListNode;
-
+class DeoptimizedFrameInfo;
class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
public:
@@ -81,11 +81,19 @@
DeoptimizerData();
~DeoptimizerData();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ void Iterate(ObjectVisitor* v);
+#endif
+
private:
LargeObjectChunk* eager_deoptimization_entry_code_;
LargeObjectChunk* lazy_deoptimization_entry_code_;
Deoptimizer* current_;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ DeoptimizedFrameInfo* deoptimized_frame_info_;
+#endif
+
// List of deoptimized code which still have references from active stack
// frames. These code objects are needed by the deoptimizer when deoptimizing
// a frame for which the code object for the function has been
@@ -103,7 +111,10 @@
enum BailoutType {
EAGER,
LAZY,
- OSR
+ OSR,
+ // This last bailout type is not really a bailout, but used by the
+ // debugger to deoptimize stack frames to allow inspection.
+ DEBUGGER
};
int output_count() const { return output_count_; }
@@ -116,6 +127,16 @@
Isolate* isolate);
static Deoptimizer* Grab(Isolate* isolate);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // The returned object with information on the optimized frame needs to be
+ // freed before another one can be generated.
+ static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame,
+ int frame_index,
+ Isolate* isolate);
+ static void DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
+ Isolate* isolate);
+#endif
+
// Makes sure that there is enough room in the relocation
// information of a code object to perform lazy deoptimization
// patching. If there is not enough room a new relocation
@@ -171,6 +192,10 @@
~Deoptimizer();
void MaterializeHeapNumbers();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ void MaterializeHeapNumbersForDebuggerInspectableFrame(
+ Address top, uint32_t size, DeoptimizedFrameInfo* info);
+#endif
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
@@ -233,7 +258,8 @@
BailoutType type,
unsigned bailout_id,
Address from,
- int fp_to_sp_delta);
+ int fp_to_sp_delta,
+ Code* optimized_code);
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
@@ -269,6 +295,11 @@
static Code* FindDeoptimizingCodeFromAddress(Address addr);
static void RemoveDeoptimizingCode(Code* code);
+ // Fill the input frame from a JavaScript frame. This is used when
+ // the debugger needs to inspect an optimized frame. For normal
+ // deoptimizations the input frame is filled in generated code.
+ void FillInputFrame(Address tos, JavaScriptFrame* frame);
+
Isolate* isolate_;
JSFunction* function_;
Code* optimized_code_;
@@ -290,6 +321,7 @@
friend class FrameDescription;
friend class DeoptimizingCodeListNode;
+ friend class DeoptimizedFrameInfo;
};
@@ -308,7 +340,10 @@
free(description);
}
- intptr_t GetFrameSize() const { return frame_size_; }
+ uint32_t GetFrameSize() const {
+ ASSERT(static_cast<uint32_t>(frame_size_) == frame_size_);
+ return static_cast<uint32_t>(frame_size_);
+ }
JSFunction* GetFunction() const { return function_; }
@@ -360,6 +395,23 @@
void SetContinuation(intptr_t pc) { continuation_ = pc; }
+#ifdef DEBUG
+ Code::Kind GetKind() const { return kind_; }
+ void SetKind(Code::Kind kind) { kind_ = kind; }
+#endif
+
+ // Get the incoming arguments count.
+ int ComputeParametersCount();
+
+ // Get a parameter value for an unoptimized frame.
+ Object* GetParameter(Deoptimizer* deoptimizer, int index);
+
+ // Get the expression stack height for an unoptimized frame.
+ unsigned GetExpressionCount(Deoptimizer* deoptimizer);
+
+ // Get the expression stack value for an unoptimized frame.
+ Object* GetExpression(Deoptimizer* deoptimizer, int index);
+
static int registers_offset() {
return OFFSET_OF(FrameDescription, registers_);
}
@@ -391,6 +443,9 @@
private:
static const uint32_t kZapUint32 = 0xbeeddead;
+ // frame_size_ must hold a uint32_t value. It is only a uintptr_t to
+ // keep the variable-size array frame_content_ of type intptr_t at
+ // the end of the structure aligned.
uintptr_t frame_size_; // Number of bytes.
JSFunction* function_;
intptr_t registers_[Register::kNumRegisters];
@@ -399,6 +454,9 @@
intptr_t pc_;
intptr_t fp_;
Smi* state_;
+#ifdef DEBUG
+ Code::Kind kind_;
+#endif
// Continuation is the PC where the execution continues after
// deoptimizing.
@@ -495,7 +553,7 @@
static int NumberOfOperandsFor(Opcode opcode);
-#ifdef OBJECT_PRINT
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
static const char* StringFor(Opcode opcode);
#endif
@@ -597,6 +655,70 @@
};
+#ifdef ENABLE_DEBUGGER_SUPPORT
+// Class used to represent an unoptimized frame when the debugger
+// needs to inspect a frame that is part of an optimized frame. The
+// internally used FrameDescription objects are not GC safe, so for
+// use by the debugger the frame information is copied to an object
+// of this type.
+class DeoptimizedFrameInfo : public Malloced {
+ public:
+ DeoptimizedFrameInfo(Deoptimizer* deoptimizer, int frame_index);
+ virtual ~DeoptimizedFrameInfo();
+
+ // GC support.
+ void Iterate(ObjectVisitor* v);
+
+ // Return the number of incoming arguments.
+ int parameters_count() { return parameters_count_; }
+
+ // Return the height of the expression stack.
+ int expression_count() { return expression_count_; }
+
+ // Get the frame function.
+ JSFunction* GetFunction() {
+ return function_;
+ }
+
+ // Get an incoming argument.
+ Object* GetParameter(int index) {
+ ASSERT(0 <= index && index < parameters_count());
+ return parameters_[index];
+ }
+
+ // Get an expression from the expression stack.
+ Object* GetExpression(int index) {
+ ASSERT(0 <= index && index < expression_count());
+ return expression_stack_[index];
+ }
+
+ private:
+ // Set the frame function.
+ void SetFunction(JSFunction* function) {
+ function_ = function;
+ }
+
+ // Set an incoming argument.
+ void SetParameter(int index, Object* obj) {
+ ASSERT(0 <= index && index < parameters_count());
+ parameters_[index] = obj;
+ }
+
+ // Set an expression on the expression stack.
+ void SetExpression(int index, Object* obj) {
+ ASSERT(0 <= index && index < expression_count());
+ expression_stack_[index] = obj;
+ }
+
+ JSFunction* function_;
+ int parameters_count_;
+ int expression_count_;
+ Object** parameters_;
+ Object** expression_stack_;
+
+ friend class Deoptimizer;
+};
+#endif
+
} } // namespace v8::internal
#endif // V8_DEOPTIMIZER_H_
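
The heap-number materialization above maps a raw slot address to a stack index: index 0 is the bottom of the expression stack, and negative indexes spill into the fixed frame part and incoming parameters. A worked standalone check of that mapping (the constants are illustrative):

    #include <cassert>
    #include <cstdint>

    const int kPointerSize = sizeof(void*);

    // index = expression_count - (slot - top) / kPointerSize - 1
    int ExpressionIndex(intptr_t slot, intptr_t top, int expression_count) {
      return expression_count -
             static_cast<int>((slot - top) / kPointerSize) - 1;
    }

    int main() {
      const int expression_count = 3;
      const intptr_t top = 0x1000;
      // The slot at the frame top is the newest expression stack entry.
      assert(ExpressionIndex(top, top, expression_count) == 2);
      // Three slots down is one past the expression stack: index -1, which
      // MaterializeHeapNumbersForDebuggerInspectableFrame folds into the
      // parameter area instead.
      assert(ExpressionIndex(top + 3 * kPointerSize, top,
                             expression_count) == -1);
    }
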
diff --git a/src/diy-fp.cc b/src/diy-fp.cc
index c54bd1d..4913877 100644
--- a/src/diy-fp.cc
+++ b/src/diy-fp.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,8 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
-
+#include "../include/v8stdint.h"
+#include "globals.h"
+#include "checks.h"
#include "diy-fp.h"
namespace v8 {
diff --git a/src/diy-fp.h b/src/diy-fp.h
index cfe05ef..26ff1a2 100644
--- a/src/diy-fp.h
+++ b/src/diy-fp.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -80,7 +80,7 @@
// This method is mainly called for normalizing boundaries. In general
// boundaries need to be shifted by 10 bits. We thus optimize for this case.
- const uint64_t k10MSBits = V8_2PART_UINT64_C(0xFFC00000, 00000000);
+ const uint64_t k10MSBits = static_cast<uint64_t>(0x3FF) << 54;
while ((f & k10MSBits) == 0) {
f <<= 10;
e -= 10;
@@ -106,7 +106,7 @@
void set_e(int new_value) { e_ = new_value; }
private:
- static const uint64_t kUint64MSB = V8_2PART_UINT64_C(0x80000000, 00000000);
+ static const uint64_t kUint64MSB = static_cast<uint64_t>(1) << 63;
uint64_t f_;
int e_;
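
The diy-fp changes drop the dependency on v8.h (and with it the V8_2PART_UINT64_C macro) while keeping the same bit patterns; a quick standalone check that the rewritten constants are identical:

    #include <cstdint>

    // 0x3FF shifted left by 54 sets the ten most significant bits,
    // matching the old V8_2PART_UINT64_C(0xFFC00000, 00000000).
    static_assert((static_cast<uint64_t>(0x3FF) << 54) == 0xFFC0000000000000ull,
                  "ten most significant bits");
    // 1 shifted left by 63 is the uint64 most significant bit,
    // matching V8_2PART_UINT64_C(0x80000000, 00000000).
    static_assert((static_cast<uint64_t>(1) << 63) == 0x8000000000000000ull,
                  "most significant bit");

    int main() { return 0; }
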
diff --git a/src/execution.cc b/src/execution.cc
index e84ab9e..6ab73e7 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -210,23 +210,6 @@
// If you return a function from here, it will be called when an
// attempt is made to call the given object as a function.
- // Regular expressions can be called as functions in both Firefox
- // and Safari so we allow it too.
- if (object->IsJSRegExp()) {
- Handle<String> exec = factory->exec_symbol();
- // TODO(lrn): Bug 617. We should use the default function here, not the
- // one on the RegExp object.
- Object* exec_function;
- { MaybeObject* maybe_exec_function = object->GetProperty(*exec);
- // This can lose an exception, but the alternative is to put a failure
- // object in a handle, which is not GC safe.
- if (!maybe_exec_function->ToObject(&exec_function)) {
- return factory->undefined_value();
- }
- }
- return Handle<Object>(exec_function);
- }
-
// Objects created through the API can have an instance-call handler
// that should be used when calling the object as a function.
if (object->IsHeapObject() &&
@@ -469,8 +452,9 @@
void StackGuard::FreeThreadResources() {
- Isolate::CurrentPerIsolateThreadData()->set_stack_limit(
- thread_local_.real_climit_);
+ Isolate::PerIsolateThreadData* per_thread =
+ isolate_->FindOrAllocatePerThreadDataForThisThread();
+ per_thread->set_stack_limit(thread_local_.real_climit_);
}
@@ -519,7 +503,7 @@
uintptr_t stored_limit = per_thread->stack_limit();
// You should hold the ExecutionAccess lock when you call this.
if (stored_limit != 0) {
- StackGuard::SetStackLimit(stored_limit);
+ SetStackLimit(stored_limit);
}
}
diff --git a/src/extensions/experimental/break-iterator.cc b/src/extensions/experimental/break-iterator.cc
index e8baea7..e695a3e 100644
--- a/src/extensions/experimental/break-iterator.cc
+++ b/src/extensions/experimental/break-iterator.cc
@@ -25,7 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "break-iterator.h"
+#include "src/extensions/experimental/break-iterator.h"
+
+#include <string.h>
#include "unicode/brkiter.h"
#include "unicode/locid.h"
diff --git a/src/extensions/experimental/break-iterator.h b/src/extensions/experimental/break-iterator.h
index fac1ed8..73b9bbd 100644
--- a/src/extensions/experimental/break-iterator.h
+++ b/src/extensions/experimental/break-iterator.h
@@ -28,7 +28,7 @@
#ifndef V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
#define V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
-#include <v8.h>
+#include "include/v8.h"
#include "unicode/uversion.h"
diff --git a/src/extensions/experimental/collator.cc b/src/extensions/experimental/collator.cc
index 7d1a21d..5cf2192 100644
--- a/src/extensions/experimental/collator.cc
+++ b/src/extensions/experimental/collator.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "collator.h"
+#include "src/extensions/experimental/collator.h"
#include "unicode/coll.h"
#include "unicode/locid.h"
@@ -153,6 +153,11 @@
bool ignore_case, ignore_accents, numeric;
if (ExtractBooleanOption(options, "ignoreCase", &ignore_case)) {
+ // We need to explicitly set the level to secondary to get case ignored.
+ // The default L3 ignores UCOL_CASE_LEVEL == UCOL_OFF !
+ if (ignore_case) {
+ collator->setStrength(icu::Collator::SECONDARY);
+ }
collator->setAttribute(UCOL_CASE_LEVEL, ignore_case ? UCOL_OFF : UCOL_ON,
status);
if (U_FAILURE(status)) {
@@ -215,4 +220,3 @@
}
} } // namespace v8::internal
-
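
The strength change matters because ICU's default tertiary strength still distinguishes case even with UCOL_CASE_LEVEL set to UCOL_OFF; case-insensitive comparison only works once the collator is dropped to secondary strength. A minimal check against the real ICU C++ API (error handling trimmed; assumes ICU headers and libraries are available):

    #include <unicode/coll.h>
    #include <unicode/locid.h>
    #include <unicode/ucol.h>
    #include <unicode/unistr.h>

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      icu::Collator* collator =
          icu::Collator::createInstance(icu::Locale::getUS(), status);
      if (U_FAILURE(status)) return 1;
      // Secondary strength ignores tertiary (case) differences.
      collator->setStrength(icu::Collator::SECONDARY);
      collator->setAttribute(UCOL_CASE_LEVEL, UCOL_OFF, status);
      bool equal = collator->compare(icu::UnicodeString("ABC"),
                                     icu::UnicodeString("abc"),
                                     status) == UCOL_EQUAL;
      delete collator;
      return (U_SUCCESS(status) && equal) ? 0 : 1;
    }
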
diff --git a/src/extensions/experimental/collator.h b/src/extensions/experimental/collator.h
index 10d6ffb..ca7e4dc 100644
--- a/src/extensions/experimental/collator.h
+++ b/src/extensions/experimental/collator.h
@@ -28,7 +28,7 @@
#ifndef V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H
#define V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
-#include <v8.h>
+#include "include/v8.h"
#include "unicode/uversion.h"
@@ -66,4 +66,3 @@
} } // namespace v8::internal
#endif // V8_EXTENSIONS_EXPERIMENTAL_COLLATOR
-
diff --git a/src/extensions/experimental/datetime-format.cc b/src/extensions/experimental/datetime-format.cc
new file mode 100644
index 0000000..7f46302
--- /dev/null
+++ b/src/extensions/experimental/datetime-format.cc
@@ -0,0 +1,384 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/datetime-format.h"
+
+#include <string.h>
+
+#include "src/extensions/experimental/i18n-utils.h"
+#include "unicode/dtfmtsym.h"
+#include "unicode/dtptngen.h"
+#include "unicode/locid.h"
+#include "unicode/smpdtfmt.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Persistent<v8::FunctionTemplate> DateTimeFormat::datetime_format_template_;
+
+static icu::DateFormat* CreateDateTimeFormat(v8::Handle<v8::String>,
+ v8::Handle<v8::Object>);
+static v8::Handle<v8::Value> GetSymbols(
+ const v8::Arguments&,
+ const icu::UnicodeString*, int32_t,
+ const icu::UnicodeString*, int32_t,
+ const icu::UnicodeString*, int32_t);
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
+static icu::DateFormat::EStyle GetDateTimeStyle(const icu::UnicodeString&);
+
+icu::SimpleDateFormat* DateTimeFormat::UnpackDateTimeFormat(
+ v8::Handle<v8::Object> obj) {
+ if (datetime_format_template_->HasInstance(obj)) {
+ return static_cast<icu::SimpleDateFormat*>(
+ obj->GetPointerFromInternalField(0));
+ }
+
+ return NULL;
+}
+
+void DateTimeFormat::DeleteDateTimeFormat(v8::Persistent<v8::Value> object,
+ void* param) {
+ v8::Persistent<v8::Object> persistent_object =
+ v8::Persistent<v8::Object>::Cast(object);
+
+ // First delete the hidden C++ object.
+ // Unpacking should never return NULL here. That would only happen if
+ // this method is used as the weak callback for persistent handles not
+ // pointing to a date time formatter.
+ delete UnpackDateTimeFormat(persistent_object);
+
+ // Then dispose of the persistent handle to JS object.
+ persistent_object.Dispose();
+}
+
+v8::Handle<v8::Value> DateTimeFormat::Format(const v8::Arguments& args) {
+ v8::HandleScope handle_scope;
+
+ double millis = 0.0;
+ if (args.Length() != 1 || !args[0]->IsDate()) {
+ // Create a new date.
+ v8::TryCatch try_catch;
+ v8::Local<v8::Script> date_script =
+ v8::Script::Compile(v8::String::New("eval('new Date()')"));
+ millis = date_script->Run()->NumberValue();
+ if (try_catch.HasCaught()) {
+ return try_catch.ReThrow();
+ }
+ } else {
+ millis = v8::Date::Cast(*args[0])->NumberValue();
+ }
+
+ icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+ if (!date_format) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ icu::UnicodeString result;
+ date_format->format(millis, result);
+
+ return v8::String::New(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetMonths(const v8::Arguments& args) {
+ icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+ if (!date_format) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+ int32_t narrow_count;
+ const icu::UnicodeString* narrow = symbols->getMonths(
+ narrow_count,
+ icu::DateFormatSymbols::STANDALONE,
+ icu::DateFormatSymbols::NARROW);
+ int32_t abbrev_count;
+ const icu::UnicodeString* abbrev = symbols->getMonths(
+ abbrev_count,
+ icu::DateFormatSymbols::STANDALONE,
+ icu::DateFormatSymbols::ABBREVIATED);
+ int32_t wide_count;
+ const icu::UnicodeString* wide = symbols->getMonths(
+ wide_count,
+ icu::DateFormatSymbols::STANDALONE,
+ icu::DateFormatSymbols::WIDE);
+
+ return GetSymbols(
+ args, narrow, narrow_count, abbrev, abbrev_count, wide, wide_count);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetWeekdays(const v8::Arguments& args) {
+ icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+ if (!date_format) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+ int32_t narrow_count;
+ const icu::UnicodeString* narrow = symbols->getWeekdays(
+ narrow_count,
+ icu::DateFormatSymbols::STANDALONE,
+ icu::DateFormatSymbols::NARROW);
+ int32_t abbrev_count;
+ const icu::UnicodeString* abbrev = symbols->getWeekdays(
+ abbrev_count,
+ icu::DateFormatSymbols::STANDALONE,
+ icu::DateFormatSymbols::ABBREVIATED);
+ int32_t wide_count;
+ const icu::UnicodeString* wide = symbols->getWeekdays(
+ wide_count,
+ icu::DateFormatSymbols::STANDALONE,
+ icu::DateFormatSymbols::WIDE);
+
+ // getXXXWeekdays always returns 8 elements - ICU stable API.
+ // We can't use ASSERT_EQ(8, narrow_count) because ASSERT is internal to v8.
+ if (narrow_count != 8 || abbrev_count != 8 || wide_count != 8) {
+ return v8::ThrowException(v8::Exception::Error(
+ v8::String::New("Failed to get weekday information.")));
+ }
+
+ // ICU documentation says we should ignore element 0 of the returned array.
+ return GetSymbols(args, narrow + 1, narrow_count - 1, abbrev + 1,
+ abbrev_count - 1, wide + 1, wide_count - 1);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetEras(const v8::Arguments& args) {
+ icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+ if (!date_format) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+ int32_t narrow_count;
+ const icu::UnicodeString* narrow = symbols->getNarrowEras(narrow_count);
+ int32_t abbrev_count;
+ const icu::UnicodeString* abbrev = symbols->getEras(abbrev_count);
+ int32_t wide_count;
+ const icu::UnicodeString* wide = symbols->getEraNames(wide_count);
+
+ return GetSymbols(
+ args, narrow, narrow_count, abbrev, abbrev_count, wide, wide_count);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetAmPm(const v8::Arguments& args) {
+ icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+ if (!date_format) {
+ return ThrowUnexpectedObjectError();
+ }
+
+ const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+ // In this case narrow == abbreviated == wide
+ int32_t count;
+ const icu::UnicodeString* wide = symbols->getAmPmStrings(count);
+
+ return GetSymbols(args, wide, count, wide, count, wide, count);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::JSDateTimeFormat(
+ const v8::Arguments& args) {
+ v8::HandleScope handle_scope;
+
+ if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsObject()) {
+ return v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Locale and date/time options are required.")));
+ }
+
+ icu::SimpleDateFormat* date_format = static_cast<icu::SimpleDateFormat*>(
+ CreateDateTimeFormat(args[0]->ToString(), args[1]->ToObject()));
+
+ if (datetime_format_template_.IsEmpty()) {
+ v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
+
+ raw_template->SetClassName(v8::String::New("v8Locale.DateTimeFormat"));
+
+ // Define internal field count on instance template.
+ v8::Local<v8::ObjectTemplate> object_template =
+ raw_template->InstanceTemplate();
+
+ // Set aside internal field for icu date time formatter.
+ object_template->SetInternalFieldCount(1);
+
+ // Define all of the prototype methods on prototype template.
+ v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
+ proto->Set(v8::String::New("format"),
+ v8::FunctionTemplate::New(Format));
+ proto->Set(v8::String::New("getMonths"),
+ v8::FunctionTemplate::New(GetMonths));
+ proto->Set(v8::String::New("getWeekdays"),
+ v8::FunctionTemplate::New(GetWeekdays));
+ proto->Set(v8::String::New("getEras"),
+ v8::FunctionTemplate::New(GetEras));
+ proto->Set(v8::String::New("getAmPm"),
+ v8::FunctionTemplate::New(GetAmPm));
+
+ datetime_format_template_ =
+ v8::Persistent<v8::FunctionTemplate>::New(raw_template);
+ }
+
+ // Create an empty object wrapper.
+ v8::Local<v8::Object> local_object =
+ datetime_format_template_->GetFunction()->NewInstance();
+ v8::Persistent<v8::Object> wrapper =
+ v8::Persistent<v8::Object>::New(local_object);
+
+ // Set date time formatter as internal field of the resulting JS object.
+ wrapper->SetPointerInInternalField(0, date_format);
+
+ // Set resolved pattern in options.pattern.
+ icu::UnicodeString pattern;
+ date_format->toPattern(pattern);
+ v8::Local<v8::Object> options = v8::Object::New();
+ options->Set(v8::String::New("pattern"),
+ v8::String::New(reinterpret_cast<const uint16_t*>(
+ pattern.getBuffer()), pattern.length()));
+ wrapper->Set(v8::String::New("options"), options);
+
+ // Make the object handle weak so we can delete the formatter once GC
+ // kicks in.
+ wrapper.MakeWeak(NULL, DeleteDateTimeFormat);
+
+ return wrapper;
+}
+
+// Returns SimpleDateFormat.
+static icu::DateFormat* CreateDateTimeFormat(
+ v8::Handle<v8::String> locale, v8::Handle<v8::Object> settings) {
+ v8::HandleScope handle_scope;
+
+ v8::String::AsciiValue ascii_locale(locale);
+ icu::Locale icu_locale(*ascii_locale);
+
+ // Make formatter from skeleton.
+ icu::SimpleDateFormat* date_format = NULL;
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString skeleton;
+ if (I18NUtils::ExtractStringSetting(settings, "skeleton", &skeleton)) {
+ v8::Local<icu::DateTimePatternGenerator> generator(
+ icu::DateTimePatternGenerator::createInstance(icu_locale, status));
+ icu::UnicodeString pattern =
+ generator->getBestPattern(skeleton, status);
+
+ date_format = new icu::SimpleDateFormat(pattern, icu_locale, status);
+ if (U_SUCCESS(status)) {
+ return date_format;
+ } else {
+ delete date_format;
+ }
+ }
+
+ // Extract date style and time style from settings.
+ icu::UnicodeString date_style;
+ icu::DateFormat::EStyle icu_date_style = icu::DateFormat::kNone;
+ if (I18NUtils::ExtractStringSetting(settings, "dateStyle", &date_style)) {
+ icu_date_style = GetDateTimeStyle(date_style);
+ }
+
+ icu::UnicodeString time_style;
+ icu::DateFormat::EStyle icu_time_style = icu::DateFormat::kNone;
+ if (I18NUtils::ExtractStringSetting(settings, "timeStyle", &time_style)) {
+ icu_time_style = GetDateTimeStyle(time_style);
+ }
+
+ // Try all combinations of date/time styles.
+ if (icu_date_style == icu::DateFormat::kNone &&
+ icu_time_style == icu::DateFormat::kNone) {
+ // Return the default: short date, short time.
+ return icu::DateFormat::createDateTimeInstance(
+ icu::DateFormat::kShort, icu::DateFormat::kShort, icu_locale);
+ } else if (icu_date_style != icu::DateFormat::kNone &&
+ icu_time_style != icu::DateFormat::kNone) {
+ return icu::DateFormat::createDateTimeInstance(
+ icu_date_style, icu_time_style, icu_locale);
+ } else if (icu_date_style != icu::DateFormat::kNone) {
+ return icu::DateFormat::createDateInstance(icu_date_style, icu_locale);
+ } else {
+ // icu_time_style != icu::DateFormat::kNone
+ return icu::DateFormat::createTimeInstance(icu_time_style, icu_locale);
+ }
+}
+
+// Creates a v8::Array of narrow, abbrev or wide symbols.
+static v8::Handle<v8::Value> GetSymbols(const v8::Arguments& args,
+ const icu::UnicodeString* narrow,
+ int32_t narrow_count,
+ const icu::UnicodeString* abbrev,
+ int32_t abbrev_count,
+ const icu::UnicodeString* wide,
+ int32_t wide_count) {
+ v8::HandleScope handle_scope;
+
+ // Make wide width default.
+ const icu::UnicodeString* result = wide;
+ int32_t count = wide_count;
+
+ if (args.Length() == 1 && args[0]->IsString()) {
+ v8::String::AsciiValue ascii_value(args[0]);
+ if (strcmp(*ascii_value, "abbreviated") == 0) {
+ result = abbrev;
+ count = abbrev_count;
+ } else if (strcmp(*ascii_value, "narrow") == 0) {
+ result = narrow;
+ count = narrow_count;
+ }
+ }
+
+ v8::Handle<v8::Array> symbols = v8::Array::New();
+ for (int32_t i = 0; i < count; ++i) {
+ symbols->Set(i, v8::String::New(
+ reinterpret_cast<const uint16_t*>(result[i].getBuffer()),
+ result[i].length()));
+ }
+
+ return handle_scope.Close(symbols);
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+ // Returns undefined, and schedules an exception to be thrown.
+ return v8::ThrowException(v8::Exception::Error(
+ v8::String::New("DateTimeFormat method called on an object "
+ "that is not a DateTimeFormat.")));
+}
+
+// Returns icu date/time style.
+static icu::DateFormat::EStyle GetDateTimeStyle(
+ const icu::UnicodeString& type) {
+ if (type == UNICODE_STRING_SIMPLE("medium")) {
+ return icu::DateFormat::kMedium;
+ } else if (type == UNICODE_STRING_SIMPLE("long")) {
+ return icu::DateFormat::kLong;
+ } else if (type == UNICODE_STRING_SIMPLE("full")) {
+ return icu::DateFormat::kFull;
+ }
+
+ return icu::DateFormat::kShort;
+}
+
+} } // namespace v8::internal
diff --git a/src/extensions/experimental/datetime-format.h b/src/extensions/experimental/datetime-format.h
new file mode 100644
index 0000000..a6a228c
--- /dev/null
+++ b/src/extensions/experimental/datetime-format.h
@@ -0,0 +1,83 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class SimpleDateFormat;
+}
+
+namespace v8 {
+namespace internal {
+
+class DateTimeFormat {
+ public:
+ static v8::Handle<v8::Value> JSDateTimeFormat(const v8::Arguments& args);
+
+ // Helper methods for various bindings.
+
+ // Unpacks date format object from corresponding JavaScript object.
+ static icu::SimpleDateFormat* UnpackDateTimeFormat(
+ v8::Handle<v8::Object> obj);
+
+ // Release memory we allocated for the DateFormat once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteDateTimeFormat(v8::Persistent<v8::Value> object,
+ void* param);
+
+ // Formats date and returns corresponding string.
+ static v8::Handle<v8::Value> Format(const v8::Arguments& args);
+
+ // All date time symbol methods below return stand-alone names in
+ // either narrow, abbreviated or wide width.
+
+ // Get list of months.
+ static v8::Handle<v8::Value> GetMonths(const v8::Arguments& args);
+
+ // Get list of weekdays.
+ static v8::Handle<v8::Value> GetWeekdays(const v8::Arguments& args);
+
+ // Get list of eras.
+ static v8::Handle<v8::Value> GetEras(const v8::Arguments& args);
+
+ // Get list of day periods.
+ static v8::Handle<v8::Value> GetAmPm(const v8::Arguments& args);
+
+ private:
+ DateTimeFormat();
+
+ static v8::Persistent<v8::FunctionTemplate> datetime_format_template_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
diff --git a/src/extensions/experimental/experimental.gyp b/src/extensions/experimental/experimental.gyp
index 2a7775e..24fb683 100644
--- a/src/extensions/experimental/experimental.gyp
+++ b/src/extensions/experimental/experimental.gyp
@@ -41,32 +41,44 @@
'break-iterator.h',
'collator.cc',
'collator.h',
+ 'datetime-format.cc',
+ 'datetime-format.h',
'i18n-extension.cc',
'i18n-extension.h',
'i18n-locale.cc',
'i18n-locale.h',
+ 'i18n-natives.h',
'i18n-utils.cc',
'i18n-utils.h',
'language-matcher.cc',
'language-matcher.h',
+ 'number-format.cc',
+ 'number-format.h',
'<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
],
'include_dirs': [
'<(icu_src_dir)/public/common',
- '../..',
+ # v8/ is root for all includes.
+ '../../..'
],
'dependencies': [
'<(icu_src_dir)/icu.gyp:*',
'js2c_i18n#host',
'../../../tools/gyp/v8.gyp:v8',
],
+ 'direct_dependent_settings': {
+ # Adds -Iv8 for embedders.
+ 'include_dirs': [
+ '../../..'
+ ],
+ },
},
{
'target_name': 'js2c_i18n',
'type': 'none',
'toolsets': ['host'],
'variables': {
- 'library_files': [
+ 'js_files': [
'i18n.js'
],
},
@@ -74,18 +86,17 @@
{
'action_name': 'js2c_i18n',
'inputs': [
- '../../../tools/js2c.py',
- '<@(library_files)',
+ 'i18n-js2c.py',
+ '<@(js_files)',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
],
'action': [
'python',
- '../../../tools/js2c.py',
+ 'i18n-js2c.py',
'<@(_outputs)',
- 'I18N',
- '<@(library_files)'
+ '<@(js_files)'
],
},
],
diff --git a/src/extensions/experimental/i18n-extension.cc b/src/extensions/experimental/i18n-extension.cc
index 88c609e..c5afcf0 100644
--- a/src/extensions/experimental/i18n-extension.cc
+++ b/src/extensions/experimental/i18n-extension.cc
@@ -25,30 +25,22 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "i18n-extension.h"
+#include "src/extensions/experimental/i18n-extension.h"
-#include "break-iterator.h"
-#include "collator.h"
-#include "i18n-locale.h"
-#include "natives.h"
+#include "src/extensions/experimental/break-iterator.h"
+#include "src/extensions/experimental/collator.h"
+#include "src/extensions/experimental/datetime-format.h"
+#include "src/extensions/experimental/i18n-locale.h"
+#include "src/extensions/experimental/i18n-natives.h"
+#include "src/extensions/experimental/number-format.h"
namespace v8 {
namespace internal {
I18NExtension* I18NExtension::extension_ = NULL;
-// Returns a pointer to static string containing the actual
-// JavaScript code generated from i18n.js file.
-static const char* GetScriptSource() {
- int index = NativesCollection<I18N>::GetIndex("i18n");
- Vector<const char> script_data =
- NativesCollection<I18N>::GetScriptSource(index);
-
- return script_data.start();
-}
-
I18NExtension::I18NExtension()
- : v8::Extension("v8/i18n", GetScriptSource()) {
+ : v8::Extension("v8/i18n", I18Natives::GetScriptSource()) {
}
v8::Handle<v8::FunctionTemplate> I18NExtension::GetNativeFunction(
@@ -59,6 +51,10 @@
return v8::FunctionTemplate::New(BreakIterator::JSBreakIterator);
} else if (name->Equals(v8::String::New("NativeJSCollator"))) {
return v8::FunctionTemplate::New(Collator::JSCollator);
+ } else if (name->Equals(v8::String::New("NativeJSDateTimeFormat"))) {
+ return v8::FunctionTemplate::New(DateTimeFormat::JSDateTimeFormat);
+ } else if (name->Equals(v8::String::New("NativeJSNumberFormat"))) {
+ return v8::FunctionTemplate::New(NumberFormat::JSNumberFormat);
}
return v8::Handle<v8::FunctionTemplate>();
diff --git a/src/extensions/experimental/i18n-extension.h b/src/extensions/experimental/i18n-extension.h
index b4dc7c3..5401f25 100644
--- a/src/extensions/experimental/i18n-extension.h
+++ b/src/extensions/experimental/i18n-extension.h
@@ -28,7 +28,7 @@
#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
-#include <v8.h>
+#include "include/v8.h"
namespace v8 {
namespace internal {
diff --git a/src/extensions/experimental/i18n-js2c.py b/src/extensions/experimental/i18n-js2c.py
new file mode 100644
index 0000000..9c3128b
--- /dev/null
+++ b/src/extensions/experimental/i18n-js2c.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This is a utility for converting I18N JavaScript source code into C-style
+# char arrays. It is used for embedding JavaScript code in the V8
+# library.
+# This is a pared-down copy of v8/tools/js2c.py that avoids the use of
+# v8/src/natives.h and produces a different cc template.
+
+import re, sys
+
+
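+# ToCArray("ab"), for example, returns "97, 98, 0"; the trailing 0 makes the
+# emitted char array NUL-terminated.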
+def ToCArray(lines):
+ result = []
+  for ch in lines:
+    value = ord(ch)
+ assert value < 128
+ result.append(str(value))
+ result.append("0")
+ return ", ".join(result)
+
+
+def RemoveCommentsAndTrailingWhitespace(lines):
+ lines = re.sub(r'//.*\n', '\n', lines) # end-of-line comments
+ lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
+ lines = re.sub(r'\s+\n+', '\n', lines) # trailing whitespace
+ return lines
+
+
+def ReadFile(filename):
+  with open(filename, "rt") as f:
+    return f.read()
+
+
+EVAL_PATTERN = re.compile(r'\beval\s*\(')
+WITH_PATTERN = re.compile(r'\bwith\s*\(')
+
+
+def Validate(lines, filename):
+  lines = RemoveCommentsAndTrailingWhitespace(lines)
+  # Because of the simplified context setup, eval and with are not
+  # allowed in the natives files.
+  if EVAL_PATTERN.search(lines):
+    raise Exception("Eval disallowed in natives: %s" % filename)
+  if WITH_PATTERN.search(lines):
+    raise Exception("With statements disallowed in natives: %s" % filename)
+
+
+HEADER_TEMPLATE = """\
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+// This file was generated from .js source files by gyp. If you
+// want to make changes to this file, you should either change the
+// JavaScript source files or the i18n-js2c.py script.
+
+#include "src/extensions/experimental/i18n-natives.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+const char* I18Natives::GetScriptSource() {
+ // JavaScript source gets injected here.
+ static const char i18n_source[] = {%s};
+
+ return i18n_source;
+}
+
+} // namespace internal
+} // namespace v8
+"""
+
+
+def JS2C(source, target):
+ filename = str(source)
+
+ lines = ReadFile(filename)
+ Validate(lines, filename)
+ data = ToCArray(lines)
+
+  # Emit the result; the context manager closes the file even on error.
+  with open(target, "w") as output:
+    output.write(HEADER_TEMPLATE % data)
+
+
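+# Usage: i18n-js2c.py TARGET_CC_FILE SOURCE_JS_FILE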
+def main():
+ target = sys.argv[1]
+ source = sys.argv[2]
+ JS2C(source, target)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/src/extensions/experimental/i18n-locale.cc b/src/extensions/experimental/i18n-locale.cc
index cf17812..46a5f87 100644
--- a/src/extensions/experimental/i18n-locale.cc
+++ b/src/extensions/experimental/i18n-locale.cc
@@ -25,13 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "i18n-locale.h"
+#include "src/extensions/experimental/i18n-locale.h"
-#include "i18n-utils.h"
-#include "language-matcher.h"
+#include "src/extensions/experimental/i18n-utils.h"
+#include "src/extensions/experimental/language-matcher.h"
#include "unicode/locid.h"
#include "unicode/uloc.h"
-#include "utils.h"
namespace v8 {
namespace internal {
diff --git a/src/extensions/experimental/i18n-locale.h b/src/extensions/experimental/i18n-locale.h
index 053886b..607818c 100644
--- a/src/extensions/experimental/i18n-locale.h
+++ b/src/extensions/experimental/i18n-locale.h
@@ -28,7 +28,7 @@
#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
#define V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
-#include <v8.h>
+#include "include/v8.h"
namespace v8 {
namespace internal {
diff --git a/src/extensions/experimental/i18n-natives.h b/src/extensions/experimental/i18n-natives.h
new file mode 100644
index 0000000..37362d0
--- /dev/null
+++ b/src/extensions/experimental/i18n-natives.h
@@ -0,0 +1,43 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
+
+namespace v8 {
+namespace internal {
+
+class I18Natives {
+ public:
+ // Gets script source from generated file.
+ // Source is statically allocated string.
+ static const char* GetScriptSource();
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
diff --git a/src/extensions/experimental/i18n-utils.cc b/src/extensions/experimental/i18n-utils.cc
index a82c8eb..dc2be1a 100644
--- a/src/extensions/experimental/i18n-utils.cc
+++ b/src/extensions/experimental/i18n-utils.cc
@@ -25,10 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "i18n-utils.h"
+#include "src/extensions/experimental/i18n-utils.h"
#include <string.h>
+#include "unicode/unistr.h"
+
namespace v8 {
namespace internal {
@@ -40,4 +42,46 @@
dest[length - 1] = '\0';
}
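+
+// Example (illustrative): with settings = {'dateStyle': 'long'}, calling
+// ExtractStringSetting(settings, "dateStyle", &result) copies "long" into
+// |result| and returns true.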
+// static
+bool I18NUtils::ExtractStringSetting(const v8::Handle<v8::Object>& settings,
+ const char* setting,
+ icu::UnicodeString* result) {
+ if (!setting || !result) return false;
+
+ v8::HandleScope handle_scope;
+ v8::TryCatch try_catch;
+ v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
+ if (try_catch.HasCaught()) {
+ return false;
+ }
+ // No need to check if |value| is empty because it's taken care of
+ // by TryCatch above.
+ if (!value->IsUndefined() && !value->IsNull() && value->IsString()) {
+ v8::String::Utf8Value utf8_value(value);
+ if (*utf8_value == NULL) return false;
+ result->setTo(icu::UnicodeString::fromUTF8(*utf8_value));
+ return true;
+ }
+ return false;
+}
+
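+// Example (illustrative): AsciiToUChar("USD", 4, target, 4) writes the code
+// units for "USD" followed by a terminating 0x0 into |target|.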
+// static
+void I18NUtils::AsciiToUChar(const char* source,
+ int32_t source_length,
+ UChar* target,
+ int32_t target_length) {
+ int32_t length =
+ source_length < target_length ? source_length : target_length;
+
+ if (length <= 0) {
+ return;
+ }
+
+ for (int32_t i = 0; i < length - 1; ++i) {
+ target[i] = static_cast<UChar>(source[i]);
+ }
+
+ target[length - 1] = 0x0u;
+}
+
} } // namespace v8::internal
diff --git a/src/extensions/experimental/i18n-utils.h b/src/extensions/experimental/i18n-utils.h
index 7702708..7c31528 100644
--- a/src/extensions/experimental/i18n-utils.h
+++ b/src/extensions/experimental/i18n-utils.h
@@ -28,6 +28,14 @@
#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
#define V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class UnicodeString;
+}
+
namespace v8 {
namespace internal {
@@ -37,9 +45,21 @@
// (length - 1) bytes.
// We can't use snprintf since it's not supported on all relevant platforms.
// We can't use OS::SNPrintF, it's only for internal code.
- // TODO(cira): Find a way to use OS::SNPrintF instead.
static void StrNCopy(char* dest, int length, const char* src);
+  // Extracts the string setting named |setting| from |settings| and copies
+  // it into |result|. Returns true if the setting is present, false
+  // otherwise.
+ static bool ExtractStringSetting(const v8::Handle<v8::Object>& settings,
+ const char* setting,
+ icu::UnicodeString* result);
+
+  // Converts an ASCII array into a UChar array.
+  // The target is always \0-terminated.
+ static void AsciiToUChar(const char* source,
+ int32_t source_length,
+ UChar* target,
+ int32_t target_length);
+
private:
I18NUtils() {}
};
diff --git a/src/extensions/experimental/i18n.js b/src/extensions/experimental/i18n.js
index 0fa7ae7..56bcf9e 100644
--- a/src/extensions/experimental/i18n.js
+++ b/src/extensions/experimental/i18n.js
@@ -45,11 +45,11 @@
}
var properties = NativeJSLocale(
- v8Locale.createSettingsOrDefault_(settings, {'localeID': 'root'}));
+ v8Locale.__createSettingsOrDefault(settings, {'localeID': 'root'}));
// Keep the resolved ICU locale ID around to avoid resolving localeID to
// ICU locale ID every time BreakIterator, Collator and so forth are called.
- this.__icuLocaleID__ = properties.icuLocaleID;
+ this.__icuLocaleID = properties.icuLocaleID;
this.options = {'localeID': properties.localeID,
'regionID': properties.regionID};
};
@@ -61,7 +61,7 @@
*/
v8Locale.prototype.derive = function(settings) {
return new v8Locale(
- v8Locale.createSettingsOrDefault_(settings, this.options));
+ v8Locale.__createSettingsOrDefault(settings, this.options));
};
/**
@@ -74,14 +74,15 @@
* - word
* - sentence
* - line
+ * @private
* @constructor
*/
v8Locale.v8BreakIterator = function(locale, type) {
native function NativeJSBreakIterator();
- locale = v8Locale.createLocaleOrDefault_(locale);
+ locale = v8Locale.__createLocaleOrDefault(locale);
// BCP47 ID would work in this case, but we use ICU locale for consistency.
- var iterator = NativeJSBreakIterator(locale.__icuLocaleID__, type);
+ var iterator = NativeJSBreakIterator(locale.__icuLocaleID, type);
iterator.type = type;
return iterator;
};
@@ -117,27 +118,218 @@
* - ignoreCase
* - ignoreAccents
* - numeric
+ * @private
* @constructor
*/
v8Locale.Collator = function(locale, settings) {
native function NativeJSCollator();
- locale = v8Locale.createLocaleOrDefault_(locale);
+ locale = v8Locale.__createLocaleOrDefault(locale);
var collator = NativeJSCollator(
- locale.__icuLocaleID__, v8Locale.createSettingsOrDefault_(settings, {}));
+ locale.__icuLocaleID, v8Locale.__createSettingsOrDefault(settings, {}));
return collator;
};
/**
* Creates new Collator based on current locale.
* @param {Object} - collation flags. See constructor.
- * @returns {Object} - new v8BreakIterator object.
+ * @returns {Object} - new Collator object.
*/
v8Locale.prototype.createCollator = function(settings) {
return new v8Locale.Collator(this, settings);
};
/**
+ * DateTimeFormat class implements locale-aware date and time formatting.
+ * Constructor is not part of public API.
+ * @param {Object} locale - locale object to pass to formatter.
+ * @param {Object} settings - formatting flags:
+ * - skeleton
+ * - dateStyle
+ * - timeStyle
+ * @private
+ * @constructor
+ */
+v8Locale.__DateTimeFormat = function(locale, settings) {
+ native function NativeJSDateTimeFormat();
+
+ settings = v8Locale.__createSettingsOrDefault(settings, {});
+
+ var cleanSettings = {};
+ if (settings.hasOwnProperty('skeleton')) {
+ cleanSettings['skeleton'] = settings['skeleton'];
+  } else {
+ if (settings.hasOwnProperty('dateStyle')) {
+ var ds = settings['dateStyle'];
+ if (!/^(short|medium|long|full)$/.test(ds)) ds = 'short';
+ cleanSettings['dateStyle'] = ds;
+ } else if (settings.hasOwnProperty('dateType')) {
+ // Obsolete. New spec requires dateStyle, but we'll keep this around
+ // for current users.
+ // TODO(cira): Remove when all internal users switch to dateStyle.
+ var dt = settings['dateType'];
+ if (!/^(short|medium|long|full)$/.test(dt)) dt = 'short';
+ cleanSettings['dateStyle'] = dt;
+ }
+
+ if (settings.hasOwnProperty('timeStyle')) {
+ var ts = settings['timeStyle'];
+ if (!/^(short|medium|long|full)$/.test(ts)) ts = 'short';
+ cleanSettings['timeStyle'] = ts;
+ } else if (settings.hasOwnProperty('timeType')) {
+ // TODO(cira): Remove when all internal users switch to timeStyle.
+ var tt = settings['timeType'];
+ if (!/^(short|medium|long|full)$/.test(tt)) tt = 'short';
+ cleanSettings['timeStyle'] = tt;
+ }
+ }
+
+ // Default is to show short date and time.
+ if (!cleanSettings.hasOwnProperty('skeleton') &&
+ !cleanSettings.hasOwnProperty('dateStyle') &&
+ !cleanSettings.hasOwnProperty('timeStyle')) {
+ cleanSettings = {'dateStyle': 'short',
+ 'timeStyle': 'short'};
+ }
+
+ locale = v8Locale.__createLocaleOrDefault(locale);
+ var formatter = NativeJSDateTimeFormat(locale.__icuLocaleID, cleanSettings);
+
+  // NativeJSDateTimeFormat creates formatter.options for us; we just need
+  // to append the actual settings to it.
+  for (var key in cleanSettings) {
+ formatter.options[key] = cleanSettings[key];
+ }
+
+ /**
+ * Clones existing date time format with possible overrides for some
+ * of the options.
+ * @param {!Object} overrideSettings - overrides for current format settings.
+ * @returns {Object} - new DateTimeFormat object.
+ * @public
+ */
+ formatter.derive = function(overrideSettings) {
+    // To remove a setting, the user can specify undefined as its value.
+    // We'll remove it from the map in that case.
+ for (var prop in overrideSettings) {
+ if (settings.hasOwnProperty(prop) && !overrideSettings[prop]) {
+ delete settings[prop];
+ }
+ }
+ return new v8Locale.__DateTimeFormat(
+ locale, v8Locale.__createSettingsOrDefault(overrideSettings, settings));
+ };
+
+ return formatter;
+};
+
+/**
+ * Creates new DateTimeFormat based on current locale.
+ * @param {Object} - formatting flags. See constructor.
+ * @returns {Object} - new DateTimeFormat object.
+ */
+v8Locale.prototype.createDateTimeFormat = function(settings) {
+ return new v8Locale.__DateTimeFormat(this, settings);
+};
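+
+// Illustrative usage (assumes the i18n extension is loaded and that the
+// native side registered format() on the formatter, as it does for
+// NumberFormat):
+//   var locale = new v8Locale({'localeID': 'en-US'});
+//   var df = locale.createDateTimeFormat({'dateStyle': 'long'});
+//   var text = df.format(new Date());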
+
+/**
+ * NumberFormat class implements locale-aware number formatting.
+ * Constructor is not part of public API.
+ * @param {Object} locale - locale object to pass to formatter.
+ * @param {Object} settings - formatting flags:
+ * - skeleton
+ * - pattern
+ * - style - decimal, currency, percent or scientific
+ * - currencyCode - ISO 4217 3-letter currency code
+ * @private
+ * @constructor
+ */
+v8Locale.__NumberFormat = function(locale, settings) {
+ native function NativeJSNumberFormat();
+
+ settings = v8Locale.__createSettingsOrDefault(settings, {});
+
+ var cleanSettings = {};
+ if (settings.hasOwnProperty('skeleton')) {
+    // Assign skeleton to cleanSettings and collapse any invalid currency
+    // pattern if present - e.g. '\u00a4\u00a4x\u00a4' becomes '\u00a4'.
+ cleanSettings['skeleton'] =
+ settings['skeleton'].replace(/\u00a4+[^\u00a4]+\u00a4+/g, '\u00a4');
+ } else if (settings.hasOwnProperty('pattern')) {
+ cleanSettings['pattern'] = settings['pattern'];
+ } else if (settings.hasOwnProperty('style')) {
+ var style = settings['style'];
+ if (!/^(decimal|currency|percent|scientific)$/.test(style)) {
+ style = 'decimal';
+ }
+ cleanSettings['style'] = style;
+ }
+
+ // Default is to show decimal style.
+ if (!cleanSettings.hasOwnProperty('skeleton') &&
+ !cleanSettings.hasOwnProperty('pattern') &&
+ !cleanSettings.hasOwnProperty('style')) {
+ cleanSettings = {'style': 'decimal'};
+ }
+
+ // Add currency code if available and valid (3-letter ASCII code).
+ if (settings.hasOwnProperty('currencyCode') &&
+ /^[a-zA-Z]{3}$/.test(settings['currencyCode'])) {
+ cleanSettings['currencyCode'] = settings['currencyCode'].toUpperCase();
+ }
+
+ locale = v8Locale.__createLocaleOrDefault(locale);
+ // Pass in region ID for proper currency detection. Use ZZ if region is empty.
+ var region = locale.options.regionID !== '' ? locale.options.regionID : 'ZZ';
+ var formatter = NativeJSNumberFormat(
+ locale.__icuLocaleID, 'und_' + region, cleanSettings);
+
+ // ICU doesn't always uppercase the currency code.
+ if (formatter.options.hasOwnProperty('currencyCode')) {
+ formatter.options['currencyCode'] =
+ formatter.options['currencyCode'].toUpperCase();
+ }
+
+  for (var key in cleanSettings) {
+    // Don't overwrite keys that are already in.
+ if (formatter.options.hasOwnProperty(key)) continue;
+
+ formatter.options[key] = cleanSettings[key];
+ }
+
+ /**
+ * Clones existing number format with possible overrides for some
+ * of the options.
+ * @param {!Object} overrideSettings - overrides for current format settings.
+ * @returns {Object} - new or cached NumberFormat object.
+ * @public
+ */
+ formatter.derive = function(overrideSettings) {
+    // To remove a setting, the user can specify undefined as its value.
+    // We'll remove it from the map in that case.
+ for (var prop in overrideSettings) {
+ if (settings.hasOwnProperty(prop) && !overrideSettings[prop]) {
+ delete settings[prop];
+ }
+ }
+ return new v8Locale.__NumberFormat(
+ locale, v8Locale.__createSettingsOrDefault(overrideSettings, settings));
+ };
+
+ return formatter;
+};
+
+/**
+ * Creates new NumberFormat based on current locale.
+ * @param {Object} - formatting flags. See constructor.
+ * @returns {Object} - new or cached NumberFormat object.
+ */
+v8Locale.prototype.createNumberFormat = function(settings) {
+ return new v8Locale.__NumberFormat(this, settings);
+};
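+
+// Illustrative usage (assumes the i18n extension is loaded):
+//   var nf = (new v8Locale()).createNumberFormat(
+//       {'style': 'currency', 'currencyCode': 'USD'});
+//   var text = nf.format(1234.5);  // e.g. '$1,234.50' for en-US.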
+
+/**
* Merges user settings and defaults.
* Settings that are not of object type are rejected.
* Actual property values are not validated, but whitespace is trimmed if they
@@ -145,8 +337,9 @@
* @param {!Object} settings - user provided settings.
* @param {!Object} defaults - default values for this type of settings.
* @returns {Object} - valid settings object.
+ * @private
*/
-v8Locale.createSettingsOrDefault_ = function(settings, defaults) {
+v8Locale.__createSettingsOrDefault = function(settings, defaults) {
if (!settings || typeof(settings) !== 'object' ) {
return defaults;
}
@@ -155,11 +348,17 @@
settings[key] = defaults[key];
}
}
- // Clean up values, like trimming whitespace.
+ // Clean up settings.
for (var key in settings) {
+ // Trim whitespace.
if (typeof(settings[key]) === "string") {
settings[key] = settings[key].trim();
}
+    // Remove all properties that are set to undefined/null. This allows
+    // the derive method to remove a setting we don't need anymore.
+ if (!settings[key]) {
+ delete settings[key];
+ }
}
return settings;
@@ -170,8 +369,9 @@
* we create default locale and return it.
* @param {!Object} locale - user provided locale.
* @returns {Object} - v8Locale object.
+ * @private
*/
-v8Locale.createLocaleOrDefault_ = function(locale) {
+v8Locale.__createLocaleOrDefault = function(locale) {
if (!locale || !(locale instanceof v8Locale)) {
return new v8Locale();
} else {
diff --git a/src/extensions/experimental/language-matcher.cc b/src/extensions/experimental/language-matcher.cc
index 385ebff..127e571 100644
--- a/src/extensions/experimental/language-matcher.cc
+++ b/src/extensions/experimental/language-matcher.cc
@@ -28,13 +28,14 @@
// TODO(cira): Remove LanguageMatcher from v8 when ICU implements
// language matching API.
-#include "language-matcher.h"
+#include "src/extensions/experimental/language-matcher.h"
-#include "i18n-utils.h"
+#include <string.h>
+
+#include "src/extensions/experimental/i18n-utils.h"
#include "unicode/datefmt.h" // For getAvailableLocales
#include "unicode/locid.h"
#include "unicode/uloc.h"
-#include "utils.h"
namespace v8 {
namespace internal {
diff --git a/src/extensions/experimental/language-matcher.h b/src/extensions/experimental/language-matcher.h
index b5336a2..dd29304 100644
--- a/src/extensions/experimental/language-matcher.h
+++ b/src/extensions/experimental/language-matcher.h
@@ -28,7 +28,7 @@
#ifndef V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
#define V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
-#include <v8.h>
+#include "include/v8.h"
#include "unicode/uloc.h"
diff --git a/src/extensions/experimental/number-format.cc b/src/extensions/experimental/number-format.cc
new file mode 100644
index 0000000..51e0b95
--- /dev/null
+++ b/src/extensions/experimental/number-format.cc
@@ -0,0 +1,356 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/number-format.h"
+
+#include <string.h>
+
+#include "src/extensions/experimental/i18n-utils.h"
+#include "unicode/dcfmtsym.h"
+#include "unicode/decimfmt.h"
+#include "unicode/locid.h"
+#include "unicode/numfmt.h"
+#include "unicode/uchar.h"
+#include "unicode/ucurr.h"
+
+namespace v8 {
+namespace internal {
+
+const int NumberFormat::kCurrencyCodeLength = 4;
+
+v8::Persistent<v8::FunctionTemplate> NumberFormat::number_format_template_;
+
+static icu::DecimalFormat* CreateNumberFormat(v8::Handle<v8::String>,
+ v8::Handle<v8::String>,
+ v8::Handle<v8::Object>);
+static icu::DecimalFormat* CreateFormatterFromSkeleton(
+ const icu::Locale&, const icu::UnicodeString&, UErrorCode*);
+static icu::DecimalFormatSymbols* GetFormatSymbols(const icu::Locale&);
+static bool GetCurrencyCode(const icu::Locale&,
+ const char* const,
+ v8::Handle<v8::Object>,
+ UChar*);
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
+
+icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
+ v8::Handle<v8::Object> obj) {
+ if (number_format_template_->HasInstance(obj)) {
+ return static_cast<icu::DecimalFormat*>(
+ obj->GetPointerFromInternalField(0));
+ }
+
+ return NULL;
+}
+
+void NumberFormat::DeleteNumberFormat(v8::Persistent<v8::Value> object,
+ void* param) {
+ v8::Persistent<v8::Object> persistent_object =
+ v8::Persistent<v8::Object>::Cast(object);
+
+ // First delete the hidden C++ object.
+ // Unpacking should never return NULL here. That would only happen if
+ // this method is used as the weak callback for persistent handles not
+ // pointing to a number formatter.
+ delete UnpackNumberFormat(persistent_object);
+
+ // Then dispose of the persistent handle to JS object.
+ persistent_object.Dispose();
+}
+
+v8::Handle<v8::Value> NumberFormat::Format(const v8::Arguments& args) {
+ v8::HandleScope handle_scope;
+
+ if (args.Length() != 1 || !args[0]->IsNumber()) {
+ // Just return NaN on invalid input.
+ return v8::String::New("NaN");
+ }
+
+ icu::DecimalFormat* number_format = UnpackNumberFormat(args.Holder());
+ if (!number_format) {
+ return ThrowUnexpectedObjectError();
+ }
+
+  // ICU handles an actual NaN value properly and returns the NaN string.
+ icu::UnicodeString result;
+ number_format->format(args[0]->NumberValue(), result);
+
+ return v8::String::New(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
+}
+
+v8::Handle<v8::Value> NumberFormat::JSNumberFormat(const v8::Arguments& args) {
+ v8::HandleScope handle_scope;
+
+ // Expect locale id, region id and settings.
+ if (args.Length() != 3 ||
+ !args[0]->IsString() || !args[1]->IsString() || !args[2]->IsObject()) {
+ return v8::ThrowException(v8::Exception::SyntaxError(
+ v8::String::New("Locale, region and number settings are required.")));
+ }
+
+ icu::DecimalFormat* number_format = CreateNumberFormat(
+ args[0]->ToString(), args[1]->ToString(), args[2]->ToObject());
+
+ if (number_format_template_.IsEmpty()) {
+ v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
+
+ raw_template->SetClassName(v8::String::New("v8Locale.NumberFormat"));
+
+ // Define internal field count on instance template.
+ v8::Local<v8::ObjectTemplate> object_template =
+ raw_template->InstanceTemplate();
+
+ // Set aside internal field for icu number formatter.
+ object_template->SetInternalFieldCount(1);
+
+ // Define all of the prototype methods on prototype template.
+ v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
+ proto->Set(v8::String::New("format"),
+ v8::FunctionTemplate::New(Format));
+
+ number_format_template_ =
+ v8::Persistent<v8::FunctionTemplate>::New(raw_template);
+ }
+
+ // Create an empty object wrapper.
+ v8::Local<v8::Object> local_object =
+ number_format_template_->GetFunction()->NewInstance();
+ v8::Persistent<v8::Object> wrapper =
+ v8::Persistent<v8::Object>::New(local_object);
+
+ // Set number formatter as internal field of the resulting JS object.
+ wrapper->SetPointerInInternalField(0, number_format);
+
+ // Create options key.
+ v8::Local<v8::Object> options = v8::Object::New();
+
+ // Show what ICU decided to use for easier problem tracking.
+  // Keep it as a v8-specific extension.
+ icu::UnicodeString pattern;
+ number_format->toPattern(pattern);
+ options->Set(v8::String::New("v8ResolvedPattern"),
+ v8::String::New(reinterpret_cast<const uint16_t*>(
+ pattern.getBuffer()), pattern.length()));
+
+ // Set resolved currency code in options.currency if not empty.
+ icu::UnicodeString currency(number_format->getCurrency());
+ if (!currency.isEmpty()) {
+ options->Set(v8::String::New("currencyCode"),
+ v8::String::New(reinterpret_cast<const uint16_t*>(
+ currency.getBuffer()), currency.length()));
+ }
+
+ wrapper->Set(v8::String::New("options"), options);
+
+  // Make object handle weak so we can delete the formatter once GC kicks in.
+ wrapper.MakeWeak(NULL, DeleteNumberFormat);
+
+ return wrapper;
+}
+
+// Returns DecimalFormat.
+static icu::DecimalFormat* CreateNumberFormat(v8::Handle<v8::String> locale,
+ v8::Handle<v8::String> region,
+ v8::Handle<v8::Object> settings) {
+ v8::HandleScope handle_scope;
+
+ v8::String::AsciiValue ascii_locale(locale);
+ icu::Locale icu_locale(*ascii_locale);
+
+ // Make formatter from skeleton.
+ icu::DecimalFormat* number_format = NULL;
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString setting;
+
+ if (I18NUtils::ExtractStringSetting(settings, "skeleton", &setting)) {
+ // TODO(cira): Use ICU skeleton once
+ // http://bugs.icu-project.org/trac/ticket/8610 is resolved.
+ number_format = CreateFormatterFromSkeleton(icu_locale, setting, &status);
+ } else if (I18NUtils::ExtractStringSetting(settings, "pattern", &setting)) {
+ number_format =
+ new icu::DecimalFormat(setting, GetFormatSymbols(icu_locale), status);
+ } else if (I18NUtils::ExtractStringSetting(settings, "style", &setting)) {
+ if (setting == UNICODE_STRING_SIMPLE("currency")) {
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createCurrencyInstance(icu_locale, status));
+ } else if (setting == UNICODE_STRING_SIMPLE("percent")) {
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createPercentInstance(icu_locale, status));
+ } else if (setting == UNICODE_STRING_SIMPLE("scientific")) {
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createScientificInstance(icu_locale, status));
+ } else {
+ // Make it decimal in any other case.
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createInstance(icu_locale, status));
+ }
+ }
+
+ if (U_FAILURE(status)) {
+ delete number_format;
+ status = U_ZERO_ERROR;
+ number_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createInstance(icu_locale, status));
+ }
+
+  // Attach the appropriate currency code to the formatter. This affects
+  // currency formatters only. The region is a full language identifier
+  // of the form 'und_' + region id.
+ v8::String::AsciiValue ascii_region(region);
+
+ UChar currency_code[NumberFormat::kCurrencyCodeLength];
+ if (GetCurrencyCode(icu_locale, *ascii_region, settings, currency_code)) {
+ number_format->setCurrency(currency_code, status);
+ }
+
+ return number_format;
+}
+
+// Generates ICU number format pattern from given skeleton.
+static icu::DecimalFormat* CreateFormatterFromSkeleton(
+ const icu::Locale& icu_locale,
+ const icu::UnicodeString& skeleton,
+ UErrorCode* status) {
+ icu::DecimalFormat skeleton_format(
+ skeleton, GetFormatSymbols(icu_locale), *status);
+
+  // Find out if the skeleton contains a currency or percent symbol and
+  // create the proper instance to tweak.
+ icu::DecimalFormat* base_format = NULL;
+
+ // UChar representation of U+00A4 currency symbol.
+ const UChar currency_symbol = 0xA4u;
+
+ int32_t index = skeleton.indexOf(currency_symbol);
+ if (index != -1) {
+    // Find out how many U+00A4 characters there are. There is at least one.
+    // The case of non-consecutive U+00A4 is handled in i18n.js.
+ int32_t end_index = skeleton.lastIndexOf(currency_symbol, index);
+
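+    // Per CLDR, one U+00A4 formats the local symbol ($), two the ISO code
+    // (USD) and three the plural name (US dollars); the styles below mirror
+    // that.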
+ icu::NumberFormat::EStyles style;
+ switch (end_index - index) {
+ case 0:
+ style = icu::NumberFormat::kCurrencyStyle;
+ break;
+ case 1:
+ style = icu::NumberFormat::kIsoCurrencyStyle;
+ break;
+ default:
+ style = icu::NumberFormat::kPluralCurrencyStyle;
+ }
+
+ base_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createInstance(icu_locale, style, *status));
+ } else if (skeleton.indexOf('%') != -1) {
+ base_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createPercentInstance(icu_locale, *status));
+ } else {
+ // TODO(cira): Handle scientific skeleton.
+ base_format = static_cast<icu::DecimalFormat*>(
+ icu::NumberFormat::createInstance(icu_locale, *status));
+ }
+
+ if (U_FAILURE(*status)) {
+ delete base_format;
+ return NULL;
+ }
+
+ // Copy important information from skeleton to the new formatter.
+ // TODO(cira): copy rounding information from skeleton?
+ base_format->setGroupingUsed(skeleton_format.isGroupingUsed());
+
+ base_format->setMinimumIntegerDigits(
+ skeleton_format.getMinimumIntegerDigits());
+
+ base_format->setMinimumFractionDigits(
+ skeleton_format.getMinimumFractionDigits());
+
+ base_format->setMaximumFractionDigits(
+ skeleton_format.getMaximumFractionDigits());
+
+ return base_format;
+}
+
+// Gets decimal symbols for a locale.
+static icu::DecimalFormatSymbols* GetFormatSymbols(
+ const icu::Locale& icu_locale) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu::DecimalFormatSymbols* symbols =
+ new icu::DecimalFormatSymbols(icu_locale, status);
+
+ if (U_FAILURE(status)) {
+ delete symbols;
+ // Use symbols from default locale.
+ symbols = new icu::DecimalFormatSymbols(status);
+ }
+
+ return symbols;
+}
+
+// Gets the 3-letter ISO 4217 currency code.
+// Checks the currencyCode setting first, then the @currency=code locale
+// keyword, and finally tries to infer the currency code from the locale
+// of the form 'und_' + region id.
+// Returns false in case of error.
+static bool GetCurrencyCode(const icu::Locale& icu_locale,
+ const char* const und_region_locale,
+ v8::Handle<v8::Object> settings,
+ UChar* code) {
+ UErrorCode status = U_ZERO_ERROR;
+
+  // If there is a user-specified currency code, use it.
+ icu::UnicodeString currency;
+  if (I18NUtils::ExtractStringSetting(settings, "currencyCode", &currency)) {
+ currency.extract(code, NumberFormat::kCurrencyCodeLength, status);
+ return true;
+ }
+
+  // If the ICU locale has a -cu- currency code, use it.
+ char currency_code[NumberFormat::kCurrencyCodeLength];
+ int32_t length = icu_locale.getKeywordValue(
+ "currency", currency_code, NumberFormat::kCurrencyCodeLength, status);
+ if (length != 0) {
+ I18NUtils::AsciiToUChar(currency_code, length + 1,
+ code, NumberFormat::kCurrencyCodeLength);
+ return true;
+ }
+
+ // Otherwise infer currency code from the region id.
+ ucurr_forLocale(
+ und_region_locale, code, NumberFormat::kCurrencyCodeLength, &status);
+
+ return !!U_SUCCESS(status);
+}
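+
+// Example (illustrative): with no currencyCode setting and no -cu- keyword,
+// GetCurrencyCode(locale, "und_US", settings, code) falls through to
+// ucurr_forLocale and yields "USD".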
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+ // Returns undefined, and schedules an exception to be thrown.
+ return v8::ThrowException(v8::Exception::Error(
+ v8::String::New("NumberFormat method called on an object "
+ "that is not a NumberFormat.")));
+}
+
+} } // namespace v8::internal
diff --git a/src/extensions/experimental/number-format.h b/src/extensions/experimental/number-format.h
new file mode 100644
index 0000000..bcfaed6
--- /dev/null
+++ b/src/extensions/experimental/number-format.h
@@ -0,0 +1,71 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class DecimalFormat;
+}
+
+namespace v8 {
+namespace internal {
+
+class NumberFormat {
+ public:
+ // 3-letter ISO 4217 currency code plus \0.
+ static const int kCurrencyCodeLength;
+
+ static v8::Handle<v8::Value> JSNumberFormat(const v8::Arguments& args);
+
+ // Helper methods for various bindings.
+
+  // Unpacks number format object from corresponding JavaScript object.
+ static icu::DecimalFormat* UnpackNumberFormat(
+ v8::Handle<v8::Object> obj);
+
+ // Release memory we allocated for the NumberFormat once the JS object that
+ // holds the pointer gets garbage collected.
+ static void DeleteNumberFormat(v8::Persistent<v8::Value> object,
+ void* param);
+
+ // Formats number and returns corresponding string.
+ static v8::Handle<v8::Value> Format(const v8::Arguments& args);
+
+ private:
+ NumberFormat();
+
+ static v8::Persistent<v8::FunctionTemplate> number_format_template_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
diff --git a/src/factory.cc b/src/factory.cc
index 06d1655..ac96668 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -58,6 +58,16 @@
}
+Handle<FixedArray> Factory::NewFixedDoubleArray(int size,
+ PretenureFlag pretenure) {
+ ASSERT(0 <= size);
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateUninitializedFixedDoubleArray(size, pretenure),
+ FixedArray);
+}
+
+
Handle<StringDictionary> Factory::NewStringDictionary(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
@@ -169,21 +179,21 @@
}
-Handle<String> Factory::NewRawAsciiString(int length,
- PretenureFlag pretenure) {
+Handle<SeqAsciiString> Factory::NewRawAsciiString(int length,
+ PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateRawAsciiString(length, pretenure),
- String);
+ SeqAsciiString);
}
-Handle<String> Factory::NewRawTwoByteString(int length,
- PretenureFlag pretenure) {
+Handle<SeqTwoByteString> Factory::NewRawTwoByteString(int length,
+ PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateRawTwoByteString(length, pretenure),
- String);
+ SeqTwoByteString);
}
@@ -204,6 +214,16 @@
}
+Handle<String> Factory::NewProperSubString(Handle<String> str,
+ int begin,
+ int end) {
+ ASSERT(begin > 0 || end < str->length());
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateSubString(*str, begin, end),
+ String);
+}
+
+
Handle<String> Factory::NewExternalStringFromAscii(
ExternalAsciiString::Resource* resource) {
CALL_HEAP_FUNCTION(
@@ -231,22 +251,34 @@
Handle<Context> Factory::NewFunctionContext(int length,
- Handle<JSFunction> closure) {
+ Handle<JSFunction> function) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateFunctionContext(length, *closure),
+ isolate()->heap()->AllocateFunctionContext(length, *function),
Context);
}
-Handle<Context> Factory::NewWithContext(Handle<Context> previous,
- Handle<JSObject> extension,
- bool is_catch_context) {
+Handle<Context> Factory::NewCatchContext(Handle<JSFunction> function,
+ Handle<Context> previous,
+ Handle<String> name,
+ Handle<Object> thrown_object) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateWithContext(*previous,
- *extension,
- is_catch_context),
+ isolate()->heap()->AllocateCatchContext(*function,
+ *previous,
+ *name,
+ *thrown_object),
+ Context);
+}
+
+
+Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
+ Handle<Context> previous,
+ Handle<JSObject> extension) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateWithContext(*function, *previous, *extension),
Context);
}
@@ -860,6 +892,13 @@
}
+void Factory::BecomeJSObject(Handle<JSProxy> object) {
+ CALL_HEAP_FUNCTION_VOID(
+ isolate(),
+ isolate()->heap()->ReinitializeJSProxyAsJSObject(*object));
+}
+
+
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name,
int number_of_literals,
diff --git a/src/factory.h b/src/factory.h
index 55d1e9a..19f3827 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -39,7 +39,7 @@
class Factory {
public:
- // Allocate a new fixed array with undefined entries.
+ // Allocate a new uninitialized fixed array.
Handle<FixedArray> NewFixedArray(
int size,
PretenureFlag pretenure = NOT_TENURED);
@@ -49,6 +49,11 @@
int size,
PretenureFlag pretenure = NOT_TENURED);
+ // Allocate a new uninitialized fixed double array.
+ Handle<FixedArray> NewFixedDoubleArray(
+ int size,
+ PretenureFlag pretenure = NOT_TENURED);
+
Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for);
Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
@@ -112,10 +117,10 @@
// Allocates and partially initializes an ASCII or TwoByte String. The
// characters of the string are uninitialized. Currently used in regexp code
// only, where they are pretenured.
- Handle<String> NewRawAsciiString(
+ Handle<SeqAsciiString> NewRawAsciiString(
int length,
PretenureFlag pretenure = NOT_TENURED);
- Handle<String> NewRawTwoByteString(
+ Handle<SeqTwoByteString> NewRawTwoByteString(
int length,
PretenureFlag pretenure = NOT_TENURED);
@@ -128,6 +133,11 @@
int begin,
int end);
+ // Create a new string object which holds a proper substring of a string.
+ Handle<String> NewProperSubString(Handle<String> str,
+ int begin,
+ int end);
+
// Creates a new external String object. There are two String encodings
// in the system: ASCII and two byte. Unlike other String types, it does
// not make sense to have a UTF-8 factory function for external strings,
@@ -142,12 +152,18 @@
// Create a function context.
Handle<Context> NewFunctionContext(int length,
- Handle<JSFunction> closure);
+ Handle<JSFunction> function);
+
+ // Create a catch context.
+ Handle<Context> NewCatchContext(Handle<JSFunction> function,
+ Handle<Context> previous,
+ Handle<String> name,
+ Handle<Object> thrown_object);
// Create a 'with' context.
- Handle<Context> NewWithContext(Handle<Context> previous,
- Handle<JSObject> extension,
- bool is_catch_context);
+ Handle<Context> NewWithContext(Handle<JSFunction> function,
+ Handle<Context> previous,
+ Handle<JSObject> extension);
// Return the Symbol matching the passed in string.
Handle<String> SymbolFromString(Handle<String> value);
@@ -237,6 +253,9 @@
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
+ // Change the type of the argument into a regular JS object and reinitialize.
+ void BecomeJSObject(Handle<JSProxy> object);
+
Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Object> prototype);
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index a85f5fe..2db44c3 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -99,6 +99,9 @@
// Flags for experimental language features.
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
+// Flags for experimental implementation features.
+DEFINE_bool(unbox_double_arrays, false, "automatically unbox arrays of doubles")
+
// Flags for Crankshaft.
#ifdef V8_TARGET_ARCH_MIPS
DEFINE_bool(crankshaft, false, "use crankshaft")
@@ -131,12 +134,9 @@
DEFINE_int(deopt_every_n_times,
0,
"deoptimize every n times a deopt point is passed")
-DEFINE_bool(process_arguments_object, true, "try to deal with arguments object")
DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
-DEFINE_bool(aggressive_loop_invariant_motion, true,
- "aggressive motion of instructions out of loops")
DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
@@ -149,8 +149,6 @@
DEFINE_bool(code_comments, false, "emit comments in code disassembly")
DEFINE_bool(peephole_optimization, true,
"perform peephole optimizations in assembly code")
-DEFINE_bool(print_peephole_optimization, false,
- "print peephole optimizations in assembly code")
DEFINE_bool(enable_sse2, true,
"enable use of SSE2 instructions if available")
DEFINE_bool(enable_sse3, true,
@@ -189,7 +187,6 @@
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(trace, false, "trace function calls")
-DEFINE_bool(defer_negation, true, "defer negation operation")
DEFINE_bool(mask_constants_with_cookie,
true,
"use random jit cookie to mask large constants")
@@ -202,33 +199,23 @@
DEFINE_bool(opt_eagerly, false, "be more eager when adaptively optimizing")
DEFINE_bool(always_opt, false, "always try to optimize functions")
DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
-DEFINE_bool(debug_info, true, "add debug information to compiled functions")
DEFINE_bool(deopt, true, "support deoptimization")
DEFINE_bool(trace_deopt, false, "trace deoptimization")
// compiler.cc
-DEFINE_bool(strict, false, "strict error checking")
DEFINE_int(min_preparse_length, 1024,
"minimum length for automatic enable preparsing")
-DEFINE_bool(full_compiler, true, "enable dedicated backend for run-once code")
DEFINE_bool(always_full_compiler, false,
"try to use the dedicated run-once backend for all code")
DEFINE_bool(trace_bailout, false,
"print reasons for falling back to using the classic V8 backend")
-DEFINE_bool(safe_int32_compiler, true,
- "enable optimized side-effect-free int32 expressions.")
-DEFINE_bool(use_flow_graph, false, "perform flow-graph based optimizations")
// compilation-cache.cc
DEFINE_bool(compilation_cache, true, "enable compilation cache")
DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions")
-// data-flow.cc
-DEFINE_bool(loop_peeling, false, "Peel off the first iteration of loops.")
-
// debug.cc
-DEFINE_bool(remote_debugging, false, "enable remote debugging")
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_bool(debugger_auto_break, true,
"automatically set the debug break flag when debugger commands are "
@@ -316,9 +303,6 @@
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
DEFINE_bool(strict_mode, true, "allow strict mode directives")
-// rewriter.cc
-DEFINE_bool(optimize_ast, true, "optimize the ast")
-
// simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
DEFINE_bool(check_icache, false, "Check icache flushes in ARM simulator")
@@ -337,7 +321,6 @@
"activate a 100ms timer that switches between V8 threads")
// Regexp
-DEFINE_bool(trace_regexps, false, "trace regexp execution")
DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
DEFINE_bool(regexp_entry_native, true, "use native code to enter regexp")
@@ -388,6 +371,8 @@
DEFINE_bool(gdbjit, false, "enable GDBJIT interface (disables compacting GC)")
DEFINE_bool(gdbjit_full, false, "enable GDBJIT interface for all code objects")
DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk")
+DEFINE_string(gdbjit_dump_filter, "",
+ "dump only objects containing this substring")
//
// Debug only flags
@@ -414,16 +399,11 @@
DEFINE_bool(print_json_ast, false, "print source AST as JSON")
DEFINE_bool(print_builtin_json_ast, false,
"print source AST for builtins as JSON")
-DEFINE_bool(trace_calls, false, "trace calls")
-DEFINE_bool(trace_builtin_calls, false, "trace builtins calls")
DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
// compiler.cc
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
DEFINE_bool(print_scopes, false, "print scopes")
-DEFINE_bool(print_ir, false, "print the AST as seen by the backend")
-DEFINE_bool(print_graph_text, false,
- "print a text representation of the flow graph")
// contexts.cc
DEFINE_bool(trace_contexts, false, "trace contexts operations")
@@ -472,14 +452,10 @@
"trace regexp macro assembler calls.")
//
-// Logging and profiling only flags
+// Logging and profiling flags
//
#undef FLAG
-#ifdef ENABLE_LOGGING_AND_PROFILING
#define FLAG FLAG_FULL
-#else
-#define FLAG FLAG_READONLY
-#endif
// log.cc
DEFINE_bool(log, false,
@@ -495,7 +471,6 @@
DEFINE_bool(log_snapshot_positions, false,
"log positions of (de)serialized objects in the snapshot.")
DEFINE_bool(log_suspect, false, "Log suspect operations.")
-DEFINE_bool(log_producers, false, "Log stack traces of JS objects allocations.")
DEFINE_bool(prof, false,
"Log statistical profiling information (implies --log-code).")
DEFINE_bool(prof_auto, true,
@@ -512,19 +487,6 @@
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
//
-// Heap protection flags
-// Using heap protection requires ENABLE_LOGGING_AND_PROFILING as well.
-//
-#ifdef ENABLE_HEAP_PROTECTION
-#undef FLAG
-#define FLAG FLAG_FULL
-
-DEFINE_bool(protect_heap, false,
- "Protect/unprotect V8's heap when leaving/entring the VM.")
-
-#endif
-
-//
// Disassembler only flags
//
#undef FLAG
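
The logging and profiling section above now selects FLAG_FULL unconditionally,
since the ENABLE_LOGGING_AND_PROFILING compile-time gate is gone. A minimal,
self-contained sketch of the selection pattern; the macro shapes here are
invented for illustration, and the real macros in flag-definitions.h carry
more metadata:

// sketch_flags.cc: hypothetical illustration of FLAG_FULL vs. FLAG_READONLY.
#include <cstdio>

#define FLAG_FULL(ctype, name, default_value) ctype FLAG_##name = default_value;
#define FLAG_READONLY(ctype, name, default_value) \
  const ctype FLAG_##name = default_value;

// After this change the logging section points FLAG at FLAG_FULL
// unconditionally, so flags such as --log are always writable at runtime:
#define FLAG FLAG_FULL
FLAG(bool, log, false)
#undef FLAG

int main() {
  FLAG_log = true;  // would fail to compile against a FLAG_READONLY definition
  std::printf("log=%d\n", FLAG_log);
  return 0;
}
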
diff --git a/src/frames.cc b/src/frames.cc
index d81d5af..bebd10a 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -36,6 +36,8 @@
#include "scopeinfo.h"
#include "string-stream.h"
+#include "allocation-inl.h"
+
namespace v8 {
namespace internal {
@@ -346,7 +348,6 @@
// -------------------------------------------------------------------------
-#ifdef ENABLE_LOGGING_AND_PROFILING
SafeStackTraceFrameIterator::SafeStackTraceFrameIterator(
Isolate* isolate,
Address fp, Address sp, Address low_bound, Address high_bound) :
@@ -362,7 +363,6 @@
if (frame()->is_java_script()) return;
}
}
-#endif
Code* StackFrame::GetSafepointData(Isolate* isolate,
@@ -371,7 +371,6 @@
unsigned* stack_slots) {
PcToCodeCache::PcToCodeCacheEntry* entry =
isolate->pc_to_code_cache()->GetCacheEntry(pc);
- SafepointEntry cached_safepoint_entry = entry->safepoint_entry;
if (!entry->safepoint_entry.is_valid()) {
entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
ASSERT(entry->safepoint_entry.is_valid());
@@ -528,6 +527,17 @@
}
+Object* StandardFrame::GetExpression(Address fp, int index) {
+ return Memory::Object_at(GetExpressionAddress(fp, index));
+}
+
+
+Address StandardFrame::GetExpressionAddress(Address fp, int n) {
+ const int offset = StandardFrameConstants::kExpressionsOffset;
+ return fp + offset - n * kPointerSize;
+}
+
+
int StandardFrame::ComputeExpressionsCount() const {
const int offset =
StandardFrameConstants::kExpressionsOffset + kPointerSize;
@@ -646,6 +656,16 @@
}
+int JavaScriptFrame::GetArgumentsLength() const {
+ // If there is an arguments adaptor frame, get the arguments length from it.
+ if (has_adapted_arguments()) {
+ return Smi::cast(GetExpression(caller_fp(), 0))->value();
+ } else {
+ return GetNumberOfIncomingArguments();
+ }
+}
+
+
Code* JavaScriptFrame::unchecked_code() const {
JSFunction* function = JSFunction::cast(this->function());
return function->unchecked_code();
@@ -812,6 +832,22 @@
}
+int OptimizedFrame::GetInlineCount() {
+ ASSERT(is_optimized());
+
+ int deopt_index = Safepoint::kNoDeoptimizationIndex;
+ DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
+
+ TranslationIterator it(data->TranslationByteArray(),
+ data->TranslationIndex(deopt_index)->value());
+ Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+ ASSERT(opcode == Translation::BEGIN);
+ USE(opcode);
+ int frame_count = it.Next();
+ return frame_count;
+}
+
+
void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
ASSERT(functions->length() == 0);
ASSERT(is_optimized());
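
The new static GetExpressionAddress/GetExpression overloads let
GetArgumentsLength peek at the caller's adaptor frame without materializing a
frame object. A toy model of the fp-relative slot arithmetic; the
kExpressionsOffset value below is invented, the real constant lives in
StandardFrameConstants:

// sketch_frame_slots.cc: not V8 code, offsets illustrative only.
#include <cassert>
#include <cstdint>

typedef uint8_t* Address;
static const int kPointerSize = sizeof(void*);
static const int kExpressionsOffset = -3 * kPointerSize;  // invented value

Address GetExpressionAddress(Address fp, int n) {
  // Expression stack slots sit at a fixed fp-relative offset and grow
  // downwards, one pointer per slot.
  return fp + kExpressionsOffset - n * kPointerSize;
}

int main() {
  uint8_t frame[128];
  Address fp = frame + 96;
  // Slot n+1 is one pointer below slot n.
  assert(GetExpressionAddress(fp, 1) ==
         GetExpressionAddress(fp, 0) - kPointerSize);
  return 0;
}
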
diff --git a/src/frames.h b/src/frames.h
index aa91026..f542a92 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -383,6 +383,7 @@
inline Object* GetExpression(int index) const;
inline void SetExpression(int index, Object* value);
int ComputeExpressionsCount() const;
+ static Object* GetExpression(Address fp, int index);
virtual void SetCallerFp(Address caller_fp);
@@ -411,6 +412,7 @@
// Returns the address of the n'th expression stack element.
Address GetExpressionAddress(int n) const;
+ static Address GetExpressionAddress(Address fp, int n);
// Determines if the n'th expression stack element is in a stack
// handler or not. Requires traversing all handlers in this frame.
@@ -483,6 +485,7 @@
// actual passed arguments are available in an arguments adaptor
// frame below it on the stack.
inline bool has_adapted_arguments() const;
+ int GetArgumentsLength() const;
// Garbage collection support.
virtual void Iterate(ObjectVisitor* v) const;
@@ -495,6 +498,9 @@
// Determine the code for the frame.
virtual Code* unchecked_code() const;
+ // Returns the number of inlining levels in this frame.
+ virtual int GetInlineCount() { return 1; }
+
// Return a list with JSFunctions of this frame.
virtual void GetFunctions(List<JSFunction*>* functions);
@@ -533,6 +539,8 @@
// GC support.
virtual void Iterate(ObjectVisitor* v) const;
+ virtual int GetInlineCount();
+
// Return a list with JSFunctions of this frame.
// The functions are ordered bottom-to-top (i.e. functions.last()
// is the top-most activation)
@@ -835,7 +843,6 @@
};
-#ifdef ENABLE_LOGGING_AND_PROFILING
typedef JavaScriptFrameIteratorTemp<SafeStackFrameIterator>
SafeJavaScriptFrameIterator;
@@ -847,7 +854,6 @@
Address low_bound, Address high_bound);
void Advance();
};
-#endif
class StackFrameLocator BASE_EMBEDDED {
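
A toy model (classes invented) of the GetInlineCount() contract declared
above: the base JavaScriptFrame reports a single activation, while
OptimizedFrame overrides it with the frame count that the real code reads
from deoptimization data:

// sketch_inline_count.cc: illustrative only, not V8 classes.
#include <cassert>

struct JavaScriptFrame {
  virtual ~JavaScriptFrame() {}
  virtual int GetInlineCount() { return 1; }  // one activation per frame
};

struct OptimizedFrame : public JavaScriptFrame {
  explicit OptimizedFrame(int inlined) : inlined_(inlined) {}
  virtual int GetInlineCount() { return inlined_; }
  int inlined_;
};

int main() {
  JavaScriptFrame plain;
  OptimizedFrame opt(3);  // e.g. caller plus two inlined callees
  assert(plain.GetInlineCount() == 1);
  assert(opt.GetInlineCount() == 3);
  return 0;
}
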
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 2b43e89..8c2f0d1 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -90,14 +90,14 @@
}
-void BreakableStatementChecker::VisitWithEnterStatement(
- WithEnterStatement* stmt) {
+void BreakableStatementChecker::VisitEnterWithContextStatement(
+ EnterWithContextStatement* stmt) {
Visit(stmt->expression());
}
-void BreakableStatementChecker::VisitWithExitStatement(
- WithExitStatement* stmt) {
+void BreakableStatementChecker::VisitExitContextStatement(
+ ExitContextStatement* stmt) {
}
@@ -187,11 +187,6 @@
}
-void BreakableStatementChecker::VisitCatchExtensionObject(
- CatchExtensionObject* expr) {
-}
-
-
void BreakableStatementChecker::VisitAssignment(Assignment* expr) {
// If assigning to a property (including a global property) the assignment is
// breakable.
@@ -351,7 +346,7 @@
}
-void FullCodeGenerator::PrepareForBailout(AstNode* node, State state) {
+void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
PrepareForBailoutForId(node->id(), state);
}
@@ -409,7 +404,7 @@
// Adjust by a (parameter or local) base offset.
switch (slot->type()) {
case Slot::PARAMETER:
- offset += (scope()->num_parameters() + 1) * kPointerSize;
+ offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
break;
case Slot::LOCAL:
offset += JavaScriptFrameConstants::kLocal0Offset;
@@ -449,7 +444,7 @@
// For simplicity we always test the accumulator register.
__ Move(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -471,7 +466,7 @@
// For simplicity we always test the accumulator register.
__ pop(result_register());
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -521,6 +516,14 @@
}
+void FullCodeGenerator::DoTest(const TestContext* context) {
+ DoTest(context->condition(),
+ context->true_label(),
+ context->false_label(),
+ context->fall_through());
+}
+
+
void FullCodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
int length = declarations->length();
@@ -580,88 +583,78 @@
void FullCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, fun->start_position());
- }
+ CodeGenerator::RecordPositions(masm_, fun->start_position());
}
void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, fun->end_position() - 1);
- }
+ CodeGenerator::RecordPositions(masm_, fun->end_position() - 1);
}
void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
- if (FLAG_debug_info) {
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!isolate()->debugger()->IsDebuggerActive()) {
- CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
- } else {
- // Check if the statement will be breakable without adding a debug break
- // slot.
- BreakableStatementChecker checker;
- checker.Check(stmt);
- // Record the statement position right here if the statement is not
- // breakable. For breakable statements the actual recording of the
- // position will be postponed to the breakable code (typically an IC).
- bool position_recorded = CodeGenerator::RecordPositions(
- masm_, stmt->statement_pos(), !checker.is_breakable());
- // If the position recording did record a new position generate a debug
- // break slot to make the statement breakable.
- if (position_recorded) {
- Debug::GenerateSlot(masm_);
- }
- }
-#else
+ if (!isolate()->debugger()->IsDebuggerActive()) {
CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
-#endif
+ } else {
+ // Check if the statement will be breakable without adding a debug break
+ // slot.
+ BreakableStatementChecker checker;
+ checker.Check(stmt);
+ // Record the statement position right here if the statement is not
+ // breakable. For breakable statements the actual recording of the
+ // position will be postponed to the breakable code (typically an IC).
+ bool position_recorded = CodeGenerator::RecordPositions(
+ masm_, stmt->statement_pos(), !checker.is_breakable());
+ // If the position recording did record a new position generate a debug
+ // break slot to make the statement breakable.
+ if (position_recorded) {
+ Debug::GenerateSlot(masm_);
+ }
}
+#else
+ CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+#endif
}
void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) {
- if (FLAG_debug_info) {
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (!isolate()->debugger()->IsDebuggerActive()) {
- CodeGenerator::RecordPositions(masm_, pos);
- } else {
- // Check if the expression will be breakable without adding a debug break
- // slot.
- BreakableStatementChecker checker;
- checker.Check(expr);
- // Record a statement position right here if the expression is not
- // breakable. For breakable expressions the actual recording of the
- // position will be postponed to the breakable code (typically an IC).
- // NOTE this will record a statement position for something which might
- // not be a statement. As stepping in the debugger will only stop at
- // statement positions this is used for e.g. the condition expression of
- // a do while loop.
- bool position_recorded = CodeGenerator::RecordPositions(
- masm_, pos, !checker.is_breakable());
- // If the position recording did record a new position generate a debug
- // break slot to make the statement breakable.
- if (position_recorded) {
- Debug::GenerateSlot(masm_);
- }
- }
-#else
+ if (!isolate()->debugger()->IsDebuggerActive()) {
CodeGenerator::RecordPositions(masm_, pos);
-#endif
+ } else {
+ // Check if the expression will be breakable without adding a debug break
+ // slot.
+ BreakableStatementChecker checker;
+ checker.Check(expr);
+ // Record a statement position right here if the expression is not
+ // breakable. For breakable expressions the actual recording of the
+ // position will be postponed to the breakable code (typically an IC).
+ // NOTE: this will record a statement position for something which might
+ // not be a statement. As stepping in the debugger will only stop at
+ // statement positions, this is used for e.g. the condition expression
+ // of a do-while loop.
+ bool position_recorded = CodeGenerator::RecordPositions(
+ masm_, pos, !checker.is_breakable());
+ // If the position recording did record a new position generate a debug
+ // break slot to make the statement breakable.
+ if (position_recorded) {
+ Debug::GenerateSlot(masm_);
+ }
}
+#else
+ CodeGenerator::RecordPositions(masm_, pos);
+#endif
}
void FullCodeGenerator::SetStatementPosition(int pos) {
- if (FLAG_debug_info) {
- CodeGenerator::RecordPositions(masm_, pos);
- }
+ CodeGenerator::RecordPositions(masm_, pos);
}
void FullCodeGenerator::SetSourcePosition(int pos) {
- if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
+ if (pos != RelocInfo::kNoPosition) {
masm_->positions_recorder()->RecordPosition(pos);
}
}
@@ -693,7 +686,6 @@
void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) {
ZoneList<Expression*>* args = node->arguments();
- Handle<String> name = node->name();
const Runtime::Function* function = node->function();
ASSERT(function != NULL);
ASSERT(function->intrinsic_type == Runtime::INLINE);
@@ -704,143 +696,116 @@
void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- Comment cmnt(masm_, "[ BinaryOperation");
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
-
- OverwriteMode mode = NO_OVERWRITE;
- if (left->ResultOverwriteAllowed()) {
- mode = OVERWRITE_LEFT;
- } else if (right->ResultOverwriteAllowed()) {
- mode = OVERWRITE_RIGHT;
- }
-
- switch (op) {
+ switch (expr->op()) {
case Token::COMMA:
- VisitForEffect(left);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- context()->HandleExpression(right);
- break;
-
+ return VisitComma(expr);
case Token::OR:
case Token::AND:
- EmitLogicalOperation(expr);
- break;
-
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- // Load both operands.
- VisitForStackValue(left);
- VisitForAccumulatorValue(right);
-
- SetSourcePosition(expr->position());
- if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr, op, mode, left, right);
- } else {
- EmitBinaryOp(expr, op, mode);
- }
- break;
- }
-
+ return VisitLogicalExpression(expr);
default:
- UNREACHABLE();
+ return VisitArithmeticExpression(expr);
}
}
-void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
- Label eval_right, done;
-
- context()->EmitLogicalLeft(expr, &eval_right, &done);
-
- PrepareForBailoutForId(expr->RightId(), NO_REGISTERS);
- __ bind(&eval_right);
+void FullCodeGenerator::VisitComma(BinaryOperation* expr) {
+ Comment cmnt(masm_, "[ Comma");
+ VisitForEffect(expr->left());
if (context()->IsTest()) ForwardBailoutToChild(expr);
- context()->HandleExpression(expr->right());
+ VisitInCurrentContext(expr->right());
+}
+
+void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
+ bool is_logical_and = expr->op() == Token::AND;
+ Comment cmnt(masm_, is_logical_and ? "[ Logical AND" : "[ Logical OR");
+ Expression* left = expr->left();
+ Expression* right = expr->right();
+ int right_id = expr->RightId();
+ Label done;
+
+ if (context()->IsTest()) {
+ Label eval_right;
+ const TestContext* test = TestContext::cast(context());
+ if (is_logical_and) {
+ VisitForControl(left, &eval_right, test->false_label(), &eval_right);
+ } else {
+ VisitForControl(left, test->true_label(), &eval_right, &eval_right);
+ }
+ PrepareForBailoutForId(right_id, NO_REGISTERS);
+ __ bind(&eval_right);
+ ForwardBailoutToChild(expr);
+
+ } else if (context()->IsAccumulatorValue()) {
+ VisitForAccumulatorValue(left);
+ // We want the value in the accumulator for the test, and on the stack in
+ // case we need it.
+ __ push(result_register());
+ Label discard, restore;
+ PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ if (is_logical_and) {
+ DoTest(left, &discard, &restore, &restore);
+ } else {
+ DoTest(left, &restore, &discard, &restore);
+ }
+ __ bind(&restore);
+ __ pop(result_register());
+ __ jmp(&done);
+ __ bind(&discard);
+ __ Drop(1);
+ PrepareForBailoutForId(right_id, NO_REGISTERS);
+
+ } else if (context()->IsStackValue()) {
+ VisitForAccumulatorValue(left);
+ // We want the value in the accumulator for the test, and on the stack in
+ // case we need it.
+ __ push(result_register());
+ Label discard;
+ PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
+ if (is_logical_and) {
+ DoTest(left, &discard, &done, &discard);
+ } else {
+ DoTest(left, &done, &discard, &discard);
+ }
+ __ bind(&discard);
+ __ Drop(1);
+ PrepareForBailoutForId(right_id, NO_REGISTERS);
+
+ } else {
+ ASSERT(context()->IsEffect());
+ Label eval_right;
+ if (is_logical_and) {
+ VisitForControl(left, &eval_right, &done, &eval_right);
+ } else {
+ VisitForControl(left, &done, &eval_right, &eval_right);
+ }
+ PrepareForBailoutForId(right_id, NO_REGISTERS);
+ __ bind(&eval_right);
+ }
+
+ VisitInCurrentContext(right);
__ bind(&done);
}
-void FullCodeGenerator::EffectContext::EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const {
- if (expr->op() == Token::OR) {
- codegen()->VisitForControl(expr->left(), done, eval_right, eval_right);
+void FullCodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
+ Token::Value op = expr->op();
+ Comment cmnt(masm_, "[ ArithmeticExpression");
+ Expression* left = expr->left();
+ Expression* right = expr->right();
+ OverwriteMode mode =
+ left->ResultOverwriteAllowed()
+ ? OVERWRITE_LEFT
+ : (right->ResultOverwriteAllowed() ? OVERWRITE_RIGHT : NO_OVERWRITE);
+
+ VisitForStackValue(left);
+ VisitForAccumulatorValue(right);
+
+ SetSourcePosition(expr->position());
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr, op, mode, left, right);
} else {
- ASSERT(expr->op() == Token::AND);
- codegen()->VisitForControl(expr->left(), eval_right, done, eval_right);
- }
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::EmitLogicalLeft(
- BinaryOperation* expr,
- Label* eval_right,
- Label* done) const {
- HandleExpression(expr->left());
- // We want the value in the accumulator for the test, and on the stack in case
- // we need it.
- __ push(result_register());
- Label discard, restore;
- if (expr->op() == Token::OR) {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(&restore, &discard, &restore);
- } else {
- ASSERT(expr->op() == Token::AND);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(&discard, &restore, &restore);
- }
- __ bind(&restore);
- __ pop(result_register());
- __ jmp(done);
- __ bind(&discard);
- __ Drop(1);
-}
-
-
-void FullCodeGenerator::StackValueContext::EmitLogicalLeft(
- BinaryOperation* expr,
- Label* eval_right,
- Label* done) const {
- codegen()->VisitForAccumulatorValue(expr->left());
- // We want the value in the accumulator for the test, and on the stack in case
- // we need it.
- __ push(result_register());
- Label discard;
- if (expr->op() == Token::OR) {
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(done, &discard, &discard);
- } else {
- ASSERT(expr->op() == Token::AND);
- codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(&discard, done, &discard);
- }
- __ bind(&discard);
- __ Drop(1);
-}
-
-
-void FullCodeGenerator::TestContext::EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const {
- if (expr->op() == Token::OR) {
- codegen()->VisitForControl(expr->left(),
- true_label_, eval_right, eval_right);
- } else {
- ASSERT(expr->op() == Token::AND);
- codegen()->VisitForControl(expr->left(),
- eval_right, false_label_, eval_right);
+ EmitBinaryOp(expr, op, mode);
}
}
@@ -853,46 +818,23 @@
}
-void FullCodeGenerator::EffectContext::HandleExpression(
- Expression* expr) const {
- codegen()->HandleInNonTestContext(expr, NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::HandleExpression(
- Expression* expr) const {
- codegen()->HandleInNonTestContext(expr, TOS_REG);
-}
-
-
-void FullCodeGenerator::StackValueContext::HandleExpression(
- Expression* expr) const {
- codegen()->HandleInNonTestContext(expr, NO_REGISTERS);
-}
-
-
-void FullCodeGenerator::TestContext::HandleExpression(Expression* expr) const {
- codegen()->VisitInTestContext(expr);
-}
-
-
-void FullCodeGenerator::HandleInNonTestContext(Expression* expr, State state) {
- ASSERT(forward_bailout_pending_ == NULL);
- AstVisitor::Visit(expr);
- PrepareForBailout(expr, state);
- // Forwarding bailouts to children is a one shot operation. It
- // should have been processed at this point.
- ASSERT(forward_bailout_pending_ == NULL);
-}
-
-
-void FullCodeGenerator::VisitInTestContext(Expression* expr) {
- ForwardBailoutStack stack(expr, forward_bailout_pending_);
- ForwardBailoutStack* saved = forward_bailout_stack_;
- forward_bailout_pending_ = NULL;
- forward_bailout_stack_ = &stack;
- AstVisitor::Visit(expr);
- forward_bailout_stack_ = saved;
+void FullCodeGenerator::VisitInCurrentContext(Expression* expr) {
+ if (context()->IsTest()) {
+ ForwardBailoutStack stack(expr, forward_bailout_pending_);
+ ForwardBailoutStack* saved = forward_bailout_stack_;
+ forward_bailout_pending_ = NULL;
+ forward_bailout_stack_ = &stack;
+ Visit(expr);
+ forward_bailout_stack_ = saved;
+ } else {
+ ASSERT(forward_bailout_pending_ == NULL);
+ Visit(expr);
+ State state = context()->IsAccumulatorValue() ? TOS_REG : NO_REGISTERS;
+ PrepareForBailout(expr, state);
+ // Forwarding bailouts to children is a one-shot operation. It should have
+ // been processed at this point.
+ ASSERT(forward_bailout_pending_ == NULL);
+ }
}
@@ -945,7 +887,7 @@
PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
}
__ bind(&done);
- PrepareForBailoutForId(stmt->id(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->IfId(), NO_REGISTERS);
}
@@ -1010,26 +952,20 @@
}
-void FullCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
- Comment cmnt(masm_, "[ WithEnterStatement");
+void FullCodeGenerator::VisitEnterWithContextStatement(
+ EnterWithContextStatement* stmt) {
+ Comment cmnt(masm_, "[ EnterWithContextStatement");
SetStatementPosition(stmt);
VisitForStackValue(stmt->expression());
- if (stmt->is_catch_block()) {
- __ CallRuntime(Runtime::kPushCatchContext, 1);
- } else {
- __ CallRuntime(Runtime::kPushContext, 1);
- }
- // Both runtime calls return the new context in both the context and the
- // result registers.
-
- // Update local stack frame context field.
+ PushFunctionArgumentForContextAllocation();
+ __ CallRuntime(Runtime::kPushWithContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
}
-void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
- Comment cmnt(masm_, "[ WithExitStatement");
+void FullCodeGenerator::VisitExitContextStatement(ExitContextStatement* stmt) {
+ Comment cmnt(masm_, "[ ExitContextStatement");
SetStatementPosition(stmt);
// Pop context.
@@ -1170,18 +1106,21 @@
__ Call(&try_handler_setup);
// Try handler code, exception in result register.
- // Store exception in local .catch variable before executing catch block.
- {
- // The catch variable is *always* a variable proxy for a local variable.
- Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(catch_var);
- Slot* variable_slot = catch_var->AsSlot();
- ASSERT_NOT_NULL(variable_slot);
- ASSERT_EQ(Slot::LOCAL, variable_slot->type());
- StoreToFrameField(SlotOffset(variable_slot), result_register());
+ // Extend the context before executing the catch block.
+ { Comment cmnt(masm_, "[ Extend catch context");
+ __ Push(stmt->variable()->name());
+ __ push(result_register());
+ PushFunctionArgumentForContextAllocation();
+ __ CallRuntime(Runtime::kPushCatchContext, 3);
+ StoreToFrameField(StandardFrameConstants::kContextOffset,
+ context_register());
}
+ Scope* saved_scope = scope();
+ scope_ = stmt->scope();
+ ASSERT(scope_->declarations()->is_empty());
Visit(stmt->catch_block());
+ scope_ = saved_scope;
__ jmp(&done);
// Try block code. Sets up the exception handler chain.
@@ -1290,7 +1229,7 @@
for_test->false_label(),
NULL);
} else {
- context()->HandleExpression(expr->then_expression());
+ VisitInCurrentContext(expr->then_expression());
__ jmp(&done);
}
@@ -1299,7 +1238,7 @@
if (context()->IsTest()) ForwardBailoutToChild(expr);
SetExpressionPosition(expr->else_expression(),
expr->else_expression_position());
- context()->HandleExpression(expr->else_expression());
+ VisitInCurrentContext(expr->else_expression());
// If control flow falls through Visit, merge it with true case here.
if (!context()->IsTest()) {
__ bind(&done);
@@ -1334,18 +1273,6 @@
}
-void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- // Call runtime routine to allocate the catch extension object and
- // assign the exception value to the catch variable.
- Comment cmnt(masm_, "[ CatchExtensionObject");
- VisitForStackValue(expr->key());
- VisitForStackValue(expr->value());
- // Create catch extension object.
- __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- context()->Plug(result_register());
-}
-
-
void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
VisitForStackValue(expr->exception());
@@ -1371,6 +1298,26 @@
}
+bool FullCodeGenerator::TryLiteralCompare(CompareOperation* compare,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ Expression* expr;
+ Handle<String> check;
+ if (compare->IsLiteralCompareTypeof(&expr, &check)) {
+ EmitLiteralCompareTypeof(expr, check, if_true, if_false, fall_through);
+ return true;
+ }
+
+ if (compare->IsLiteralCompareUndefined(&expr)) {
+ EmitLiteralCompareUndefined(expr, if_true, if_false, fall_through);
+ return true;
+ }
+
+ return false;
+}
+
+
#undef __
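
The value-context branch of VisitLogicalExpression above keeps the left
operand on the stack while testing it, then either restores it as the result
or drops it and evaluates the right side. A plain C++ analogue of that
control flow, illustrative only and not generated code:

// sketch_logical.cc: why the left operand is both test and candidate result.
#include <cstdio>

typedef int (*Thunk)();

int EvalAnd(int left, Thunk right) {
  // Value-context lowering of `left && right`: keep `left` around ("push"),
  // test it, then either restore it as the result or discard it and let the
  // right operand produce the value.
  if (!left) return left;  // "restore": left decides the outcome
  return right();          // "discard": result comes from the right side
}

int FortyTwo() { return 42; }

int main() {
  std::printf("%d %d\n", EvalAnd(0, FortyTwo), EvalAnd(7, FortyTwo));  // 0 42
  return 0;
}
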
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 0d26e38..6b174f7 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -80,6 +80,7 @@
explicit FullCodeGenerator(MacroAssembler* masm)
: masm_(masm),
info_(NULL),
+ scope_(NULL),
nesting_stack_(NULL),
loop_depth_(0),
context_(NULL),
@@ -113,6 +114,7 @@
class TryFinally;
class Finally;
class ForIn;
+ class TestContext;
class NestedStatement BASE_EMBEDDED {
public:
@@ -151,9 +153,11 @@
return stack_depth;
}
NestedStatement* outer() { return previous_; }
- protected:
+
+ protected:
MacroAssembler* masm() { return codegen_->masm(); }
- private:
+
+ private:
FullCodeGenerator* codegen_;
NestedStatement* previous_;
DISALLOW_COPY_AND_ASSIGN(NestedStatement);
@@ -296,7 +300,11 @@
// Helper function to convert a pure value into a test context. The value
// is expected on the stack or the accumulator, depending on the platform.
// See the platform-specific implementation for details.
- void DoTest(Label* if_true, Label* if_false, Label* fall_through);
+ void DoTest(Expression* condition,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
+ void DoTest(const TestContext* context);
// Helper function to split control flow and avoid a branch to the
// fall-through label if it is set up.
@@ -328,48 +336,55 @@
void VisitForEffect(Expression* expr) {
EffectContext context(this);
- HandleInNonTestContext(expr, NO_REGISTERS);
+ VisitInCurrentContext(expr);
}
void VisitForAccumulatorValue(Expression* expr) {
AccumulatorValueContext context(this);
- HandleInNonTestContext(expr, TOS_REG);
+ VisitInCurrentContext(expr);
}
void VisitForStackValue(Expression* expr) {
StackValueContext context(this);
- HandleInNonTestContext(expr, NO_REGISTERS);
+ VisitInCurrentContext(expr);
}
void VisitForControl(Expression* expr,
Label* if_true,
Label* if_false,
Label* fall_through) {
- TestContext context(this, if_true, if_false, fall_through);
- VisitInTestContext(expr);
- // Forwarding bailouts to children is a one shot operation. It
- // should have been processed at this point.
- ASSERT(forward_bailout_pending_ == NULL);
+ TestContext context(this, expr, if_true, if_false, fall_through);
+ VisitInCurrentContext(expr);
}
- void HandleInNonTestContext(Expression* expr, State state);
- void VisitInTestContext(Expression* expr);
-
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void DeclareGlobals(Handle<FixedArray> pairs);
// Try to perform a comparison as a fast inlined literal compare if
// the operands allow it. Returns true if the compare operations
// has been matched and all code generated; false otherwise.
- bool TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
+ bool TryLiteralCompare(CompareOperation* compare,
Label* if_true,
Label* if_false,
Label* fall_through);
+ // Platform-specific code for comparing the type of a value with
+ // a given literal string.
+ void EmitLiteralCompareTypeof(Expression* expr,
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
+
+ // Platform-specific code for strict equality comparison with
+ // the undefined value.
+ void EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through);
+
// Bailout support.
- void PrepareForBailout(AstNode* node, State state);
+ void PrepareForBailout(Expression* node, State state);
void PrepareForBailoutForId(int id, State state);
// Record a call's return site offset, used to rebuild the frame if the
@@ -429,7 +444,7 @@
TypeofState typeof_state,
Label* slow,
Label* done);
- void EmitVariableLoad(Variable* expr);
+ void EmitVariableLoad(VariableProxy* proxy);
enum ResolveEvalFlag {
SKIP_CONTEXT_LOOKUP,
@@ -517,23 +532,11 @@
return is_strict_mode() ? kStrictMode : kNonStrictMode;
}
FunctionLiteral* function() { return info_->function(); }
- Scope* scope() { return info_->scope(); }
+ Scope* scope() { return scope_; }
static Register result_register();
static Register context_register();
- // Helper for calling an IC stub.
- void EmitCallIC(Handle<Code> ic,
- RelocInfo::Mode mode,
- unsigned ast_id);
-
- // Calling an IC stub with a patch site. Passing NULL for patch_site
- // or non NULL patch_site which is not activated indicates no inlined smi code
- // and emits a nop after the IC call.
- void EmitCallIC(Handle<Code> ic,
- JumpPatchSite* patch_site,
- unsigned ast_id);
-
// Set fields in the stack frame. Offsets are the frame pointer relative
// offsets defined in, e.g., StandardFrameConstants.
void StoreToFrameField(int frame_offset, Register value);
@@ -542,6 +545,10 @@
// in v8::internal::Context.
void LoadContextField(Register dst, int context_index);
+ // Push the function argument for the runtime functions PushWithContext
+ // and PushCatchContext.
+ void PushFunctionArgumentForContextAllocation();
+
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
@@ -549,8 +556,10 @@
void EmitUnaryOperation(UnaryOperation* expr, const char* comment);
- // Handles the shortcutted logical binary operations in VisitBinaryOperation.
- void EmitLogicalOperation(BinaryOperation* expr);
+ void VisitComma(BinaryOperation* expr);
+ void VisitLogicalExpression(BinaryOperation* expr);
+ void VisitArithmeticExpression(BinaryOperation* expr);
+ void VisitInCurrentContext(Expression* expr);
void VisitForTypeofValue(Expression* expr);
@@ -598,11 +607,6 @@
// context.
virtual void DropAndPlug(int count, Register reg) const = 0;
- // For shortcutting operations || and &&.
- virtual void EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const = 0;
-
// Set up branch labels for a test expression. The three Label** parameters
// are output parameters.
virtual void PrepareTest(Label* materialize_true,
@@ -611,12 +615,14 @@
Label** if_false,
Label** fall_through) const = 0;
- virtual void HandleExpression(Expression* expr) const = 0;
-
// Returns true if we are evaluating only for side effects (i.e. if the
// result will be discarded).
virtual bool IsEffect() const { return false; }
+ // Returns true if we are evaluating for the value (in accu/on stack).
+ virtual bool IsAccumulatorValue() const { return false; }
+ virtual bool IsStackValue() const { return false; }
+
// Returns true if we are branching on the value rather than materializing
// it. Only used for asserts.
virtual bool IsTest() const { return false; }
@@ -644,15 +650,12 @@
virtual void Plug(Heap::RootListIndex) const;
virtual void PlugTOS() const;
virtual void DropAndPlug(int count, Register reg) const;
- virtual void EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const;
virtual void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const;
- virtual void HandleExpression(Expression* expr) const;
+ virtual bool IsAccumulatorValue() const { return true; }
};
class StackValueContext : public ExpressionContext {
@@ -668,24 +671,23 @@
virtual void Plug(Heap::RootListIndex) const;
virtual void PlugTOS() const;
virtual void DropAndPlug(int count, Register reg) const;
- virtual void EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const;
virtual void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const;
- virtual void HandleExpression(Expression* expr) const;
+ virtual bool IsStackValue() const { return true; }
};
class TestContext : public ExpressionContext {
public:
- explicit TestContext(FullCodeGenerator* codegen,
- Label* true_label,
- Label* false_label,
- Label* fall_through)
+ TestContext(FullCodeGenerator* codegen,
+ Expression* condition,
+ Label* true_label,
+ Label* false_label,
+ Label* fall_through)
: ExpressionContext(codegen),
+ condition_(condition),
true_label_(true_label),
false_label_(false_label),
fall_through_(fall_through) { }
@@ -695,6 +697,7 @@
return reinterpret_cast<const TestContext*>(context);
}
+ Expression* condition() const { return condition_; }
Label* true_label() const { return true_label_; }
Label* false_label() const { return false_label_; }
Label* fall_through() const { return fall_through_; }
@@ -707,18 +710,15 @@
virtual void Plug(Heap::RootListIndex) const;
virtual void PlugTOS() const;
virtual void DropAndPlug(int count, Register reg) const;
- virtual void EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const;
virtual void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const;
- virtual void HandleExpression(Expression* expr) const;
virtual bool IsTest() const { return true; }
private:
+ Expression* condition_;
Label* true_label_;
Label* false_label_;
Label* fall_through_;
@@ -737,20 +737,17 @@
virtual void Plug(Heap::RootListIndex) const;
virtual void PlugTOS() const;
virtual void DropAndPlug(int count, Register reg) const;
- virtual void EmitLogicalLeft(BinaryOperation* expr,
- Label* eval_right,
- Label* done) const;
virtual void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const;
- virtual void HandleExpression(Expression* expr) const;
virtual bool IsEffect() const { return true; }
};
MacroAssembler* masm_;
CompilationInfo* info_;
+ Scope* scope_;
Label return_label_;
NestedStatement* nesting_stack_;
int loop_depth_;
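
This header drops the per-context virtual hooks (HandleExpression,
EmitLogicalLeft) in favor of IsTest()/IsAccumulatorValue()/IsStackValue()
predicates that one shared visit function branches on. A toy model of the
dispatch, with all names invented:

// sketch_context.cc: predicate-based dispatch replacing virtual hooks.
#include <cstdio>

struct ExpressionContext {
  virtual ~ExpressionContext() {}
  virtual bool IsTest() const { return false; }
  virtual bool IsAccumulatorValue() const { return false; }
};

struct TestContext : public ExpressionContext {
  virtual bool IsTest() const { return true; }
};

void VisitInCurrentContext(const ExpressionContext& context) {
  if (context.IsTest()) {
    std::puts("test path: branch on the value");
  } else {
    std::puts("value/effect path: materialize, then prepare for bailout");
  }
}

int main() {
  TestContext test;
  ExpressionContext effect;
  VisitInCurrentContext(test);
  VisitInCurrentContext(effect);
  return 0;
}
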
diff --git a/src/func-name-inferrer.cc b/src/func-name-inferrer.cc
index ebac4b9..239358d 100644
--- a/src/func-name-inferrer.cc
+++ b/src/func-name-inferrer.cc
@@ -34,48 +34,62 @@
namespace v8 {
namespace internal {
+FuncNameInferrer::FuncNameInferrer(Isolate* isolate)
+ : isolate_(isolate),
+ entries_stack_(10),
+ names_stack_(5),
+ funcs_to_infer_(4) {
+}
+
void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
// Enclosing name is a name of a constructor function. To check
// that it is really a constructor, we check that it is not empty
// and starts with a capital letter.
if (name->length() > 0 && Runtime::IsUpperCaseChar(
- Isolate::Current()->runtime_state(), name->Get(0))) {
- names_stack_.Add(name);
+ isolate()->runtime_state(), name->Get(0))) {
+ names_stack_.Add(Name(name, kEnclosingConstructorName));
}
}
void FuncNameInferrer::PushLiteralName(Handle<String> name) {
- if (IsOpen() && !HEAP->prototype_symbol()->Equals(*name)) {
- names_stack_.Add(name);
+ if (IsOpen() && !isolate()->heap()->prototype_symbol()->Equals(*name)) {
+ names_stack_.Add(Name(name, kLiteralName));
}
}
void FuncNameInferrer::PushVariableName(Handle<String> name) {
- if (IsOpen() && !HEAP->result_symbol()->Equals(*name)) {
- names_stack_.Add(name);
+ if (IsOpen() && !isolate()->heap()->result_symbol()->Equals(*name)) {
+ names_stack_.Add(Name(name, kVariableName));
}
}
Handle<String> FuncNameInferrer::MakeNameFromStack() {
- if (names_stack_.is_empty()) {
- return FACTORY->empty_string();
- } else {
- return MakeNameFromStackHelper(1, names_stack_.at(0));
- }
+ return MakeNameFromStackHelper(0, isolate()->factory()->empty_string());
}
Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos,
Handle<String> prev) {
- if (pos >= names_stack_.length()) {
- return prev;
+ if (pos >= names_stack_.length()) return prev;
+ if (pos < names_stack_.length() - 1 &&
+ names_stack_.at(pos).type == kVariableName &&
+ names_stack_.at(pos + 1).type == kVariableName) {
+ // Skip consecutive variable declarations.
+ return MakeNameFromStackHelper(pos + 1, prev);
} else {
- Handle<String> curr = FACTORY->NewConsString(dot_, names_stack_.at(pos));
- return MakeNameFromStackHelper(pos + 1, FACTORY->NewConsString(prev, curr));
+ if (prev->length() > 0) {
+ Factory* factory = isolate()->factory();
+ Handle<String> curr = factory->NewConsString(
+ factory->dot_symbol(), names_stack_.at(pos).name);
+ return MakeNameFromStackHelper(pos + 1,
+ factory->NewConsString(prev, curr));
+ } else {
+ return MakeNameFromStackHelper(pos + 1, names_stack_.at(pos).name);
+ }
}
}
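
The new skip rule in MakeNameFromStackHelper means chained assignments
contribute only the innermost variable name. A standalone model using
std::string in place of V8 handles:

// sketch_infer.cc: consecutive kVariableName entries collapse, so
// `a = b = function(){}` infers "b" rather than "a.b".
#include <cassert>
#include <string>
#include <vector>

enum NameType { kEnclosingConstructorName, kLiteralName, kVariableName };
struct Name { std::string name; NameType type; };

std::string MakeName(const std::vector<Name>& stack, size_t pos,
                     const std::string& prev) {
  if (pos >= stack.size()) return prev;
  if (pos + 1 < stack.size() && stack[pos].type == kVariableName &&
      stack[pos + 1].type == kVariableName) {
    return MakeName(stack, pos + 1, prev);  // skip consecutive variable names
  }
  if (!prev.empty()) {
    return MakeName(stack, pos + 1, prev + "." + stack[pos].name);
  }
  return MakeName(stack, pos + 1, stack[pos].name);
}

int main() {
  std::vector<Name> stack;
  stack.push_back(Name{"a", kVariableName});
  stack.push_back(Name{"b", kVariableName});
  assert(MakeName(stack, 0, "") == "b");
  stack.push_back(Name{"c", kLiteralName});
  assert(MakeName(stack, 0, "") == "b.c");
  return 0;
}
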
diff --git a/src/func-name-inferrer.h b/src/func-name-inferrer.h
index 5aa2b35..bec3a5c 100644
--- a/src/func-name-inferrer.h
+++ b/src/func-name-inferrer.h
@@ -31,6 +31,8 @@
namespace v8 {
namespace internal {
+class Isolate;
+
// FuncNameInferrer is a stateful class that is used to perform name
// inference for anonymous functions during static analysis of source code.
// Inference is performed in cases when an anonymous function is assigned
@@ -43,12 +45,7 @@
// a name.
class FuncNameInferrer : public ZoneObject {
public:
- FuncNameInferrer()
- : entries_stack_(10),
- names_stack_(5),
- funcs_to_infer_(4),
- dot_(FACTORY->NewStringFromAscii(CStrVector("."))) {
- }
+ explicit FuncNameInferrer(Isolate* isolate);
// Returns whether we have entered name collection state.
bool IsOpen() const { return !entries_stack_.is_empty(); }
@@ -81,13 +78,26 @@
}
}
- // Infers a function name and leaves names collection state.
+ // Leaves names collection state.
void Leave() {
ASSERT(IsOpen());
names_stack_.Rewind(entries_stack_.RemoveLast());
}
private:
+ enum NameType {
+ kEnclosingConstructorName,
+ kLiteralName,
+ kVariableName
+ };
+ struct Name {
+ Name(Handle<String> name, NameType type) : name(name), type(type) { }
+ Handle<String> name;
+ NameType type;
+ };
+
+ Isolate* isolate() { return isolate_; }
+
// Constructs a full name in dotted notation from gathered names.
Handle<String> MakeNameFromStack();
@@ -97,10 +107,10 @@
// Performs name inferring for added functions.
void InferFunctionsNames();
+ Isolate* isolate_;
ZoneList<int> entries_stack_;
- ZoneList<Handle<String> > names_stack_;
+ ZoneList<Name> names_stack_;
ZoneList<FunctionLiteral*> funcs_to_infer_;
- Handle<String> dot_;
DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
};
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index bf8ac19..4d57e25 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -34,16 +34,29 @@
#include "global-handles.h"
#include "messages.h"
#include "natives.h"
+#include "scopeinfo.h"
namespace v8 {
namespace internal {
+#ifdef __APPLE__
+#define __MACH_O
+class MachO;
+class MachOSection;
+typedef MachO DebugObject;
+typedef MachOSection DebugSection;
+#else
+#define __ELF
class ELF;
+class ELFSection;
+typedef ELF DebugObject;
+typedef ELFSection DebugSection;
+#endif
class Writer BASE_EMBEDDED {
public:
- explicit Writer(ELF* elf)
- : elf_(elf),
+ explicit Writer(DebugObject* debug_object)
+ : debug_object_(debug_object),
position_(0),
capacity_(1024),
buffer_(reinterpret_cast<byte*>(malloc(capacity_))) {
@@ -112,7 +125,7 @@
}
}
- ELF* elf() { return elf_; }
+ DebugObject* debug_object() { return debug_object_; }
byte* buffer() { return buffer_; }
@@ -165,7 +178,7 @@
return reinterpret_cast<T*>(&buffer_[offset]);
}
- ELF* elf_;
+ DebugObject* debug_object_;
uintptr_t position_;
uintptr_t capacity_;
byte* buffer_;
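
Writer hands out Slot objects so callers can reserve a header or size field,
write the body, and patch the field afterwards; slots hold offsets rather
than raw pointers because the buffer may be reallocated as it grows. A toy
model of the pattern with simplified names:

// sketch_writer_slot.cc: reserve, write body, patch the reserved field.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

class Writer {
 public:
  template <typename T>
  class Slot {
   public:
    Slot(Writer* w, size_t offset) : w_(w), offset_(offset) {}
    void set(const T& value) {
      std::memcpy(&w_->buf_[offset_], &value, sizeof(T));
    }
   private:
    Writer* w_;
    size_t offset_;  // offset, not pointer: the buffer may move
  };

  template <typename T>
  Slot<T> CreateSlotHere() {
    size_t offset = buf_.size();
    buf_.resize(offset + sizeof(T));  // reserve zeroed space for T
    return Slot<T>(this, offset);
  }

  void WriteByte(uint8_t b) { buf_.push_back(b); }
  size_t position() const { return buf_.size(); }
  const std::vector<uint8_t>& buffer() const { return buf_; }

 private:
  std::vector<uint8_t> buf_;
};

int main() {
  Writer w;
  Writer::Slot<uint32_t> size = w.CreateSlotHere<uint32_t>();  // reserve
  size_t start = w.position();
  for (int i = 0; i < 3; ++i) w.WriteByte(0xab);               // body
  size.set(static_cast<uint32_t>(w.position() - start));       // patch
  uint32_t written;
  std::memcpy(&written, w.buffer().data(), sizeof(written));
  assert(written == 3);
  return 0;
}
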
@@ -173,21 +186,120 @@
class StringTable;
-class ELFSection : public ZoneObject {
+template<typename THeader>
+class DebugSectionBase : public ZoneObject {
public:
- struct Header {
- uint32_t name;
- uint32_t type;
- uintptr_t flags;
- uintptr_t address;
- uintptr_t offset;
- uintptr_t size;
- uint32_t link;
- uint32_t info;
- uintptr_t alignment;
- uintptr_t entry_size;
+ virtual ~DebugSectionBase() { }
+
+ virtual void WriteBody(Writer::Slot<THeader> header, Writer* writer) {
+ uintptr_t start = writer->position();
+ if (WriteBody(writer)) {
+ uintptr_t end = writer->position();
+ header->offset = start;
+#if defined(__MACH_O)
+ header->addr = 0;
+#endif
+ header->size = end - start;
+ }
+ }
+
+ virtual bool WriteBody(Writer* writer) {
+ return false;
+ }
+
+ typedef THeader Header;
+};
+
+
+struct MachOSectionHeader {
+ char sectname[16];
+ char segname[16];
+#if defined(V8_TARGET_ARCH_IA32)
+ uint32_t addr;
+ uint32_t size;
+#else
+ uint64_t addr;
+ uint64_t size;
+#endif
+ uint32_t offset;
+ uint32_t align;
+ uint32_t reloff;
+ uint32_t nreloc;
+ uint32_t flags;
+ uint32_t reserved1;
+ uint32_t reserved2;
+};
+
+
+class MachOSection : public DebugSectionBase<MachOSectionHeader> {
+ public:
+ enum Type {
+ S_REGULAR = 0x0u,
+ S_ATTR_COALESCED = 0xbu,
+ S_ATTR_SOME_INSTRUCTIONS = 0x400u,
+ S_ATTR_DEBUG = 0x02000000u,
+ S_ATTR_PURE_INSTRUCTIONS = 0x80000000u
};
+ MachOSection(const char* name,
+ const char* segment,
+ uintptr_t align,
+ uint32_t flags)
+ : name_(name),
+ segment_(segment),
+ align_(align),
+ flags_(flags) {
+ ASSERT(IsPowerOf2(align));
+ if (align_ != 0) {
+ align_ = WhichPowerOf2(align_);
+ }
+ }
+
+ virtual ~MachOSection() { }
+
+ virtual void PopulateHeader(Writer::Slot<Header> header) {
+ header->addr = 0;
+ header->size = 0;
+ header->offset = 0;
+ header->align = align_;
+ header->reloff = 0;
+ header->nreloc = 0;
+ header->flags = flags_;
+ header->reserved1 = 0;
+ header->reserved2 = 0;
+ memset(header->sectname, 0, sizeof(header->sectname));
+ memset(header->segname, 0, sizeof(header->segname));
+ ASSERT(strlen(name_) < sizeof(header->sectname));
+ ASSERT(strlen(segment_) < sizeof(header->segname));
+ strncpy(header->sectname, name_, sizeof(header->sectname));
+ strncpy(header->segname, segment_, sizeof(header->segname));
+ }
+
+ private:
+ const char* name_;
+ const char* segment_;
+ uintptr_t align_;
+ uint32_t flags_;
+};
+
+
+struct ELFSectionHeader {
+ uint32_t name;
+ uint32_t type;
+ uintptr_t flags;
+ uintptr_t address;
+ uintptr_t offset;
+ uintptr_t size;
+ uint32_t link;
+ uint32_t info;
+ uintptr_t alignment;
+ uintptr_t entry_size;
+};
+
+
+#if defined(__ELF)
+class ELFSection : public DebugSectionBase<ELFSectionHeader> {
+ public:
enum Type {
TYPE_NULL = 0,
TYPE_PROGBITS = 1,
@@ -252,15 +364,45 @@
header->entry_size = 0;
}
-
private:
const char* name_;
Type type_;
uintptr_t align_;
uint16_t index_;
};
+#endif // defined(__ELF)
+#if defined(__MACH_O)
+class MachOTextSection : public MachOSection {
+ public:
+ MachOTextSection(uintptr_t align,
+ uintptr_t addr,
+ uintptr_t size)
+ : MachOSection("__text",
+ "__TEXT",
+ align,
+ MachOSection::S_REGULAR |
+ MachOSection::S_ATTR_SOME_INSTRUCTIONS |
+ MachOSection::S_ATTR_PURE_INSTRUCTIONS),
+ addr_(addr),
+ size_(size) { }
+
+ protected:
+ virtual void PopulateHeader(Writer::Slot<Header> header) {
+ MachOSection::PopulateHeader(header);
+ header->addr = addr_;
+ header->size = size_;
+ }
+
+ private:
+ uintptr_t addr_;
+ uintptr_t size_;
+};
+#endif // defined(__MACH_O)
+
+
+#if defined(__ELF)
class FullHeaderELFSection : public ELFSection {
public:
FullHeaderELFSection(const char* name,
@@ -349,8 +491,139 @@
header->alignment = align_;
PopulateHeader(header);
}
+#endif // defined(__ELF)
+#if defined(__MACH_O)
+class MachO BASE_EMBEDDED {
+ public:
+ MachO() : sections_(6) { }
+
+ uint32_t AddSection(MachOSection* section) {
+ sections_.Add(section);
+ return sections_.length() - 1;
+ }
+
+ void Write(Writer* w, uintptr_t code_start, uintptr_t code_size) {
+ Writer::Slot<MachOHeader> header = WriteHeader(w);
+ uintptr_t load_command_start = w->position();
+ Writer::Slot<MachOSegmentCommand> cmd = WriteSegmentCommand(w,
+ code_start,
+ code_size);
+ WriteSections(w, cmd, header, load_command_start);
+ }
+
+ private:
+ struct MachOHeader {
+ uint32_t magic;
+ uint32_t cputype;
+ uint32_t cpusubtype;
+ uint32_t filetype;
+ uint32_t ncmds;
+ uint32_t sizeofcmds;
+ uint32_t flags;
+#if defined(V8_TARGET_ARCH_X64)
+ uint32_t reserved;
+#endif
+ };
+
+ struct MachOSegmentCommand {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ char segname[16];
+#if defined(V8_TARGET_ARCH_IA32)
+ uint32_t vmaddr;
+ uint32_t vmsize;
+ uint32_t fileoff;
+ uint32_t filesize;
+#else
+ uint64_t vmaddr;
+ uint64_t vmsize;
+ uint64_t fileoff;
+ uint64_t filesize;
+#endif
+ uint32_t maxprot;
+ uint32_t initprot;
+ uint32_t nsects;
+ uint32_t flags;
+ };
+
+ enum MachOLoadCommandCmd {
+ LC_SEGMENT_32 = 0x00000001u,
+ LC_SEGMENT_64 = 0x00000019u
+ };
+
+
+ Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
+ ASSERT(w->position() == 0);
+ Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
+#if defined(V8_TARGET_ARCH_IA32)
+ header->magic = 0xFEEDFACEu;
+ header->cputype = 7; // i386
+ header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
+#elif defined(V8_TARGET_ARCH_X64)
+ header->magic = 0xFEEDFACFu;
+ header->cputype = 7 | 0x01000000; // i386 | 64-bit ABI
+ header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
+ header->reserved = 0;
+#else
+#error Unsupported target architecture.
+#endif
+ header->filetype = 0x1; // MH_OBJECT
+ header->ncmds = 1;
+ header->sizeofcmds = 0;
+ header->flags = 0;
+ return header;
+ }
+
+
+ Writer::Slot<MachOSegmentCommand> WriteSegmentCommand(Writer* w,
+ uintptr_t code_start,
+ uintptr_t code_size) {
+ Writer::Slot<MachOSegmentCommand> cmd =
+ w->CreateSlotHere<MachOSegmentCommand>();
+#if defined(V8_TARGET_ARCH_IA32)
+ cmd->cmd = LC_SEGMENT_32;
+#else
+ cmd->cmd = LC_SEGMENT_64;
+#endif
+ cmd->vmaddr = code_start;
+ cmd->vmsize = code_size;
+ cmd->fileoff = 0;
+ cmd->filesize = 0;
+ cmd->maxprot = 7;
+ cmd->initprot = 7;
+ cmd->flags = 0;
+ cmd->nsects = sections_.length();
+ memset(cmd->segname, 0, 16);
+ cmd->cmdsize = sizeof(MachOSegmentCommand) + sizeof(MachOSection::Header) *
+ cmd->nsects;
+ return cmd;
+ }
+
+
+ void WriteSections(Writer* w,
+ Writer::Slot<MachOSegmentCommand> cmd,
+ Writer::Slot<MachOHeader> header,
+ uintptr_t load_command_start) {
+ Writer::Slot<MachOSection::Header> headers =
+ w->CreateSlotsHere<MachOSection::Header>(sections_.length());
+ cmd->fileoff = w->position();
+ header->sizeofcmds = w->position() - load_command_start;
+ for (int section = 0; section < sections_.length(); ++section) {
+ sections_[section]->PopulateHeader(headers.at(section));
+ sections_[section]->WriteBody(headers.at(section), w);
+ }
+ cmd->filesize = w->position() - (uintptr_t)cmd->fileoff;
+ }
+
+
+ ZoneList<MachOSection*> sections_;
+};
+#endif // defined(__MACH_O)
+
+
+#if defined(__ELF)
class ELF BASE_EMBEDDED {
public:
ELF() : sections_(6) {
@@ -596,7 +869,7 @@
// String table for this symbol table should follow it in the section table.
StringTable* strtab =
- static_cast<StringTable*>(w->elf()->SectionAt(index() + 1));
+ static_cast<StringTable*>(w->debug_object()->SectionAt(index() + 1));
strtab->AttachWriter(w);
symbols.at(0).set(ELFSymbol::SerializedLayout(0,
0,
@@ -640,11 +913,11 @@
ZoneList<ELFSymbol> locals_;
ZoneList<ELFSymbol> globals_;
};
+#endif // defined(__ELF)
class CodeDescription BASE_EMBEDDED {
public:
-
#ifdef V8_TARGET_ARCH_X64
enum StackState {
POST_RBP_PUSH,
@@ -658,12 +931,14 @@
Code* code,
Handle<Script> script,
GDBJITLineInfo* lineinfo,
- GDBJITInterface::CodeTag tag)
+ GDBJITInterface::CodeTag tag,
+ CompilationInfo* info)
: name_(name),
code_(code),
script_(script),
lineinfo_(lineinfo),
- tag_(tag) {
+ tag_(tag),
+ info_(info) {
}
const char* name() const {
@@ -678,6 +953,14 @@
return tag_;
}
+ CompilationInfo* info() const {
+ return info_;
+ }
+
+ bool IsInfoAvailable() const {
+ return info_ != NULL;
+ }
+
uintptr_t CodeStart() const {
return reinterpret_cast<uintptr_t>(code_->instruction_start());
}
@@ -725,12 +1008,13 @@
Handle<Script> script_;
GDBJITLineInfo* lineinfo_;
GDBJITInterface::CodeTag tag_;
+ CompilationInfo* info_;
#ifdef V8_TARGET_ARCH_X64
uintptr_t stack_state_start_addresses_[STACK_STATE_MAX];
#endif
};
-
+#if defined(__ELF)
static void CreateSymbolsTable(CodeDescription* desc,
ELF* elf,
int text_section_index) {
@@ -755,14 +1039,42 @@
ELFSymbol::TYPE_FUNC,
text_section_index));
}
+#endif // defined(__ELF)
-class DebugInfoSection : public ELFSection {
+class DebugInfoSection : public DebugSection {
public:
explicit DebugInfoSection(CodeDescription* desc)
- : ELFSection(".debug_info", TYPE_PROGBITS, 1), desc_(desc) { }
+#if defined(__ELF)
+ : ELFSection(".debug_info", TYPE_PROGBITS, 1),
+#else
+ : MachOSection("__debug_info",
+ "__DWARF",
+ 1,
+ MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
+#endif
+ desc_(desc) { }
+
+ // DWARF2 standard
+ enum DWARF2LocationOp {
+ DW_OP_reg0 = 0x50,
+ DW_OP_reg1 = 0x51,
+ DW_OP_reg2 = 0x52,
+ DW_OP_reg3 = 0x53,
+ DW_OP_reg4 = 0x54,
+ DW_OP_reg5 = 0x55,
+ DW_OP_reg6 = 0x56,
+ DW_OP_reg7 = 0x57,
+ DW_OP_fbreg = 0x91 // 1 param: SLEB128 offset
+ };
+
+ enum DWARF2Encoding {
+ DW_ATE_ADDRESS = 0x1,
+ DW_ATE_SIGNED = 0x5
+ };
bool WriteBody(Writer* w) {
+ uintptr_t cu_start = w->position();
Writer::Slot<uint32_t> size = w->CreateSlotHere<uint32_t>();
uintptr_t start = w->position();
w->Write<uint16_t>(2); // DWARF version.
@@ -774,6 +1086,123 @@
w->Write<intptr_t>(desc_->CodeStart());
w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
w->Write<uint32_t>(0);
+
+ uint32_t ty_offset = static_cast<uint32_t>(w->position() - cu_start);
+ w->WriteULEB128(3);
+ w->Write<uint8_t>(kPointerSize);
+ w->WriteString("v8value");
+
+ if (desc_->IsInfoAvailable()) {
+ CompilationInfo* info = desc_->info();
+ ScopeInfo<FreeStoreAllocationPolicy> scope_info(info->scope());
+ w->WriteULEB128(2);
+ w->WriteString(desc_->name());
+ w->Write<intptr_t>(desc_->CodeStart());
+ w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
+ Writer::Slot<uint32_t> fb_block_size = w->CreateSlotHere<uint32_t>();
+ uintptr_t fb_block_start = w->position();
+#if defined(V8_TARGET_ARCH_IA32)
+ w->Write<uint8_t>(DW_OP_reg5); // The frame pointer's here on ia32
+#elif defined(V8_TARGET_ARCH_X64)
+ w->Write<uint8_t>(DW_OP_reg6); // and here on x64.
+#else
+#error Unsupported target architecture.
+#endif
+ fb_block_size.set(static_cast<uint32_t>(w->position() - fb_block_start));
+
+ int params = scope_info.number_of_parameters();
+ int slots = scope_info.number_of_stack_slots();
+ int context_slots = scope_info.number_of_context_slots();
+ // The real slot ID is internal_slots + context_slot_id.
+ int internal_slots = Context::MIN_CONTEXT_SLOTS;
+ int locals = scope_info.NumberOfLocals();
+ int current_abbreviation = 4;
+
+ for (int param = 0; param < params; ++param) {
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString(
+ *scope_info.parameter_name(param)->ToCString(DISALLOW_NULLS));
+ w->Write<uint32_t>(ty_offset);
+ Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
+ uintptr_t block_start = w->position();
+ w->Write<uint8_t>(DW_OP_fbreg);
+ w->WriteSLEB128(
+ JavaScriptFrameConstants::kLastParameterOffset +
+ kPointerSize * (params - param - 1));
+ block_size.set(static_cast<uint32_t>(w->position() - block_start));
+ }
+
+ EmbeddedVector<char, 256> buffer;
+ StringBuilder builder(buffer.start(), buffer.length());
+
+ for (int slot = 0; slot < slots; ++slot) {
+ w->WriteULEB128(current_abbreviation++);
+ builder.Reset();
+ builder.AddFormatted("slot%d", slot);
+ w->WriteString(builder.Finalize());
+ }
+
+ // See contexts.h for more information.
+ ASSERT(Context::MIN_CONTEXT_SLOTS == 4);
+ ASSERT(Context::CLOSURE_INDEX == 0);
+ ASSERT(Context::PREVIOUS_INDEX == 1);
+ ASSERT(Context::EXTENSION_INDEX == 2);
+ ASSERT(Context::GLOBAL_INDEX == 3);
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString(".closure");
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString(".previous");
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString(".extension");
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString(".global");
+
+ for (int context_slot = 0;
+ context_slot < context_slots;
+ ++context_slot) {
+ w->WriteULEB128(current_abbreviation++);
+ builder.Reset();
+ builder.AddFormatted("context_slot%d", context_slot + internal_slots);
+ w->WriteString(builder.Finalize());
+ }
+
+ for (int local = 0; local < locals; ++local) {
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString(
+ *scope_info.LocalName(local)->ToCString(DISALLOW_NULLS));
+ w->Write<uint32_t>(ty_offset);
+ Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
+ uintptr_t block_start = w->position();
+ w->Write<uint8_t>(DW_OP_fbreg);
+ w->WriteSLEB128(
+ JavaScriptFrameConstants::kLocal0Offset -
+ kPointerSize * local);
+ block_size.set(static_cast<uint32_t>(w->position() - block_start));
+ }
+
+ {
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString("__function");
+ w->Write<uint32_t>(ty_offset);
+ Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
+ uintptr_t block_start = w->position();
+ w->Write<uint8_t>(DW_OP_fbreg);
+ w->WriteSLEB128(JavaScriptFrameConstants::kFunctionOffset);
+ block_size.set(static_cast<uint32_t>(w->position() - block_start));
+ }
+
+ {
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString("__context");
+ w->Write<uint32_t>(ty_offset);
+ Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
+ uintptr_t block_start = w->position();
+ w->Write<uint8_t>(DW_OP_fbreg);
+ w->WriteSLEB128(StandardFrameConstants::kContextOffset);
+ block_size.set(static_cast<uint32_t>(w->position() - block_start));
+ }
+ }
+
size.set(static_cast<uint32_t>(w->position() - start));
return true;
}
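
The DWARF sections above lean on WriteULEB128/WriteSLEB128 for abbreviation
codes and fbreg offsets. Hypothetical reference encoders for the two
variable-length formats, not the Writer's actual implementation:

// sketch_leb128.cc: 7 bits per byte, high bit set while more bytes follow.
#include <cassert>
#include <cstdint>
#include <vector>

void WriteULEB128(std::vector<uint8_t>* out, uint64_t value) {
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0) byte |= 0x80;  // continuation bit
    out->push_back(byte);
  } while (value != 0);
}

void WriteSLEB128(std::vector<uint8_t>* out, int64_t value) {
  bool more = true;
  while (more) {
    uint8_t byte = value & 0x7f;
    value >>= 7;  // arithmetic shift preserves the sign
    bool sign_bit = (byte & 0x40) != 0;
    if ((value == 0 && !sign_bit) || (value == -1 && sign_bit)) {
      more = false;
    } else {
      byte |= 0x80;
    }
    out->push_back(byte);
  }
}

int main() {
  std::vector<uint8_t> buf;
  WriteULEB128(&buf, 624485);  // the classic DWARF spec example
  assert(buf == (std::vector<uint8_t>{0xe5, 0x8e, 0x26}));
  buf.clear();
  WriteSLEB128(&buf, -2);      // e.g. a negative fbreg offset
  assert(buf == (std::vector<uint8_t>{0x7e}));
  return 0;
}
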
@@ -783,13 +1212,28 @@
};
-class DebugAbbrevSection : public ELFSection {
+class DebugAbbrevSection : public DebugSection {
public:
- DebugAbbrevSection() : ELFSection(".debug_abbrev", TYPE_PROGBITS, 1) { }
+ explicit DebugAbbrevSection(CodeDescription* desc)
+#ifdef __ELF
+ : ELFSection(".debug_abbrev", TYPE_PROGBITS, 1),
+#else
+ : MachOSection("__debug_abbrev",
+ "__DWARF",
+ 1,
+ MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
+#endif
+ desc_(desc) { }
// DWARF2 standard, figure 14.
enum DWARF2Tags {
- DW_TAG_COMPILE_UNIT = 0x11
+ DW_TAG_FORMAL_PARAMETER = 0x05,
+ DW_TAG_POINTER_TYPE = 0xf,
+ DW_TAG_COMPILE_UNIT = 0x11,
+ DW_TAG_STRUCTURE_TYPE = 0x13,
+ DW_TAG_BASE_TYPE = 0x24,
+ DW_TAG_SUBPROGRAM = 0x2e,
+ DW_TAG_VARIABLE = 0x34
};
// DWARF2 standard, figure 16.
@@ -800,25 +1244,57 @@
// DWARF standard, figure 17.
enum DWARF2Attribute {
+ DW_AT_LOCATION = 0x2,
DW_AT_NAME = 0x3,
+ DW_AT_BYTE_SIZE = 0xb,
DW_AT_STMT_LIST = 0x10,
DW_AT_LOW_PC = 0x11,
- DW_AT_HIGH_PC = 0x12
+ DW_AT_HIGH_PC = 0x12,
+ DW_AT_ENCODING = 0x3e,
+ DW_AT_FRAME_BASE = 0x40,
+ DW_AT_TYPE = 0x49
};
// DWARF2 standard, figure 19.
enum DWARF2AttributeForm {
DW_FORM_ADDR = 0x1,
+ DW_FORM_BLOCK4 = 0x4,
DW_FORM_STRING = 0x8,
- DW_FORM_DATA4 = 0x6
+ DW_FORM_DATA4 = 0x6,
+ DW_FORM_BLOCK = 0x9,
+ DW_FORM_DATA1 = 0xb,
+ DW_FORM_FLAG = 0xc,
+ DW_FORM_REF4 = 0x13
};
- bool WriteBody(Writer* w) {
- w->WriteULEB128(1);
- w->WriteULEB128(DW_TAG_COMPILE_UNIT);
+ void WriteVariableAbbreviation(Writer* w,
+ int abbreviation_code,
+ bool has_value,
+ bool is_parameter) {
+ w->WriteULEB128(abbreviation_code);
+ w->WriteULEB128(is_parameter ? DW_TAG_FORMAL_PARAMETER : DW_TAG_VARIABLE);
w->Write<uint8_t>(DW_CHILDREN_NO);
w->WriteULEB128(DW_AT_NAME);
w->WriteULEB128(DW_FORM_STRING);
+ if (has_value) {
+ w->WriteULEB128(DW_AT_TYPE);
+ w->WriteULEB128(DW_FORM_REF4);
+ w->WriteULEB128(DW_AT_LOCATION);
+ w->WriteULEB128(DW_FORM_BLOCK4);
+ }
+ w->WriteULEB128(0);
+ w->WriteULEB128(0);
+ }
+
+ bool WriteBody(Writer* w) {
+ int current_abbreviation = 1;
+ bool extra_info = desc_->IsInfoAvailable();
+ ASSERT(desc_->IsLineInfoAvailable());
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteULEB128(DW_TAG_COMPILE_UNIT);
+ w->Write<uint8_t>(extra_info ? DW_CHILDREN_YES : DW_CHILDREN_NO);
+ w->WriteULEB128(DW_AT_NAME);
+ w->WriteULEB128(DW_FORM_STRING);
w->WriteULEB128(DW_AT_LOW_PC);
w->WriteULEB128(DW_FORM_ADDR);
w->WriteULEB128(DW_AT_HIGH_PC);
@@ -827,16 +1303,101 @@
w->WriteULEB128(DW_FORM_DATA4);
w->WriteULEB128(0);
w->WriteULEB128(0);
- w->WriteULEB128(0);
+
+ if (extra_info) {
+ CompilationInfo* info = desc_->info();
+ ScopeInfo<FreeStoreAllocationPolicy> scope_info(info->scope());
+ int params = scope_info.number_of_parameters();
+ int slots = scope_info.number_of_stack_slots();
+ int context_slots = scope_info.number_of_context_slots();
+ // The real slot ID is internal_slots + context_slot_id.
+ int internal_slots = Context::MIN_CONTEXT_SLOTS;
+ int locals = scope_info.NumberOfLocals();
+ int total_children =
+ params + slots + context_slots + internal_slots + locals + 2;
+
+ // The extra duplication below seems to be necessary to keep
+ // gdb from getting upset on OSX.
+ w->WriteULEB128(current_abbreviation++); // Abbreviation code.
+ w->WriteULEB128(DW_TAG_SUBPROGRAM);
+ w->Write<uint8_t>(
+ total_children != 0 ? DW_CHILDREN_YES : DW_CHILDREN_NO);
+ w->WriteULEB128(DW_AT_NAME);
+ w->WriteULEB128(DW_FORM_STRING);
+ w->WriteULEB128(DW_AT_LOW_PC);
+ w->WriteULEB128(DW_FORM_ADDR);
+ w->WriteULEB128(DW_AT_HIGH_PC);
+ w->WriteULEB128(DW_FORM_ADDR);
+ w->WriteULEB128(DW_AT_FRAME_BASE);
+ w->WriteULEB128(DW_FORM_BLOCK4);
+ w->WriteULEB128(0);
+ w->WriteULEB128(0);
+
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteULEB128(DW_TAG_STRUCTURE_TYPE);
+ w->Write<uint8_t>(DW_CHILDREN_NO);
+ w->WriteULEB128(DW_AT_BYTE_SIZE);
+ w->WriteULEB128(DW_FORM_DATA1);
+ w->WriteULEB128(DW_AT_NAME);
+ w->WriteULEB128(DW_FORM_STRING);
+ w->WriteULEB128(0);
+ w->WriteULEB128(0);
+
+ for (int param = 0; param < params; ++param) {
+ WriteVariableAbbreviation(w, current_abbreviation++, true, true);
+ }
+
+ for (int slot = 0; slot < slots; ++slot) {
+ WriteVariableAbbreviation(w, current_abbreviation++, false, false);
+ }
+
+ for (int internal_slot = 0;
+ internal_slot < internal_slots;
+ ++internal_slot) {
+ WriteVariableAbbreviation(w, current_abbreviation++, false, false);
+ }
+
+ for (int context_slot = 0;
+ context_slot < context_slots;
+ ++context_slot) {
+ WriteVariableAbbreviation(w, current_abbreviation++, false, false);
+ }
+
+ for (int local = 0; local < locals; ++local) {
+ WriteVariableAbbreviation(w, current_abbreviation++, true, false);
+ }
+
+ // The function.
+ WriteVariableAbbreviation(w, current_abbreviation++, true, false);
+
+ // The context.
+ WriteVariableAbbreviation(w, current_abbreviation++, true, false);
+
+ if (total_children != 0) {
+ w->WriteULEB128(0); // Terminate the sibling list.
+ }
+ }
+
+ w->WriteULEB128(0); // Terminate the table.
return true;
}
+
+ private:
+ CodeDescription* desc_;
};
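Every DIE that DebugInfoSection writes begins with a ULEB128 abbreviation code, and DebugAbbrevSection must declare, under that same code, the tag, a children flag, and the ordered attribute/form pairs whose values the DIE will then supply, terminated by a (0, 0) pair; that is why both WriteBody methods above walk the scope in lockstep, counting with current_abbreviation++. A hand-written sketch of the pairing for one variable (byte values mirror the enums above; real abbreviation tables ULEB128-encode these integers, which coincides with a single byte for values below 128):

    #include <cstdint>
    #include <vector>

    // One .debug_abbrev declaration plus one matching .debug_info DIE.
    // DW_TAG_VARIABLE = 0x34, DW_CHILDREN_NO = 0, DW_AT_NAME = 0x3,
    // DW_FORM_STRING = 0x8.
    void WriteAbbrevAndDie(std::vector<uint8_t>* abbrev,
                           std::vector<uint8_t>* info) {
      const uint8_t decl[] = {1, 0x34, 0, 0x3, 0x8, 0, 0};
      abbrev->insert(abbrev->end(), decl, decl + sizeof(decl));
      // The DIE references abbreviation 1, then supplies the attribute
      // values in declaration order -- here one NUL-terminated name.
      const uint8_t die[] = {1, 'x', '\0'};
      info->insert(info->end(), die, die + sizeof(die));
    }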
-class DebugLineSection : public ELFSection {
+class DebugLineSection : public DebugSection {
public:
explicit DebugLineSection(CodeDescription* desc)
+#ifdef __ELF
: ELFSection(".debug_line", TYPE_PROGBITS, 1),
+#else
+ : MachOSection("__debug_line",
+ "__DWARF",
+ 1,
+ MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
+#endif
desc_(desc) { }
// DWARF2 standard, figure 34.
@@ -993,8 +1554,7 @@
#ifdef V8_TARGET_ARCH_X64
-
-class UnwindInfoSection : public ELFSection {
+class UnwindInfoSection : public DebugSection {
public:
explicit UnwindInfoSection(CodeDescription *desc);
virtual bool WriteBody(Writer *w);
@@ -1080,8 +1640,13 @@
UnwindInfoSection::UnwindInfoSection(CodeDescription *desc)
- : ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1), desc_(desc)
-{ }
+#ifdef __ELF
+ : ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1),
+#else
+ : MachOSection("__eh_frame", "__TEXT", sizeof(uintptr_t),
+ MachOSection::S_REGULAR),
+#endif
+ desc_(desc) { }
int UnwindInfoSection::WriteCIE(Writer *w) {
Writer::Slot<uint32_t> cie_length_slot = w->CreateSlotHere<uint32_t>();
@@ -1213,15 +1778,14 @@
#endif // V8_TARGET_ARCH_X64
-
-static void CreateDWARFSections(CodeDescription* desc, ELF* elf) {
+static void CreateDWARFSections(CodeDescription* desc, DebugObject* obj) {
if (desc->IsLineInfoAvailable()) {
- elf->AddSection(new DebugInfoSection(desc));
- elf->AddSection(new DebugAbbrevSection);
- elf->AddSection(new DebugLineSection(desc));
+ obj->AddSection(new DebugInfoSection(desc));
+ obj->AddSection(new DebugAbbrevSection(desc));
+ obj->AddSection(new DebugLineSection(desc));
}
#ifdef V8_TARGET_ARCH_X64
- elf->AddSection(new UnwindInfoSection(desc));
+ obj->AddSection(new UnwindInfoSection(desc));
#endif
}
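CreateDWARFSections is now written against DebugObject and DebugSection, which the __ELF/__MACH_O branches in the constructors above suggest are resolved to the ELF or Mach-O writers at preprocessor time. A self-contained sketch of that selection pattern, with stub classes standing in for V8's object writers:

    #include <cstdio>

    class ELF   { public: const char* kind() const { return "ELF"; } };
    class MachO { public: const char* kind() const { return "Mach-O"; } };

    // Pick the object format once, at compile time; everything else is
    // written against the DebugObject alias.
    #ifdef __ELF
    typedef ELF DebugObject;
    #else
    typedef MachO DebugObject;
    #endif

    int main() {
      DebugObject obj;
      std::printf("emitting %s debug object\n", obj.kind());
    }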
@@ -1261,6 +1825,13 @@
// Static initialization is necessary to prevent GDB from seeing
// uninitialized descriptor.
JITDescriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
+
+#ifdef OBJECT_PRINT
+ void __gdb_print_v8_object(MaybeObject* object) {
+ object->Print();
+ fprintf(stdout, "\n");
+ }
+#endif
}
@@ -1284,17 +1855,23 @@
}
-static void RegisterCodeEntry(JITCodeEntry* entry) {
+static void RegisterCodeEntry(JITCodeEntry* entry,
+ bool dump_if_enabled,
+ const char* name_hint) {
#if defined(DEBUG) && !defined(WIN32)
static int file_num = 0;
- if (FLAG_gdbjit_dump) {
+ if (FLAG_gdbjit_dump && dump_if_enabled) {
static const int kMaxFileNameSize = 64;
static const char* kElfFilePrefix = "/tmp/elfdump";
static const char* kObjFileExt = ".o";
char file_name[64];
- OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize), "%s%d%s",
- kElfFilePrefix, file_num++, kObjFileExt);
+ OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize),
+ "%s%s%d%s",
+ kElfFilePrefix,
+ (name_hint != NULL) ? name_hint : "",
+ file_num++,
+ kObjFileExt);
WriteBytes(file_name, entry->symfile_addr_, entry->symfile_size_);
}
#endif
@@ -1327,8 +1904,19 @@
static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
- ZoneScope zone_scope(DELETE_ON_EXIT);
+ ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
+#ifdef __MACH_O
+ MachO mach_o;
+ Writer w(&mach_o);
+ mach_o.AddSection(new MachOTextSection(kCodeAlignment,
+ desc->CodeStart(),
+ desc->CodeSize()));
+
+ CreateDWARFSections(desc, &mach_o);
+
+ mach_o.Write(&w, desc->CodeStart(), desc->CodeSize());
+#else
ELF elf;
Writer w(&elf);
@@ -1346,6 +1934,7 @@
CreateDWARFSections(desc, &elf);
elf.Write(&w);
+#endif
return CreateCodeEntry(w.buffer(), w.position());
}
@@ -1394,7 +1983,8 @@
void GDBJITInterface::AddCode(Handle<String> name,
Handle<Script> script,
- Handle<Code> code) {
+ Handle<Code> code,
+ CompilationInfo* info) {
if (!FLAG_gdbjit) return;
// Force initialization of line_ends array.
@@ -1402,9 +1992,9 @@
if (!name.is_null()) {
SmartPointer<char> name_cstring = name->ToCString(DISALLOW_NULLS);
- AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script);
+ AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script, info);
} else {
- AddCode("", *code, GDBJITInterface::FUNCTION, *script);
+ AddCode("", *code, GDBJITInterface::FUNCTION, *script, info);
}
}
@@ -1451,7 +2041,8 @@
void GDBJITInterface::AddCode(const char* name,
Code* code,
GDBJITInterface::CodeTag tag,
- Script* script) {
+ Script* script,
+ CompilationInfo* info) {
if (!FLAG_gdbjit) return;
ScopedLock lock(mutex_);
@@ -1466,7 +2057,8 @@
script != NULL ? Handle<Script>(script)
: Handle<Script>(),
lineinfo,
- tag);
+ tag,
+ info);
if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) {
delete lineinfo;
@@ -1481,7 +2073,18 @@
delete lineinfo;
e->value = entry;
- RegisterCodeEntry(entry);
+ const char* name_hint = NULL;
+ bool should_dump = false;
+ if (FLAG_gdbjit_dump) {
+ if (strlen(FLAG_gdbjit_dump_filter) == 0) {
+ name_hint = name;
+ should_dump = true;
+ } else if (name != NULL) {
+ name_hint = strstr(name, FLAG_gdbjit_dump_filter);
+ should_dump = (name_hint != NULL);
+ }
+ }
+ RegisterCodeEntry(entry, should_dump, name_hint);
}
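The dump decision above reduces to: an empty FLAG_gdbjit_dump_filter dumps every code object, hinted with its full name, while a non-empty filter dumps only objects whose name contains the filter substring, with the hint starting at the match (so the /tmp/elfdump file name carries it). A standalone sketch of that predicate (hypothetical helper, not V8 API):

    #include <cstdio>
    #include <cstring>

    bool ShouldDump(const char* name, const char* filter,
                    const char** name_hint) {
      *name_hint = NULL;
      if (filter == NULL || std::strlen(filter) == 0) {
        *name_hint = name;  // Dump everything, full name as hint.
        return true;
      }
      if (name == NULL) return false;
      *name_hint = std::strstr(name, filter);  // Substring match.
      return *name_hint != NULL;
    }

    int main() {
      const char* hint;
      bool dump = ShouldDump("LazyCompile:foo", "foo", &hint);
      std::printf("%d %s\n", dump, hint);  // Prints: 1 foo
    }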
@@ -1501,7 +2104,7 @@
builder.AddFormatted(": code object %p", static_cast<void*>(code));
}
- AddCode(builder.Finalize(), code, tag);
+ AddCode(builder.Finalize(), code, tag, NULL, NULL);
}
diff --git a/src/gdb-jit.h b/src/gdb-jit.h
index 0c80fb6..2cf15bc 100644
--- a/src/gdb-jit.h
+++ b/src/gdb-jit.h
@@ -43,6 +43,8 @@
namespace v8 {
namespace internal {
+class CompilationInfo;
+
#define CODE_TAGS_LIST(V) \
V(LOAD_IC) \
V(KEYED_LOAD_IC) \
@@ -113,11 +115,13 @@
static void AddCode(const char* name,
Code* code,
CodeTag tag,
- Script* script = NULL);
+ Script* script,
+ CompilationInfo* info);
static void AddCode(Handle<String> name,
Handle<Script> script,
- Handle<Code> code);
+ Handle<Code> code,
+ CompilationInfo* info);
static void AddCode(CodeTag tag, String* name, Code* code);
diff --git a/src/global-handles.cc b/src/global-handles.cc
index e4bbc95..87066fa 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -41,80 +41,151 @@
}
-class GlobalHandles::Node : public Malloced {
+class GlobalHandles::Node {
public:
+ // State transition diagram:
+ // FREE -> NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, FREE }
+ enum State {
+ FREE,
+ NORMAL, // Normal global handle.
+ WEAK, // Flagged as weak but not yet finalized.
+ PENDING, // Has been recognized as only reachable by weak handles.
+ NEAR_DEATH // Callback has informed the handle is near death.
+ };
- void Initialize(Object* object) {
- // Set the initial value of the handle.
+ // Maps handle location (slot) to the containing node.
+ static Node* FromLocation(Object** location) {
+ ASSERT(OFFSET_OF(Node, object_) == 0);
+ return reinterpret_cast<Node*>(location);
+ }
+
+ Node() {}
+
+#ifdef DEBUG
+ ~Node() {
+ // TODO(1428): if it's a weak handle we should have invoked its callback.
+ // Zap the values for eager trapping.
+ object_ = NULL;
+ class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
+ index_ = 0;
+ independent_ = false;
+ in_new_space_list_ = false;
+ parameter_or_next_free_.next_free = NULL;
+ callback_ = NULL;
+ }
+#endif
+
+ void Initialize(int index, Node** first_free) {
+ index_ = static_cast<uint8_t>(index);
+ ASSERT(static_cast<int>(index_) == index);
+ state_ = FREE;
+ in_new_space_list_ = false;
+ parameter_or_next_free_.next_free = *first_free;
+ *first_free = this;
+ }
+
+ void Acquire(Object* object, GlobalHandles* global_handles) {
+ ASSERT(state_ == FREE);
object_ = object;
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
independent_ = false;
state_ = NORMAL;
parameter_or_next_free_.parameter = NULL;
callback_ = NULL;
+ IncreaseBlockUses(global_handles);
}
- Node() {
- state_ = DESTROYED;
- }
-
- explicit Node(Object* object) {
- Initialize(object);
- // Initialize link structure.
- next_ = NULL;
- }
-
- ~Node() {
- if (state_ != DESTROYED) Destroy(Isolate::Current()->global_handles());
-#ifdef DEBUG
- // Zap the values for eager trapping.
- object_ = NULL;
- next_ = NULL;
- parameter_or_next_free_.next_free = NULL;
-#endif
- }
-
- void Destroy(GlobalHandles* global_handles) {
- if (state_ == WEAK || IsNearDeath()) {
+ void Release(GlobalHandles* global_handles) {
+ ASSERT(state_ != FREE);
+ if (IsWeakRetainer()) {
global_handles->number_of_weak_handles_--;
if (object_->IsJSGlobalObject()) {
global_handles->number_of_global_object_weak_handles_--;
}
}
- state_ = DESTROYED;
+ state_ = FREE;
+ parameter_or_next_free_.next_free = global_handles->first_free_;
+ global_handles->first_free_ = this;
+ DecreaseBlockUses(global_handles);
}
- // Accessors for next_.
- Node* next() { return next_; }
- void set_next(Node* value) { next_ = value; }
- Node** next_addr() { return &next_; }
+ // Object slot accessors.
+ Object* object() const { return object_; }
+ Object** location() { return &object_; }
+ Handle<Object> handle() { return Handle<Object>(location()); }
+
+ // Wrapper class ID accessors.
+ bool has_wrapper_class_id() const {
+ return class_id_ != v8::HeapProfiler::kPersistentHandleNoClassId;
+ }
+ uint16_t wrapper_class_id() const { return class_id_; }
+ void set_wrapper_class_id(uint16_t class_id) {
+ class_id_ = class_id;
+ }
+
+ // State accessors.
+
+ State state() const { return state_; }
+
+ bool IsNearDeath() const {
+ // Check for PENDING to ensure correct answer when processing callbacks.
+ return state_ == PENDING || state_ == NEAR_DEATH;
+ }
+
+ bool IsWeak() const { return state_ == WEAK; }
+
+ bool IsRetainer() const { return state_ != FREE; }
+
+ bool IsStrongRetainer() const { return state_ == NORMAL; }
+
+ bool IsWeakRetainer() const {
+ return state_ == WEAK || state_ == PENDING || state_ == NEAR_DEATH;
+ }
+
+ void MarkPending() {
+ ASSERT(state_ == WEAK);
+ state_ = PENDING;
+ }
+
+ // Independent flag accessors.
+ void MarkIndependent() {
+ ASSERT(state_ != FREE);
+ independent_ = true;
+ }
+ bool is_independent() const { return independent_; }
+
+ // In-new-space-list flag accessors.
+ void set_in_new_space_list(bool v) { in_new_space_list_ = v; }
+ bool is_in_new_space_list() const { return in_new_space_list_; }
+
+ // Callback accessor.
+ WeakReferenceCallback callback() { return callback_; }
+
+ // Callback parameter accessors.
+ void set_parameter(void* parameter) {
+ ASSERT(state_ != FREE);
+ parameter_or_next_free_.parameter = parameter;
+ }
+ void* parameter() const {
+ ASSERT(state_ != FREE);
+ return parameter_or_next_free_.parameter;
+ }
// Accessors for next free node in the free list.
Node* next_free() {
- ASSERT(state_ == DESTROYED);
+ ASSERT(state_ == FREE);
return parameter_or_next_free_.next_free;
}
void set_next_free(Node* value) {
- ASSERT(state_ == DESTROYED);
+ ASSERT(state_ == FREE);
parameter_or_next_free_.next_free = value;
}
- // Returns a link from the handle.
- static Node* FromLocation(Object** location) {
- ASSERT(OFFSET_OF(Node, object_) == 0);
- return reinterpret_cast<Node*>(location);
- }
-
- // Returns the handle.
- Handle<Object> handle() { return Handle<Object>(&object_); }
-
- // Make this handle weak.
- void MakeWeak(GlobalHandles* global_handles, void* parameter,
+ void MakeWeak(GlobalHandles* global_handles,
+ void* parameter,
WeakReferenceCallback callback) {
- LOG(global_handles->isolate(),
- HandleEvent("GlobalHandle::MakeWeak", handle().location()));
- ASSERT(state_ != DESTROYED);
- if (state_ != WEAK && !IsNearDeath()) {
+ ASSERT(state_ != FREE);
+ if (!IsWeakRetainer()) {
global_handles->number_of_weak_handles_++;
if (object_->IsJSGlobalObject()) {
global_handles->number_of_global_object_weak_handles_++;
@@ -126,10 +197,8 @@
}
void ClearWeakness(GlobalHandles* global_handles) {
- LOG(global_handles->isolate(),
- HandleEvent("GlobalHandle::ClearWeakness", handle().location()));
- ASSERT(state_ != DESTROYED);
- if (state_ == WEAK || IsNearDeath()) {
+ ASSERT(state_ != FREE);
+ if (IsWeakRetainer()) {
global_handles->number_of_weak_handles_--;
if (object_->IsJSGlobalObject()) {
global_handles->number_of_global_object_weak_handles_--;
@@ -139,50 +208,12 @@
set_parameter(NULL);
}
- void MarkIndependent(GlobalHandles* global_handles) {
- LOG(global_handles->isolate(),
- HandleEvent("GlobalHandle::MarkIndependent", handle().location()));
- ASSERT(state_ != DESTROYED);
- independent_ = true;
- }
-
- bool IsNearDeath() {
- // Check for PENDING to ensure correct answer when processing callbacks.
- return state_ == PENDING || state_ == NEAR_DEATH;
- }
-
- bool IsWeak() {
- return state_ == WEAK;
- }
-
- bool CanBeRetainer() {
- return state_ != DESTROYED && state_ != NEAR_DEATH;
- }
-
- void SetWrapperClassId(uint16_t class_id) {
- class_id_ = class_id;
- }
-
- // Returns the id for this weak handle.
- void set_parameter(void* parameter) {
- ASSERT(state_ != DESTROYED);
- parameter_or_next_free_.parameter = parameter;
- }
- void* parameter() {
- ASSERT(state_ != DESTROYED);
- return parameter_or_next_free_.parameter;
- }
-
- // Returns the callback for this weak handle.
- WeakReferenceCallback callback() { return callback_; }
-
bool PostGarbageCollectionProcessing(Isolate* isolate,
GlobalHandles* global_handles) {
if (state_ != Node::PENDING) return false;
- LOG(isolate, HandleEvent("GlobalHandle::Processing", handle().location()));
WeakReferenceCallback func = callback();
if (func == NULL) {
- Destroy(global_handles);
+ Release(global_handles);
return false;
}
void* par = parameter();
@@ -191,13 +222,6 @@
v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle());
{
- // Forbid reuse of destroyed nodes as they might be already deallocated.
- // It's fine though to reuse nodes that were destroyed in weak callback
- // as those cannot be deallocated until we are back from the callback.
- global_handles->set_first_free(NULL);
- if (global_handles->first_deallocated()) {
- global_handles->first_deallocated()->set_next(global_handles->head());
- }
// Check that we are not passing a finalized external string to
// the callback.
ASSERT(!object_->IsExternalAsciiString() ||
@@ -214,97 +238,145 @@
return true;
}
- // Place the handle address first to avoid offset computation.
- Object* object_; // Storage for object pointer.
+ private:
+ inline NodeBlock* FindBlock();
+ inline void IncreaseBlockUses(GlobalHandles* global_handles);
+ inline void DecreaseBlockUses(GlobalHandles* global_handles);
+ // Storage for object pointer.
+ // Placed first to avoid offset computation.
+ Object* object_;
+
+ // Next word stores class_id, index, state, and independent.
+ // Note: the most aligned fields should go first.
+
+ // Wrapper class ID.
uint16_t class_id_;
- // Transition diagram:
- // NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, DESTROYED }
- enum State {
- NORMAL, // Normal global handle.
- WEAK, // Flagged as weak but not yet finalized.
- PENDING, // Has been recognized as only reachable by weak handles.
- NEAR_DEATH, // Callback has informed the handle is near death.
- DESTROYED
- };
- State state_ : 4; // Need one more bit for MSVC as it treats enums as signed.
+ // Index in the containing handle block.
+ uint8_t index_;
+
+ // Need one more bit for MSVC as it treats enums as signed.
+ State state_ : 4;
bool independent_ : 1;
+ bool in_new_space_list_ : 1;
- private:
// Handle specific callback.
WeakReferenceCallback callback_;
- // Provided data for callback. In DESTROYED state, this is used for
+
+ // Provided data for callback. In FREE state, this is used for
// the free list link.
union {
void* parameter;
Node* next_free;
} parameter_or_next_free_;
- // Linkage for the list.
- Node* next_;
-
- public:
- TRACK_MEMORY("GlobalHandles::Node")
+ DISALLOW_COPY_AND_ASSIGN(Node);
};
-class GlobalHandles::Pool {
- public:
- Pool() {
- current_ = new Chunk();
- current_->previous = NULL;
- next_ = current_->nodes;
- limit_ = current_->nodes + kNodesPerChunk;
- }
+class GlobalHandles::NodeBlock {
+ public:
+ static const int kSize = 256;
- ~Pool() {
- if (current_ != NULL) {
- Release();
+ explicit NodeBlock(NodeBlock* next)
+ : next_(next), used_nodes_(0), next_used_(NULL), prev_used_(NULL) {}
+
+ void PutNodesOnFreeList(Node** first_free) {
+ for (int i = kSize - 1; i >= 0; --i) {
+ nodes_[i].Initialize(i, first_free);
+ }
+ }
+
+ Node* node_at(int index) {
+ ASSERT(0 <= index && index < kSize);
+ return &nodes_[index];
+ }
+
+ void IncreaseUses(GlobalHandles* global_handles) {
+ ASSERT(used_nodes_ < kSize);
+ if (used_nodes_++ == 0) {
+ NodeBlock* old_first = global_handles->first_used_block_;
+ global_handles->first_used_block_ = this;
+ next_used_ = old_first;
+ prev_used_ = NULL;
+ if (old_first == NULL) return;
+ old_first->prev_used_ = this;
+ }
+ }
+
+ void DecreaseUses(GlobalHandles* global_handles) {
+ ASSERT(used_nodes_ > 0);
+ if (--used_nodes_ == 0) {
+ if (next_used_ != NULL) next_used_->prev_used_ = prev_used_;
+ if (prev_used_ != NULL) prev_used_->next_used_ = next_used_;
+ if (this == global_handles->first_used_block_) {
+ global_handles->first_used_block_ = next_used_;
}
}
+ }
- Node* Allocate() {
- if (next_ < limit_) {
- return next_++;
- }
- return SlowAllocate();
- }
+ // Next block in the list of all blocks.
+ NodeBlock* next() const { return next_; }
- void Release() {
- Chunk* current = current_;
- ASSERT(current != NULL); // At least a single block must be allocated
- do {
- Chunk* previous = current->previous;
- delete current;
- current = previous;
- } while (current != NULL);
- current_ = NULL;
- next_ = limit_ = NULL;
- }
+ // Next/previous block in the list of blocks with used nodes.
+ NodeBlock* next_used() const { return next_used_; }
+ NodeBlock* prev_used() const { return prev_used_; }
- private:
- static const int kNodesPerChunk = (1 << 12) - 1;
- struct Chunk : public Malloced {
- Chunk* previous;
- Node nodes[kNodesPerChunk];
- };
+ private:
+ Node nodes_[kSize];
+ NodeBlock* const next_;
+ int used_nodes_;
+ NodeBlock* next_used_;
+ NodeBlock* prev_used_;
+};
- Node* SlowAllocate() {
- Chunk* chunk = new Chunk();
- chunk->previous = current_;
- current_ = chunk;
- Node* new_nodes = current_->nodes;
- next_ = new_nodes + 1;
- limit_ = new_nodes + kNodesPerChunk;
- return new_nodes;
- }
+GlobalHandles::NodeBlock* GlobalHandles::Node::FindBlock() {
+ intptr_t ptr = reinterpret_cast<intptr_t>(this);
+ ptr = ptr - index_ * sizeof(Node);
+ NodeBlock* block = reinterpret_cast<NodeBlock*>(ptr);
+ ASSERT(block->node_at(index_) == this);
+ return block;
+}
- Chunk* current_;
- Node* next_;
- Node* limit_;
+
+void GlobalHandles::Node::IncreaseBlockUses(GlobalHandles* global_handles) {
+ FindBlock()->IncreaseUses(global_handles);
+}
+
+
+void GlobalHandles::Node::DecreaseBlockUses(GlobalHandles* global_handles) {
+ FindBlock()->DecreaseUses(global_handles);
+}
+
+
+class GlobalHandles::NodeIterator {
+ public:
+ explicit NodeIterator(GlobalHandles* global_handles)
+ : block_(global_handles->first_used_block_),
+ index_(0) {}
+
+ bool done() const { return block_ == NULL; }
+
+ Node* node() const {
+ ASSERT(!done());
+ return block_->node_at(index_);
+ }
+
+ void Advance() {
+ ASSERT(!done());
+ if (++index_ < NodeBlock::kSize) return;
+ index_ = 0;
+ block_ = block_->next_used();
+ }
+
+ private:
+ NodeBlock* block_;
+ int index_;
+
+ DISALLOW_COPY_AND_ASSIGN(NodeIterator);
};
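The NodeBlock layout above replaces the old per-node next_ pointer: a node can recover its containing block from its stored index alone, because the node array is the first member of the block. A self-contained sketch of the arithmetic in Node::FindBlock(), with stand-in fields:

    #include <cassert>
    #include <cstdint>

    struct Node {
      uint8_t index;     // 0..255, so kSize = 256 fits exactly.
      char payload[15];  // Stand-in for the real node fields.
    };

    struct NodeBlock {
      static const int kSize = 256;
      Node nodes[kSize];  // Must remain the first member.
    };

    // Subtracting index * sizeof(Node) from a node's own address lands
    // on nodes[0], which is also the block's address.
    NodeBlock* FindBlock(Node* node) {
      intptr_t ptr = reinterpret_cast<intptr_t>(node);
      ptr -= node->index * sizeof(Node);
      return reinterpret_cast<NodeBlock*>(ptr);
    }

    int main() {
      NodeBlock block;
      for (int i = 0; i < NodeBlock::kSize; ++i) block.nodes[i].index = i;
      assert(FindBlock(&block.nodes[200]) == &block);
    }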
@@ -312,41 +384,39 @@
: isolate_(isolate),
number_of_weak_handles_(0),
number_of_global_object_weak_handles_(0),
- head_(NULL),
+ first_block_(NULL),
+ first_used_block_(NULL),
first_free_(NULL),
- first_deallocated_(NULL),
- pool_(new Pool()),
- post_gc_processing_count_(0),
- object_groups_(4) {
-}
+ post_gc_processing_count_(0) {}
GlobalHandles::~GlobalHandles() {
- delete pool_;
- pool_ = 0;
+ NodeBlock* block = first_block_;
+ while (block != NULL) {
+ NodeBlock* tmp = block->next();
+ delete block;
+ block = tmp;
+ }
+ first_block_ = NULL;
}
Handle<Object> GlobalHandles::Create(Object* value) {
isolate_->counters()->global_handles()->Increment();
- Node* result;
- if (first_free()) {
- // Take the first node in the free list.
- result = first_free();
- set_first_free(result->next_free());
- } else if (first_deallocated()) {
- // Next try deallocated list
- result = first_deallocated();
- set_first_deallocated(result->next_free());
- ASSERT(result->next() == head());
- set_head(result);
- } else {
- // Allocate a new node.
- result = pool_->Allocate();
- result->set_next(head());
- set_head(result);
+ if (first_free_ == NULL) {
+ first_block_ = new NodeBlock(first_block_);
+ first_block_->PutNodesOnFreeList(&first_free_);
}
- result->Initialize(value);
+ ASSERT(first_free_ != NULL);
+ // Take the first node in the free list.
+ Node* result = first_free_;
+ first_free_ = result->next_free();
+ result->Acquire(value, this);
+ if (isolate_->heap()->InNewSpace(value) &&
+ !result->is_in_new_space_list()) {
+ new_space_nodes_.Add(result);
+ result->set_in_new_space_list(true);
+ }
return result->handle();
}
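Create() above allocates a whole NodeBlock only when the free list runs dry, and PutNodesOnFreeList donates every node of the new block up front (iterating from kSize - 1 down to 0 so that low indices are handed out first). A minimal standalone pool mirroring that Create/Release pattern (stand-in types, not V8 code):

    #include <cassert>
    #include <cstddef>

    struct Node { Node* next_free; };

    struct Block {
      static const int kSize = 256;
      Node nodes[kSize];
      Block* next;
    };

    class Pool {
     public:
      Pool() : first_block_(NULL), first_free_(NULL) {}
      ~Pool() {
        while (first_block_ != NULL) {
          Block* next = first_block_->next;
          delete first_block_;
          first_block_ = next;
        }
      }

      Node* Allocate() {
        if (first_free_ == NULL) {  // Lazily grow by one block.
          Block* block = new Block;
          block->next = first_block_;
          first_block_ = block;
          for (int i = Block::kSize - 1; i >= 0; --i) {
            block->nodes[i].next_free = first_free_;
            first_free_ = &block->nodes[i];
          }
        }
        Node* result = first_free_;
        first_free_ = result->next_free;
        return result;
      }

      void Release(Node* node) {  // Push back onto the free list.
        node->next_free = first_free_;
        first_free_ = node;
      }

     private:
      Block* first_block_;
      Node* first_free_;
    };

    int main() {
      Pool pool;
      Node* a = pool.Allocate();
      pool.Release(a);
      assert(pool.Allocate() == a);  // LIFO reuse, as in the patch.
    }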
@@ -354,11 +424,7 @@
void GlobalHandles::Destroy(Object** location) {
isolate_->counters()->global_handles()->Decrement();
if (location == NULL) return;
- Node* node = Node::FromLocation(location);
- node->Destroy(this);
- // Link the destroyed.
- node->set_next_free(first_free());
- set_first_free(node);
+ Node::FromLocation(location)->Release(this);
}
@@ -375,7 +441,7 @@
void GlobalHandles::MarkIndependent(Object** location) {
- Node::FromLocation(location)->MarkIndependent(this);
+ Node::FromLocation(location)->MarkIndependent();
}
@@ -390,68 +456,66 @@
void GlobalHandles::SetWrapperClassId(Object** location, uint16_t class_id) {
- Node::FromLocation(location)->SetWrapperClassId(class_id);
+ Node::FromLocation(location)->set_wrapper_class_id(class_id);
}
void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
- // Traversal of GC roots in the global handle list that are marked as
- // WEAK, PENDING or NEAR_DEATH.
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->state_ == Node::WEAK
- || current->state_ == Node::PENDING
- || current->state_ == Node::NEAR_DEATH) {
- v->VisitPointer(&current->object_);
- }
- }
-}
-
-
-void GlobalHandles::IterateWeakIndependentRoots(ObjectVisitor* v) {
- // Traversal of GC roots in the global handle list that are independent
- // and marked as WEAK, PENDING or NEAR_DEATH.
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (!current->independent_) continue;
- if (current->state_ == Node::WEAK
- || current->state_ == Node::PENDING
- || current->state_ == Node::NEAR_DEATH) {
- v->VisitPointer(&current->object_);
- }
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsWeakRetainer()) v->VisitPointer(it.node()->location());
}
}
void GlobalHandles::IterateWeakRoots(WeakReferenceGuest f,
WeakReferenceCallback callback) {
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->IsWeak() && current->callback() == callback) {
- f(current->object_, current->parameter());
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsWeak() && it.node()->callback() == callback) {
+ f(it.node()->object(), it.node()->parameter());
}
}
}
void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->state_ == Node::WEAK) {
- if (f(&current->object_)) {
- current->state_ = Node::PENDING;
- LOG(isolate_,
- HandleEvent("GlobalHandle::Pending", current->handle().location()));
- }
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsWeak() && f(it.node()->location())) {
+ it.node()->MarkPending();
}
}
}
-void GlobalHandles::IdentifyWeakIndependentHandles(WeakSlotCallbackWithHeap f) {
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->state_ == Node::WEAK && current->independent_) {
- if (f(isolate_->heap(), &current->object_)) {
- current->state_ = Node::PENDING;
- LOG(isolate_,
- HandleEvent("GlobalHandle::Pending", current->handle().location()));
- }
+void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ if (node->IsStrongRetainer() ||
+ (node->IsWeakRetainer() && !node->is_independent())) {
+ v->VisitPointer(node->location());
+ }
+ }
+}
+
+
+void GlobalHandles::IdentifyNewSpaceWeakIndependentHandles(
+ WeakSlotCallbackWithHeap f) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ ASSERT(node->is_in_new_space_list());
+ if (node->is_independent() && node->IsWeak() &&
+ f(isolate_->heap(), node->location())) {
+ node->MarkPending();
+ }
+ }
+}
+
+
+void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ ASSERT(node->is_in_new_space_list());
+ if (node->is_independent() && node->IsWeakRetainer()) {
+ v->VisitPointer(node->location());
}
}
}
@@ -462,116 +526,103 @@
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
- // At the same time deallocate all DESTROYED nodes.
ASSERT(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
const int initial_post_gc_processing_count = ++post_gc_processing_count_;
bool next_gc_likely_to_collect_more = false;
- Node** p = &head_;
- while (*p != NULL) {
- // Skip dependent handles. Their weak callbacks might expect to be
- // called between two global garbage collection callbacks which
- // are not called for minor collections.
- if (collector == SCAVENGER && !(*p)->independent_) {
- p = (*p)->next_addr();
- continue;
- }
-
- if ((*p)->PostGarbageCollectionProcessing(isolate_, this)) {
- if (initial_post_gc_processing_count != post_gc_processing_count_) {
- // Weak callback triggered another GC and another round of
- // PostGarbageCollection processing. The current node might
- // have been deleted in that round, so we need to bail out (or
- // restart the processing).
- break;
+ if (collector == SCAVENGER) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ ASSERT(node->is_in_new_space_list());
+ // Skip dependent handles. Their weak callbacks might expect to be
+ // called between two global garbage collection callbacks which
+ // are not called for minor collections.
+ if (!node->is_independent()) continue;
+ if (node->PostGarbageCollectionProcessing(isolate_, this)) {
+ if (initial_post_gc_processing_count != post_gc_processing_count_) {
+ // Weak callback triggered another GC and another round of
+ // PostGarbageCollection processing. The current node might
+ // have been deleted in that round, so we need to bail out (or
+ // restart the processing).
+ return next_gc_likely_to_collect_more;
+ }
+ }
+ if (!node->IsRetainer()) {
+ next_gc_likely_to_collect_more = true;
}
}
- if ((*p)->state_ == Node::DESTROYED) {
- // Delete the link.
- Node* node = *p;
- *p = node->next(); // Update the link.
- if (first_deallocated()) {
- first_deallocated()->set_next(node);
+ } else {
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->PostGarbageCollectionProcessing(isolate_, this)) {
+ if (initial_post_gc_processing_count != post_gc_processing_count_) {
+ // See the comment above.
+ return next_gc_likely_to_collect_more;
+ }
}
- node->set_next_free(first_deallocated());
- set_first_deallocated(node);
- next_gc_likely_to_collect_more = true;
+ if (!it.node()->IsRetainer()) {
+ next_gc_likely_to_collect_more = true;
+ }
+ }
+ }
+ // Update the list of new space nodes.
+ int last = 0;
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ ASSERT(node->is_in_new_space_list());
+ if (node->IsRetainer() && isolate_->heap()->InNewSpace(node->object())) {
+ new_space_nodes_[last++] = node;
} else {
- p = (*p)->next_addr();
+ node->set_in_new_space_list(false);
}
}
- set_first_free(NULL);
- if (first_deallocated()) {
- first_deallocated()->set_next(head());
- }
-
+ new_space_nodes_.Rewind(last);
return next_gc_likely_to_collect_more;
}
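The new-space list maintenance just above is a standard in-place compaction: surviving nodes are packed toward the front and the tail is cut off with List::Rewind(last). The same idiom on a std::vector, for reference:

    #include <cassert>
    #include <vector>

    // Keep elements satisfying the predicate, preserving order; drop the
    // rest by shrinking the list (Rewind in V8, resize here).
    void Compact(std::vector<int>* list, bool (*keep)(int)) {
      size_t last = 0;
      for (size_t i = 0; i < list->size(); ++i) {
        if (keep((*list)[i])) (*list)[last++] = (*list)[i];
      }
      list->resize(last);
    }

    bool IsEven(int x) { return (x & 1) == 0; }

    int main() {
      std::vector<int> v;
      for (int i = 0; i < 6; ++i) v.push_back(i);  // 0 1 2 3 4 5
      Compact(&v, IsEven);                         // 0 2 4
      assert(v.size() == 3 && v[2] == 4);
    }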
void GlobalHandles::IterateStrongRoots(ObjectVisitor* v) {
- // Traversal of global handles marked as NORMAL.
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->state_ == Node::NORMAL) {
- v->VisitPointer(&current->object_);
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsStrongRetainer()) {
+ v->VisitPointer(it.node()->location());
}
}
}
void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->state_ != Node::DESTROYED) {
- v->VisitPointer(&current->object_);
- }
- }
-}
-
-
-void GlobalHandles::IterateStrongAndDependentRoots(ObjectVisitor* v) {
- for (Node* current = head_; current != NULL; current = current->next()) {
- if ((current->independent_ && current->state_ == Node::NORMAL) ||
- (!current->independent_ && current->state_ != Node::DESTROYED)) {
- v->VisitPointer(&current->object_);
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsRetainer()) {
+ v->VisitPointer(it.node()->location());
}
}
}
void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) {
- for (Node* current = head_; current != NULL; current = current->next()) {
- if (current->class_id_ != v8::HeapProfiler::kPersistentHandleNoClassId &&
- current->CanBeRetainer()) {
- v->VisitEmbedderReference(&current->object_, current->class_id_);
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->has_wrapper_class_id() && it.node()->IsRetainer()) {
+ v->VisitEmbedderReference(it.node()->location(),
+ it.node()->wrapper_class_id());
}
}
}
-void GlobalHandles::TearDown() {
- // Reset all the lists.
- set_head(NULL);
- set_first_free(NULL);
- set_first_deallocated(NULL);
- pool_->Release();
-}
-
-
void GlobalHandles::RecordStats(HeapStats* stats) {
*stats->global_handle_count = 0;
*stats->weak_global_handle_count = 0;
*stats->pending_global_handle_count = 0;
*stats->near_death_global_handle_count = 0;
- *stats->destroyed_global_handle_count = 0;
- for (Node* current = head_; current != NULL; current = current->next()) {
+ *stats->free_global_handle_count = 0;
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
*stats->global_handle_count += 1;
- if (current->state_ == Node::WEAK) {
+ if (it.node()->state() == Node::WEAK) {
*stats->weak_global_handle_count += 1;
- } else if (current->state_ == Node::PENDING) {
+ } else if (it.node()->state() == Node::PENDING) {
*stats->pending_global_handle_count += 1;
- } else if (current->state_ == Node::NEAR_DEATH) {
+ } else if (it.node()->state() == Node::NEAR_DEATH) {
*stats->near_death_global_handle_count += 1;
- } else if (current->state_ == Node::DESTROYED) {
- *stats->destroyed_global_handle_count += 1;
+ } else if (it.node()->state() == Node::FREE) {
+ *stats->free_global_handle_count += 1;
}
}
}
@@ -585,12 +636,12 @@
int near_death = 0;
int destroyed = 0;
- for (Node* current = head_; current != NULL; current = current->next()) {
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
total++;
- if (current->state_ == Node::WEAK) weak++;
- if (current->state_ == Node::PENDING) pending++;
- if (current->state_ == Node::NEAR_DEATH) near_death++;
- if (current->state_ == Node::DESTROYED) destroyed++;
+ if (it.node()->state() == Node::WEAK) weak++;
+ if (it.node()->state() == Node::PENDING) pending++;
+ if (it.node()->state() == Node::NEAR_DEATH) near_death++;
+ if (it.node()->state() == Node::FREE) destroyed++;
}
PrintF("Global Handle Statistics:\n");
@@ -598,17 +649,17 @@
PrintF(" # weak = %d\n", weak);
PrintF(" # pending = %d\n", pending);
PrintF(" # near_death = %d\n", near_death);
- PrintF(" # destroyed = %d\n", destroyed);
+ PrintF(" # free = %d\n", destroyed);
PrintF(" # total = %d\n", total);
}
void GlobalHandles::Print() {
PrintF("Global handles:\n");
- for (Node* current = head_; current != NULL; current = current->next()) {
- PrintF(" handle %p to %p (weak=%d)\n",
- reinterpret_cast<void*>(current->handle().location()),
- reinterpret_cast<void*>(*current->handle()),
- current->state_ == Node::WEAK);
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ PrintF(" handle %p to %p%s\n",
+ reinterpret_cast<void*>(it.node()->location()),
+ reinterpret_cast<void*>(it.node()->object()),
+ it.node()->IsWeak() ? " (weak)" : "");
}
}
@@ -621,7 +672,7 @@
v8::RetainedObjectInfo* info) {
#ifdef DEBUG
for (size_t i = 0; i < length; ++i) {
- ASSERT(!Node::FromLocation(handles[i])->independent_);
+ ASSERT(!Node::FromLocation(handles[i])->is_independent());
}
#endif
if (length == 0) {
@@ -636,9 +687,9 @@
Object*** children,
size_t length) {
#ifdef DEBUG
- ASSERT(!Node::FromLocation(BitCast<Object**>(parent))->independent_);
+ ASSERT(!Node::FromLocation(BitCast<Object**>(parent))->is_independent());
for (size_t i = 0; i < length; ++i) {
- ASSERT(!Node::FromLocation(children[i])->independent_);
+ ASSERT(!Node::FromLocation(children[i])->is_independent());
}
#endif
if (length == 0) return;
@@ -662,4 +713,9 @@
}
+void GlobalHandles::TearDown() {
+ // TODO(1428): invoke weak callbacks.
+}
+
+
} } // namespace v8::internal
diff --git a/src/global-handles.h b/src/global-handles.h
index 3477bca..153d4da 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -162,9 +162,6 @@
// Iterates over all strong handles.
void IterateStrongRoots(ObjectVisitor* v);
- // Iterates over all strong and dependent handles.
- void IterateStrongAndDependentRoots(ObjectVisitor* v);
-
// Iterates over all handles.
void IterateAllRoots(ObjectVisitor* v);
@@ -174,9 +171,6 @@
// Iterates over all weak roots in heap.
void IterateWeakRoots(ObjectVisitor* v);
- // Iterates over all weak independent roots in heap.
- void IterateWeakIndependentRoots(ObjectVisitor* v);
-
// Iterates over weak roots that are bound to a given callback.
void IterateWeakRoots(WeakReferenceGuest f,
WeakReferenceCallback callback);
@@ -185,9 +179,20 @@
// them as pending.
void IdentifyWeakHandles(WeakSlotCallback f);
- // Find all weak independent handles satisfying the callback predicate, mark
- // them as pending.
- void IdentifyWeakIndependentHandles(WeakSlotCallbackWithHeap f);
+ // NOTE: Three ...NewSpace... functions below are used during
+ // scavenge collections and iterate over sets of handles that are
+ // guaranteed to contain all handles holding new space objects (but
+ // may also include old space objects).
+
+ // Iterates over strong and dependent handles. See the note above.
+ void IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v);
+
+ // Finds weak independent handles satisfying the callback predicate
+ // and marks them as pending. See the note above.
+ void IdentifyNewSpaceWeakIndependentHandles(WeakSlotCallbackWithHeap f);
+
+ // Iterates over weak independent handles. See the note above.
+ void IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v);
// Add an object group.
// Should be only used in GC callback function before a collection.
@@ -224,12 +229,14 @@
void PrintStats();
void Print();
#endif
- class Pool;
+
private:
explicit GlobalHandles(Isolate* isolate);
- // Internal node structure, one for each global handle.
+ // Internal node structures.
class Node;
+ class NodeBlock;
+ class NodeIterator;
Isolate* isolate_;
@@ -241,35 +248,21 @@
// number_of_weak_handles_.
int number_of_global_object_weak_handles_;
- // Global handles are kept in a single linked list pointed to by head_.
- Node* head_;
- Node* head() { return head_; }
- void set_head(Node* value) { head_ = value; }
+ // List of all allocated node blocks.
+ NodeBlock* first_block_;
- // Free list for DESTROYED global handles not yet deallocated.
+ // List of node blocks with used nodes.
+ NodeBlock* first_used_block_;
+
+ // Free list of nodes.
Node* first_free_;
- Node* first_free() { return first_free_; }
- void set_first_free(Node* value) { first_free_ = value; }
- // List of deallocated nodes.
- // Deallocated nodes form a prefix of all the nodes and
- // |first_deallocated| points to last deallocated node before
- // |head|. Those deallocated nodes are additionally linked
- // by |next_free|:
- // 1st deallocated head
- // | |
- // V V
- // node node ... node node
- // .next -> .next -> .next ->
- // <- .next_free <- .next_free <- .next_free
- Node* first_deallocated_;
- Node* first_deallocated() { return first_deallocated_; }
- void set_first_deallocated(Node* value) {
- first_deallocated_ = value;
- }
+ // Contains all nodes holding new space objects. Note: when the list
+ // is accessed, some of the objects may have been promoted already.
+ List<Node*> new_space_nodes_;
- Pool* pool_;
int post_gc_processing_count_;
+
List<ObjectGroup*> object_groups_;
List<ImplicitRefGroup*> implicit_ref_groups_;
diff --git a/src/globals.h b/src/globals.h
index 5ab9806..7e41f97 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -199,6 +199,8 @@
const int kIntptrSize = sizeof(intptr_t); // NOLINT
const int kPointerSize = sizeof(void*); // NOLINT
+const int kDoubleSizeLog2 = 3;
+
#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
@@ -224,6 +226,10 @@
const int kBinary32MantissaBits = 23;
const int kBinary32ExponentShift = 23;
+// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
+// other bits set.
+const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
+
// ASCII/UC16 constants
// Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16;
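As a sanity check on the new constant: 0xfff shifted left by 51 sets bits 51 through 62, i.e. the full 11-bit exponent plus the top (quiet) mantissa bit, giving 0x7ff8000000000000. A short self-contained verification:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    int main() {
      const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
      assert(kQuietNaNMask == UINT64_C(0x7ff8000000000000));

      double qnan = std::numeric_limits<double>::quiet_NaN();
      uint64_t bits;
      std::memcpy(&bits, &qnan, sizeof(bits));
      // A quiet NaN has every mask bit set (the sign bit may vary).
      assert((bits & kQuietNaNMask) == kQuietNaNMask);
      return 0;
    }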
diff --git a/src/handles.cc b/src/handles.cc
index b03b642..b03efbd 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -214,9 +214,10 @@
}
-void NormalizeElements(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->NormalizeElements());
+Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->NormalizeElements(),
+ NumberDictionary);
}
@@ -228,12 +229,14 @@
}
-void NumberDictionarySet(Handle<NumberDictionary> dictionary,
- uint32_t index,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION_VOID(dictionary->GetIsolate(),
- dictionary->Set(index, *value, details));
+Handle<NumberDictionary> NumberDictionarySet(
+ Handle<NumberDictionary> dictionary,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyDetails details) {
+ CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
+ dictionary->Set(index, *value, details),
+ NumberDictionary);
}
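NumberDictionarySet now returns the (possibly reallocated) dictionary rather than mutating in place, and the MUST_USE_RESULT annotation added in handles.h below forces callers to adopt the returned handle: growing the dictionary can move its backing store, so the old reference must not be trusted afterwards. std::vector shows the same hazard in miniature:

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> dict(1, 42);  // Stands in for the backing store.
      int* old_slot = &dict[0];
      for (int i = 0; i < 1024; ++i) dict.push_back(i);  // May reallocate.
      // 'old_slot' may now dangle; only the container's current view is
      // safe, just as only the returned Handle<NumberDictionary> is.
      assert(dict[0] == 42);
      (void)old_slot;
    }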
@@ -258,7 +261,7 @@
}
-Handle<Object> SetProperty(Handle<JSObject> object,
+Handle<Object> SetProperty(Handle<JSReceiver> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes,
@@ -353,7 +356,7 @@
}
-Handle<Object> GetProperty(Handle<JSObject> obj,
+Handle<Object> GetProperty(Handle<JSReceiver> obj,
const char* name) {
Isolate* isolate = obj->GetIsolate();
Handle<String> str = isolate->factory()->LookupAsciiSymbol(name);
@@ -362,17 +365,6 @@
Handle<Object> GetProperty(Handle<Object> obj,
- const char* name,
- LookupResult* result) {
- Isolate* isolate = Isolate::Current();
- Handle<String> str = isolate->factory()->LookupAsciiSymbol(name);
- PropertyAttributes attributes;
- CALL_HEAP_FUNCTION(
- isolate, obj->GetProperty(*obj, result, *str, &attributes), Object);
-}
-
-
-Handle<Object> GetProperty(Handle<Object> obj,
Handle<Object> key) {
Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(isolate,
@@ -380,7 +372,7 @@
}
-Handle<Object> GetProperty(Handle<JSObject> obj,
+Handle<Object> GetProperty(Handle<JSReceiver> obj,
Handle<String> name,
LookupResult* result) {
PropertyAttributes attributes;
@@ -460,6 +452,11 @@
if (create_if_needed) {
Handle<Object> hidden_obj =
isolate->factory()->NewJSObject(isolate->object_function());
+
+ // Don't allow leakage of the hidden object through accessors
+ // on Object.prototype.
+ SetPrototype(Handle<JSObject>::cast(hidden_obj),
+ isolate->factory()->null_value());
CALL_HEAP_FUNCTION(isolate,
obj->SetHiddenPropertiesObject(*hidden_obj), Object);
} else {
@@ -516,7 +513,8 @@
}
}
CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetElement(index, *value, strict_mode), Object);
+ object->SetElement(index, *value, strict_mode, true),
+ Object);
}
@@ -550,11 +548,6 @@
// associated with the wrapper and get rid of both the wrapper and the
// handle.
static void ClearWrapperCache(Persistent<v8::Value> handle, void*) {
-#ifdef ENABLE_HEAP_PROTECTION
- // Weak reference callbacks are called as if from outside V8. We
- // need to reenter to unprotect the heap.
- VMState state(OTHER);
-#endif
Handle<Object> cache = Utils::OpenHandle(*handle);
JSValue* wrapper = JSValue::cast(*cache);
Foreign* foreign = Script::cast(wrapper->value())->wrapper();
diff --git a/src/handles.h b/src/handles.h
index 3d930fd..13c6dd6 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -170,13 +170,14 @@
void NormalizeProperties(Handle<JSObject> object,
PropertyNormalizationMode mode,
int expected_additional_properties);
-void NormalizeElements(Handle<JSObject> object);
+Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object);
void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields);
-void NumberDictionarySet(Handle<NumberDictionary> dictionary,
- uint32_t index,
- Handle<Object> value,
- PropertyDetails details);
+MUST_USE_RESULT Handle<NumberDictionary> NumberDictionarySet(
+ Handle<NumberDictionary> dictionary,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyDetails details);
// Flattens a string.
void FlattenString(Handle<String> str);
@@ -185,7 +186,7 @@
// string.
Handle<String> FlattenGetString(Handle<String> str);
-Handle<Object> SetProperty(Handle<JSObject> object,
+Handle<Object> SetProperty(Handle<JSReceiver> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes,
@@ -239,17 +240,13 @@
Handle<Object> value,
StrictModeFlag strict_mode);
-Handle<Object> GetProperty(Handle<JSObject> obj,
+Handle<Object> GetProperty(Handle<JSReceiver> obj,
const char* name);
Handle<Object> GetProperty(Handle<Object> obj,
- const char* name,
- LookupResult* result);
-
-Handle<Object> GetProperty(Handle<Object> obj,
Handle<Object> key);
-Handle<Object> GetProperty(Handle<JSObject> obj,
+Handle<Object> GetProperty(Handle<JSReceiver> obj,
Handle<String> name,
LookupResult* result);
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 3860fad..b0b4fbe 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -526,8 +526,6 @@
} while (false)
-// TODO(isolates): cache isolate: either accept as a parameter or
-// set to some known symbol (__CUR_ISOLATE__?)
#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
CALL_AND_RETRY(ISOLATE, \
FUNCTION_CALL, \
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index ec078ed..7e613e9 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -28,294 +28,12 @@
#include "v8.h"
#include "heap-profiler.h"
-#include "frames-inl.h"
-#include "global-handles.h"
#include "profile-generator.h"
-#include "string-stream.h"
namespace v8 {
namespace internal {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-namespace {
-
-// Clusterizer is a set of helper functions for converting
-// object references into clusters.
-class Clusterizer : public AllStatic {
- public:
- static JSObjectsCluster Clusterize(HeapObject* obj) {
- return Clusterize(obj, true);
- }
- static void InsertIntoTree(JSObjectsClusterTree* tree,
- HeapObject* obj, bool fine_grain);
- static void InsertReferenceIntoTree(JSObjectsClusterTree* tree,
- const JSObjectsCluster& cluster) {
- InsertIntoTree(tree, cluster, 0);
- }
-
- private:
- static JSObjectsCluster Clusterize(HeapObject* obj, bool fine_grain);
- static int CalculateNetworkSize(JSObject* obj);
- static int GetObjectSize(HeapObject* obj) {
- return obj->IsJSObject() ?
- CalculateNetworkSize(JSObject::cast(obj)) : obj->Size();
- }
- static void InsertIntoTree(JSObjectsClusterTree* tree,
- const JSObjectsCluster& cluster, int size);
-};
-
-
-JSObjectsCluster Clusterizer::Clusterize(HeapObject* obj, bool fine_grain) {
- if (obj->IsJSObject()) {
- JSObject* js_obj = JSObject::cast(obj);
- String* constructor = GetConstructorNameForHeapProfile(
- JSObject::cast(js_obj));
- // Differentiate Object and Array instances.
- if (fine_grain && (constructor == HEAP->Object_symbol() ||
- constructor == HEAP->Array_symbol())) {
- return JSObjectsCluster(constructor, obj);
- } else {
- return JSObjectsCluster(constructor);
- }
- } else if (obj->IsString()) {
- return JSObjectsCluster(HEAP->String_symbol());
- } else if (obj->IsJSGlobalPropertyCell()) {
- return JSObjectsCluster(JSObjectsCluster::GLOBAL_PROPERTY);
- } else if (obj->IsCode() || obj->IsSharedFunctionInfo() || obj->IsScript()) {
- return JSObjectsCluster(JSObjectsCluster::CODE);
- }
- return JSObjectsCluster();
-}
-
-
-void Clusterizer::InsertIntoTree(JSObjectsClusterTree* tree,
- HeapObject* obj, bool fine_grain) {
- JSObjectsCluster cluster = Clusterize(obj, fine_grain);
- if (cluster.is_null()) return;
- InsertIntoTree(tree, cluster, GetObjectSize(obj));
-}
-
-
-void Clusterizer::InsertIntoTree(JSObjectsClusterTree* tree,
- const JSObjectsCluster& cluster, int size) {
- JSObjectsClusterTree::Locator loc;
- tree->Insert(cluster, &loc);
- NumberAndSizeInfo number_and_size = loc.value();
- number_and_size.increment_number(1);
- number_and_size.increment_bytes(size);
- loc.set_value(number_and_size);
-}
-
-
-int Clusterizer::CalculateNetworkSize(JSObject* obj) {
- int size = obj->Size();
- // If 'properties' and 'elements' are non-empty (thus, non-shared),
- // take their size into account.
- if (obj->properties() != HEAP->empty_fixed_array()) {
- size += obj->properties()->Size();
- }
- if (obj->elements() != HEAP->empty_fixed_array()) {
- size += obj->elements()->Size();
- }
- // For functions, also account non-empty context and literals sizes.
- if (obj->IsJSFunction()) {
- JSFunction* f = JSFunction::cast(obj);
- if (f->unchecked_context()->IsContext()) {
- size += f->context()->Size();
- }
- if (f->literals()->length() != 0) {
- size += f->literals()->Size();
- }
- }
- return size;
-}
-
-
-// A helper class for recording back references.
-class ReferencesExtractor : public ObjectVisitor {
- public:
- ReferencesExtractor(const JSObjectsCluster& cluster,
- RetainerHeapProfile* profile)
- : cluster_(cluster),
- profile_(profile),
- inside_array_(false) {
- }
-
- void VisitPointer(Object** o) {
- if ((*o)->IsFixedArray() && !inside_array_) {
- // Traverse one level deep for data members that are fixed arrays.
- // This covers the case of 'elements' and 'properties' of JSObject,
- // and function contexts.
- inside_array_ = true;
- FixedArray::cast(*o)->Iterate(this);
- inside_array_ = false;
- } else if ((*o)->IsHeapObject()) {
- profile_->StoreReference(cluster_, HeapObject::cast(*o));
- }
- }
-
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) VisitPointer(p);
- }
-
- private:
- const JSObjectsCluster& cluster_;
- RetainerHeapProfile* profile_;
- bool inside_array_;
-};
-
-
-// A printer interface implementation for the Retainers profile.
-class RetainersPrinter : public RetainerHeapProfile::Printer {
- public:
- void PrintRetainers(const JSObjectsCluster& cluster,
- const StringStream& retainers) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- cluster.Print(&stream);
- LOG(ISOLATE,
- HeapSampleJSRetainersEvent(
- *(stream.ToCString()), *(retainers.ToCString())));
- }
-};
-
-
-// Visitor for printing a cluster tree.
-class ClusterTreePrinter BASE_EMBEDDED {
- public:
- explicit ClusterTreePrinter(StringStream* stream) : stream_(stream) {}
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- Print(stream_, cluster, number_and_size);
- }
- static void Print(StringStream* stream,
- const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size);
-
- private:
- StringStream* stream_;
-};
-
-
-void ClusterTreePrinter::Print(StringStream* stream,
- const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- stream->Put(',');
- cluster.Print(stream);
- stream->Add(";%d", number_and_size.number());
-}
-
-
-// Visitor for printing a retainer tree.
-class SimpleRetainerTreePrinter BASE_EMBEDDED {
- public:
- explicit SimpleRetainerTreePrinter(RetainerHeapProfile::Printer* printer)
- : printer_(printer) {}
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
-
- private:
- RetainerHeapProfile::Printer* printer_;
-};
-
-
-void SimpleRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
- JSObjectsClusterTree* tree) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- ClusterTreePrinter retainers_printer(&stream);
- tree->ForEach(&retainers_printer);
- printer_->PrintRetainers(cluster, stream);
-}
-
-
-// Visitor for aggregating references count of equivalent clusters.
-class RetainersAggregator BASE_EMBEDDED {
- public:
- RetainersAggregator(ClustersCoarser* coarser, JSObjectsClusterTree* dest_tree)
- : coarser_(coarser), dest_tree_(dest_tree) {}
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size);
-
- private:
- ClustersCoarser* coarser_;
- JSObjectsClusterTree* dest_tree_;
-};
-
-
-void RetainersAggregator::Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
- if (eq.is_null()) eq = cluster;
- JSObjectsClusterTree::Locator loc;
- dest_tree_->Insert(eq, &loc);
- NumberAndSizeInfo aggregated_number = loc.value();
- aggregated_number.increment_number(number_and_size.number());
- loc.set_value(aggregated_number);
-}
-
-
-// Visitor for printing retainers tree. Aggregates equivalent retainer clusters.
-class AggregatingRetainerTreePrinter BASE_EMBEDDED {
- public:
- AggregatingRetainerTreePrinter(ClustersCoarser* coarser,
- RetainerHeapProfile::Printer* printer)
- : coarser_(coarser), printer_(printer) {}
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
-
- private:
- ClustersCoarser* coarser_;
- RetainerHeapProfile::Printer* printer_;
-};
-
-
-void AggregatingRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
- JSObjectsClusterTree* tree) {
- if (!coarser_->GetCoarseEquivalent(cluster).is_null()) return;
- JSObjectsClusterTree dest_tree_;
- RetainersAggregator retainers_aggregator(coarser_, &dest_tree_);
- tree->ForEach(&retainers_aggregator);
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- ClusterTreePrinter retainers_printer(&stream);
- dest_tree_.ForEach(&retainers_printer);
- printer_->PrintRetainers(cluster, stream);
-}
-
-} // namespace
-
-
-// A helper class for building a retainers tree, that aggregates
-// all equivalent clusters.
-class RetainerTreeAggregator {
- public:
- explicit RetainerTreeAggregator(ClustersCoarser* coarser)
- : coarser_(coarser) {}
- void Process(JSObjectsRetainerTree* input_tree) {
- input_tree->ForEach(this);
- }
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
- JSObjectsRetainerTree& output_tree() { return output_tree_; }
-
- private:
- ClustersCoarser* coarser_;
- JSObjectsRetainerTree output_tree_;
-};
-
-
-void RetainerTreeAggregator::Call(const JSObjectsCluster& cluster,
- JSObjectsClusterTree* tree) {
- JSObjectsCluster eq = coarser_->GetCoarseEquivalent(cluster);
- if (eq.is_null()) return;
- JSObjectsRetainerTree::Locator loc;
- if (output_tree_.Insert(eq, &loc)) {
- loc.set_value(new JSObjectsClusterTree());
- }
- RetainersAggregator retainers_aggregator(coarser_, loc.value());
- tree->ForEach(&retainers_aggregator);
-}
-
-
HeapProfiler::HeapProfiler()
: snapshots_(new HeapSnapshotsCollection()),
next_snapshot_uid_(1) {
@@ -333,29 +51,21 @@
}
-#endif // ENABLE_LOGGING_AND_PROFILING
-
void HeapProfiler::Setup() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
Isolate* isolate = Isolate::Current();
if (isolate->heap_profiler() == NULL) {
isolate->set_heap_profiler(new HeapProfiler());
}
-#endif
}
void HeapProfiler::TearDown() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
Isolate* isolate = Isolate::Current();
delete isolate->heap_profiler();
isolate->set_heap_profiler(NULL);
-#endif
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name,
int type,
v8::ActivityControl* control) {
@@ -409,14 +119,6 @@
generation_completed = generator.GenerateSnapshot();
break;
}
- case HeapSnapshot::kAggregated: {
- HEAP->CollectAllGarbage(true);
- AggregatedHeapSnapshot agg_snapshot;
- AggregatedHeapSnapshotGenerator generator(&agg_snapshot);
- generator.GenerateSnapshot();
- generator.FillHeapSnapshot(result);
- break;
- }
default:
UNREACHABLE();
}
@@ -469,705 +171,4 @@
}
-const JSObjectsClusterTreeConfig::Key JSObjectsClusterTreeConfig::kNoKey;
-const JSObjectsClusterTreeConfig::Value JSObjectsClusterTreeConfig::kNoValue;
-
-
-ConstructorHeapProfile::ConstructorHeapProfile()
- : zscope_(Isolate::Current(), DELETE_ON_EXIT) {
-}
-
-
-void ConstructorHeapProfile::Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- HeapStringAllocator allocator;
- StringStream stream(&allocator);
- cluster.Print(&stream);
- LOG(ISOLATE,
- HeapSampleJSConstructorEvent(*(stream.ToCString()),
- number_and_size.number(),
- number_and_size.bytes()));
-}
-
-
-void ConstructorHeapProfile::CollectStats(HeapObject* obj) {
- Clusterizer::InsertIntoTree(&js_objects_info_tree_, obj, false);
-}
-
-
-void ConstructorHeapProfile::PrintStats() {
- js_objects_info_tree_.ForEach(this);
-}
-
-
-static const char* GetConstructorName(const char* name) {
- return name[0] != '\0' ? name : "(anonymous)";
-}
-
-
-const char* JSObjectsCluster::GetSpecialCaseName() const {
- if (constructor_ == FromSpecialCase(ROOTS)) {
- return "(roots)";
- } else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
- return "(global property)";
- } else if (constructor_ == FromSpecialCase(CODE)) {
- return "(code)";
- } else if (constructor_ == FromSpecialCase(SELF)) {
- return "(self)";
- }
- return NULL;
-}
-
-
-void JSObjectsCluster::Print(StringStream* accumulator) const {
- ASSERT(!is_null());
- const char* special_case_name = GetSpecialCaseName();
- if (special_case_name != NULL) {
- accumulator->Add(special_case_name);
- } else {
- SmartPointer<char> s_name(
- constructor_->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
- accumulator->Add("%s", GetConstructorName(*s_name));
- if (instance_ != NULL) {
- accumulator->Add(":%p", static_cast<void*>(instance_));
- }
- }
-}
-
-
-void JSObjectsCluster::DebugPrint(StringStream* accumulator) const {
- if (!is_null()) {
- Print(accumulator);
- } else {
- accumulator->Add("(null cluster)");
- }
-}
-
-
-inline ClustersCoarser::ClusterBackRefs::ClusterBackRefs(
- const JSObjectsCluster& cluster_)
- : cluster(cluster_), refs(kInitialBackrefsListCapacity) {
-}
-
-
-inline ClustersCoarser::ClusterBackRefs::ClusterBackRefs(
- const ClustersCoarser::ClusterBackRefs& src)
- : cluster(src.cluster), refs(src.refs.capacity()) {
- refs.AddAll(src.refs);
-}
-
-
-inline ClustersCoarser::ClusterBackRefs&
- ClustersCoarser::ClusterBackRefs::operator=(
- const ClustersCoarser::ClusterBackRefs& src) {
- if (this == &src) return *this;
- cluster = src.cluster;
- refs.Clear();
- refs.AddAll(src.refs);
- return *this;
-}
-
-
-inline int ClustersCoarser::ClusterBackRefs::Compare(
- const ClustersCoarser::ClusterBackRefs& a,
- const ClustersCoarser::ClusterBackRefs& b) {
- int cmp = JSObjectsCluster::CompareConstructors(a.cluster, b.cluster);
- if (cmp != 0) return cmp;
- if (a.refs.length() < b.refs.length()) return -1;
- if (a.refs.length() > b.refs.length()) return 1;
- for (int i = 0; i < a.refs.length(); ++i) {
- int cmp = JSObjectsCluster::Compare(a.refs[i], b.refs[i]);
- if (cmp != 0) return cmp;
- }
- return 0;
-}
-
-
-ClustersCoarser::ClustersCoarser()
- : zscope_(Isolate::Current(), DELETE_ON_EXIT),
- sim_list_(ClustersCoarser::kInitialSimilarityListCapacity),
- current_pair_(NULL),
- current_set_(NULL),
- self_(NULL) {
-}
-
-
-void ClustersCoarser::Call(const JSObjectsCluster& cluster,
- JSObjectsClusterTree* tree) {
- if (!cluster.can_be_coarsed()) return;
- ClusterBackRefs pair(cluster);
- ASSERT(current_pair_ == NULL);
- current_pair_ = &pair;
- current_set_ = new JSObjectsRetainerTree();
- self_ = &cluster;
- tree->ForEach(this);
- sim_list_.Add(pair);
- current_pair_ = NULL;
- current_set_ = NULL;
- self_ = NULL;
-}
-
-
-void ClustersCoarser::Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- ASSERT(current_pair_ != NULL);
- ASSERT(current_set_ != NULL);
- ASSERT(self_ != NULL);
- JSObjectsRetainerTree::Locator loc;
- if (JSObjectsCluster::Compare(*self_, cluster) == 0) {
- current_pair_->refs.Add(JSObjectsCluster(JSObjectsCluster::SELF));
- return;
- }
- JSObjectsCluster eq = GetCoarseEquivalent(cluster);
- if (!eq.is_null()) {
- if (current_set_->Find(eq, &loc)) return;
- current_pair_->refs.Add(eq);
- current_set_->Insert(eq, &loc);
- } else {
- current_pair_->refs.Add(cluster);
- }
-}
-
-
-void ClustersCoarser::Process(JSObjectsRetainerTree* tree) {
- int last_eq_clusters = -1;
- for (int i = 0; i < kMaxPassesCount; ++i) {
- sim_list_.Clear();
- const int curr_eq_clusters = DoProcess(tree);
- // If no new cluster equivalents discovered, abort processing.
- if (last_eq_clusters == curr_eq_clusters) break;
- last_eq_clusters = curr_eq_clusters;
- }
-}
-
-
-int ClustersCoarser::DoProcess(JSObjectsRetainerTree* tree) {
- tree->ForEach(this);
- sim_list_.Iterate(ClusterBackRefs::SortRefsIterator);
- sim_list_.Sort(ClusterBackRefsCmp);
- return FillEqualityTree();
-}
-
-
-JSObjectsCluster ClustersCoarser::GetCoarseEquivalent(
- const JSObjectsCluster& cluster) {
- if (!cluster.can_be_coarsed()) return JSObjectsCluster();
- EqualityTree::Locator loc;
- return eq_tree_.Find(cluster, &loc) ? loc.value() : JSObjectsCluster();
-}
-
-
-bool ClustersCoarser::HasAnEquivalent(const JSObjectsCluster& cluster) {
- // Return true for coarsible clusters that have a non-identical equivalent.
- if (!cluster.can_be_coarsed()) return false;
- JSObjectsCluster eq = GetCoarseEquivalent(cluster);
- return !eq.is_null() && JSObjectsCluster::Compare(cluster, eq) != 0;
-}
-
-
-int ClustersCoarser::FillEqualityTree() {
- int eq_clusters_count = 0;
- int eq_to = 0;
- bool first_added = false;
- for (int i = 1; i < sim_list_.length(); ++i) {
- if (ClusterBackRefs::Compare(sim_list_[i], sim_list_[eq_to]) == 0) {
- EqualityTree::Locator loc;
- if (!first_added) {
- // Add self-equivalence, if we have more than one item in this
- // equivalence class.
- eq_tree_.Insert(sim_list_[eq_to].cluster, &loc);
- loc.set_value(sim_list_[eq_to].cluster);
- first_added = true;
- }
- eq_tree_.Insert(sim_list_[i].cluster, &loc);
- loc.set_value(sim_list_[eq_to].cluster);
- ++eq_clusters_count;
- } else {
- eq_to = i;
- first_added = false;
- }
- }
- return eq_clusters_count;
-}
-
-
-const JSObjectsCluster ClustersCoarser::ClusterEqualityConfig::kNoKey;
-const JSObjectsCluster ClustersCoarser::ClusterEqualityConfig::kNoValue;
-const JSObjectsRetainerTreeConfig::Key JSObjectsRetainerTreeConfig::kNoKey;
-const JSObjectsRetainerTreeConfig::Value JSObjectsRetainerTreeConfig::kNoValue =
- NULL;
-
-
-RetainerHeapProfile::RetainerHeapProfile()
- : zscope_(Isolate::Current(), DELETE_ON_EXIT),
- aggregator_(NULL) {
- JSObjectsCluster roots(JSObjectsCluster::ROOTS);
- ReferencesExtractor extractor(roots, this);
- HEAP->IterateRoots(&extractor, VISIT_ONLY_STRONG);
-}
-
-
-RetainerHeapProfile::~RetainerHeapProfile() {
- delete aggregator_;
-}
-
-
-void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
- HeapObject* ref) {
- JSObjectsCluster ref_cluster = Clusterizer::Clusterize(ref);
- if (ref_cluster.is_null()) return;
- JSObjectsRetainerTree::Locator ref_loc;
- if (retainers_tree_.Insert(ref_cluster, &ref_loc)) {
- ref_loc.set_value(new JSObjectsClusterTree());
- }
- JSObjectsClusterTree* referenced_by = ref_loc.value();
- Clusterizer::InsertReferenceIntoTree(referenced_by, cluster);
-}
-
-
-void RetainerHeapProfile::CollectStats(HeapObject* obj) {
- const JSObjectsCluster cluster = Clusterizer::Clusterize(obj);
- if (cluster.is_null()) return;
- ReferencesExtractor extractor(cluster, this);
- obj->Iterate(&extractor);
-}
-
-
-void RetainerHeapProfile::CoarseAndAggregate() {
- coarser_.Process(&retainers_tree_);
- ASSERT(aggregator_ == NULL);
- aggregator_ = new RetainerTreeAggregator(&coarser_);
- aggregator_->Process(&retainers_tree_);
-}
-
-
-void RetainerHeapProfile::DebugPrintStats(
- RetainerHeapProfile::Printer* printer) {
- // Print clusters that have no equivalents, aggregating their retainers.
- AggregatingRetainerTreePrinter agg_printer(&coarser_, printer);
- retainers_tree_.ForEach(&agg_printer);
- // Print clusters that have equivalents.
- SimpleRetainerTreePrinter s_printer(printer);
- aggregator_->output_tree().ForEach(&s_printer);
-}
-
-
-void RetainerHeapProfile::PrintStats() {
- RetainersPrinter printer;
- DebugPrintStats(&printer);
-}
-
-
-//
-// HeapProfiler class implementation.
-//
-static void StackWeakReferenceCallback(Persistent<Value> object,
- void* trace) {
- DeleteArray(static_cast<Address*>(trace));
- object.Dispose();
-}
-
-
-static void PrintProducerStackTrace(Object* obj, void* trace) {
- if (!obj->IsJSObject()) return;
- String* constructor = GetConstructorNameForHeapProfile(JSObject::cast(obj));
- SmartPointer<char> s_name(
- constructor->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
- LOG(ISOLATE,
- HeapSampleJSProducerEvent(GetConstructorName(*s_name),
- reinterpret_cast<Address*>(trace)));
-}
-
-
-void HeapProfiler::WriteSample() {
- Isolate* isolate = Isolate::Current();
- LOG(isolate, HeapSampleBeginEvent("Heap", "allocated"));
- LOG(isolate,
- HeapSampleStats(
- "Heap", "allocated", HEAP->CommittedMemory(), HEAP->SizeOfObjects()));
-
- AggregatedHeapSnapshot snapshot;
- AggregatedHeapSnapshotGenerator generator(&snapshot);
- generator.GenerateSnapshot();
-
- HistogramInfo* info = snapshot.info();
- for (int i = FIRST_NONSTRING_TYPE;
- i <= AggregatedHeapSnapshotGenerator::kAllStringsType;
- ++i) {
- if (info[i].bytes() > 0) {
- LOG(isolate,
- HeapSampleItemEvent(info[i].name(), info[i].number(),
- info[i].bytes()));
- }
- }
-
- snapshot.js_cons_profile()->PrintStats();
- snapshot.js_retainer_profile()->PrintStats();
-
- isolate->global_handles()->IterateWeakRoots(PrintProducerStackTrace,
- StackWeakReferenceCallback);
-
- LOG(isolate, HeapSampleEndEvent("Heap", "allocated"));
-}
-
-
-AggregatedHeapSnapshot::AggregatedHeapSnapshot()
- : info_(NewArray<HistogramInfo>(
- AggregatedHeapSnapshotGenerator::kAllStringsType + 1)) {
-#define DEF_TYPE_NAME(name) info_[name].set_name(#name);
- INSTANCE_TYPE_LIST(DEF_TYPE_NAME);
-#undef DEF_TYPE_NAME
- info_[AggregatedHeapSnapshotGenerator::kAllStringsType].set_name(
- "STRING_TYPE");
-}
-
-
-AggregatedHeapSnapshot::~AggregatedHeapSnapshot() {
- DeleteArray(info_);
-}
-
-
-AggregatedHeapSnapshotGenerator::AggregatedHeapSnapshotGenerator(
- AggregatedHeapSnapshot* agg_snapshot)
- : agg_snapshot_(agg_snapshot) {
-}
-
-
-void AggregatedHeapSnapshotGenerator::CalculateStringsStats() {
- HistogramInfo* info = agg_snapshot_->info();
- HistogramInfo& strings = info[kAllStringsType];
- // Lump all the string types together.
-#define INCREMENT_SIZE(type, size, name, camel_name) \
- strings.increment_number(info[type].number()); \
- strings.increment_bytes(info[type].bytes());
- STRING_TYPE_LIST(INCREMENT_SIZE);
-#undef INCREMENT_SIZE
-}
-
-
-void AggregatedHeapSnapshotGenerator::CollectStats(HeapObject* obj) {
- InstanceType type = obj->map()->instance_type();
- ASSERT(0 <= type && type <= LAST_TYPE);
- agg_snapshot_->info()[type].increment_number(1);
- agg_snapshot_->info()[type].increment_bytes(obj->Size());
-}
-
-
-void AggregatedHeapSnapshotGenerator::GenerateSnapshot() {
- HeapIterator iterator(HeapIterator::kFilterUnreachable);
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- CollectStats(obj);
- agg_snapshot_->js_cons_profile()->CollectStats(obj);
- agg_snapshot_->js_retainer_profile()->CollectStats(obj);
- }
- CalculateStringsStats();
- agg_snapshot_->js_retainer_profile()->CoarseAndAggregate();
-}
-
-
-class CountingConstructorHeapProfileIterator {
- public:
- CountingConstructorHeapProfileIterator()
- : entities_count_(0), children_count_(0) {
- }
-
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- ++entities_count_;
- children_count_ += number_and_size.number();
- }
-
- int entities_count() { return entities_count_; }
- int children_count() { return children_count_; }
-
- private:
- int entities_count_;
- int children_count_;
-};
-
-
-static HeapEntry* AddEntryFromAggregatedSnapshot(HeapSnapshot* snapshot,
- int* root_child_index,
- HeapEntry::Type type,
- const char* name,
- int count,
- int size,
- int children_count,
- int retainers_count) {
- HeapEntry* entry = snapshot->AddEntry(
- type, name, count, size, children_count, retainers_count);
- ASSERT(entry != NULL);
- snapshot->root()->SetUnidirElementReference(*root_child_index,
- *root_child_index + 1,
- entry);
- *root_child_index = *root_child_index + 1;
- return entry;
-}
-
-
-class AllocatingConstructorHeapProfileIterator {
- public:
- AllocatingConstructorHeapProfileIterator(HeapSnapshot* snapshot,
- int* root_child_index)
- : snapshot_(snapshot),
- root_child_index_(root_child_index) {
- }
-
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- const char* name = cluster.GetSpecialCaseName();
- if (name == NULL) {
- name = snapshot_->collection()->names()->GetFunctionName(
- cluster.constructor());
- }
- AddEntryFromAggregatedSnapshot(snapshot_,
- root_child_index_,
- HeapEntry::kObject,
- name,
- number_and_size.number(),
- number_and_size.bytes(),
- 0,
- 0);
- }
-
- private:
- HeapSnapshot* snapshot_;
- int* root_child_index_;
-};
-
-
-static HeapObject* ClusterAsHeapObject(const JSObjectsCluster& cluster) {
- return cluster.can_be_coarsed() ?
- reinterpret_cast<HeapObject*>(cluster.instance()) : cluster.constructor();
-}
-
-
-static JSObjectsCluster HeapObjectAsCluster(HeapObject* object) {
- if (object->IsString()) {
- return JSObjectsCluster(String::cast(object));
- } else {
- JSObject* js_obj = JSObject::cast(object);
- String* constructor = GetConstructorNameForHeapProfile(
- JSObject::cast(js_obj));
- return JSObjectsCluster(constructor, object);
- }
-}
-
-
-class CountingRetainersIterator {
- public:
- CountingRetainersIterator(const JSObjectsCluster& child_cluster,
- HeapEntriesAllocator* allocator,
- HeapEntriesMap* map)
- : child_(ClusterAsHeapObject(child_cluster)),
- allocator_(allocator),
- map_(map) {
- if (map_->Map(child_) == NULL)
- map_->Pair(child_, allocator_, HeapEntriesMap::kHeapEntryPlaceholder);
- }
-
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- if (map_->Map(ClusterAsHeapObject(cluster)) == NULL)
- map_->Pair(ClusterAsHeapObject(cluster),
- allocator_,
- HeapEntriesMap::kHeapEntryPlaceholder);
- map_->CountReference(ClusterAsHeapObject(cluster), child_);
- }
-
- private:
- HeapObject* child_;
- HeapEntriesAllocator* allocator_;
- HeapEntriesMap* map_;
-};
-
-
-class AllocatingRetainersIterator {
- public:
- AllocatingRetainersIterator(const JSObjectsCluster& child_cluster,
- HeapEntriesAllocator*,
- HeapEntriesMap* map)
- : child_(ClusterAsHeapObject(child_cluster)), map_(map) {
- child_entry_ = map_->Map(child_);
- ASSERT(child_entry_ != NULL);
- }
-
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size) {
- int child_index, retainer_index;
- map_->CountReference(ClusterAsHeapObject(cluster),
- child_,
- &child_index,
- &retainer_index);
- map_->Map(ClusterAsHeapObject(cluster))->SetIndexedReference(
- HeapGraphEdge::kElement,
- child_index,
- number_and_size.number(),
- child_entry_,
- retainer_index);
- }
-
- private:
- HeapObject* child_;
- HeapEntriesMap* map_;
- HeapEntry* child_entry_;
-};
-
-
-template<class RetainersIterator>
-class AggregatingRetainerTreeIterator {
- public:
- explicit AggregatingRetainerTreeIterator(ClustersCoarser* coarser,
- HeapEntriesAllocator* allocator,
- HeapEntriesMap* map)
- : coarser_(coarser), allocator_(allocator), map_(map) {
- }
-
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree) {
- if (coarser_ != NULL &&
- !coarser_->GetCoarseEquivalent(cluster).is_null()) return;
- JSObjectsClusterTree* tree_to_iterate = tree;
- ZoneScope zs(Isolate::Current(), DELETE_ON_EXIT);
- JSObjectsClusterTree dest_tree_;
- if (coarser_ != NULL) {
- RetainersAggregator retainers_aggregator(coarser_, &dest_tree_);
- tree->ForEach(&retainers_aggregator);
- tree_to_iterate = &dest_tree_;
- }
- RetainersIterator iterator(cluster, allocator_, map_);
- tree_to_iterate->ForEach(&iterator);
- }
-
- private:
- ClustersCoarser* coarser_;
- HeapEntriesAllocator* allocator_;
- HeapEntriesMap* map_;
-};
-
-
-class AggregatedRetainerTreeAllocator : public HeapEntriesAllocator {
- public:
- AggregatedRetainerTreeAllocator(HeapSnapshot* snapshot,
- int* root_child_index)
- : snapshot_(snapshot), root_child_index_(root_child_index) {
- }
- ~AggregatedRetainerTreeAllocator() { }
-
- HeapEntry* AllocateEntry(
- HeapThing ptr, int children_count, int retainers_count) {
- HeapObject* obj = reinterpret_cast<HeapObject*>(ptr);
- JSObjectsCluster cluster = HeapObjectAsCluster(obj);
- const char* name = cluster.GetSpecialCaseName();
- if (name == NULL) {
- name = snapshot_->collection()->names()->GetFunctionName(
- cluster.constructor());
- }
- return AddEntryFromAggregatedSnapshot(
- snapshot_, root_child_index_, HeapEntry::kObject, name,
- 0, 0, children_count, retainers_count);
- }
-
- private:
- HeapSnapshot* snapshot_;
- int* root_child_index_;
-};
-
-
-template<class Iterator>
-void AggregatedHeapSnapshotGenerator::IterateRetainers(
- HeapEntriesAllocator* allocator, HeapEntriesMap* entries_map) {
- RetainerHeapProfile* p = agg_snapshot_->js_retainer_profile();
- AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_1(
- p->coarser(), allocator, entries_map);
- p->retainers_tree()->ForEach(&agg_ret_iter_1);
- AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_2(
- NULL, allocator, entries_map);
- p->aggregator()->output_tree().ForEach(&agg_ret_iter_2);
-}
-
-
-void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) {
- // Count the number of entities.
- int histogram_entities_count = 0;
- int histogram_children_count = 0;
- int histogram_retainers_count = 0;
- for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
- if (agg_snapshot_->info()[i].bytes() > 0) {
- ++histogram_entities_count;
- }
- }
- CountingConstructorHeapProfileIterator counting_cons_iter;
- agg_snapshot_->js_cons_profile()->ForEach(&counting_cons_iter);
- histogram_entities_count += counting_cons_iter.entities_count();
- HeapEntriesMap entries_map;
- int root_child_index = 0;
- AggregatedRetainerTreeAllocator allocator(snapshot, &root_child_index);
- IterateRetainers<CountingRetainersIterator>(&allocator, &entries_map);
- histogram_entities_count += entries_map.entries_count();
- histogram_children_count += entries_map.total_children_count();
- histogram_retainers_count += entries_map.total_retainers_count();
-
- // Root entry references all other entries.
- histogram_children_count += histogram_entities_count;
- int root_children_count = histogram_entities_count;
- ++histogram_entities_count;
-
- // Allocate and fill entries in the snapshot, allocate references.
- snapshot->AllocateEntries(histogram_entities_count,
- histogram_children_count,
- histogram_retainers_count);
- snapshot->AddRootEntry(root_children_count);
- for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
- if (agg_snapshot_->info()[i].bytes() > 0) {
- AddEntryFromAggregatedSnapshot(snapshot,
- &root_child_index,
- HeapEntry::kHidden,
- agg_snapshot_->info()[i].name(),
- agg_snapshot_->info()[i].number(),
- agg_snapshot_->info()[i].bytes(),
- 0,
- 0);
- }
- }
- AllocatingConstructorHeapProfileIterator alloc_cons_iter(
- snapshot, &root_child_index);
- agg_snapshot_->js_cons_profile()->ForEach(&alloc_cons_iter);
- entries_map.AllocateEntries();
-
- // Fill up references.
- IterateRetainers<AllocatingRetainersIterator>(&allocator, &entries_map);
-
- snapshot->SetDominatorsToSelf();
-}
-
-
-void ProducerHeapProfile::Setup() {
- can_log_ = true;
-}
-
-void ProducerHeapProfile::DoRecordJSObjectAllocation(Object* obj) {
- ASSERT(FLAG_log_producers);
- if (!can_log_) return;
- int framesCount = 0;
- for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
- ++framesCount;
- }
- if (framesCount == 0) return;
- ++framesCount; // Reserve place for the terminator item.
- Vector<Address> stack(NewArray<Address>(framesCount), framesCount);
- int i = 0;
- for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
- stack[i++] = it.frame()->pc();
- }
- stack[i] = NULL;
- Handle<Object> handle = isolate_->global_handles()->Create(obj);
- isolate_->global_handles()->MakeWeak(handle.location(),
- static_cast<void*>(stack.start()),
- StackWeakReferenceCallback);
-}
-
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-
} } // namespace v8::internal
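
The profiler code deleted above all follows one convention: each helper (RetainersAggregator, the tree printers, ClustersCoarser) exposes a Call(key, value) method and is handed to a splay tree's ForEach. A minimal stand-alone sketch of that visitor pattern, with std::map standing in for the zone-allocated ZoneSplayTree (names here are illustrative, not V8 API):

    #include <cstdio>
    #include <map>
    #include <string>

    // Stand-in for ZoneSplayTree: ForEach hands every (key, value) pair
    // to a visitor's Call method, as the deleted profiler helpers expect.
    template <class Key, class Value>
    class Tree {
     public:
      void Insert(const Key& k, const Value& v) { map_[k] = v; }
      template <class Visitor>
      void ForEach(Visitor* visitor) {
        for (typename std::map<Key, Value>::iterator it = map_.begin();
             it != map_.end(); ++it) {
          visitor->Call(it->first, it->second);
        }
      }
     private:
      std::map<Key, Value> map_;
    };

    // A visitor in the style of CountingConstructorHeapProfileIterator.
    class CountingVisitor {
     public:
      CountingVisitor() : entities_(0), total_(0) {}
      void Call(const std::string& name, int count) {
        ++entities_;
        total_ += count;
      }
      int entities() const { return entities_; }
      int total() const { return total_; }
     private:
      int entities_;
      int total_;
    };

    int main() {
      Tree<std::string, int> tree;
      tree.Insert("Array", 10);
      tree.Insert("Object", 25);
      CountingVisitor v;
      tree.ForEach(&v);
      std::printf("%d clusters, %d objects\n", v.entities(), v.total());
      return 0;
    }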
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index c1e93c0..b1bc91c 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -28,15 +28,11 @@
#ifndef V8_HEAP_PROFILER_H_
#define V8_HEAP_PROFILER_H_
-#include "allocation.h"
#include "isolate.h"
-#include "zone-inl.h"
namespace v8 {
namespace internal {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
class HeapSnapshot;
class HeapSnapshotsCollection;
@@ -47,9 +43,6 @@
profiler->call; \
} \
} while (false)
-#else
-#define HEAP_PROFILE(heap, call) ((void) 0)
-#endif // ENABLE_LOGGING_AND_PROFILING
// The HeapProfiler writes data to the log files, which can be postprocessed
// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
@@ -58,7 +51,6 @@
static void Setup();
static void TearDown();
-#ifdef ENABLE_LOGGING_AND_PROFILING
static HeapSnapshot* TakeSnapshot(const char* name,
int type,
v8::ActivityControl* control);
@@ -81,10 +73,6 @@
return snapshots_->is_tracking_objects();
}
- // Obsolete interface.
- // Write a single heap sample to the log file.
- static void WriteSample();
-
private:
HeapProfiler();
~HeapProfiler();
@@ -99,299 +87,8 @@
HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
-
-#endif // ENABLE_LOGGING_AND_PROFILING
};
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-// JSObjectsCluster describes a group of JS objects that are
-// considered equivalent in terms of a particular profile.
-class JSObjectsCluster BASE_EMBEDDED {
- public:
- // These special cases are used in retainer profile.
- enum SpecialCase {
- ROOTS = 1,
- GLOBAL_PROPERTY = 2,
- CODE = 3,
- SELF = 100 // This case is used in ClustersCoarser only.
- };
-
- JSObjectsCluster() : constructor_(NULL), instance_(NULL) {}
- explicit JSObjectsCluster(String* constructor)
- : constructor_(constructor), instance_(NULL) {}
- explicit JSObjectsCluster(SpecialCase special)
- : constructor_(FromSpecialCase(special)), instance_(NULL) {}
- JSObjectsCluster(String* constructor, Object* instance)
- : constructor_(constructor), instance_(instance) {}
-
- static int CompareConstructors(const JSObjectsCluster& a,
- const JSObjectsCluster& b) {
- // Strings are unique, so it is sufficient to compare their pointers.
- return a.constructor_ == b.constructor_ ? 0
- : (a.constructor_ < b.constructor_ ? -1 : 1);
- }
- static int Compare(const JSObjectsCluster& a, const JSObjectsCluster& b) {
- // Strings are unique, so it is sufficient to compare their pointers.
- const int cons_cmp = CompareConstructors(a, b);
- return cons_cmp == 0 ?
- (a.instance_ == b.instance_ ? 0 : (a.instance_ < b.instance_ ? -1 : 1))
- : cons_cmp;
- }
- static int Compare(const JSObjectsCluster* a, const JSObjectsCluster* b) {
- return Compare(*a, *b);
- }
-
- bool is_null() const { return constructor_ == NULL; }
- bool can_be_coarsed() const { return instance_ != NULL; }
- String* constructor() const { return constructor_; }
- Object* instance() const { return instance_; }
-
- const char* GetSpecialCaseName() const;
- void Print(StringStream* accumulator) const;
- // Allows null clusters to be printed.
- void DebugPrint(StringStream* accumulator) const;
-
- private:
- static String* FromSpecialCase(SpecialCase special) {
- // We use symbols that are illegal JS identifiers to identify special cases.
- // Their actual value is irrelevant for us.
- switch (special) {
- case ROOTS: return HEAP->result_symbol();
- case GLOBAL_PROPERTY: return HEAP->code_symbol();
- case CODE: return HEAP->arguments_shadow_symbol();
- case SELF: return HEAP->catch_var_symbol();
- default:
- UNREACHABLE();
- return NULL;
- }
- }
-
- String* constructor_;
- Object* instance_;
-};
-
-
-struct JSObjectsClusterTreeConfig {
- typedef JSObjectsCluster Key;
- typedef NumberAndSizeInfo Value;
- static const Key kNoKey;
- static const Value kNoValue;
- static int Compare(const Key& a, const Key& b) {
- return Key::Compare(a, b);
- }
-};
-typedef ZoneSplayTree<JSObjectsClusterTreeConfig> JSObjectsClusterTree;
-
-
-// ConstructorHeapProfile is responsible for gathering and logging
-// the "constructor profile" of JS objects allocated on the heap.
-// It is run during the garbage collection cycle, so it doesn't need
-// to use handles.
-class ConstructorHeapProfile BASE_EMBEDDED {
- public:
- ConstructorHeapProfile();
- virtual ~ConstructorHeapProfile() {}
- void CollectStats(HeapObject* obj);
- void PrintStats();
-
- template<class Callback>
- void ForEach(Callback* callback) { js_objects_info_tree_.ForEach(callback); }
- // Used by ZoneSplayTree::ForEach. Made virtual to allow overriding in tests.
- virtual void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size);
-
- private:
- ZoneScope zscope_;
- JSObjectsClusterTree js_objects_info_tree_;
-};
-
-
-// JSObjectsRetainerTree is used to represent retainer graphs using
-// adjacency list form:
-//
-// Cluster -> (Cluster -> NumberAndSizeInfo)
-//
-// Subordinate splay trees are stored by pointer. They are zone-allocated,
-// so there is no need to manage their lifetime.
-//
-struct JSObjectsRetainerTreeConfig {
- typedef JSObjectsCluster Key;
- typedef JSObjectsClusterTree* Value;
- static const Key kNoKey;
- static const Value kNoValue;
- static int Compare(const Key& a, const Key& b) {
- return Key::Compare(a, b);
- }
-};
-typedef ZoneSplayTree<JSObjectsRetainerTreeConfig> JSObjectsRetainerTree;
-
-
-class ClustersCoarser BASE_EMBEDDED {
- public:
- ClustersCoarser();
-
- // Processes a given retainer graph.
- void Process(JSObjectsRetainerTree* tree);
-
- // Returns an equivalent cluster (can be the cluster itself).
- // If the given cluster doesn't have an equivalent, returns null cluster.
- JSObjectsCluster GetCoarseEquivalent(const JSObjectsCluster& cluster);
- // Returns whether a cluster can be substituted with an equivalent and thus,
- // skipped in some cases.
- bool HasAnEquivalent(const JSObjectsCluster& cluster);
-
- // Used by JSObjectsRetainerTree::ForEach.
- void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
- void Call(const JSObjectsCluster& cluster,
- const NumberAndSizeInfo& number_and_size);
-
- private:
- // Stores a list of back references for a cluster.
- struct ClusterBackRefs {
- explicit ClusterBackRefs(const JSObjectsCluster& cluster_);
- ClusterBackRefs(const ClusterBackRefs& src);
- ClusterBackRefs& operator=(const ClusterBackRefs& src);
-
- static int Compare(const ClusterBackRefs& a, const ClusterBackRefs& b);
- void SortRefs() { refs.Sort(JSObjectsCluster::Compare); }
- static void SortRefsIterator(ClusterBackRefs* ref) { ref->SortRefs(); }
-
- JSObjectsCluster cluster;
- ZoneList<JSObjectsCluster> refs;
- };
- typedef ZoneList<ClusterBackRefs> SimilarityList;
-
- // A tree for storing a list of equivalents for a cluster.
- struct ClusterEqualityConfig {
- typedef JSObjectsCluster Key;
- typedef JSObjectsCluster Value;
- static const Key kNoKey;
- static const Value kNoValue;
- static int Compare(const Key& a, const Key& b) {
- return Key::Compare(a, b);
- }
- };
- typedef ZoneSplayTree<ClusterEqualityConfig> EqualityTree;
-
- static int ClusterBackRefsCmp(const ClusterBackRefs* a,
- const ClusterBackRefs* b) {
- return ClusterBackRefs::Compare(*a, *b);
- }
- int DoProcess(JSObjectsRetainerTree* tree);
- int FillEqualityTree();
-
- static const int kInitialBackrefsListCapacity = 2;
- static const int kInitialSimilarityListCapacity = 2000;
- // Number of passes for finding equivalents. Limits the length of paths
- // that can be considered equivalent.
- static const int kMaxPassesCount = 10;
-
- ZoneScope zscope_;
- SimilarityList sim_list_;
- EqualityTree eq_tree_;
- ClusterBackRefs* current_pair_;
- JSObjectsRetainerTree* current_set_;
- const JSObjectsCluster* self_;
-};
-
-
-// RetainerHeapProfile is responsible for gathering and logging
-// the "retainer profile" of JS objects allocated on the heap.
-// It is run during the garbage collection cycle, so it doesn't need
-// to use handles.
-class RetainerTreeAggregator;
-
-class RetainerHeapProfile BASE_EMBEDDED {
- public:
- class Printer {
- public:
- virtual ~Printer() {}
- virtual void PrintRetainers(const JSObjectsCluster& cluster,
- const StringStream& retainers) = 0;
- };
-
- RetainerHeapProfile();
- ~RetainerHeapProfile();
-
- RetainerTreeAggregator* aggregator() { return aggregator_; }
- ClustersCoarser* coarser() { return &coarser_; }
- JSObjectsRetainerTree* retainers_tree() { return &retainers_tree_; }
-
- void CollectStats(HeapObject* obj);
- void CoarseAndAggregate();
- void PrintStats();
- void DebugPrintStats(Printer* printer);
- void StoreReference(const JSObjectsCluster& cluster, HeapObject* ref);
-
- private:
- ZoneScope zscope_;
- JSObjectsRetainerTree retainers_tree_;
- ClustersCoarser coarser_;
- RetainerTreeAggregator* aggregator_;
-};
-
-
-class AggregatedHeapSnapshot {
- public:
- AggregatedHeapSnapshot();
- ~AggregatedHeapSnapshot();
-
- HistogramInfo* info() { return info_; }
- ConstructorHeapProfile* js_cons_profile() { return &js_cons_profile_; }
- RetainerHeapProfile* js_retainer_profile() { return &js_retainer_profile_; }
-
- private:
- HistogramInfo* info_;
- ConstructorHeapProfile js_cons_profile_;
- RetainerHeapProfile js_retainer_profile_;
-};
-
-
-class HeapEntriesMap;
-class HeapEntriesAllocator;
-
-class AggregatedHeapSnapshotGenerator {
- public:
- explicit AggregatedHeapSnapshotGenerator(AggregatedHeapSnapshot* snapshot);
- void GenerateSnapshot();
- void FillHeapSnapshot(HeapSnapshot* snapshot);
-
- static const int kAllStringsType = LAST_TYPE + 1;
-
- private:
- void CalculateStringsStats();
- void CollectStats(HeapObject* obj);
- template<class Iterator>
- void IterateRetainers(
- HeapEntriesAllocator* allocator, HeapEntriesMap* entries_map);
-
- AggregatedHeapSnapshot* agg_snapshot_;
-};
-
-
-class ProducerHeapProfile {
- public:
- void Setup();
- void RecordJSObjectAllocation(Object* obj) {
- if (FLAG_log_producers) DoRecordJSObjectAllocation(obj);
- }
-
- private:
- ProducerHeapProfile() : can_log_(false) { }
-
- void DoRecordJSObjectAllocation(Object* obj);
- Isolate* isolate_;
- bool can_log_;
-
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(ProducerHeapProfile);
-};
-
-#endif // ENABLE_LOGGING_AND_PROFILING
-
} } // namespace v8::internal
#endif // V8_HEAP_PROFILER_H_
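
The deleted JSObjectsRetainerTree comment describes the retainer graph in adjacency-list form: Cluster -> (Cluster -> NumberAndSizeInfo). A compact sketch of that shape with standard containers (a simplification; the real trees were zone-allocated splay trees ordered by cluster comparison):

    #include <cstdio>
    #include <map>
    #include <string>

    struct NumberAndSize {
      int number;
      int bytes;
    };

    // For every cluster, the set of clusters that reference it,
    // together with reference counts and sizes.
    typedef std::map<std::string, NumberAndSize> ClusterTree;
    typedef std::map<std::string, ClusterTree> RetainerTree;

    // Mirrors RetainerHeapProfile::StoreReference: find (or create) the
    // subtree for the referenced cluster, then bump the retainer's entry.
    void StoreReference(RetainerTree* tree, const std::string& retainer,
                        const std::string& referenced, int bytes) {
      NumberAndSize& info = (*tree)[referenced][retainer];
      info.number += 1;
      info.bytes += bytes;
    }

    int main() {
      RetainerTree tree;
      StoreReference(&tree, "(roots)", "Array", 32);
      StoreReference(&tree, "Object", "Array", 32);
      std::printf("Array has %d retainer cluster(s)\n",
                  static_cast<int>(tree["Array"].size()));
      return 0;
    }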
diff --git a/src/heap.cc b/src/heap.cc
index f82c83c..2d27570 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -33,6 +33,7 @@
#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
+#include "deoptimizer.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "liveobjectlist-inl.h"
@@ -80,14 +81,14 @@
reserved_semispace_size_(16*MB),
max_semispace_size_(16*MB),
initial_semispace_size_(1*MB),
- max_old_generation_size_(1*GB),
+ max_old_generation_size_(1400*MB),
max_executable_size_(256*MB),
code_range_size_(512*MB),
#else
reserved_semispace_size_(8*MB),
max_semispace_size_(8*MB),
initial_semispace_size_(512*KB),
- max_old_generation_size_(512*MB),
+ max_old_generation_size_(700*MB),
max_executable_size_(128*MB),
code_range_size_(0),
#endif
@@ -108,6 +109,7 @@
cell_space_(NULL),
lo_space_(NULL),
gc_state_(NOT_IN_GC),
+ gc_post_processing_depth_(0),
mc_count_(0),
ms_count_(0),
gc_count_(0),
@@ -153,6 +155,15 @@
max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
+ intptr_t max_virtual = OS::MaxVirtualMemory();
+
+ if (max_virtual > 0) {
+ if (code_range_size_ > 0) {
+ // Reserve no more than 1/8 of the memory for the code range.
+ code_range_size_ = Min(code_range_size_, max_virtual >> 3);
+ }
+ }
+
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
global_contexts_list_ = NULL;
mark_compact_collector_.heap_ = this;
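
The shift by three is an integer divide by eight, so the new cap keeps the reserved code range from eating a small address space. Worked through with assumed numbers (a 2 GB virtual limit against the 64-bit default of 512 MB above):

    #include <cstdio>
    #include <stdint.h>

    int64_t Min(int64_t a, int64_t b) { return a < b ? a : b; }

    int main() {
      const int64_t MB = 1024 * 1024;
      int64_t max_virtual = 2048 * MB;     // assumed OS::MaxVirtualMemory()
      int64_t code_range_size = 512 * MB;  // 64-bit default in this patch
      if (max_virtual > 0 && code_range_size > 0) {
        // Reserve no more than 1/8 of the memory for the code range.
        code_range_size = Min(code_range_size, max_virtual >> 3);
      }
      // Prints 256: the 512 MB default is halved under a 2 GB limit.
      std::printf("code range: %lld MB\n",
                  static_cast<long long>(code_range_size / MB));
      return 0;
    }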
@@ -282,12 +293,11 @@
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
// Heap::ReportHeapStatistics will also log NewSpace statistics when
- // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The
- // following logic is used to avoid double logging.
-#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
+ // --log-gc is set. The following logic is used to avoid double
+ // logging.
+#ifdef DEBUG
if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
if (FLAG_heap_stats) {
ReportHeapStatistics("Before GC");
@@ -295,23 +305,16 @@
new_space_.ReportStatistics();
}
if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
-#elif defined(DEBUG)
- if (FLAG_heap_stats) {
- new_space_.CollectStatistics();
- ReportHeapStatistics("Before GC");
- new_space_.ClearHistograms();
- }
-#elif defined(ENABLE_LOGGING_AND_PROFILING)
+#else
if (FLAG_log_gc) {
new_space_.CollectStatistics();
new_space_.ReportStatistics();
new_space_.ClearHistograms();
}
-#endif
+#endif // DEBUG
}
-#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
@@ -357,7 +360,6 @@
lo_space_->Size(),
lo_space_->Available());
}
-#endif
// TODO(1238405): Combine the infrastructure for --heap-stats and
@@ -365,20 +367,17 @@
void Heap::ReportStatisticsAfterGC() {
// Similar to the before-GC case, we use some complicated logic to ensure that
// NewSpace statistics are logged exactly once when --log-gc is turned on.
-#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
+#if defined(DEBUG)
if (FLAG_heap_stats) {
new_space_.CollectStatistics();
ReportHeapStatistics("After GC");
} else if (FLAG_log_gc) {
new_space_.ReportStatistics();
}
-#elif defined(DEBUG)
- if (FLAG_heap_stats) ReportHeapStatistics("After GC");
-#elif defined(ENABLE_LOGGING_AND_PROFILING)
+#else
if (FLAG_log_gc) new_space_.ReportStatistics();
-#endif
+#endif // DEBUG
}
-#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::GarbageCollectionPrologue() {
@@ -395,11 +394,11 @@
}
if (FLAG_gc_verbose) Print();
-#endif
+#endif // DEBUG
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+#if defined(DEBUG)
ReportStatisticsBeforeGC();
-#endif
+#endif // DEBUG
LiveObjectList::GCPrologue();
}
@@ -436,12 +435,10 @@
symbol_table()->Capacity());
isolate_->counters()->number_of_symbols()->Set(
symbol_table()->NumberOfElements());
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
+#if defined(DEBUG)
ReportStatisticsAfterGC();
-#endif
-#ifdef ENABLE_DEBUGGER_SUPPORT
+#endif // DEBUG
isolate_->debug()->AfterGarbageCollection();
-#endif
}
@@ -514,11 +511,6 @@
GarbageCollectionEpilogue();
}
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (FLAG_log_gc) HeapProfiler::WriteSample();
-#endif
-
return next_gc_likely_to_collect_more;
}
@@ -772,11 +764,13 @@
isolate_->counters()->objs_since_last_young()->Set(0);
+ gc_post_processing_depth_++;
{ DisableAssertNoAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
next_gc_likely_to_collect_more =
isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
}
+ gc_post_processing_depth_--;
// Update relocatables.
Relocatable::PostGarbageCollectionProcessing();
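
The paired increment and decrement around the external processing step make "are we inside post-GC processing" a nesting-safe question; heap.h below exposes it as Heap::IsInGCPostProcessing() so callbacks can detect the state. The same idea as an exception-safe sketch (the scope guard is illustrative; the patch updates the counter by hand):

    #include <cassert>

    class Heap {
     public:
      Heap() : gc_post_processing_depth_(0) {}
      bool IsInGCPostProcessing() const {
        return gc_post_processing_depth_ > 0;
      }

      // Scoped counterpart of the manual ++/-- in the hunk above.
      class PostProcessingScope {
       public:
        explicit PostProcessingScope(Heap* heap) : heap_(heap) {
          heap_->gc_post_processing_depth_++;
        }
        ~PostProcessingScope() { heap_->gc_post_processing_depth_--; }
       private:
        Heap* heap_;
      };

     private:
      int gc_post_processing_depth_;
    };

    int main() {
      Heap heap;
      assert(!heap.IsInGCPostProcessing());
      {
        Heap::PostProcessingScope scope(&heap);
        // Weak-handle callbacks would run here and can query the flag.
        assert(heap.IsInGCPostProcessing());
      }
      assert(!heap.IsInGCPostProcessing());
      return 0;
    }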
@@ -852,6 +846,9 @@
CompletelyClearInstanceofCache();
if (is_compacting) FlushNumberStringCache();
+ if (FLAG_cleanup_code_caches_at_gc) {
+ polymorphic_code_cache()->set_cache(undefined_value());
+ }
ClearNormalizedMapCaches();
}
@@ -1035,9 +1032,10 @@
scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
- isolate_->global_handles()->IdentifyWeakIndependentHandles(
+ isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
&IsUnscavengedHeapObject);
- isolate_->global_handles()->IterateWeakIndependentRoots(&scavenge_visitor);
+ isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
+ &scavenge_visitor);
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
@@ -1280,6 +1278,7 @@
table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
table_.Register(kVisitByteArray, &EvacuateByteArray);
table_.Register(kVisitFixedArray, &EvacuateFixedArray);
+ table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
table_.Register(kVisitGlobalContext,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
@@ -1322,15 +1321,12 @@
enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
enum SizeRestriction { SMALL, UNKNOWN_SIZE };
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
bool should_record = false;
#ifdef DEBUG
should_record = FLAG_heap_stats;
#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
should_record = should_record || FLAG_log_gc;
-#endif
if (should_record) {
if (heap->new_space()->Contains(obj)) {
heap->new_space()->RecordAllocation(obj);
@@ -1339,7 +1335,6 @@
}
}
}
-#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Helper function used by CopyObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
@@ -1355,12 +1350,9 @@
source->set_map_word(MapWord::FromForwardingAddress(target));
if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Update NewSpace stats if necessary.
RecordCopiedObject(heap, target);
-#endif
HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
-#if defined(ENABLE_LOGGING_AND_PROFILING)
Isolate* isolate = heap->isolate();
if (isolate->logger()->is_logging() ||
CpuProfiler::is_profiling(isolate)) {
@@ -1369,7 +1361,6 @@
source->address(), target->address()));
}
}
-#endif
}
return target;
@@ -1431,6 +1422,18 @@
}
+ static inline void EvacuateFixedDoubleArray(Map* map,
+ HeapObject** slot,
+ HeapObject* object) {
+ int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
+ int object_size = FixedDoubleArray::SizeFor(length);
+ EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map,
+ slot,
+ object,
+ object_size);
+ }
+
+
static inline void EvacuateByteArray(Map* map,
HeapObject** slot,
HeapObject* object) {
@@ -1613,7 +1616,8 @@
map->set_prototype_transitions(empty_fixed_array());
map->set_unused_property_fields(0);
map->set_bit_field(0);
- map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
+ map->set_bit_field2(1 << Map::kIsExtensible);
+ map->set_elements_kind(JSObject::FAST_ELEMENTS);
// If the map object is aligned fill the padding area with Smi 0 objects.
if (Map::kPadStart < Map::kSize) {
@@ -1637,6 +1641,11 @@
}
+MaybeObject* Heap::AllocatePolymorphicCodeCache() {
+ return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
+}
+
+
const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
{type, size, k##camel_name##MapRootIndex},
@@ -1762,6 +1771,12 @@
Map::cast(obj)->set_is_undetectable();
{ MaybeObject* maybe_obj =
+ AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_fixed_double_array_map(Map::cast(obj));
+
+ { MaybeObject* maybe_obj =
AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -1820,6 +1835,12 @@
}
set_external_float_array_map(Map::cast(obj));
+ { MaybeObject* maybe_obj =
+ AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_non_strict_arguments_elements_map(Map::cast(obj));
+
{ MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
ExternalArray::kAlignedSize);
if (!maybe_obj->ToObject(&obj)) return false;
@@ -1865,7 +1886,7 @@
AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_context_map(Map::cast(obj));
+ set_function_context_map(Map::cast(obj));
{ MaybeObject* maybe_obj =
AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
@@ -1877,6 +1898,12 @@
AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
+ set_with_context_map(Map::cast(obj));
+
+ { MaybeObject* maybe_obj =
+ AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
Map* global_context_map = Map::cast(obj);
global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
set_global_context_map(global_context_map);
@@ -2144,6 +2171,11 @@
}
set_non_monomorphic_cache(NumberDictionary::cast(obj));
+ { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
+
set_instanceof_cache_function(Smi::FromInt(0));
set_instanceof_cache_map(Smi::FromInt(0));
set_instanceof_cache_answer(Smi::FromInt(0));
@@ -2361,40 +2393,41 @@
MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
- Object* result;
- { MaybeObject* maybe_result =
- Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ SharedFunctionInfo* share;
+ MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
+ if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
- SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
+ // Set pointer fields.
share->set_name(name);
Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
share->set_code(illegal);
share->set_scope_info(SerializedScopeInfo::Empty());
- Code* construct_stub = isolate_->builtins()->builtin(
- Builtins::kJSConstructStubGeneric);
+ Code* construct_stub =
+ isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
share->set_construct_stub(construct_stub);
- share->set_expected_nof_properties(0);
- share->set_length(0);
- share->set_formal_parameter_count(0);
share->set_instance_class_name(Object_symbol());
share->set_function_data(undefined_value());
share->set_script(undefined_value());
- share->set_start_position_and_type(0);
share->set_debug_info(undefined_value());
share->set_inferred_name(empty_string());
- share->set_compiler_hints(0);
- share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
share->set_initial_map(undefined_value());
- share->set_this_property_assignments_count(0);
share->set_this_property_assignments(undefined_value());
- share->set_opt_count(0);
+ share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
+
+ // Set integer fields (smi or int, depending on the architecture).
+ share->set_length(0);
+ share->set_formal_parameter_count(0);
+ share->set_expected_nof_properties(0);
share->set_num_literals(0);
+ share->set_start_position_and_type(0);
share->set_end_position(0);
share->set_function_token_position(0);
- share->set_es5_native(false);
- return result;
+ // All compiler hints default to false or 0.
+ share->set_compiler_hints(0);
+ share->set_this_property_assignments_count(0);
+ share->set_opt_count(0);
+
+ return share;
}
@@ -2584,12 +2617,13 @@
MaybeObject* Heap::AllocateSubString(String* buffer,
- int start,
- int end,
- PretenureFlag pretenure) {
+ int start,
+ int end,
+ PretenureFlag pretenure) {
int length = end - start;
-
- if (length == 1) {
+ if (length == 0) {
+ return empty_string();
+ } else if (length == 1) {
return LookupSingleCharacterStringFromCode(buffer->Get(start));
} else if (length == 2) {
// Optimization for 2-byte strings often used as keys in a decompression
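
With the added branch a zero-length substring never allocates: every caller gets the canonical empty string, just as one-character substrings come from the shared single-character table. A toy version of the length dispatch (plain std::string, not the V8 implementation):

    #include <cassert>
    #include <string>

    static const std::string kEmptyString;  // one shared instance

    // Stands in for LookupSingleCharacterStringFromCode.
    std::string SingleCharacterString(char c) { return std::string(1, c); }

    std::string AllocateSubString(const std::string& buffer,
                                  int start, int end) {
      int length = end - start;
      if (length == 0) {
        return kEmptyString;  // new fast path in this patch
      } else if (length == 1) {
        return SingleCharacterString(buffer[start]);
      }
      return buffer.substr(start, length);  // general (copying) case
    }

    int main() {
      assert(AllocateSubString("hello", 2, 2) == "");
      assert(AllocateSubString("hello", 1, 2) == "e");
      assert(AllocateSubString("hello", 1, 4) == "ell");
      return 0;
    }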
@@ -2929,9 +2963,6 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
HeapObject::cast(result)->set_map(map);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- isolate_->producer_heap_profile()->RecordJSObjectAllocation(result);
-#endif
return result;
}
@@ -3237,14 +3268,13 @@
MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
map->set_prototype(prototype);
- map->set_pre_allocated_property_fields(1);
- map->set_inobject_properties(1);
// Allocate the proxy object.
Object* result;
MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
JSProxy::cast(result)->set_handler(handler);
+ JSProxy::cast(result)->set_padding(Smi::FromInt(0));
return result;
}
@@ -3380,13 +3410,40 @@
JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
}
// Return the new clone.
-#ifdef ENABLE_LOGGING_AND_PROFILING
- isolate_->producer_heap_profile()->RecordJSObjectAllocation(clone);
-#endif
return clone;
}
+MaybeObject* Heap::ReinitializeJSProxyAsJSObject(JSProxy* object) {
+ // Allocate fresh map.
+ // TODO(rossberg): Once we optimize proxies, cache these maps.
+ Map* map;
+ MaybeObject* maybe_map_obj =
+ AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
+
+ // Check that the receiver has the same size as a fresh object.
+ ASSERT(map->instance_size() == object->map()->instance_size());
+
+ map->set_prototype(object->map()->prototype());
+
+ // Allocate the backing storage for the properties.
+ int prop_size = map->unused_property_fields() - map->inobject_properties();
+ Object* properties;
+ { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
+ if (!maybe_properties->ToObject(&properties)) return maybe_properties;
+ }
+
+ // Reset the map for the object.
+ object->set_map(map);
+
+ // Reinitialize the object from the constructor map.
+ InitializeJSObjectFromMap(JSObject::cast(object),
+ FixedArray::cast(properties), map);
+ return object;
+}
+
+
MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
JSGlobalProxy* object) {
ASSERT(constructor->has_initial_map());
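
ReinitializeJSProxyAsJSObject is sound only because the map swap happens in place: the proxy's memory is reinterpreted under a fresh map, so the two layouts must occupy exactly the same number of bytes, which is what the ASSERT checks (and why the proxy gains an explicit padding slot earlier in this patch). A schematic of the invariant with invented layouts:

    #include <cassert>

    struct Map { /* describes an object layout */ };

    // Invented layouts: the point is only that an in-place map swap is
    // safe when old and new instance sizes match exactly.
    struct JSProxy {
      Map* map;
      void* handler;
      void* padding;  // keeps the proxy the size of an empty JSObject
    };

    struct JSObject {
      Map* map;
      void* properties;
      void* elements;
    };

    int main() {
      // Mirrors: ASSERT(map->instance_size() ==
      //                 object->map()->instance_size());
      assert(sizeof(JSProxy) == sizeof(JSObject));
      return 0;
    }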
@@ -3797,6 +3854,62 @@
}
+MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
+ int size = FixedDoubleArray::SizeFor(0);
+ Object* result;
+ { MaybeObject* maybe_result =
+ AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ // Initialize the object.
+ reinterpret_cast<FixedDoubleArray*>(result)->set_map(
+ fixed_double_array_map());
+ reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
+ return result;
+}
+
+
+MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
+ int length,
+ PretenureFlag pretenure) {
+ if (length == 0) return empty_fixed_double_array();
+
+ Object* obj;
+ { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+
+ reinterpret_cast<FixedDoubleArray*>(obj)->set_map(fixed_double_array_map());
+ FixedDoubleArray::cast(obj)->set_length(length);
+ return obj;
+}
+
+
+MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
+ PretenureFlag pretenure) {
+ if (length < 0 || length > FixedDoubleArray::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
+
+ AllocationSpace space =
+ (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ int size = FixedDoubleArray::SizeFor(length);
+ if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
+ // Too big for new space.
+ space = LO_SPACE;
+ } else if (space == OLD_DATA_SPACE &&
+ size > MaxObjectSizeInPagedSpace()) {
+ // Too big for old data space.
+ space = LO_SPACE;
+ }
+
+ AllocationSpace retry_space =
+ (size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;
+
+ return AllocateRaw(size, space, retry_space);
+}
+
+
MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
Object* result;
{ MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
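
AllocateRawFixedDoubleArray picks a space in two steps: start from the pretenuring hint, then fall back to the large-object space whenever the payload would exceed the target space's per-object limit. The same decision in isolation (the limit constants here are assumed, not V8's real values):

    #include <cstdio>

    enum Space { NEW_SPACE, OLD_DATA_SPACE, LO_SPACE };
    enum PretenureFlag { NOT_TENURED, TENURED };

    // Assumed limits, for illustration only.
    const int kMaxObjectSizeInNewSpace = 512 * 1024;
    const int kMaxObjectSizeInPagedSpace = 1024 * 1024;

    Space PickSpace(int size, PretenureFlag pretenure) {
      Space space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
      if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
        space = LO_SPACE;  // too big for new space
      } else if (space == OLD_DATA_SPACE &&
                 size > kMaxObjectSizeInPagedSpace) {
        space = LO_SPACE;  // too big for old data space
      }
      return space;
    }

    int main() {
      std::printf("%d\n", PickSpace(100, NOT_TENURED));         // NEW_SPACE
      std::printf("%d\n", PickSpace(600 * 1024, NOT_TENURED));  // LO_SPACE
      std::printf("%d\n", PickSpace(600 * 1024, TENURED));  // OLD_DATA_SPACE
      return 0;
    }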
@@ -3829,38 +3942,50 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map(context_map());
+ context->set_map(function_context_map());
context->set_closure(function);
- context->set_fcontext(context);
- context->set_previous(NULL);
+ context->set_previous(function->context());
context->set_extension(NULL);
context->set_global(function->context()->global());
- ASSERT(!context->IsGlobalContext());
- ASSERT(context->is_function_context());
- ASSERT(result->IsContext());
- return result;
+ return context;
}
-MaybeObject* Heap::AllocateWithContext(Context* previous,
- JSObject* extension,
- bool is_catch_context) {
+MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
+ Context* previous,
+ String* name,
+ Object* thrown_object) {
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
+ Object* result;
+ { MaybeObject* maybe_result =
+ AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Context* context = reinterpret_cast<Context*>(result);
+ context->set_map(catch_context_map());
+ context->set_closure(function);
+ context->set_previous(previous);
+ context->set_extension(name);
+ context->set_global(previous->global());
+ context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
+ return context;
+}
+
+
+MaybeObject* Heap::AllocateWithContext(JSFunction* function,
+ Context* previous,
+ JSObject* extension) {
Object* result;
{ MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map(is_catch_context ? catch_context_map() :
- context_map());
- context->set_closure(previous->closure());
- context->set_fcontext(previous->fcontext());
+ context->set_map(with_context_map());
+ context->set_closure(function);
context->set_previous(previous);
context->set_extension(extension);
context->set_global(previous->global());
- ASSERT(!context->IsGlobalContext());
- ASSERT(!context->is_function_context());
- ASSERT(result->IsContext());
- return result;
+ return context;
}
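
After this change every context is wired into the chain as it is allocated: a function context hangs off the closure's own context, and a catch context is one slot longer so the thrown object lands at THROWN_OBJECT_INDEX while the variable name rides in the extension slot. A schematic of the shapes (the struct is illustrative; slot names follow the code above):

    #include <cstdio>
    #include <vector>

    // Illustrative layout only: MIN_CONTEXT_SLOTS fixed slots, with a
    // catch context one slot longer to hold the thrown object.
    struct Context {
      const char* map;        // function_context_map, catch_context_map, ...
      Context* previous;      // enclosing context in the chain
      const void* extension;  // a catch context stores the variable name here
      std::vector<const void*> extra;  // slot for THROWN_OBJECT_INDEX
    };

    Context AllocateCatchContext(Context* previous, const char* name,
                                 const void* thrown_object) {
      Context context;
      context.map = "catch_context_map";
      context.previous = previous;  // chain is linked at allocation time
      context.extension = name;
      context.extra.push_back(thrown_object);  // THROWN_OBJECT_INDEX
      return context;
    }

    int main() {
      Context function_context;
      function_context.map = "function_context_map";
      function_context.previous = NULL;
      function_context.extension = NULL;

      int thrown = 42;
      Context catch_context =
          AllocateCatchContext(&function_context, "e", &thrown);
      std::printf("catch context adds %d extra slot(s)\n",
                  static_cast<int>(catch_context.extra.size()));
      return 0;
    }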
@@ -4551,6 +4676,9 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
isolate_->debug()->Iterate(v);
+ if (isolate_->deoptimizer_data() != NULL) {
+ isolate_->deoptimizer_data()->Iterate(v);
+ }
#endif
v->Synchronize("debug");
isolate_->compilation_cache()->Iterate(v);
@@ -4575,7 +4703,7 @@
isolate_->global_handles()->IterateStrongRoots(v);
break;
case VISIT_ALL_IN_SCAVENGE:
- isolate_->global_handles()->IterateStrongAndDependentRoots(v);
+ isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
break;
case VISIT_ALL_IN_SWEEP_NEWSPACE:
case VISIT_ALL:
@@ -4998,11 +5126,6 @@
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // This should be called only after initial objects have been created.
- isolate_->producer_heap_profile()->Setup();
-#endif
-
return true;
}
@@ -5096,28 +5219,6 @@
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void Heap::Protect() {
- if (HasBeenSetup()) {
- AllSpaces spaces;
- for (Space* space = spaces.next(); space != NULL; space = spaces.next())
- space->Protect();
- }
-}
-
-
-void Heap::Unprotect() {
- if (HasBeenSetup()) {
- AllSpaces spaces;
- for (Space* space = spaces.next(); space != NULL; space = spaces.next())
- space->Unprotect();
- }
-}
-
-#endif
-
-
void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
ASSERT(callback != NULL);
GCPrologueCallbackPair pair(callback, gc_type);
@@ -5813,9 +5914,7 @@
PrintF("\n");
}
-#if defined(ENABLE_LOGGING_AND_PROFILING)
heap_->PrintShortHeapStatistics();
-#endif
}
diff --git a/src/heap.h b/src/heap.h
index 79ae996..6cd4f84 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -66,6 +66,7 @@
V(Map, global_context_map, GlobalContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
+ V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
V(Map, meta_map, MetaMap) \
V(Map, hash_table_map, HashTableMap) \
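
The root list is an X-macro: each V(type, name, CamelName) row expands, in several places, into a field, an accessor, and a kCamelNameRootIndex enum entry, so adding a row like fixed_double_array_map above is the entire registration. The trick in miniature (a toy list, not the real roots):

    #include <cstdio>

    #define ROOT_LIST(V)      \
      V(int, answer, Answer)  \
      V(double, ratio, Ratio)

    // Expansion one: a field per row.
    struct Roots {
    #define DECLARE_FIELD(type, name, camel) type name;
      ROOT_LIST(DECLARE_FIELD)
    #undef DECLARE_FIELD
    };

    // Expansion two: an index per row, plus the list length.
    enum RootIndex {
    #define DECLARE_INDEX(type, name, camel) k##camel##RootIndex,
      ROOT_LIST(DECLARE_INDEX)
    #undef DECLARE_INDEX
      kRootListLength
    };

    int main() {
      Roots roots = { 42, 0.5 };
      std::printf("%d roots, answer=%d\n",
                  static_cast<int>(kRootListLength), roots.answer);
      return 0;
    }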
@@ -78,6 +79,7 @@
V(Object, termination_exception, TerminationException) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
+ V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray) \
V(String, empty_string, EmptyString) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Map, string_map, StringMap) \
@@ -105,8 +107,10 @@
V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap) \
V(Map, external_float_array_map, ExternalFloatArrayMap) \
V(Map, external_double_array_map, ExternalDoubleArrayMap) \
- V(Map, context_map, ContextMap) \
+ V(Map, non_strict_arguments_elements_map, NonStrictArgumentsElementsMap) \
+ V(Map, function_context_map, FunctionContextMap) \
V(Map, catch_context_map, CatchContextMap) \
+ V(Map, with_context_map, WithContextMap) \
V(Map, code_map, CodeMap) \
V(Map, oddball_map, OddballMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
@@ -120,6 +124,7 @@
V(Foreign, prototype_accessors, PrototypeAccessors) \
V(NumberDictionary, code_stubs, CodeStubs) \
V(NumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
+ V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
@@ -139,7 +144,6 @@
V(StringImpl_symbol, "StringImpl") \
V(arguments_symbol, "arguments") \
V(Arguments_symbol, "Arguments") \
- V(arguments_shadow_symbol, ".arguments") \
V(call_symbol, "call") \
V(apply_symbol, "apply") \
V(caller_symbol, "caller") \
@@ -155,6 +159,7 @@
V(function_symbol, "function") \
V(length_symbol, "length") \
V(name_symbol, "name") \
+ V(native_symbol, "native") \
V(number_symbol, "number") \
V(Number_symbol, "Number") \
V(nan_symbol, "NaN") \
@@ -178,14 +183,14 @@
V(value_of_symbol, "valueOf") \
V(InitializeVarGlobal_symbol, "InitializeVarGlobal") \
V(InitializeConstGlobal_symbol, "InitializeConstGlobal") \
- V(KeyedLoadSpecializedMonomorphic_symbol, \
- "KeyedLoadSpecializedMonomorphic") \
- V(KeyedLoadSpecializedPolymorphic_symbol, \
- "KeyedLoadSpecializedPolymorphic") \
- V(KeyedStoreSpecializedMonomorphic_symbol, \
- "KeyedStoreSpecializedMonomorphic") \
- V(KeyedStoreSpecializedPolymorphic_symbol, \
- "KeyedStoreSpecializedPolymorphic") \
+ V(KeyedLoadElementMonomorphic_symbol, \
+ "KeyedLoadElementMonomorphic") \
+ V(KeyedLoadElementPolymorphic_symbol, \
+ "KeyedLoadElementPolymorphic") \
+ V(KeyedStoreElementMonomorphic_symbol, \
+ "KeyedStoreElementMonomorphic") \
+ V(KeyedStoreElementPolymorphic_symbol, \
+ "KeyedStoreElementPolymorphic") \
V(stack_overflow_symbol, "kStackOverflowBoilerplate") \
V(illegal_access_symbol, "illegal access") \
V(out_of_memory_symbol, "out-of-memory") \
@@ -213,7 +218,9 @@
V(global_eval_symbol, "GlobalEval") \
V(identity_hash_symbol, "v8::IdentityHash") \
V(closure_symbol, "(closure)") \
- V(use_strict, "use strict")
+ V(use_strict, "use strict") \
+ V(dot_symbol, ".") \
+ V(anonymous_function_symbol, "(anonymous function)")
// Forward declarations.
class GCTracer;
@@ -402,12 +409,6 @@
// Uncommit unused semi space.
bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect the heap by marking all spaces read-only/writable.
- void Protect();
- void Unprotect();
-#endif
-
// Allocates and initializes a new JavaScript object based on a
// constructor.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -440,6 +441,11 @@
MUST_USE_RESULT MaybeObject* AllocateJSProxy(Object* handler,
Object* prototype);
+ // Reinitialize a JSProxy into an (empty) JSObject. The receiver
+ // must have the same size as an empty object. The object is reinitialized
+ // and behaves as an object that has been freshly allocated.
+ MUST_USE_RESULT MaybeObject* ReinitializeJSProxyAsJSObject(JSProxy* object);
+
// Reinitialize a JSGlobalProxy based on a constructor. The object
// must have the same size as objects allocated using the
// constructor. The object is reinitialized and behaves as an
@@ -477,6 +483,9 @@
// Allocates an empty code cache.
MUST_USE_RESULT MaybeObject* AllocateCodeCache();
+ // Allocates an empty PolymorphicCodeCache.
+ MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
+
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
@@ -616,6 +625,17 @@
int length,
PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT MaybeObject* AllocateRawFixedDoubleArray(
+ int length,
+ PretenureFlag pretenure);
+
+ // Allocates a fixed double array with uninitialized values. Returns
+ // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+ // Please note this does not perform a garbage collection.
+ MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedDoubleArray(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
+
// AllocateHashTable is identical to AllocateFixedArray except
// that the resulting object has hash_table_map as map.
MUST_USE_RESULT MaybeObject* AllocateHashTable(
@@ -626,12 +646,17 @@
// Allocate a function context.
MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
- JSFunction* closure);
+ JSFunction* function);
+ // Allocate a catch context.
+ MUST_USE_RESULT MaybeObject* AllocateCatchContext(JSFunction* function,
+ Context* previous,
+ String* name,
+ Object* thrown_object);
// Allocate a 'with' context.
- MUST_USE_RESULT MaybeObject* AllocateWithContext(Context* previous,
- JSObject* extension,
- bool is_catch_context);
+ MUST_USE_RESULT MaybeObject* AllocateWithContext(JSFunction* function,
+ Context* previous,
+ JSObject* extension);
// Allocates a new utility object in the old generation.
MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type);
@@ -1026,10 +1051,8 @@
void ZapFromSpace();
#endif
-#if defined(ENABLE_LOGGING_AND_PROFILING)
// Print short heap statistics.
void PrintShortHeapStatistics();
-#endif
// Makes a new symbol object
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -1054,6 +1077,8 @@
enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
inline HeapState gc_state() { return gc_state_; }
+ inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
+
#ifdef DEBUG
bool IsAllocationAllowed() { return allocation_allowed_; }
inline bool allow_allocation(bool enable);
@@ -1285,6 +1310,7 @@
CellSpace* cell_space_;
LargeObjectSpace* lo_space_;
HeapState gc_state_;
+ int gc_post_processing_depth_;
// Returns the size of object residing in non new spaces.
intptr_t PromotedSpaceSize();
@@ -1461,6 +1487,9 @@
// Allocate empty fixed array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
+ // Allocate empty fixed double array.
+ MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
+
void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
// Performs a minor collection in new generation.
@@ -1482,11 +1511,9 @@
// around a GC).
inline void CompletelyClearInstanceofCache();
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Record statistics before and after garbage collection.
void ReportStatisticsBeforeGC();
void ReportStatisticsAfterGC();
-#endif
// Slow part of scavenge object.
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
@@ -1638,7 +1665,7 @@
int* weak_global_handle_count; // 15
int* pending_global_handle_count; // 16
int* near_death_global_handle_count; // 17
- int* destroyed_global_handle_count; // 18
+ int* free_global_handle_count; // 18
intptr_t* memory_allocator_size; // 19
intptr_t* memory_allocator_capacity; // 20
int* objects_per_type; // 21
@@ -1906,6 +1933,7 @@
void Clear();
static const int kAbsent = -2;
+
private:
DescriptorLookupCache() {
for (int i = 0; i < kLength; ++i) {
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 682bdb2..d282f37 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -669,7 +669,7 @@
}
-void HClassOfTest::PrintDataTo(StringStream* stream) {
+void HClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("class_of_test(");
value()->PrintNameTo(stream);
stream->Add(", \"%o\")", *class_name());
@@ -686,15 +686,13 @@
void HControlInstruction::PrintDataTo(StringStream* stream) {
- if (FirstSuccessor() != NULL) {
- int first_id = FirstSuccessor()->block_id();
- if (SecondSuccessor() == NULL) {
- stream->Add(" B%d", first_id);
- } else {
- int second_id = SecondSuccessor()->block_id();
- stream->Add(" goto (B%d, B%d)", first_id, second_id);
- }
+ stream->Add(" goto (");
+ bool first_block = true;
+ for (HSuccessorIterator it(this); !it.Done(); it.Advance()) {
+ stream->Add(first_block ? "B%d" : ", B%d", it.Current()->block_id());
+ first_block = false;
}
+ stream->Add(")");
}
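
A minimal sketch of the traversal this enables, using only the HSuccessorIterator interface introduced in hydrogen-instructions.h below (the helper itself is hypothetical):

    // Works uniformly whether the instruction has zero successors
    // (HReturn, HDeoptimize), one (HGoto), or two (HBranch and friends).
    static int CountSuccessors(HControlInstruction* instr) {
      int count = 0;
      for (HSuccessorIterator it(instr); !it.Done(); it.Advance()) {
        count++;  // it.Current() yields the HBasicBlock* at this position
      }
      return count;
    }
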
@@ -704,6 +702,11 @@
}
+void HReturn::PrintDataTo(StringStream* stream) {
+ value()->PrintNameTo(stream);
+}
+
+
void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" (%p)", *map());
@@ -744,10 +747,10 @@
}
-void HHasInstanceType::PrintDataTo(StringStream* stream) {
+void HHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
switch (from_) {
- case FIRST_JS_OBJECT_TYPE:
+ case FIRST_JS_RECEIVER_TYPE:
if (to_ == LAST_TYPE) stream->Add(" spec_object");
break;
case JS_REGEXP_TYPE:
@@ -765,7 +768,7 @@
}
-void HTypeofIs::PrintDataTo(StringStream* stream) {
+void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" == ");
stream->Add(type_literal_->ToAsciiVector());
@@ -781,14 +784,28 @@
}
+HValue* HCheckInstanceType::Canonicalize() {
+ if (check_ == IS_STRING &&
+ !value()->type().IsUninitialized() &&
+ value()->type().IsString()) {
+ return NULL;
+ }
+ if (check_ == IS_SYMBOL &&
+ value()->IsConstant() &&
+ HConstant::cast(value())->handle()->IsSymbol()) {
+ return NULL;
+ }
+ return this;
+}
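
The Canonicalize() contract at work here: returning NULL marks the check as redundant so it can be dropped, while returning this keeps it. A sketch of the assumed caller, where DeleteAndReplaceWith stands in as a hypothetical removal helper:

    HValue* canonical = instr->Canonicalize();
    if (canonical == NULL) {
      instr->DeleteAndReplaceWith(NULL);       // check proved unnecessary
    } else if (canonical != instr) {
      instr->DeleteAndReplaceWith(canonical);  // simpler equivalent value
    }
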
+
+
void HCheckInstanceType::GetCheckInterval(InstanceType* first,
InstanceType* last) {
ASSERT(is_interval_check());
switch (check_) {
- case IS_JS_OBJECT_OR_JS_FUNCTION:
- STATIC_ASSERT((LAST_JS_OBJECT_TYPE + 1) == JS_FUNCTION_TYPE);
- *first = FIRST_JS_OBJECT_TYPE;
- *last = JS_FUNCTION_TYPE;
+ case IS_SPEC_OBJECT:
+ *first = FIRST_SPEC_OBJECT_TYPE;
+ *last = LAST_SPEC_OBJECT_TYPE;
return;
case IS_JS_ARRAY:
*first = *last = JS_ARRAY_TYPE;
@@ -1084,6 +1101,16 @@
}
+void HDeoptimize::PrintDataTo(StringStream* stream) {
+ if (OperandCount() == 0) return;
+ OperandAt(0)->PrintNameTo(stream);
+ for (int i = 1; i < OperandCount(); ++i) {
+ stream->Add(" ");
+ OperandAt(i)->PrintNameTo(stream);
+ }
+}
+
+
void HEnterInlined::PrintDataTo(StringStream* stream) {
SmartPointer<char> name = function()->debug_name()->ToCString();
stream->Add("%s, id=%d", *name, function()->id());
@@ -1219,25 +1246,28 @@
-void HCompare::PrintDataTo(StringStream* stream) {
+void HCompareGeneric::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(token()));
stream->Add(" ");
HBinaryOperation::PrintDataTo(stream);
}
-void HCompare::SetInputRepresentation(Representation r) {
+void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add(Token::Name(token()));
+ stream->Add(" ");
+ left()->PrintNameTo(stream);
+ stream->Add(" ");
+ right()->PrintNameTo(stream);
+}
+
+
+void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
input_representation_ = r;
- if (r.IsTagged()) {
- SetAllSideEffects();
- ClearFlag(kUseGVN);
- } else if (r.IsDouble()) {
+ if (r.IsDouble()) {
SetFlag(kDeoptimizeOnUndefined);
- ClearAllSideEffects();
- SetFlag(kUseGVN);
} else {
- ClearAllSideEffects();
- SetFlag(kUseGVN);
+ ASSERT(r.IsInteger32());
}
}
@@ -1253,13 +1283,15 @@
}
-HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* object,
+HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
+ HValue* object,
ZoneMapList* types,
Handle<String> name)
- : HUnaryOperation(object),
- types_(Min(types->length(), kMaxLoadPolymorphism)),
+ : types_(Min(types->length(), kMaxLoadPolymorphism)),
name_(name),
need_generic_(false) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, object);
set_representation(Representation::Tagged());
SetFlag(kDependsOnMaps);
for (int i = 0;
@@ -1334,6 +1366,19 @@
}
+void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+ elements()->PrintNameTo(stream);
+ stream->Add("[");
+ key()->PrintNameTo(stream);
+ stream->Add("]");
+}
+
+
+bool HLoadKeyedFastDoubleElement::RequiresHoleCheck() const {
+ return true;
+}
+
+
void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
@@ -1346,34 +1391,40 @@
StringStream* stream) {
external_pointer()->PrintNameTo(stream);
stream->Add(".");
- switch (array_type()) {
- case kExternalByteArray:
+ switch (elements_kind()) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
stream->Add("byte");
break;
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
stream->Add("u_byte");
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
stream->Add("short");
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
stream->Add("u_short");
break;
- case kExternalIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
stream->Add("int");
break;
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
stream->Add("u_int");
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
stream->Add("float");
break;
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
stream->Add("double");
break;
- case kExternalPixelArray:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
stream->Add("pixel");
break;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
}
stream->Add("[");
key()->PrintNameTo(stream);
@@ -1413,6 +1464,15 @@
}
+void HStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+ elements()->PrintNameTo(stream);
+ stream->Add("[");
+ key()->PrintNameTo(stream);
+ stream->Add("] = ");
+ value()->PrintNameTo(stream);
+}
+
+
void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
@@ -1426,34 +1486,40 @@
StringStream* stream) {
external_pointer()->PrintNameTo(stream);
stream->Add(".");
- switch (array_type()) {
- case kExternalByteArray:
+ switch (elements_kind()) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
stream->Add("byte");
break;
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
stream->Add("u_byte");
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
stream->Add("short");
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
stream->Add("u_short");
break;
- case kExternalIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
stream->Add("int");
break;
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
stream->Add("u_int");
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
stream->Add("float");
break;
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
stream->Add("double");
break;
- case kExternalPixelArray:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
stream->Add("pixel");
break;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
}
stream->Add("[");
key()->PrintNameTo(stream);
@@ -1542,17 +1608,22 @@
}
-HType HCompare::CalculateInferredType() {
+HType HCompareGeneric::CalculateInferredType() {
return HType::Boolean();
}
-HType HCompareJSObjectEq::CalculateInferredType() {
+HType HInstanceOf::CalculateInferredType() {
return HType::Boolean();
}
-HType HUnaryPredicate::CalculateInferredType() {
+HType HDeleteProperty::CalculateInferredType() {
+ return HType::Boolean();
+}
+
+
+HType HInstanceOfKnownGlobal::CalculateInferredType() {
return HType::Boolean();
}
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 4d32edd..15186ff 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -35,6 +35,8 @@
#include "data-flow.h"
#include "small-pointer-list.h"
#include "string-stream.h"
+#include "v8conversions.h"
+#include "v8utils.h"
#include "zone.h"
namespace v8 {
@@ -71,6 +73,7 @@
V(BitXor) \
V(BlockEntry) \
V(BoundsCheck) \
+ V(Branch) \
V(CallConstantFunction) \
V(CallFunction) \
V(CallGlobal) \
@@ -88,16 +91,18 @@
V(CheckPrototypeMaps) \
V(CheckSmi) \
V(ClampToUint8) \
- V(ClassOfTest) \
- V(Compare) \
- V(CompareJSObjectEq) \
+ V(ClassOfTestAndBranch) \
+ V(CompareIDAndBranch) \
+ V(CompareGeneric) \
+ V(CompareObjectEqAndBranch) \
V(CompareMap) \
- V(CompareSymbolEq) \
+ V(CompareConstantEqAndBranch) \
V(Constant) \
V(Context) \
V(DeleteProperty) \
V(Deoptimize) \
V(Div) \
+ V(ElementsKind) \
V(EnterInlined) \
V(ExternalArrayLength) \
V(FixedArrayLength) \
@@ -107,17 +112,17 @@
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
- V(HasInstanceType) \
- V(HasCachedArrayIndex) \
+ V(HasCachedArrayIndexAndBranch) \
+ V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InvokeFunction) \
- V(IsConstructCall) \
- V(IsNull) \
- V(IsObject) \
- V(IsSmi) \
- V(IsUndetectable) \
+ V(IsConstructCallAndBranch) \
+ V(IsNullAndBranch) \
+ V(IsObjectAndBranch) \
+ V(IsSmiAndBranch) \
+ V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(LeaveInlined) \
V(LoadContextSlot) \
@@ -126,6 +131,7 @@
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
+ V(LoadKeyedFastDoubleElement) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadKeyedSpecializedArrayElement) \
@@ -146,13 +152,15 @@
V(Shl) \
V(Shr) \
V(Simulate) \
+ V(SoftDeoptimize) \
V(StackCheck) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
+ V(StoreKeyedFastDoubleElement) \
V(StoreKeyedFastElement) \
- V(StoreKeyedSpecializedArrayElement) \
V(StoreKeyedGeneric) \
+ V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
@@ -160,13 +168,15 @@
V(StringCharFromCode) \
V(StringLength) \
V(Sub) \
- V(Test) \
+ V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
+ V(ToInt32) \
V(Typeof) \
- V(TypeofIs) \
+ V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
+ V(UseConst) \
V(ValueOf)
#define GVN_FLAG_LIST(V) \
@@ -397,7 +407,7 @@
kBoolean = 0x85, // 0000 0000 1000 0101
kNonPrimitive = 0x101, // 0000 0001 0000 0001
kJSObject = 0x301, // 0000 0011 0000 0001
- kJSArray = 0x701, // 0000 0111 1000 0001
+ kJSArray = 0x701, // 0000 0111 0000 0001
kUninitialized = 0x1fff // 0001 1111 1111 1111
};
@@ -479,6 +489,10 @@
GVN_FLAG_LIST(DECLARE_DO)
#undef DECLARE_DO
kFlexibleRepresentation,
+ // Participate in Global Value Numbering, i.e. elimination of
+ // unnecessary recomputations. If an instruction sets this flag, it must
+ // implement DataEquals(), which will be used to determine if other
+ // occurrences of the instruction are indeed the same.
kUseGVN,
kCanOverflow,
kBailoutOnMinusZero,
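
A condensed sketch of the DataEquals() shape this comment asks for, mirroring the implementations elsewhere in this patch (HHasFoo and its foo_ field are hypothetical):

    class HHasFoo: public HUnaryOperation {
     public:
      HHasFoo(HValue* value, int foo) : HUnaryOperation(value), foo_(foo) {
        SetFlag(kUseGVN);  // opt in to GVN; DataEquals() is now required
      }
     protected:
      virtual bool DataEquals(HValue* other) {
        // Two occurrences are interchangeable only if their data match.
        return foo_ == reinterpret_cast<HHasFoo*>(other)->foo_;
      }
     private:
      int foo_;
    };
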
@@ -576,9 +590,9 @@
// it would otherwise output what should be a minus zero as an int32 zero.
// If the operation also exists in a form that takes int32 and outputs int32
// then the operation should return its input value so that we can propagate
- // back. There are two operations that need to propagate back to more than
- // one input. They are phi and binary add. They always return NULL and
- // expect the caller to take care of things.
+  // back. There are three operations that need to propagate back to more
+  // than one input: phi, binary div, and binary mul. They always return
+  // NULL and expect the caller to take care of things.
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited) {
visited->Add(id());
return NULL;
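
A concrete instance of the hazard, in plain C++ outside the compiler:

    #include <stdint.h>
    static void MinusZeroHazard() {
      double d = -1.0 * 0.0;  // d is -0.0, and 1.0 / d is -infinity
      int32_t i = -1 * 0;     // i is plain 0; the sign is silently lost
      (void) d;
      (void) i;
    }
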
@@ -723,10 +737,6 @@
virtual void Verify();
#endif
- // Returns whether this is some kind of deoptimizing check
- // instruction.
- virtual bool IsCheckInstruction() const { return false; }
-
virtual bool IsCall() { return false; }
DECLARE_ABSTRACT_INSTRUCTION(Instruction)
@@ -757,53 +767,6 @@
};
-class HControlInstruction: public HInstruction {
- public:
- HControlInstruction(HBasicBlock* first, HBasicBlock* second)
- : first_successor_(first), second_successor_(second) {
- }
-
- HBasicBlock* FirstSuccessor() const { return first_successor_; }
- HBasicBlock* SecondSuccessor() const { return second_successor_; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- DECLARE_ABSTRACT_INSTRUCTION(ControlInstruction)
-
- private:
- HBasicBlock* first_successor_;
- HBasicBlock* second_successor_;
-};
-
-
-template<int NumElements>
-class HOperandContainer {
- public:
- HOperandContainer() : elems_() { }
-
- int length() { return NumElements; }
- HValue*& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
- }
-
- private:
- HValue* elems_[NumElements];
-};
-
-
-template<>
-class HOperandContainer<0> {
- public:
- int length() { return 0; }
- HValue*& operator[](int i) {
- UNREACHABLE();
- static HValue* t = 0;
- return t;
- }
-};
-
-
template<int V>
class HTemplateInstruction : public HInstruction {
public:
@@ -814,23 +777,61 @@
void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
private:
- HOperandContainer<V> inputs_;
+ EmbeddedContainer<HValue*, V> inputs_;
};
-template<int V>
-class HTemplateControlInstruction : public HControlInstruction {
+class HControlInstruction: public HInstruction {
public:
- HTemplateControlInstruction<V>(HBasicBlock* first, HBasicBlock* second)
- : HControlInstruction(first, second) { }
+ virtual HBasicBlock* SuccessorAt(int i) = 0;
+ virtual int SuccessorCount() = 0;
+ virtual void SetSuccessorAt(int i, HBasicBlock* block) = 0;
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ HBasicBlock* FirstSuccessor() {
+ return SuccessorCount() > 0 ? SuccessorAt(0) : NULL;
+ }
+ HBasicBlock* SecondSuccessor() {
+ return SuccessorCount() > 1 ? SuccessorAt(1) : NULL;
+ }
+
+ DECLARE_ABSTRACT_INSTRUCTION(ControlInstruction)
+};
+
+
+class HSuccessorIterator BASE_EMBEDDED {
+ public:
+ explicit HSuccessorIterator(HControlInstruction* instr)
+ : instr_(instr), current_(0) { }
+
+ bool Done() { return current_ >= instr_->SuccessorCount(); }
+ HBasicBlock* Current() { return instr_->SuccessorAt(current_); }
+ void Advance() { current_++; }
+
+ private:
+ HControlInstruction* instr_;
+ int current_;
+};
+
+
+template<int S, int V>
+class HTemplateControlInstruction: public HControlInstruction {
+ public:
+ int SuccessorCount() { return S; }
+ HBasicBlock* SuccessorAt(int i) { return successors_[i]; }
+ void SetSuccessorAt(int i, HBasicBlock* block) { successors_[i] = block; }
+
int OperandCount() { return V; }
HValue* OperandAt(int i) { return inputs_[i]; }
+
protected:
void InternalSetOperandAt(int i, HValue* value) { inputs_[i] = value; }
private:
- HOperandContainer<V> inputs_;
+ EmbeddedContainer<HBasicBlock*, S> successors_;
+ EmbeddedContainer<HValue*, V> inputs_;
};
@@ -844,11 +845,22 @@
};
+// We insert a soft deoptimize when we hit code with unknown type
+// feedback, so that we get a chance to re-optimize with useful type
+// feedback. Unlike HDeoptimize, HSoftDeoptimize does not end a basic
+// block.
+class HSoftDeoptimize: public HTemplateInstruction<0> {
+ public:
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(SoftDeoptimize)
+};
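
Because HSoftDeoptimize is an ordinary instruction, it can be appended mid-block and execution falls through to whatever follows; a sketch of the assumed graph-builder call site:

    // AddInstruction on the current block is assumed here; HDeoptimize,
    // being a control instruction, would terminate the block instead.
    current_block()->AddInstruction(new HSoftDeoptimize());
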
+
+
class HDeoptimize: public HControlInstruction {
public:
- explicit HDeoptimize(int environment_length)
- : HControlInstruction(NULL, NULL),
- values_(environment_length) { }
+ explicit HDeoptimize(int environment_length) : values_(environment_length) { }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
@@ -856,6 +868,16 @@
virtual int OperandCount() { return values_.length(); }
virtual HValue* OperandAt(int index) { return values_[index]; }
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual int SuccessorCount() { return 0; }
+ virtual HBasicBlock* SuccessorAt(int i) {
+ UNREACHABLE();
+ return NULL;
+ }
+ virtual void SetSuccessorAt(int i, HBasicBlock* block) {
+ UNREACHABLE();
+ }
void AddEnvironmentValue(HValue* value) {
values_.Add(NULL);
@@ -879,35 +901,28 @@
};
-class HGoto: public HTemplateControlInstruction<0> {
+class HGoto: public HTemplateControlInstruction<1, 0> {
public:
- explicit HGoto(HBasicBlock* target)
- : HTemplateControlInstruction<0>(target, NULL),
- include_stack_check_(false) { }
-
- void set_include_stack_check(bool include_stack_check) {
- include_stack_check_ = include_stack_check;
- }
- bool include_stack_check() const { return include_stack_check_; }
+ explicit HGoto(HBasicBlock* target) {
+ SetSuccessorAt(0, target);
+ }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(Goto)
-
- private:
- bool include_stack_check_;
};
-class HUnaryControlInstruction: public HTemplateControlInstruction<1> {
+class HUnaryControlInstruction: public HTemplateControlInstruction<2, 1> {
public:
- explicit HUnaryControlInstruction(HValue* value,
- HBasicBlock* true_target,
- HBasicBlock* false_target)
- : HTemplateControlInstruction<1>(true_target, false_target) {
+ HUnaryControlInstruction(HValue* value,
+ HBasicBlock* true_target,
+ HBasicBlock* false_target) {
SetOperandAt(0, value);
+ SetSuccessorAt(0, true_target);
+ SetSuccessorAt(1, false_target);
}
virtual void PrintDataTo(StringStream* stream);
@@ -916,18 +931,21 @@
};
-class HTest: public HUnaryControlInstruction {
+class HBranch: public HUnaryControlInstruction {
public:
- HTest(HValue* value, HBasicBlock* true_target, HBasicBlock* false_target)
+ HBranch(HValue* value, HBasicBlock* true_target, HBasicBlock* false_target)
: HUnaryControlInstruction(value, true_target, false_target) {
ASSERT(true_target != NULL && false_target != NULL);
}
+ explicit HBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) { }
+
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(Test)
+ DECLARE_CONCRETE_INSTRUCTION(Branch)
};
@@ -959,24 +977,26 @@
};
-class HReturn: public HUnaryControlInstruction {
+class HReturn: public HTemplateControlInstruction<0, 1> {
public:
- explicit HReturn(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) {
+ explicit HReturn(HValue* value) {
+ SetOperandAt(0, value);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
+ virtual void PrintDataTo(StringStream* stream);
+
+ HValue* value() { return OperandAt(0); }
+
DECLARE_CONCRETE_INSTRUCTION(Return)
};
-class HAbnormalExit: public HTemplateControlInstruction<0> {
+class HAbnormalExit: public HTemplateControlInstruction<0, 0> {
public:
- HAbnormalExit() : HTemplateControlInstruction<0>(NULL, NULL) { }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
@@ -991,14 +1011,24 @@
SetOperandAt(0, value);
}
+ static HUnaryOperation* cast(HValue* value) {
+ return reinterpret_cast<HUnaryOperation*>(value);
+ }
+
+ virtual bool CanTruncateToInt32() const {
+ return CheckFlag(kTruncatingToInt32);
+ }
+
HValue* value() { return OperandAt(0); }
virtual void PrintDataTo(StringStream* stream);
};
-class HThrow: public HUnaryOperation {
+class HThrow: public HTemplateInstruction<2> {
public:
- explicit HThrow(HValue* value) : HUnaryOperation(value) {
+ HThrow(HValue* context, HValue* value) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, value);
SetAllSideEffects();
}
@@ -1006,10 +1036,25 @@
return Representation::Tagged();
}
+ HValue* context() { return OperandAt(0); }
+ HValue* value() { return OperandAt(1); }
+
DECLARE_CONCRETE_INSTRUCTION(Throw)
};
+class HUseConst: public HUnaryOperation {
+ public:
+ explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(UseConst)
+};
+
+
class HForceRepresentation: public HTemplateInstruction<1> {
public:
HForceRepresentation(HValue* value, Representation required_representation) {
@@ -1059,8 +1104,6 @@
return from_;
}
- bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
-
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(Change)
@@ -1069,8 +1112,7 @@
virtual bool DataEquals(HValue* other) {
if (!other->IsChange()) return false;
HChange* change = HChange::cast(other);
- return value() == change->value()
- && to().Equals(change->to())
+ return to().Equals(change->to())
&& deoptimize_on_undefined() == change->deoptimize_on_undefined();
}
@@ -1083,40 +1125,50 @@
class HClampToUint8: public HUnaryOperation {
public:
explicit HClampToUint8(HValue* value)
- : HUnaryOperation(value),
- input_rep_(Representation::None()) {
- SetFlag(kFlexibleRepresentation);
- set_representation(Representation::Tagged());
+ : HUnaryOperation(value) {
+ set_representation(Representation::Integer32());
SetFlag(kUseGVN);
}
virtual Representation RequiredInputRepresentation(int index) const {
- return input_rep_;
- }
-
- virtual Representation InferredRepresentation() {
- // TODO(danno): Inference on input types should happen separately from
- // return representation.
- Representation new_rep = value()->representation();
- if (input_rep_.IsNone()) {
- if (!new_rep.IsNone()) {
- input_rep_ = new_rep;
- return Representation::Integer32();
- } else {
- return Representation::None();
- }
- } else {
- return Representation::Integer32();
- }
+ return Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(ClampToUint8)
protected:
virtual bool DataEquals(HValue* other) { return true; }
+};
- private:
- Representation input_rep_;
+
+class HToInt32: public HUnaryOperation {
+ public:
+ explicit HToInt32(HValue* value)
+ : HUnaryOperation(value) {
+ set_representation(Representation::Integer32());
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
+ virtual bool CanTruncateToInt32() const {
+ return true;
+ }
+
+ virtual HValue* Canonicalize() {
+ if (value()->representation().IsInteger32()) {
+ return value();
+ } else {
+ return this;
+ }
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ToInt32)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
};
@@ -1188,15 +1240,38 @@
};
-class HStackCheck: public HTemplateInstruction<0> {
+class HStackCheck: public HTemplateInstruction<1> {
public:
- HStackCheck() { }
+ enum Type {
+ kFunctionEntry,
+ kBackwardsBranch
+ };
- virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
+ HStackCheck(HValue* context, Type type) : type_(type) {
+ SetOperandAt(0, context);
}
+ HValue* context() { return OperandAt(0); }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ void Eliminate() {
+ // The stack check eliminator might try to eliminate the same stack
+ // check instruction multiple times.
+ if (IsLinked()) {
+ DeleteFromGraph();
+ }
+ }
+
+ bool is_function_entry() { return type_ == kFunctionEntry; }
+ bool is_backwards_branch() { return type_ == kBackwardsBranch; }
+
DECLARE_CONCRETE_INSTRUCTION(StackCheck)
+
+ private:
+ Type type_;
};
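
The two check types record where the check was planted; assumed call sites, sketched:

    // One check at function entry, one per loop back edge (AddInstruction
    // and context are assumed from the surrounding graph builder).
    AddInstruction(new HStackCheck(context, HStackCheck::kFunctionEntry));
    AddInstruction(new HStackCheck(context, HStackCheck::kBackwardsBranch));
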
@@ -1257,6 +1332,24 @@
};
+class HThisFunction: public HTemplateInstruction<0> {
+ public:
+ HThisFunction() {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::None();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
class HContext: public HTemplateInstruction<0> {
public:
HContext() {
@@ -1268,7 +1361,7 @@
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(Context);
+ DECLARE_CONCRETE_INSTRUCTION(Context)
protected:
virtual bool DataEquals(HValue* other) { return true; }
@@ -1542,19 +1635,24 @@
};
-class HCallRuntime: public HCall<0> {
+class HCallRuntime: public HCall<1> {
public:
- HCallRuntime(Handle<String> name,
+ HCallRuntime(HValue* context,
+ Handle<String> name,
const Runtime::Function* c_function,
int argument_count)
- : HCall<0>(argument_count), c_function_(c_function), name_(name) { }
+ : HCall<1>(argument_count), c_function_(c_function), name_(name) {
+ SetOperandAt(0, context);
+ }
+
virtual void PrintDataTo(StringStream* stream);
+ HValue* context() { return OperandAt(0); }
const Runtime::Function* function() const { return c_function_; }
Handle<String> name() const { return name_; }
virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
+ return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(CallRuntime)
@@ -1628,6 +1726,25 @@
};
+class HElementsKind: public HUnaryOperation {
+ public:
+ explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Integer32());
+ SetFlag(kUseGVN);
+ SetFlag(kDependsOnMaps);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ElementsKind)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
class HBitNot: public HUnaryOperation {
public:
explicit HBitNot(HValue* value) : HUnaryOperation(value) {
@@ -1648,10 +1765,12 @@
};
-class HUnaryMathOperation: public HUnaryOperation {
+class HUnaryMathOperation: public HTemplateInstruction<2> {
public:
- HUnaryMathOperation(HValue* value, BuiltinFunctionId op)
- : HUnaryOperation(value), op_(op) {
+ HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op)
+ : op_(op) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, value);
switch (op) {
case kMathFloor:
case kMathRound:
@@ -1675,6 +1794,9 @@
SetFlag(kUseGVN);
}
+ HValue* context() { return OperandAt(0); }
+ HValue* value() { return OperandAt(1); }
+
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
@@ -1682,21 +1804,25 @@
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
virtual Representation RequiredInputRepresentation(int index) const {
- switch (op_) {
- case kMathFloor:
- case kMathRound:
- case kMathCeil:
- case kMathSqrt:
- case kMathPowHalf:
- case kMathLog:
- case kMathSin:
- case kMathCos:
- return Representation::Double();
- case kMathAbs:
- return representation();
- default:
- UNREACHABLE();
- return Representation::None();
+ if (index == 0) {
+ return Representation::Tagged();
+ } else {
+ switch (op_) {
+ case kMathFloor:
+ case kMathRound:
+ case kMathCeil:
+ case kMathSqrt:
+ case kMathPowHalf:
+ case kMathLog:
+ case kMathSin:
+ case kMathCos:
+ return Representation::Double();
+ case kMathAbs:
+ return representation();
+ default:
+ UNREACHABLE();
+ return Representation::None();
+ }
}
}
@@ -1777,8 +1903,6 @@
SetFlag(kDependsOnMaps);
}
- virtual bool IsCheckInstruction() const { return true; }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1812,8 +1936,6 @@
SetFlag(kUseGVN);
}
- virtual bool IsCheckInstruction() const { return true; }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1841,8 +1963,8 @@
class HCheckInstanceType: public HUnaryOperation {
public:
- static HCheckInstanceType* NewIsJSObjectOrJSFunction(HValue* value) {
- return new HCheckInstanceType(value, IS_JS_OBJECT_OR_JS_FUNCTION);
+ static HCheckInstanceType* NewIsSpecObject(HValue* value) {
+ return new HCheckInstanceType(value, IS_SPEC_OBJECT);
}
static HCheckInstanceType* NewIsJSArray(HValue* value) {
return new HCheckInstanceType(value, IS_JS_ARRAY);
@@ -1854,8 +1976,6 @@
return new HCheckInstanceType(value, IS_SYMBOL);
}
- virtual bool IsCheckInstruction() const { return true; }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1864,14 +1984,7 @@
virtual void Verify();
#endif
- virtual HValue* Canonicalize() {
- if (!value()->type().IsUninitialized() &&
- value()->type().IsString() &&
- check_ == IS_STRING) {
- return NULL;
- }
- return this;
- }
+ virtual HValue* Canonicalize();
bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
void GetCheckInterval(InstanceType* first, InstanceType* last);
@@ -1890,7 +2003,7 @@
private:
enum Check {
- IS_JS_OBJECT_OR_JS_FUNCTION,
+ IS_SPEC_OBJECT,
IS_JS_ARRAY,
IS_STRING,
IS_SYMBOL,
@@ -1914,8 +2027,6 @@
SetFlag(kUseGVN);
}
- virtual bool IsCheckInstruction() const { return true; }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1953,8 +2064,6 @@
SetFlag(kDependsOnMaps);
}
- virtual bool IsCheckInstruction() const { return true; }
-
#ifdef DEBUG
virtual void Verify();
#endif
@@ -1995,8 +2104,6 @@
SetFlag(kUseGVN);
}
- virtual bool IsCheckInstruction() const { return true; }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -2211,16 +2318,18 @@
};
-class HBinaryOperation: public HTemplateInstruction<2> {
+class HBinaryOperation: public HTemplateInstruction<3> {
public:
- HBinaryOperation(HValue* left, HValue* right) {
+ HBinaryOperation(HValue* context, HValue* left, HValue* right) {
ASSERT(left != NULL && right != NULL);
- SetOperandAt(0, left);
- SetOperandAt(1, right);
+ SetOperandAt(0, context);
+ SetOperandAt(1, left);
+ SetOperandAt(2, right);
}
- HValue* left() { return OperandAt(0); }
- HValue* right() { return OperandAt(1); }
+ HValue* context() { return OperandAt(0); }
+ HValue* left() { return OperandAt(1); }
+ HValue* right() { return OperandAt(2); }
// TODO(kasperl): Move these helpers to the IA-32 Lithium
// instruction sequence builder.
@@ -2336,16 +2445,15 @@
};
-class HBoundsCheck: public HBinaryOperation {
+class HBoundsCheck: public HTemplateInstruction<2> {
public:
- HBoundsCheck(HValue* index, HValue* length)
- : HBinaryOperation(index, length) {
+ HBoundsCheck(HValue* index, HValue* length) {
+ SetOperandAt(0, index);
+ SetOperandAt(1, length);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
}
- virtual bool IsCheckInstruction() const { return true; }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Integer32();
}
@@ -2354,8 +2462,8 @@
virtual void Verify();
#endif
- HValue* index() { return left(); }
- HValue* length() { return right(); }
+ HValue* index() { return OperandAt(0); }
+ HValue* length() { return OperandAt(1); }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
@@ -2366,15 +2474,17 @@
class HBitwiseBinaryOperation: public HBinaryOperation {
public:
- HBitwiseBinaryOperation(HValue* left, HValue* right)
- : HBinaryOperation(left, right) {
+ HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right)
+ : HBinaryOperation(context, left, right) {
set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
SetAllSideEffects();
}
virtual Representation RequiredInputRepresentation(int index) const {
- return representation();
+ return index == 0
+ ? Representation::Tagged()
+ : representation();
}
virtual void RepresentationChanged(Representation to) {
@@ -2394,8 +2504,8 @@
class HArithmeticBinaryOperation: public HBinaryOperation {
public:
- HArithmeticBinaryOperation(HValue* left, HValue* right)
- : HBinaryOperation(left, right) {
+ HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
+ : HBinaryOperation(context, left, right) {
set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
SetAllSideEffects();
@@ -2410,8 +2520,11 @@
virtual HType CalculateInferredType();
virtual Representation RequiredInputRepresentation(int index) const {
- return representation();
+ return index == 0
+ ? Representation::Tagged()
+ : representation();
}
+
virtual Representation InferredRepresentation() {
if (left()->representation().Equals(right()->representation())) {
return left()->representation();
@@ -2421,208 +2534,186 @@
};
-class HCompare: public HBinaryOperation {
+class HCompareGeneric: public HBinaryOperation {
public:
- HCompare(HValue* left, HValue* right, Token::Value token)
- : HBinaryOperation(left, right), token_(token) {
+ HCompareGeneric(HValue* context,
+ HValue* left,
+ HValue* right,
+ Token::Value token)
+ : HBinaryOperation(context, left, right), token_(token) {
ASSERT(Token::IsCompareOp(token));
set_representation(Representation::Tagged());
SetAllSideEffects();
}
- void SetInputRepresentation(Representation r);
-
- virtual bool EmitAtUses() {
- return !HasSideEffects() && !HasMultipleUses();
- }
-
virtual Representation RequiredInputRepresentation(int index) const {
- return input_representation_;
+ return Representation::Tagged();
}
+
Representation GetInputRepresentation() const {
- return input_representation_;
+ return Representation::Tagged();
}
+
Token::Value token() const { return token_; }
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
- virtual intptr_t Hashcode() {
- return HValue::Hashcode() * 7 + token_;
+ DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
+
+ private:
+ Token::Value token_;
+};
+
+
+class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> {
+ public:
+ HCompareIDAndBranch(HValue* left, HValue* right, Token::Value token)
+ : token_(token) {
+ ASSERT(Token::IsCompareOp(token));
+ SetOperandAt(0, left);
+ SetOperandAt(1, right);
}
- DECLARE_CONCRETE_INSTRUCTION(Compare)
+ HValue* left() { return OperandAt(0); }
+ HValue* right() { return OperandAt(1); }
+ Token::Value token() const { return token_; }
- protected:
- virtual bool DataEquals(HValue* other) {
- HCompare* comp = HCompare::cast(other);
- return token_ == comp->token();
+ void SetInputRepresentation(Representation r);
+ Representation GetInputRepresentation() const {
+ return input_representation_;
}
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return input_representation_;
+ }
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(CompareIDAndBranch)
+
private:
Representation input_representation_;
Token::Value token_;
};
-class HCompareJSObjectEq: public HBinaryOperation {
+class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> {
public:
- HCompareJSObjectEq(HValue* left, HValue* right)
- : HBinaryOperation(left, right) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
+ HCompareObjectEqAndBranch(HValue* left, HValue* right) {
+ SetOperandAt(0, left);
+ SetOperandAt(1, right);
}
- virtual bool EmitAtUses() {
- return !HasSideEffects() && !HasMultipleUses();
- }
+ HValue* left() { return OperandAt(0); }
+ HValue* right() { return OperandAt(1); }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(CompareObjectEqAndBranch)
};
-class HCompareSymbolEq: public HBinaryOperation {
+class HCompareConstantEqAndBranch: public HUnaryControlInstruction {
public:
- HCompareSymbolEq(HValue* left, HValue* right, Token::Value op)
- : HBinaryOperation(left, right), op_(op) {
- ASSERT(op == Token::EQ || op == Token::EQ_STRICT);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
+ HCompareConstantEqAndBranch(HValue* left, int right, Token::Value op)
+ : HUnaryControlInstruction(left, NULL, NULL), op_(op), right_(right) {
+ ASSERT(op == Token::EQ_STRICT);
}
Token::Value op() const { return op_; }
-
- virtual bool EmitAtUses() {
- return !HasSideEffects() && !HasMultipleUses();
- }
+ HValue* left() { return value(); }
+ int right() const { return right_; }
virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Tagged();
+ return Representation::Integer32();
}
- virtual HType CalculateInferredType() { return HType::Boolean(); }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareSymbolEq);
-
- protected:
- virtual bool DataEquals(HValue* other) {
- return op_ == HCompareSymbolEq::cast(other)->op_;
- }
+  DECLARE_CONCRETE_INSTRUCTION(CompareConstantEqAndBranch)
private:
const Token::Value op_;
+ const int right_;
};
-class HUnaryPredicate: public HUnaryOperation {
+class HIsNullAndBranch: public HUnaryControlInstruction {
public:
- explicit HUnaryPredicate(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
+ HIsNullAndBranch(HValue* value, bool is_strict)
+ : HUnaryControlInstruction(value, NULL, NULL), is_strict_(is_strict) { }
- virtual bool EmitAtUses() {
- return !HasSideEffects() && !HasMultipleUses();
- }
+ bool is_strict() const { return is_strict_; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
- virtual HType CalculateInferredType();
-};
-
-class HIsNull: public HUnaryPredicate {
- public:
- HIsNull(HValue* value, bool is_strict)
- : HUnaryPredicate(value), is_strict_(is_strict) { }
-
- bool is_strict() const { return is_strict_; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNull)
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HIsNull* b = HIsNull::cast(other);
- return is_strict_ == b->is_strict();
- }
+ DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch)
private:
bool is_strict_;
};
-class HIsObject: public HUnaryPredicate {
+class HIsObjectAndBranch: public HUnaryControlInstruction {
public:
- explicit HIsObject(HValue* value) : HUnaryPredicate(value) { }
+ explicit HIsObjectAndBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) { }
- DECLARE_CONCRETE_INSTRUCTION(IsObject)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HIsSmi: public HUnaryPredicate {
- public:
- explicit HIsSmi(HValue* value) : HUnaryPredicate(value) { }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmi)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HIsUndetectable: public HUnaryPredicate {
- public:
- explicit HIsUndetectable(HValue* value) : HUnaryPredicate(value) { }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectable)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
-class HIsConstructCall: public HTemplateInstruction<0> {
- public:
- HIsConstructCall() {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
}
- virtual bool EmitAtUses() {
- return !HasSideEffects() && !HasMultipleUses();
+ DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
+};
+
+
+class HIsSmiAndBranch: public HUnaryControlInstruction {
+ public:
+ explicit HIsSmiAndBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
}
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
+class HIsUndetectableAndBranch: public HUnaryControlInstruction {
+ public:
+ explicit HIsUndetectableAndBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) { }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
+};
+
+
+class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
+ public:
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCall)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch)
};
-class HHasInstanceType: public HUnaryPredicate {
+class HHasInstanceTypeAndBranch: public HUnaryControlInstruction {
public:
- HHasInstanceType(HValue* value, InstanceType type)
- : HUnaryPredicate(value), from_(type), to_(type) { }
- HHasInstanceType(HValue* value, InstanceType from, InstanceType to)
- : HUnaryPredicate(value), from_(from), to_(to) {
+ HHasInstanceTypeAndBranch(HValue* value, InstanceType type)
+ : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
+ HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to)
+ : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) {
ASSERT(to == LAST_TYPE); // Others not implemented yet in backend.
}
@@ -2631,34 +2722,41 @@
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceType)
-
- protected:
- virtual bool DataEquals(HValue* other) {
- HHasInstanceType* b = HHasInstanceType::cast(other);
- return (from_ == b->from()) && (to_ == b->to());
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
}
+ DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
+
private:
InstanceType from_;
InstanceType to_; // Inclusive range, not all combinations work.
};
-class HHasCachedArrayIndex: public HUnaryPredicate {
+class HHasCachedArrayIndexAndBranch: public HUnaryControlInstruction {
public:
- explicit HHasCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
+ explicit HHasCachedArrayIndexAndBranch(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) { }
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex)
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
+ DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch)
};
-class HGetCachedArrayIndex: public HUnaryPredicate {
+class HGetCachedArrayIndex: public HUnaryOperation {
public:
- explicit HGetCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
+ explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex)
@@ -2667,42 +2765,40 @@
};
-class HClassOfTest: public HUnaryPredicate {
+class HClassOfTestAndBranch: public HUnaryControlInstruction {
public:
- HClassOfTest(HValue* value, Handle<String> class_name)
- : HUnaryPredicate(value), class_name_(class_name) { }
+ HClassOfTestAndBranch(HValue* value, Handle<String> class_name)
+ : HUnaryControlInstruction(value, NULL, NULL),
+ class_name_(class_name) { }
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTest)
+ DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
virtual void PrintDataTo(StringStream* stream);
Handle<String> class_name() const { return class_name_; }
- protected:
- virtual bool DataEquals(HValue* other) {
- HClassOfTest* b = HClassOfTest::cast(other);
- return class_name_.is_identical_to(b->class_name_);
- }
-
private:
Handle<String> class_name_;
};
-class HTypeofIs: public HUnaryPredicate {
+class HTypeofIsAndBranch: public HUnaryControlInstruction {
public:
- HTypeofIs(HValue* value, Handle<String> type_literal)
- : HUnaryPredicate(value), type_literal_(type_literal) { }
+ HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
+ : HUnaryControlInstruction(value, NULL, NULL),
+ type_literal_(type_literal) { }
Handle<String> type_literal() { return type_literal_; }
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(TypeofIs)
+ DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
- protected:
- virtual bool DataEquals(HValue* other) {
- HTypeofIs* b = HTypeofIs::cast(other);
- return type_literal_.is_identical_to(b->type_literal_);
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
}
private:
@@ -2710,48 +2806,48 @@
};
-class HInstanceOf: public HTemplateInstruction<3> {
+class HInstanceOf: public HBinaryOperation {
public:
- HInstanceOf(HValue* context, HValue* left, HValue* right) {
- SetOperandAt(0, context);
- SetOperandAt(1, left);
- SetOperandAt(2, right);
+ HInstanceOf(HValue* context, HValue* left, HValue* right)
+ : HBinaryOperation(context, left, right) {
set_representation(Representation::Tagged());
SetAllSideEffects();
}
- HValue* context() { return OperandAt(0); }
- HValue* left() { return OperandAt(1); }
- HValue* right() { return OperandAt(2); }
-
- virtual bool EmitAtUses() {
- return !HasSideEffects() && !HasMultipleUses();
- }
-
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
+ virtual HType CalculateInferredType();
+
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
};
-class HInstanceOfKnownGlobal: public HUnaryOperation {
+class HInstanceOfKnownGlobal: public HTemplateInstruction<2> {
public:
- HInstanceOfKnownGlobal(HValue* left, Handle<JSFunction> right)
- : HUnaryOperation(left), function_(right) {
+ HInstanceOfKnownGlobal(HValue* context,
+ HValue* left,
+ Handle<JSFunction> right)
+ : function_(right) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, left);
set_representation(Representation::Tagged());
SetAllSideEffects();
}
+ HValue* context() { return OperandAt(0); }
+ HValue* left() { return OperandAt(1); }
Handle<JSFunction> function() { return function_; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
+ virtual HType CalculateInferredType();
+
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal)
private:
@@ -2759,16 +2855,22 @@
};
-class HPower: public HBinaryOperation {
+class HPower: public HTemplateInstruction<2> {
public:
- HPower(HValue* left, HValue* right)
- : HBinaryOperation(left, right) {
+ HPower(HValue* left, HValue* right) {
+ SetOperandAt(0, left);
+ SetOperandAt(1, right);
set_representation(Representation::Double());
SetFlag(kUseGVN);
}
+ HValue* left() { return OperandAt(0); }
+ HValue* right() { return OperandAt(1); }
+
virtual Representation RequiredInputRepresentation(int index) const {
- return (index == 1) ? Representation::None() : Representation::Double();
+ return index == 0
+ ? Representation::Double()
+ : Representation::None();
}
DECLARE_CONCRETE_INSTRUCTION(Power)
@@ -2780,7 +2882,8 @@
class HAdd: public HArithmeticBinaryOperation {
public:
- HAdd(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+ HAdd(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanOverflow);
}
@@ -2805,7 +2908,8 @@
class HSub: public HArithmeticBinaryOperation {
public:
- HSub(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+ HSub(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanOverflow);
}
@@ -2822,7 +2926,8 @@
class HMul: public HArithmeticBinaryOperation {
public:
- HMul(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+ HMul(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanOverflow);
}
@@ -2844,7 +2949,8 @@
class HMod: public HArithmeticBinaryOperation {
public:
- HMod(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+ HMod(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanBeDivByZero);
}
@@ -2871,7 +2977,8 @@
class HDiv: public HArithmeticBinaryOperation {
public:
- HDiv(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+ HDiv(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanBeDivByZero);
SetFlag(kCanOverflow);
}
@@ -2889,8 +2996,8 @@
class HBitAnd: public HBitwiseBinaryOperation {
public:
- HBitAnd(HValue* left, HValue* right)
- : HBitwiseBinaryOperation(left, right) { }
+ HBitAnd(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) { }
virtual bool IsCommutative() const { return true; }
virtual HType CalculateInferredType();
@@ -2906,8 +3013,8 @@
class HBitXor: public HBitwiseBinaryOperation {
public:
- HBitXor(HValue* left, HValue* right)
- : HBitwiseBinaryOperation(left, right) { }
+ HBitXor(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) { }
virtual bool IsCommutative() const { return true; }
virtual HType CalculateInferredType();
@@ -2921,8 +3028,8 @@
class HBitOr: public HBitwiseBinaryOperation {
public:
- HBitOr(HValue* left, HValue* right)
- : HBitwiseBinaryOperation(left, right) { }
+ HBitOr(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) { }
virtual bool IsCommutative() const { return true; }
virtual HType CalculateInferredType();
@@ -2938,8 +3045,8 @@
class HShl: public HBitwiseBinaryOperation {
public:
- HShl(HValue* left, HValue* right)
- : HBitwiseBinaryOperation(left, right) { }
+ HShl(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) { }
virtual Range* InferRange();
virtual HType CalculateInferredType();
@@ -2953,8 +3060,8 @@
class HShr: public HBitwiseBinaryOperation {
public:
- HShr(HValue* left, HValue* right)
- : HBitwiseBinaryOperation(left, right) { }
+ HShr(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) { }
virtual HType CalculateInferredType();
@@ -2967,8 +3074,8 @@
class HSar: public HBitwiseBinaryOperation {
public:
- HSar(HValue* left, HValue* right)
- : HBitwiseBinaryOperation(left, right) { }
+ HSar(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) { }
virtual Range* InferRange();
virtual HType CalculateInferredType();
@@ -3102,15 +3209,16 @@
};
-class HLoadGlobalGeneric: public HBinaryOperation {
+class HLoadGlobalGeneric: public HTemplateInstruction<2> {
public:
HLoadGlobalGeneric(HValue* context,
HValue* global_object,
Handle<Object> name,
bool for_typeof)
- : HBinaryOperation(context, global_object),
- name_(name),
+ : name_(name),
for_typeof_(for_typeof) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, global_object);
set_representation(Representation::Tagged());
SetAllSideEffects();
}
@@ -3228,15 +3336,18 @@
static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
- return !value->type().IsSmi() &&
- !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
+ return !value->type().IsBoolean()
+ && !value->type().IsSmi()
+ && !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
}
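
The boolean case is the new one: true and false are immortal old-space values (and smis are not heap pointers at all), so storing them can never create an old-to-new pointer, assuming that is what the barrier guards against. Sketched use:

    if (!StoringValueNeedsWriteBarrier(value)) {
      // emit a plain store and skip the remembered-set update
    }
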
-class HStoreContextSlot: public HBinaryOperation {
+class HStoreContextSlot: public HTemplateInstruction<2> {
public:
HStoreContextSlot(HValue* context, int slot_index, HValue* value)
- : HBinaryOperation(context, value), slot_index_(slot_index) {
+ : slot_index_(slot_index) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, value);
SetFlag(kChangesContextSlots);
}
@@ -3300,13 +3411,15 @@
};
-class HLoadNamedFieldPolymorphic: public HUnaryOperation {
+class HLoadNamedFieldPolymorphic: public HTemplateInstruction<2> {
public:
- HLoadNamedFieldPolymorphic(HValue* object,
+ HLoadNamedFieldPolymorphic(HValue* context,
+ HValue* object,
ZoneMapList* types,
Handle<String> name);
- HValue* object() { return OperandAt(0); }
+ HValue* context() { return OperandAt(0); }
+ HValue* object() { return OperandAt(1); }
ZoneMapList* types() { return &types_; }
Handle<String> name() { return name_; }
bool need_generic() { return need_generic_; }
@@ -3330,10 +3443,12 @@
-class HLoadNamedGeneric: public HBinaryOperation {
+class HLoadNamedGeneric: public HTemplateInstruction<2> {
public:
HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
- : HBinaryOperation(context, object), name_(name) {
+ : name_(name) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, object);
set_representation(Representation::Tagged());
SetAllSideEffects();
}
@@ -3375,9 +3490,11 @@
};
-class HLoadKeyedFastElement: public HBinaryOperation {
+class HLoadKeyedFastElement: public HTemplateInstruction<2> {
public:
- HLoadKeyedFastElement(HValue* obj, HValue* key) : HBinaryOperation(obj, key) {
+ HLoadKeyedFastElement(HValue* obj, HValue* key) {
+ SetOperandAt(0, obj);
+ SetOperandAt(1, key);
set_representation(Representation::Tagged());
SetFlag(kDependsOnArrayElements);
SetFlag(kUseGVN);
@@ -3388,8 +3505,9 @@
virtual Representation RequiredInputRepresentation(int index) const {
// The key is supposed to be Integer32.
- return (index == 1) ? Representation::Integer32()
- : Representation::Tagged();
+ return index == 0
+ ? Representation::Tagged()
+ : Representation::Integer32();
}
virtual void PrintDataTo(StringStream* stream);
@@ -3403,15 +3521,47 @@
};
-class HLoadKeyedSpecializedArrayElement: public HBinaryOperation {
+class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
+ public:
+ HLoadKeyedFastDoubleElement(HValue* elements, HValue* key) {
+ SetOperandAt(0, elements);
+ SetOperandAt(1, key);
+ set_representation(Representation::Double());
+ SetFlag(kDependsOnArrayElements);
+ SetFlag(kUseGVN);
+ }
+
+ HValue* elements() { return OperandAt(0); }
+ HValue* key() { return OperandAt(1); }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ // The key is supposed to be Integer32.
+ return index == 0
+ ? Representation::Tagged()
+ : Representation::Integer32();
+ }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ bool RequiresHoleCheck() const;
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
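Note the uniform operand layout these rewritten keyed instructions share: operand 0 is the tagged container and operand 1 is the Integer32 key, which is why each RequiredInputRepresentation now switches on the index instead of special-casing "the second operand". A minimal sketch of the convention (illustrative names, not V8's API):

// Sketch: representation is determined purely by operand position in the
// new two-operand keyed-access layout.
enum class Rep { kTagged, kInteger32 };

struct KeyedAccessLayout {
  // Operand 0: tagged backing store / receiver. Operand 1: Integer32 key.
  static Rep RequiredInputRepresentation(int index) {
    return index == 0 ? Rep::kTagged : Rep::kInteger32;
  }
};
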
+class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
public:
HLoadKeyedSpecializedArrayElement(HValue* external_elements,
HValue* key,
- ExternalArrayType array_type)
- : HBinaryOperation(external_elements, key),
- array_type_(array_type) {
- if (array_type == kExternalFloatArray ||
- array_type == kExternalDoubleArray) {
+ JSObject::ElementsKind elements_kind)
+ : elements_kind_(elements_kind) {
+ SetOperandAt(0, external_elements);
+ SetOperandAt(1, key);
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
set_representation(Representation::Double());
} else {
set_representation(Representation::Integer32());
@@ -3427,13 +3577,14 @@
virtual Representation RequiredInputRepresentation(int index) const {
// The key is supposed to be Integer32, but the base pointer
// for the element load is a naked pointer.
- return (index == 1) ? Representation::Integer32()
- : Representation::External();
+ return index == 0
+ ? Representation::External()
+ : Representation::Integer32();
}
HValue* external_pointer() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
- ExternalArrayType array_type() const { return array_type_; }
+ JSObject::ElementsKind elements_kind() const { return elements_kind_; }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement)
@@ -3442,11 +3593,11 @@
if (!other->IsLoadKeyedSpecializedArrayElement()) return false;
HLoadKeyedSpecializedArrayElement* cast_other =
HLoadKeyedSpecializedArrayElement::cast(other);
- return array_type_ == cast_other->array_type();
+ return elements_kind_ == cast_other->elements_kind();
}
private:
- ExternalArrayType array_type_;
+ JSObject::ElementsKind elements_kind_;
};
@@ -3474,17 +3625,18 @@
};
-class HStoreNamedField: public HBinaryOperation {
+class HStoreNamedField: public HTemplateInstruction<2> {
public:
HStoreNamedField(HValue* obj,
Handle<String> name,
HValue* val,
bool in_object,
int offset)
- : HBinaryOperation(obj, val),
- name_(name),
+ : name_(name),
is_in_object_(in_object),
offset_(offset) {
+ SetOperandAt(0, obj);
+ SetOperandAt(1, val);
if (is_in_object_) {
SetFlag(kChangesInobjectFields);
} else {
@@ -3566,7 +3718,8 @@
virtual Representation RequiredInputRepresentation(int index) const {
// The key is supposed to be Integer32.
- return (index == 1) ? Representation::Integer32()
+ return index == 1
+ ? Representation::Integer32()
: Representation::Tagged();
}
@@ -3584,13 +3737,48 @@
};
+class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
+ public:
+ HStoreKeyedFastDoubleElement(HValue* elements,
+ HValue* key,
+ HValue* val) {
+ SetOperandAt(0, elements);
+ SetOperandAt(1, key);
+ SetOperandAt(2, val);
+ SetFlag(kChangesArrayElements);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ if (index == 1) {
+ return Representation::Integer32();
+ } else if (index == 2) {
+ return Representation::Double();
+ } else {
+ return Representation::Tagged();
+ }
+ }
+
+ HValue* elements() { return OperandAt(0); }
+ HValue* key() { return OperandAt(1); }
+ HValue* value() { return OperandAt(2); }
+
+ bool NeedsWriteBarrier() {
+ return StoringValueNeedsWriteBarrier(value());
+ }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement)
+};
+
+
class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> {
public:
HStoreKeyedSpecializedArrayElement(HValue* external_elements,
HValue* key,
HValue* val,
- ExternalArrayType array_type)
- : array_type_(array_type) {
+ JSObject::ElementsKind elements_kind)
+ : elements_kind_(elements_kind) {
SetFlag(kChangesSpecializedArrayElements);
SetOperandAt(0, external_elements);
SetOperandAt(1, key);
@@ -3603,8 +3791,10 @@
if (index == 0) {
return Representation::External();
} else {
- if (index == 2 && (array_type() == kExternalFloatArray ||
- array_type() == kExternalDoubleArray)) {
+ bool float_or_double_elements =
+ elements_kind() == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind() == JSObject::EXTERNAL_DOUBLE_ELEMENTS;
+ if (index == 2 && float_or_double_elements) {
return Representation::Double();
} else {
return Representation::Integer32();
@@ -3615,12 +3805,12 @@
HValue* external_pointer() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
- ExternalArrayType array_type() const { return array_type_; }
+ JSObject::ElementsKind elements_kind() const { return elements_kind_; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement)
private:
- ExternalArrayType array_type_;
+ JSObject::ElementsKind elements_kind_;
};
@@ -3660,7 +3850,8 @@
class HStringAdd: public HBinaryOperation {
public:
- HStringAdd(HValue* left, HValue* right) : HBinaryOperation(left, right) {
+ HStringAdd(HValue* context, HValue* left, HValue* right)
+ : HBinaryOperation(context, left, right) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kDependsOnMaps);
@@ -3681,10 +3872,12 @@
};
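
A theme running through these rewrites: instructions that may call into the runtime (string operations, typeof, delete, in, the literal constructors) now carry the context as an explicit first operand instead of fetching it implicitly, keeping it alive and visible to the register allocator. Sketched minimally with illustrative stand-in types:

// Sketch of the context-threading pattern: the context becomes operand 0
// and every later operand shifts up by one.
struct HValueSketch;

template <int N>
struct TemplateInstructionSketch {
  HValueSketch* operands[N];
  void SetOperandAt(int i, HValueSketch* v) { operands[i] = v; }
  HValueSketch* OperandAt(int i) const { return operands[i]; }
};

struct StringCharCodeAtSketch : TemplateInstructionSketch<3> {
  StringCharCodeAtSketch(HValueSketch* context, HValueSketch* string,
                         HValueSketch* index) {
    SetOperandAt(0, context);  // context first, for runtime call paths
    SetOperandAt(1, string);
    SetOperandAt(2, index);
  }
  HValueSketch* context() const { return OperandAt(0); }
  HValueSketch* string() const { return OperandAt(1); }
  HValueSketch* index() const { return OperandAt(2); }
};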
-class HStringCharCodeAt: public HBinaryOperation {
+class HStringCharCodeAt: public HTemplateInstruction<3> {
public:
- HStringCharCodeAt(HValue* string, HValue* index)
- : HBinaryOperation(string, index) {
+ HStringCharCodeAt(HValue* context, HValue* string, HValue* index) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, string);
+ SetOperandAt(2, index);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
SetFlag(kDependsOnMaps);
@@ -3692,12 +3885,14 @@
virtual Representation RequiredInputRepresentation(int index) const {
// The index is supposed to be Integer32.
- return (index == 1) ? Representation::Integer32()
+ return index == 2
+ ? Representation::Integer32()
: Representation::Tagged();
}
- HValue* string() { return OperandAt(0); }
- HValue* index() { return OperandAt(1); }
+ HValue* context() { return OperandAt(0); }
+ HValue* string() { return OperandAt(1); }
+ HValue* index() { return OperandAt(2); }
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt)
@@ -3710,17 +3905,24 @@
};
-class HStringCharFromCode: public HUnaryOperation {
+class HStringCharFromCode: public HTemplateInstruction<2> {
public:
- explicit HStringCharFromCode(HValue* char_code) : HUnaryOperation(char_code) {
- set_representation(Representation::Tagged());
+ HStringCharFromCode(HValue* context, HValue* char_code) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, char_code);
+ set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Integer32();
+ return index == 0
+ ? Representation::Tagged()
+ : Representation::Integer32();
}
+ HValue* context() { return OperandAt(0); }
+ HValue* value() { return OperandAt(1); }
+
virtual bool DataEquals(HValue* other) { return true; }
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
@@ -3772,23 +3974,27 @@
};
-class HArrayLiteral: public HMaterializedLiteral<0> {
+class HArrayLiteral: public HMaterializedLiteral<1> {
public:
- HArrayLiteral(Handle<FixedArray> constant_elements,
+ HArrayLiteral(HValue* context,
+ Handle<FixedArray> constant_elements,
int length,
int literal_index,
int depth)
- : HMaterializedLiteral<0>(literal_index, depth),
+ : HMaterializedLiteral<1>(literal_index, depth),
length_(length),
- constant_elements_(constant_elements) {}
+ constant_elements_(constant_elements) {
+ SetOperandAt(0, context);
+ }
+ HValue* context() { return OperandAt(0); }
Handle<FixedArray> constant_elements() const { return constant_elements_; }
int length() const { return length_; }
bool IsCopyOnWrite() const;
virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
+ return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral)
@@ -3834,20 +4040,24 @@
};
-class HRegExpLiteral: public HMaterializedLiteral<0> {
+class HRegExpLiteral: public HMaterializedLiteral<1> {
public:
- HRegExpLiteral(Handle<String> pattern,
+ HRegExpLiteral(HValue* context,
+ Handle<String> pattern,
Handle<String> flags,
int literal_index)
- : HMaterializedLiteral<0>(literal_index, 0),
+ : HMaterializedLiteral<1>(literal_index, 0),
pattern_(pattern),
- flags_(flags) { }
+ flags_(flags) {
+ SetOperandAt(0, context);
+ }
+ HValue* context() { return OperandAt(0); }
Handle<String> pattern() { return pattern_; }
Handle<String> flags() { return flags_; }
virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
+ return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
@@ -3858,15 +4068,20 @@
};
-class HFunctionLiteral: public HTemplateInstruction<0> {
+class HFunctionLiteral: public HTemplateInstruction<1> {
public:
- HFunctionLiteral(Handle<SharedFunctionInfo> shared, bool pretenure)
+ HFunctionLiteral(HValue* context,
+ Handle<SharedFunctionInfo> shared,
+ bool pretenure)
: shared_info_(shared), pretenure_(pretenure) {
+ SetOperandAt(0, context);
set_representation(Representation::Tagged());
}
+ HValue* context() { return OperandAt(0); }
+
virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::None();
+ return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral)
@@ -3880,12 +4095,17 @@
};
-class HTypeof: public HUnaryOperation {
+class HTypeof: public HTemplateInstruction<2> {
public:
- explicit HTypeof(HValue* value) : HUnaryOperation(value) {
+ explicit HTypeof(HValue* context, HValue* value) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, value);
set_representation(Representation::Tagged());
}
+ HValue* context() { return OperandAt(0); }
+ HValue* value() { return OperandAt(1); }
+
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -3928,8 +4148,8 @@
class HDeleteProperty: public HBinaryOperation {
public:
- HDeleteProperty(HValue* obj, HValue* key)
- : HBinaryOperation(obj, key) {
+ HDeleteProperty(HValue* context, HValue* obj, HValue* key)
+ : HBinaryOperation(context, obj, key) {
set_representation(Representation::Tagged());
SetAllSideEffects();
}
@@ -3938,6 +4158,8 @@
return Representation::Tagged();
}
+ virtual HType CalculateInferredType();
+
DECLARE_CONCRETE_INSTRUCTION(DeleteProperty)
HValue* object() { return left(); }
@@ -3945,17 +4167,19 @@
};
-class HIn: public HTemplateInstruction<2> {
+class HIn: public HTemplateInstruction<3> {
public:
- HIn(HValue* key, HValue* object) {
- SetOperandAt(0, key);
- SetOperandAt(1, object);
+ HIn(HValue* context, HValue* key, HValue* object) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, key);
+ SetOperandAt(2, object);
set_representation(Representation::Tagged());
SetAllSideEffects();
}
- HValue* key() { return OperandAt(0); }
- HValue* object() { return OperandAt(1); }
+ HValue* context() { return OperandAt(0); }
+ HValue* key() { return OperandAt(1); }
+ HValue* object() { return OperandAt(2); }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 1b37d93..f105703 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -33,6 +33,7 @@
#include "hashmap.h"
#include "lithium-allocator.h"
#include "parser.h"
+#include "scopeinfo.h"
#include "scopes.h"
#include "stub-cache.h"
@@ -68,8 +69,8 @@
last_instruction_index_(-1),
deleted_phis_(4),
parent_loop_header_(NULL),
- is_inline_return_target_(false) {
-}
+ is_inline_return_target_(false),
+ is_deoptimizing_(false) { }
void HBasicBlock::AttachLoopInformation() {
@@ -131,16 +132,16 @@
}
-HSimulate* HBasicBlock::CreateSimulate(int id) {
+HSimulate* HBasicBlock::CreateSimulate(int ast_id) {
ASSERT(HasEnvironment());
HEnvironment* environment = last_environment();
- ASSERT(id == AstNode::kNoNumber ||
- environment->closure()->shared()->VerifyBailoutId(id));
+ ASSERT(ast_id == AstNode::kNoNumber ||
+ environment->closure()->shared()->VerifyBailoutId(ast_id));
int push_count = environment->push_count();
int pop_count = environment->pop_count();
- HSimulate* instr = new(zone()) HSimulate(id, pop_count);
+ HSimulate* instr = new(zone()) HSimulate(ast_id, pop_count);
for (int i = push_count - 1; i >= 0; --i) {
instr->AddPushedValue(environment->ExpressionStackAt(i));
}
@@ -157,23 +158,19 @@
ASSERT(!IsFinished());
AddInstruction(end);
end_ = end;
- if (end->FirstSuccessor() != NULL) {
- end->FirstSuccessor()->RegisterPredecessor(this);
- if (end->SecondSuccessor() != NULL) {
- end->SecondSuccessor()->RegisterPredecessor(this);
- }
+ for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
+ it.Current()->RegisterPredecessor(this);
}
}
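
Finish now registers predecessors through HSuccessorIterator, so control instructions with any number of successor edges are handled by one loop. The iterator shape this assumes is roughly (simplified sketch, not the real class):

// Simplified sketch of an indexed successor iterator over a control
// instruction; the real HSuccessorIterator plays the same role.
struct BlockSketch;

struct ControlSketch {
  BlockSketch* successors[2];
  int successor_count;
  int SuccessorCount() const { return successor_count; }
  BlockSketch* SuccessorAt(int i) const { return successors[i]; }
};

class SuccessorIteratorSketch {
 public:
  explicit SuccessorIteratorSketch(const ControlSketch* instr)
      : instr_(instr), current_(0) {}
  bool Done() const { return current_ >= instr_->SuccessorCount(); }
  BlockSketch* Current() const { return instr_->SuccessorAt(current_); }
  void Advance() { ++current_; }

 private:
  const ControlSketch* instr_;
  int current_;
};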
-void HBasicBlock::Goto(HBasicBlock* block, bool include_stack_check) {
+void HBasicBlock::Goto(HBasicBlock* block) {
if (block->IsInlineReturnTarget()) {
AddInstruction(new(zone()) HLeaveInlined);
last_environment_ = last_environment()->outer();
}
AddSimulate(AstNode::kNoNumber);
HGoto* instr = new(zone()) HGoto(block);
- instr->set_include_stack_check(include_stack_check);
Finish(instr);
}
@@ -197,7 +194,7 @@
}
-void HBasicBlock::SetJoinId(int id) {
+void HBasicBlock::SetJoinId(int ast_id) {
int length = predecessors_.length();
ASSERT(length > 0);
for (int i = 0; i < length; i++) {
@@ -207,8 +204,8 @@
// We only need to verify the ID once.
ASSERT(i != 0 ||
predecessor->last_environment()->closure()->shared()
- ->VerifyBailoutId(id));
- simulate->set_ast_id(id);
+ ->VerifyBailoutId(ast_id));
+ simulate->set_ast_id(ast_id);
}
}
@@ -401,8 +398,9 @@
void Analyze() {
while (!stack_.is_empty()) {
HControlInstruction* end = stack_.RemoveLast()->end();
- PushBlock(end->FirstSuccessor());
- PushBlock(end->SecondSuccessor());
+ for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
+ PushBlock(it.Current());
+ }
}
}
@@ -521,6 +519,12 @@
return GetConstant(&constant_false_, isolate()->heap()->false_value());
}
+
+HConstant* HGraph::GetConstantHole() {
+ return GetConstant(&constant_hole_, isolate()->heap()->the_hole_value());
+}
+
+
HGraphBuilder::HGraphBuilder(CompilationInfo* info,
TypeFeedbackOracle* oracle)
: function_state_(NULL),
@@ -572,7 +576,7 @@
HBasicBlock* body_exit,
HBasicBlock* loop_successor,
HBasicBlock* break_block) {
- if (body_exit != NULL) body_exit->Goto(loop_entry, true);
+ if (body_exit != NULL) body_exit->Goto(loop_entry);
loop_entry->PostProcessLoopHeader(statement);
if (break_block != NULL) {
if (loop_successor != NULL) loop_successor->Goto(break_block);
@@ -691,8 +695,9 @@
HBasicBlock* loop_header) {
for (int i = 0; i < loop->blocks()->length(); ++i) {
HBasicBlock* b = loop->blocks()->at(i);
- Postorder(b->end()->SecondSuccessor(), visited, order, loop_header);
- Postorder(b->end()->FirstSuccessor(), visited, order, loop_header);
+ for (HSuccessorIterator it(b->end()); !it.Done(); it.Advance()) {
+ Postorder(it.Current(), visited, order, loop_header);
+ }
if (b->IsLoopHeader() && b != loop->loop_header()) {
PostorderLoopBlocks(b->loop_information(), visited, order, loop_header);
}
@@ -709,11 +714,13 @@
visited->Add(block->block_id());
if (block->IsLoopHeader()) {
PostorderLoopBlocks(block->loop_information(), visited, order, loop_header);
- Postorder(block->end()->SecondSuccessor(), visited, order, block);
- Postorder(block->end()->FirstSuccessor(), visited, order, block);
+ for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
+ Postorder(it.Current(), visited, order, block);
+ }
} else {
- Postorder(block->end()->SecondSuccessor(), visited, order, loop_header);
- Postorder(block->end()->FirstSuccessor(), visited, order, loop_header);
+ for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
+ Postorder(it.Current(), visited, order, loop_header);
+ }
}
ASSERT(block->end()->FirstSuccessor() == NULL ||
order->Contains(block->end()->FirstSuccessor()) ||
@@ -736,9 +743,21 @@
}
}
}
+
+ // Propagate flag marking blocks containing unconditional deoptimize.
+ MarkAsDeoptimizingRecursively(entry_block());
}
+// Mark all blocks that are dominated by an unconditional deoptimize.
+void HGraph::MarkAsDeoptimizingRecursively(HBasicBlock* block) {
+ for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
+ HBasicBlock* dominated = block->dominated_blocks()->at(i);
+ if (block->IsDeoptimizing()) dominated->MarkAsDeoptimizing();
+ MarkAsDeoptimizingRecursively(dominated);
+ }
+}
+
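The propagation itself is a preorder walk of the dominator tree: once a block is marked as deoptimizing, everything it dominates inherits the mark, because those blocks can only be reached through the unconditional deopt. A standalone model with an illustrative block type:

#include <vector>

// Sketch of the dominator-tree walk above.
struct DomBlock {
  bool is_deoptimizing = false;
  std::vector<DomBlock*> dominated;  // children in the dominator tree
};

void MarkDeoptimizingRecursively(DomBlock* block) {
  for (DomBlock* child : block->dominated) {
    if (block->is_deoptimizing) child->is_deoptimizing = true;
    MarkDeoptimizingRecursively(child);
  }
}
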
void HGraph::EliminateRedundantPhis() {
HPhase phase("Redundant phi elimination", this);
@@ -817,7 +836,7 @@
}
-bool HGraph::CheckPhis() {
+bool HGraph::CheckArgumentsPhiUses() {
int block_count = blocks_.length();
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
@@ -830,7 +849,22 @@
}
-bool HGraph::CollectPhis() {
+bool HGraph::CheckConstPhiUses() {
+ int block_count = blocks_.length();
+ for (int i = 0; i < block_count; ++i) {
+ for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
+ HPhi* phi = blocks_[i]->phis()->at(j);
+ // Check for the hole value (from an uninitialized const).
+ for (int k = 0; k < phi->OperandCount(); k++) {
+ if (phi->OperandAt(k) == GetConstantHole()) return false;
+ }
+ }
+ }
+ return true;
+}
+
+
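GetConstantHole gives the builder a sentinel for "const variable before its initializer has run"; CheckConstPhiUses then rejects any graph where that sentinel could flow into a phi, since merged control flow would otherwise have to carry uninitialized-const semantics. A small model of the check (illustrative types):

#include <vector>

// Sketch: reject any phi that might observe the uninitialized-const hole.
struct ValueSketch { bool is_hole = false; };
struct PhiSketch { std::vector<const ValueSketch*> operands; };

bool CheckConstPhiUses(const std::vector<PhiSketch>& phis) {
  for (const PhiSketch& phi : phis) {
    for (const ValueSketch* op : phi.operands) {
      if (op->is_hole) return false;  // bail out: const used before init
    }
  }
  return true;
}
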
+void HGraph::CollectPhis() {
int block_count = blocks_.length();
phi_list_ = new ZoneList<HPhi*>(block_count);
for (int i = 0; i < block_count; ++i) {
@@ -839,7 +873,6 @@
phi_list_->Add(phi);
}
}
- return true;
}
@@ -875,9 +908,8 @@
private:
void TraceRange(const char* msg, ...);
void Analyze(HBasicBlock* block);
- void InferControlFlowRange(HTest* test, HBasicBlock* dest);
- void InferControlFlowRange(Token::Value op, HValue* value, HValue* other);
- void InferPhiRange(HPhi* phi);
+ void InferControlFlowRange(HCompareIDAndBranch* test, HBasicBlock* dest);
+ void UpdateControlFlowRange(Token::Value op, HValue* value, HValue* other);
void InferRange(HValue* value);
void RollBackTo(int index);
void AddRange(HValue* value, Range* range);
@@ -899,7 +931,7 @@
void HRangeAnalysis::Analyze() {
HPhase phase("Range analysis", graph_);
- Analyze(graph_->blocks()->at(0));
+ Analyze(graph_->entry_block());
}
@@ -911,15 +943,15 @@
// Infer range based on control flow.
if (block->predecessors()->length() == 1) {
HBasicBlock* pred = block->predecessors()->first();
- if (pred->end()->IsTest()) {
- InferControlFlowRange(HTest::cast(pred->end()), block);
+ if (pred->end()->IsCompareIDAndBranch()) {
+ InferControlFlowRange(HCompareIDAndBranch::cast(pred->end()), block);
}
}
// Process phi instructions.
for (int i = 0; i < block->phis()->length(); ++i) {
HPhi* phi = block->phis()->at(i);
- InferPhiRange(phi);
+ InferRange(phi);
}
// Go through all instructions of the current block.
@@ -938,28 +970,26 @@
}
-void HRangeAnalysis::InferControlFlowRange(HTest* test, HBasicBlock* dest) {
+void HRangeAnalysis::InferControlFlowRange(HCompareIDAndBranch* test,
+ HBasicBlock* dest) {
ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
- if (test->value()->IsCompare()) {
- HCompare* compare = HCompare::cast(test->value());
- if (compare->GetInputRepresentation().IsInteger32()) {
- Token::Value op = compare->token();
- if (test->SecondSuccessor() == dest) {
- op = Token::NegateCompareOp(op);
- }
- Token::Value inverted_op = Token::InvertCompareOp(op);
- InferControlFlowRange(op, compare->left(), compare->right());
- InferControlFlowRange(inverted_op, compare->right(), compare->left());
+ if (test->GetInputRepresentation().IsInteger32()) {
+ Token::Value op = test->token();
+ if (test->SecondSuccessor() == dest) {
+ op = Token::NegateCompareOp(op);
}
+ Token::Value inverted_op = Token::InvertCompareOp(op);
+ UpdateControlFlowRange(op, test->left(), test->right());
+ UpdateControlFlowRange(inverted_op, test->right(), test->left());
}
}
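
With compare and branch fused into HCompareIDAndBranch, the analysis reads the token and operands straight off the block-ending instruction instead of unwrapping an HTest around an HCompare. The refinement it feeds is unchanged; for example, learning that `value < other` held on an edge tightens value's upper bound, roughly as below (plain-int sketch that ignores the symbolic Range machinery):

#include <algorithm>
#include <climits>

// Sketch of one refinement case from UpdateControlFlowRange below.
struct RangeSketch { int lower = INT_MIN; int upper = INT_MAX; };

// Hypothetical helper: narrow `value` given that `value < other` held on
// the edge into this block.
RangeSketch RefineForLessThan(RangeSketch value, const RangeSketch& other) {
  if (other.upper != INT_MIN) {  // guard against signed underflow
    value.upper = std::min(value.upper, other.upper - 1);
  }
  return value;
}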
// We know that value [op] other. Use this information to update the range on
// value.
-void HRangeAnalysis::InferControlFlowRange(Token::Value op,
- HValue* value,
- HValue* other) {
+void HRangeAnalysis::UpdateControlFlowRange(Token::Value op,
+ HValue* value,
+ HValue* other) {
Range temp_range;
Range* range = other->range() != NULL ? other->range() : &temp_range;
Range* new_range = NULL;
@@ -990,12 +1020,6 @@
}
-void HRangeAnalysis::InferPhiRange(HPhi* phi) {
- // TODO(twuerthinger): Infer loop phi ranges.
- InferRange(phi);
-}
-
-
void HRangeAnalysis::InferRange(HValue* value) {
ASSERT(!value->HasRange());
if (!value->representation().IsNone()) {
@@ -1222,8 +1246,6 @@
void Process();
private:
- void RemoveStackCheck(HBasicBlock* block);
-
HGraph* graph_;
};
@@ -1238,16 +1260,20 @@
if (block->IsLoopHeader()) {
HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
HBasicBlock* dominator = back_edge;
- bool back_edge_dominated_by_call = false;
- while (dominator != block && !back_edge_dominated_by_call) {
+ while (true) {
HInstruction* instr = dominator->first();
- while (instr != NULL && !back_edge_dominated_by_call) {
+ while (instr != NULL) {
if (instr->IsCall()) {
- RemoveStackCheck(back_edge);
- back_edge_dominated_by_call = true;
+ block->loop_information()->stack_check()->Eliminate();
+ break;
}
instr = instr->next();
}
+
+ // Done when the loop header is processed.
+ if (dominator == block) break;
+
+ // Move up the dominator tree.
dominator = dominator->dominator();
}
}
@@ -1255,18 +1281,6 @@
}
-void HStackCheckEliminator::RemoveStackCheck(HBasicBlock* block) {
- HInstruction* instr = block->first();
- while (instr != NULL) {
- if (instr->IsGoto()) {
- HGoto::cast(instr)->set_include_stack_check(false);
- return;
- }
- instr = instr->next();
- }
-}
-
-
// Simple sparse set with O(1) add, contains, and clear.
class SparseSet {
public:
@@ -1274,7 +1288,12 @@
: capacity_(capacity),
length_(0),
dense_(zone->NewArray<int>(capacity)),
- sparse_(zone->NewArray<int>(capacity)) {}
+ sparse_(zone->NewArray<int>(capacity)) {
+#ifndef NVALGRIND
+ // Initialize the sparse array to make valgrind happy.
+ memset(sparse_, 0, sizeof(sparse_[0]) * capacity);
+#endif
+ }
bool Contains(int n) const {
ASSERT(0 <= n && n < capacity_);
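
This is the classic Briggs-Torczon sparse set: dense_ lists the members in insertion order, sparse_[n] records where n sits in dense_, and membership holds only when the two cross-reference each other. That is why sparse_ can be left uninitialized (reading it before Add is benign in practice, though strictly undefined in ISO C++, hence the memset for valgrind) and why Clear is O(1). A compact standalone version:

#include <cassert>
#include <cstdlib>

// Standalone sketch of the sparse-set representation used above
// (Briggs & Torczon, "An Efficient Representation for Sparse Sets", 1993).
class SparseSetSketch {
 public:
  explicit SparseSetSketch(int capacity)
      : capacity_(capacity),
        length_(0),
        dense_(static_cast<int*>(std::malloc(sizeof(int) * capacity))),
        sparse_(static_cast<int*>(std::malloc(sizeof(int) * capacity))) {}
  ~SparseSetSketch() {
    std::free(dense_);
    std::free(sparse_);
  }

  bool Contains(int n) const {
    assert(0 <= n && n < capacity_);
    int d = sparse_[n];  // may be uninitialized garbage; the cross-check
    return 0 <= d && d < length_ && dense_[d] == n;  // makes that harmless
  }

  void Add(int n) {
    if (Contains(n)) return;
    sparse_[n] = length_;
    dense_[length_++] = n;
  }

  void Clear() { length_ = 0; }  // O(1): stale cross-links become invalid

 private:
  int capacity_;
  int length_;
  int* dense_;
  int* sparse_;
};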
@@ -1357,7 +1376,7 @@
LoopInvariantCodeMotion();
}
HValueMap* map = new(zone()) HValueMap();
- AnalyzeBlock(graph_->blocks()->at(0), map);
+ AnalyzeBlock(graph_->entry_block(), map);
}
@@ -1449,37 +1468,9 @@
bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
HBasicBlock* loop_header) {
- // If we've disabled code motion, don't move any instructions.
- if (!AllowCodeMotion()) return false;
-
- // If --aggressive-loop-invariant-motion, move everything except change
- // instructions.
- if (FLAG_aggressive_loop_invariant_motion && !instr->IsChange()) {
- return true;
- }
-
- // Otherwise only move instructions that postdominate the loop header
- // (i.e. are always executed inside the loop). This is to avoid
- // unnecessary deoptimizations assuming the loop is executed at least
- // once. TODO(fschneider): Better type feedback should give us
- // information about code that was never executed.
- HBasicBlock* block = instr->block();
- bool result = true;
- if (block != loop_header) {
- for (int i = 1; i < loop_header->predecessors()->length(); ++i) {
- bool found = false;
- HBasicBlock* pred = loop_header->predecessors()->at(i);
- while (pred != loop_header) {
- if (pred == block) found = true;
- pred = pred->dominator();
- }
- if (!found) {
- result = false;
- break;
- }
- }
- }
- return result;
+ // If we've disabled code motion or we're in a block that unconditionally
+ // deoptimizes, don't move any instructions.
+ return AllowCodeMotion() && !instr->block()->IsDeoptimizing();
}
@@ -1699,8 +1690,8 @@
HValue* use = it.value();
if (use->IsPhi()) {
int id = HPhi::cast(use)->phi_id();
- change = change ||
- connected_phis[i]->UnionIsChanged(*connected_phis[id]);
+ if (connected_phis[i]->UnionIsChanged(*connected_phis[id]))
+ change = true;
}
}
}
@@ -1965,7 +1956,7 @@
HPhase phase("MarkDeoptimizeOnUndefined", this);
// Compute DeoptimizeOnUndefined flag for phis.
// Any phi that can reach a use with DeoptimizeOnUndefined set must
- // have DeoptimizeOnUndefined set. Currently only HCompare, with
+ // have DeoptimizeOnUndefined set. Currently only HCompareIDAndBranch, with
// double input representation, has this flag set.
// The flag is used by HChange tagged->double, which must deoptimize
// if one of its uses has this flag set.
@@ -2026,9 +2017,10 @@
HBasicBlock* if_false = owner->graph()->CreateBasicBlock();
if_true->MarkAsInlineReturnTarget();
if_false->MarkAsInlineReturnTarget();
+ Expression* cond = TestContext::cast(owner->ast_context())->condition();
// The AstContext constructor pushed on the context stack. This newed
// instance is the reason that AstContext can't be BASE_EMBEDDED.
- test_context_ = new TestContext(owner, if_true, if_false);
+ test_context_ = new TestContext(owner, cond, if_true, if_false);
} else {
function_return_ = owner->graph()->CreateBasicBlock();
function_return()->MarkAsInlineReturnTarget();
@@ -2102,14 +2094,28 @@
void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+ ASSERT(!instr->IsControlInstruction());
owner()->AddInstruction(instr);
if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
}
+void EffectContext::ReturnControl(HControlInstruction* instr, int ast_id) {
+ ASSERT(!instr->HasSideEffects());
+ HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
+ HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
+ instr->SetSuccessorAt(0, empty_true);
+ instr->SetSuccessorAt(1, empty_false);
+ owner()->current_block()->Finish(instr);
+ HBasicBlock* join = owner()->CreateJoin(empty_true, empty_false, ast_id);
+ owner()->set_current_block(join);
+}
+
+
void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+ ASSERT(!instr->IsControlInstruction());
if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
- owner()->Bailout("bad value context for arguments object value");
+ return owner()->Bailout("bad value context for arguments object value");
}
owner()->AddInstruction(instr);
owner()->Push(instr);
@@ -2117,7 +2123,28 @@
}
+void ValueContext::ReturnControl(HControlInstruction* instr, int ast_id) {
+ ASSERT(!instr->HasSideEffects());
+ if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
+ return owner()->Bailout("bad value context for arguments object value");
+ }
+ HBasicBlock* materialize_false = owner()->graph()->CreateBasicBlock();
+ HBasicBlock* materialize_true = owner()->graph()->CreateBasicBlock();
+ instr->SetSuccessorAt(0, materialize_true);
+ instr->SetSuccessorAt(1, materialize_false);
+ owner()->current_block()->Finish(instr);
+ owner()->set_current_block(materialize_true);
+ owner()->Push(owner()->graph()->GetConstantTrue());
+ owner()->set_current_block(materialize_false);
+ owner()->Push(owner()->graph()->GetConstantFalse());
+ HBasicBlock* join =
+ owner()->CreateJoin(materialize_true, materialize_false, ast_id);
+ owner()->set_current_block(join);
+}
+
+
void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+ ASSERT(!instr->IsControlInstruction());
HGraphBuilder* builder = owner();
builder->AddInstruction(instr);
// We expect a simulate after every expression with side effects, though
@@ -2131,22 +2158,35 @@
}
+void TestContext::ReturnControl(HControlInstruction* instr, int ast_id) {
+ ASSERT(!instr->HasSideEffects());
+ HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
+ HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
+ instr->SetSuccessorAt(0, empty_true);
+ instr->SetSuccessorAt(1, empty_false);
+ owner()->current_block()->Finish(instr);
+ empty_true->Goto(if_true());
+ empty_false->Goto(if_false());
+ owner()->set_current_block(NULL);
+}
+
+
void TestContext::BuildBranch(HValue* value) {
// We expect the graph to be in edge-split form: there is no edge that
// connects a branch node to a join node. We conservatively ensure that
// property by always adding an empty block on the outgoing edges of this
// branch.
HGraphBuilder* builder = owner();
- if (value->CheckFlag(HValue::kIsArguments)) {
+ if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
builder->Bailout("arguments object value in a test context");
}
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
- HTest* test = new(zone()) HTest(value, empty_true, empty_false);
+ HBranch* test = new(zone()) HBranch(value, empty_true, empty_false);
builder->current_block()->Finish(test);
- empty_true->Goto(if_true(), false);
- empty_false->Goto(if_false(), false);
+ empty_true->Goto(if_true());
+ empty_false->Goto(if_false());
builder->set_current_block(NULL);
}
@@ -2198,14 +2238,17 @@
void HGraphBuilder::VisitForControl(Expression* expr,
HBasicBlock* true_block,
HBasicBlock* false_block) {
- TestContext for_test(this, true_block, false_block);
+ TestContext for_test(this, expr, true_block, false_block);
Visit(expr);
}
-void HGraphBuilder::VisitArgument(Expression* expr) {
- CHECK_ALIVE(VisitForValue(expr));
- Push(AddInstruction(new(zone()) HPushArgument(Pop())));
+HValue* HGraphBuilder::VisitArgument(Expression* expr) {
+ VisitForValue(expr);
+ if (HasStackOverflow() || current_block() == NULL) return NULL;
+ HValue* value = Pop();
+ Push(AddInstruction(new(zone()) HPushArgument(value)));
+ return value;
}
@@ -2238,7 +2281,9 @@
}
SetupScope(scope);
VisitDeclarations(scope->declarations());
- AddInstruction(new(zone()) HStackCheck());
+ HValue* context = environment()->LookupContext();
+ AddInstruction(
+ new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
// Add an edge to the body entry. This is warty: the graph's start
// environment will be used by the Lithium translation as the initial
@@ -2272,16 +2317,17 @@
graph()->OrderBlocks();
graph()->AssignDominators();
+ if (!graph()->CheckConstPhiUses()) {
+ Bailout("Unsupported phi use of const variable");
+ return NULL;
+ }
graph()->EliminateRedundantPhis();
- if (!graph()->CheckPhis()) {
- Bailout("Unsupported phi use of arguments object");
+ if (!graph()->CheckArgumentsPhiUses()) {
+ Bailout("Unsupported phi use of arguments");
return NULL;
}
if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
- if (!graph()->CollectPhis()) {
- Bailout("Unsupported phi use of uninitialized constant");
- return NULL;
- }
+ graph()->CollectPhis();
HInferRepresentation rep(graph());
rep.Analyze();
@@ -2341,9 +2387,9 @@
}
-void HGraphBuilder::AddSimulate(int id) {
+void HGraphBuilder::AddSimulate(int ast_id) {
ASSERT(current_block() != NULL);
- current_block()->AddSimulate(id);
+ current_block()->AddSimulate(ast_id);
}
@@ -2375,9 +2421,6 @@
void HGraphBuilder::SetupScope(Scope* scope) {
- // We don't yet handle the function name for named function expressions.
- if (scope->function() != NULL) return Bailout("named function expression");
-
HConstant* undefined_constant = new(zone()) HConstant(
isolate()->factory()->undefined_value(), Representation::Tagged());
AddInstruction(undefined_constant);
@@ -2406,18 +2449,21 @@
// Handle the arguments and arguments shadow variables specially (they do
// not have declarations).
if (scope->arguments() != NULL) {
- if (!scope->arguments()->IsStackAllocated() ||
- (scope->arguments_shadow() != NULL &&
- !scope->arguments_shadow()->IsStackAllocated())) {
+ if (!scope->arguments()->IsStackAllocated()) {
return Bailout("context-allocated arguments");
}
HArgumentsObject* object = new(zone()) HArgumentsObject;
AddInstruction(object);
graph()->SetArgumentsObject(object);
environment()->Bind(scope->arguments(), object);
- if (scope->arguments_shadow() != NULL) {
- environment()->Bind(scope->arguments_shadow(), object);
+ }
+ // Handle implicit declaration of the function name in named function
+ // expressions before other declarations.
+ if (scope->is_function_scope() && scope->function() != NULL) {
+ if (!scope->function()->IsStackAllocated()) {
+ return Bailout("unsupported declaration");
}
+ environment()->Bind(scope->function(), graph()->GetConstantHole());
}
}
@@ -2510,7 +2556,7 @@
cond_false = NULL;
}
- HBasicBlock* join = CreateJoin(cond_true, cond_false, stmt->id());
+ HBasicBlock* join = CreateJoin(cond_true, cond_false, stmt->IfId());
set_current_block(join);
}
}
@@ -2588,7 +2634,7 @@
test->if_false());
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- current_block()->Goto(function_return(), false);
+ current_block()->Goto(function_return());
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
@@ -2600,19 +2646,20 @@
}
-void HGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
+void HGraphBuilder::VisitEnterWithContextStatement(
+ EnterWithContextStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout("WithEnterStatement");
+ return Bailout("EnterWithContextStatement");
}
-void HGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
+void HGraphBuilder::VisitExitContextStatement(ExitContextStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout("WithExitStatement");
+ return Bailout("ExitContextStatement");
}
@@ -2656,15 +2703,16 @@
// Otherwise generate a compare and branch.
CHECK_ALIVE(VisitForValue(clause->label()));
HValue* label_value = Pop();
- HCompare* compare =
- new(zone()) HCompare(tag_value, label_value, Token::EQ_STRICT);
+ HCompareIDAndBranch* compare =
+ new(zone()) HCompareIDAndBranch(tag_value,
+ label_value,
+ Token::EQ_STRICT);
compare->SetInputRepresentation(Representation::Integer32());
- ASSERT(!compare->HasSideEffects());
- AddInstruction(compare);
HBasicBlock* body_block = graph()->CreateBasicBlock();
HBasicBlock* next_test_block = graph()->CreateBasicBlock();
- HTest* branch = new(zone()) HTest(compare, body_block, next_test_block);
- current_block()->Finish(branch);
+ compare->SetSuccessorAt(0, body_block);
+ compare->SetSuccessorAt(1, next_test_block);
+ current_block()->Finish(compare);
set_current_block(next_test_block);
}
@@ -2750,7 +2798,7 @@
HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
HBasicBlock* osr_entry = graph()->CreateBasicBlock();
HValue* true_value = graph()->GetConstantTrue();
- HTest* test = new(zone()) HTest(true_value, non_osr_entry, osr_entry);
+ HBranch* test = new(zone()) HBranch(true_value, non_osr_entry, osr_entry);
current_block()->Finish(test);
HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
@@ -2778,6 +2826,21 @@
}
+void HGraphBuilder::VisitLoopBody(IterationStatement* stmt,
+ HBasicBlock* loop_entry,
+ BreakAndContinueInfo* break_info) {
+ BreakAndContinueScope push(break_info, this);
+ AddSimulate(stmt->StackCheckId());
+ HValue* context = environment()->LookupContext();
+ HStackCheck* stack_check =
+ new(zone()) HStackCheck(context, HStackCheck::kBackwardsBranch);
+ AddInstruction(stack_check);
+ ASSERT(loop_entry->IsLoopHeader());
+ loop_entry->loop_information()->set_stack_check(stack_check);
+ CHECK_BAILOUT(Visit(stmt->body()));
+}
+
+
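Stack checks move off the HGoto flag and into the loop body itself: every iteration executes an HStackCheck at the backwards branch, and the loop header records it so the stack-check eliminator above can remove it when a call inside the loop already polls. A rough plain-C++ analogue of the per-back-edge poll (assumed names, nothing from V8):

#include <atomic>
#include <cstdio>

// Hypothetical interrupt flag that a profiler or GC thread might set.
std::atomic<bool> interrupt_pending{false};

void HandleInterrupt() { std::puts("interrupt serviced"); }

long SumToN(long n) {
  long sum = 0;
  for (long i = 0; i <= n; ++i) {
    sum += i;  // loop body
    // Back-edge poll, the analogue of HStackCheck::kBackwardsBranch. A loop
    // that makes a call on every path can skip it, which is exactly what
    // HStackCheckEliminator exploits.
    if (interrupt_pending.exchange(false)) HandleInterrupt();
  }
  return sum;
}
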
void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -2785,13 +2848,11 @@
ASSERT(current_block() != NULL);
PreProcessOsrEntry(stmt);
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry, false);
+ current_block()->Goto(loop_entry);
set_current_block(loop_entry);
BreakAndContinueInfo break_info(stmt);
- { BreakAndContinueScope push(&break_info, this);
- CHECK_BAILOUT(Visit(stmt->body()));
- }
+ CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
HBasicBlock* loop_successor = NULL;
@@ -2829,7 +2890,7 @@
ASSERT(current_block() != NULL);
PreProcessOsrEntry(stmt);
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry, false);
+ current_block()->Goto(loop_entry);
set_current_block(loop_entry);
// If the condition is constant true, do not generate a branch.
@@ -2852,7 +2913,7 @@
BreakAndContinueInfo break_info(stmt);
if (current_block() != NULL) {
BreakAndContinueScope push(&break_info, this);
- CHECK_BAILOUT(Visit(stmt->body()));
+ CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
}
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
@@ -2875,7 +2936,7 @@
ASSERT(current_block() != NULL);
PreProcessOsrEntry(stmt);
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
- current_block()->Goto(loop_entry, false);
+ current_block()->Goto(loop_entry);
set_current_block(loop_entry);
HBasicBlock* loop_successor = NULL;
@@ -2897,7 +2958,7 @@
BreakAndContinueInfo break_info(stmt);
if (current_block() != NULL) {
BreakAndContinueScope push(&break_info, this);
- CHECK_BAILOUT(Visit(stmt->body()));
+ CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
}
HBasicBlock* body_exit =
JoinContinue(stmt, current_block(), break_info.continue_block());
@@ -2981,9 +3042,10 @@
}
// We also have a stack overflow if the recursive compilation did.
if (HasStackOverflow()) return;
+ HValue* context = environment()->LookupContext();
HFunctionLiteral* instr =
- new(zone()) HFunctionLiteral(shared_info, expr->pretenure());
- ast_context()->ReturnInstruction(instr, expr->id());
+ new(zone()) HFunctionLiteral(context, shared_info, expr->pretenure());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -3028,7 +3090,7 @@
HBasicBlock* join = CreateJoin(cond_true, cond_false, expr->id());
set_current_block(join);
if (join != NULL && !ast_context()->IsEffect()) {
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
}
}
}
@@ -3073,7 +3135,12 @@
if (variable == NULL) {
return Bailout("reference to rewritten variable");
} else if (variable->IsStackAllocated()) {
- ast_context()->ReturnValue(environment()->Lookup(variable));
+ HValue* value = environment()->Lookup(variable);
+ if (variable->mode() == Variable::CONST &&
+ value == graph()->GetConstantHole()) {
+ return Bailout("reference to uninitialized const variable");
+ }
+ return ast_context()->ReturnValue(value);
} else if (variable->IsContextSlot()) {
if (variable->mode() == Variable::CONST) {
return Bailout("reference to const context slot");
@@ -3081,7 +3148,7 @@
HValue* context = BuildContextChainWalk(variable);
int index = variable->AsSlot()->index();
HLoadContextSlot* instr = new(zone()) HLoadContextSlot(context, index);
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
} else if (variable->is_global()) {
LookupResult lookup;
GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, false);
@@ -3096,7 +3163,7 @@
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
} else {
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
@@ -3108,7 +3175,7 @@
ast_context()->is_for_typeof());
instr->set_position(expr->position());
ASSERT(instr->HasSideEffects());
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
} else {
return Bailout("reference to a variable which requires dynamic lookup");
@@ -3122,7 +3189,7 @@
ASSERT(current_block()->HasPredecessor());
HConstant* instr =
new(zone()) HConstant(expr->handle(), Representation::Tagged());
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -3130,10 +3197,13 @@
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- HRegExpLiteral* instr = new(zone()) HRegExpLiteral(expr->pattern(),
+ HValue* context = environment()->LookupContext();
+
+ HRegExpLiteral* instr = new(zone()) HRegExpLiteral(context,
+ expr->pattern(),
expr->flags(),
expr->literal_index());
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -3203,9 +3273,9 @@
// (e.g. because of code motion).
HToFastProperties* result = new(zone()) HToFastProperties(Pop());
AddInstruction(result);
- ast_context()->ReturnValue(result);
+ return ast_context()->ReturnValue(result);
} else {
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
}
}
@@ -3216,8 +3286,10 @@
ASSERT(current_block()->HasPredecessor());
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
+ HValue* context = environment()->LookupContext();
- HArrayLiteral* literal = new(zone()) HArrayLiteral(expr->constant_elements(),
+ HArrayLiteral* literal = new(zone()) HArrayLiteral(context,
+ expr->constant_elements(),
length,
expr->literal_index(),
expr->depth());
@@ -3249,15 +3321,7 @@
AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
AddSimulate(expr->GetIdForElement(i));
}
- ast_context()->ReturnValue(Pop());
-}
-
-
-void HGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- ASSERT(!HasStackOverflow());
- ASSERT(current_block() != NULL);
- ASSERT(current_block()->HasPredecessor());
- return Bailout("CatchExtensionObject");
+ return ast_context()->ReturnValue(Pop());
}
@@ -3419,15 +3483,14 @@
Drop(1);
}
}
- ast_context()->ReturnValue(value);
- return;
+ return ast_context()->ReturnValue(value);
}
}
ASSERT(join != NULL);
join->SetJoinId(expr->id());
set_current_block(join);
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ if (!ast_context()->IsEffect()) return ast_context()->ReturnValue(Pop());
}
@@ -3471,13 +3534,21 @@
value = Pop();
HValue* key = Pop();
HValue* object = Pop();
- instr = BuildStoreKeyed(object, key, value, expr);
+ bool has_side_effects = false;
+ HandleKeyedElementAccess(object, key, value, expr, expr->AssignmentId(),
+ expr->position(),
+ true, // is_store
+ &has_side_effects);
+ Push(value);
+ ASSERT(has_side_effects); // Stores always have side effects.
+ AddSimulate(expr->AssignmentId());
+ return ast_context()->ReturnValue(Pop());
}
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
}
@@ -3528,6 +3599,10 @@
BinaryOperation* operation = expr->binary_operation();
if (var != NULL) {
+ if (var->mode() == Variable::CONST) {
+ return Bailout("unsupported const compound assignment");
+ }
+
CHECK_ALIVE(VisitForValue(operation));
if (var->is_global()) {
@@ -3538,6 +3613,20 @@
} else if (var->IsStackAllocated()) {
Bind(var, Top());
} else if (var->IsContextSlot()) {
+ // Bail out if we try to mutate a parameter value in a function using
+ // the arguments object. We do not (yet) correctly handle the
+ // arguments property of the function.
+ if (info()->scope()->arguments() != NULL) {
+ // Parameters will rewrite to context slots. We have no direct way
+ // to detect that the variable is a parameter.
+ int count = info()->scope()->num_parameters();
+ for (int i = 0; i < count; ++i) {
+ if (var == info()->scope()->parameter(i)) {
+ Bailout("assignment to parameter, function uses arguments object");
+ }
+ }
+ }
+
HValue* context = BuildContextChainWalk(var);
int index = var->AsSlot()->index();
HStoreContextSlot* instr =
@@ -3547,7 +3636,7 @@
} else {
return Bailout("compound assignment to lookup slot");
}
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
} else if (prop != NULL) {
prop->RecordTypeFeedback(oracle());
@@ -3582,7 +3671,7 @@
Drop(2);
Push(instr);
if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
} else {
// Keyed property.
@@ -3591,9 +3680,14 @@
HValue* obj = environment()->ExpressionStackAt(1);
HValue* key = environment()->ExpressionStackAt(0);
- HInstruction* load = BuildLoadKeyed(obj, key, prop);
- PushAndAdd(load);
- if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
+ bool has_side_effects = false;
+ HValue* load = HandleKeyedElementAccess(
+ obj, key, NULL, prop, expr->CompoundLoadId(), RelocInfo::kNoPosition,
+ false, // is_store
+ &has_side_effects);
+ Push(load);
+ if (has_side_effects) AddSimulate(expr->CompoundLoadId());
+
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
@@ -3604,13 +3698,17 @@
if (instr->HasSideEffects()) AddSimulate(operation->id());
expr->RecordTypeFeedback(oracle());
- HInstruction* store = BuildStoreKeyed(obj, key, instr, expr);
- AddInstruction(store);
+ HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
+ RelocInfo::kNoPosition,
+ true, // is_store
+ &has_side_effects);
+
// Drop the simulated receiver, key, and value. Return the value.
Drop(3);
Push(instr);
- if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
+ ASSERT(has_side_effects); // Stores always have side effects.
+ AddSimulate(expr->AssignmentId());
+ return ast_context()->ReturnValue(Pop());
}
} else {
@@ -3634,6 +3732,19 @@
}
if (var != NULL) {
+ if (var->mode() == Variable::CONST) {
+ if (expr->op() != Token::INIT_CONST) {
+ return Bailout("non-initializer assignment to const");
+ }
+ if (!var->IsStackAllocated()) {
+ return Bailout("assignment to const context slot");
+ }
+ // We insert a use of the old value to detect unsupported uses of const
+ // variables (e.g. initialization inside a loop).
+ HValue* old_value = environment()->Lookup(var);
+      AddInstruction(new(zone()) HUseConst(old_value));
+ }
+
if (proxy->IsArguments()) return Bailout("assignment to arguments");
// Handle the assignment.
@@ -3644,9 +3755,24 @@
CHECK_ALIVE(VisitForValue(expr->value(), ARGUMENTS_ALLOWED));
HValue* value = Pop();
Bind(var, value);
- ast_context()->ReturnValue(value);
+ return ast_context()->ReturnValue(value);
- } else if (var->IsContextSlot() && var->mode() != Variable::CONST) {
+ } else if (var->IsContextSlot()) {
+ ASSERT(var->mode() != Variable::CONST);
+ // Bail out if we try to mutate a parameter value in a function using
+ // the arguments object. We do not (yet) correctly handle the
+ // arguments property of the function.
+ if (info()->scope()->arguments() != NULL) {
+ // Parameters will rewrite to context slots. We have no direct way
+ // to detect that the variable is a parameter.
+ int count = info()->scope()->num_parameters();
+ for (int i = 0; i < count; ++i) {
+ if (var == info()->scope()->parameter(i)) {
+ Bailout("assignment to parameter, function uses arguments object");
+ }
+ }
+ }
+
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* context = BuildContextChainWalk(var);
int index = var->AsSlot()->index();
@@ -3654,7 +3780,7 @@
new(zone()) HStoreContextSlot(context, index, Top());
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
} else if (var->is_global()) {
CHECK_ALIVE(VisitForValue(expr->value()));
@@ -3662,7 +3788,7 @@
Top(),
expr->position(),
expr->AssignmentId());
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
} else {
return Bailout("assignment to LOOKUP or const CONTEXT variable");
@@ -3686,8 +3812,9 @@
ASSERT(ast_context()->IsEffect());
CHECK_ALIVE(VisitForValue(expr->exception()));
+ HValue* context = environment()->LookupContext();
HValue* value = environment()->Pop();
- HThrow* instr = new(zone()) HThrow(value);
+ HThrow* instr = new(zone()) HThrow(context, value);
instr->set_position(expr->position());
AddInstruction(instr);
AddSimulate(expr->id());
@@ -3759,72 +3886,322 @@
}
-HInstruction* HGraphBuilder::BuildLoadKeyedFastElement(HValue* object,
- HValue* key,
- Property* expr) {
- ASSERT(!expr->key()->IsPropertyName() && expr->IsMonomorphic());
- AddInstruction(new(zone()) HCheckNonSmi(object));
+HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
+ HValue* external_elements,
+ HValue* checked_key,
+ HValue* val,
+ JSObject::ElementsKind elements_kind,
+ bool is_store) {
+ if (is_store) {
+ ASSERT(val != NULL);
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS: {
+ HClampToUint8* clamp = new(zone()) HClampToUint8(val);
+ AddInstruction(clamp);
+ val = clamp;
+ break;
+ }
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+ HToInt32* floor_val = new(zone()) HToInt32(val);
+ AddInstruction(floor_val);
+ val = floor_val;
+ break;
+ }
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ break;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ return new(zone()) HStoreKeyedSpecializedArrayElement(
+ external_elements, checked_key, val, elements_kind);
+ } else {
+ return new(zone()) HLoadKeyedSpecializedArrayElement(
+ external_elements, checked_key, elements_kind);
+ }
+}
+
+
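The switch fixes the store-side coercion per elements kind: pixel stores clamp into [0, 255], the byte/short/int kinds truncate via ToInt32-style conversion, and the float/double kinds store the number unchanged. Approximate scalar versions of the two conversions (spec edge cases simplified; these are not V8's helpers):

#include <cmath>
#include <cstdint>

// Approximation of the pixel-array clamp: NaN and negatives go to 0, large
// values saturate at 255, in-range values round to nearest (ties to even
// under the default FP rounding mode).
static uint8_t ClampToUint8Approx(double v) {
  if (std::isnan(v) || v <= 0.0) return 0;
  if (v >= 255.0) return 255;
  return static_cast<uint8_t>(std::nearbyint(v));
}

// Approximation of ECMAScript ToInt32: NaN/Infinity map to 0; otherwise
// truncate, reduce modulo 2^32, and reinterpret into the signed range.
static int32_t ToInt32Approx(double v) {
  if (std::isnan(v) || std::isinf(v)) return 0;
  double m = std::fmod(std::trunc(v), 4294967296.0);
  if (m < 0.0) m += 4294967296.0;
  return m >= 2147483648.0 ? static_cast<int32_t>(m - 4294967296.0)
                           : static_cast<int32_t>(m);
}
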
+HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
+ HValue* key,
+ HValue* val,
+ Expression* expr,
+ bool is_store) {
+ ASSERT(expr->IsMonomorphic());
Handle<Map> map = expr->GetMonomorphicReceiverType();
- ASSERT(map->has_fast_elements());
+ if (!map->has_fast_elements() &&
+ !map->has_fast_double_elements() &&
+ !map->has_external_array_elements()) {
+ return is_store ? BuildStoreKeyedGeneric(object, key, val)
+ : BuildLoadKeyedGeneric(object, key);
+ }
+ AddInstruction(new(zone()) HCheckNonSmi(object));
AddInstruction(new(zone()) HCheckMap(object, map));
- bool is_array = (map->instance_type() == JS_ARRAY_TYPE);
- HLoadElements* elements = new(zone()) HLoadElements(object);
+ HInstruction* elements = new(zone()) HLoadElements(object);
HInstruction* length = NULL;
HInstruction* checked_key = NULL;
- if (is_array) {
+ if (map->has_external_array_elements()) {
+ AddInstruction(elements);
+ length = AddInstruction(new(zone()) HExternalArrayLength(elements));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ HLoadExternalArrayPointer* external_elements =
+ new(zone()) HLoadExternalArrayPointer(elements);
+ AddInstruction(external_elements);
+ return BuildExternalArrayElementAccess(external_elements, checked_key,
+ val, map->elements_kind(), is_store);
+ }
+ bool fast_double_elements = map->has_fast_double_elements();
+ ASSERT(map->has_fast_elements() || fast_double_elements);
+ if (map->instance_type() == JS_ARRAY_TYPE) {
length = AddInstruction(new(zone()) HJSArrayLength(object));
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
AddInstruction(elements);
+ if (is_store && !fast_double_elements) {
+ AddInstruction(new(zone()) HCheckMap(
+ elements, isolate()->factory()->fixed_array_map()));
+ }
} else {
AddInstruction(elements);
+ if (is_store && !fast_double_elements) {
+ AddInstruction(new(zone()) HCheckMap(
+ elements, isolate()->factory()->fixed_array_map()));
+ }
length = AddInstruction(new(zone()) HFixedArrayLength(elements));
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
}
- return new(zone()) HLoadKeyedFastElement(elements, checked_key);
-}
-
-
-HInstruction* HGraphBuilder::BuildLoadKeyedSpecializedArrayElement(
- HValue* object,
- HValue* key,
- Property* expr) {
- ASSERT(!expr->key()->IsPropertyName() && expr->IsMonomorphic());
- AddInstruction(new(zone()) HCheckNonSmi(object));
- Handle<Map> map = expr->GetMonomorphicReceiverType();
- ASSERT(!map->has_fast_elements());
- ASSERT(map->has_external_array_elements());
- AddInstruction(new(zone()) HCheckMap(object, map));
- HLoadElements* elements = new(zone()) HLoadElements(object);
- AddInstruction(elements);
- HInstruction* length = new(zone()) HExternalArrayLength(elements);
- AddInstruction(length);
- HInstruction* checked_key =
- AddInstruction(new(zone()) HBoundsCheck(key, length));
- HLoadExternalArrayPointer* external_elements =
- new(zone()) HLoadExternalArrayPointer(elements);
- AddInstruction(external_elements);
- HLoadKeyedSpecializedArrayElement* pixel_array_value =
- new(zone()) HLoadKeyedSpecializedArrayElement(
- external_elements, checked_key, expr->external_array_type());
- return pixel_array_value;
-}
-
-
-HInstruction* HGraphBuilder::BuildLoadKeyed(HValue* obj,
- HValue* key,
- Property* prop) {
- if (prop->IsMonomorphic()) {
- Handle<Map> receiver_type(prop->GetMonomorphicReceiverType());
- // An object has either fast elements or pixel array elements, but never
- // both. Pixel array maps that are assigned to pixel array elements are
- // always created with the fast elements flag cleared.
- if (receiver_type->has_external_array_elements()) {
- return BuildLoadKeyedSpecializedArrayElement(obj, key, prop);
- } else if (receiver_type->has_fast_elements()) {
- return BuildLoadKeyedFastElement(obj, key, prop);
+ if (is_store) {
+ if (fast_double_elements) {
+ return new(zone()) HStoreKeyedFastDoubleElement(elements,
+ checked_key,
+ val);
+ } else {
+ return new(zone()) HStoreKeyedFastElement(elements, checked_key, val);
+ }
+ } else {
+ if (fast_double_elements) {
+ return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
+ } else {
+ return new(zone()) HLoadKeyedFastElement(elements, checked_key);
}
}
- return BuildLoadKeyedGeneric(obj, key);
+}
+
+
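The shape BuildMonomorphicElementAccess emits is one map check guarding everything, then a bounds check against the right length (JSArray length, fixed-array length, or external-array length), then a typed access with no further dispatch. A rough stand-in, with illustrative types and std::optional as the "deopt" signal:

#include <cstddef>
#include <optional>
#include <vector>

// Illustrative stand-in for the generated fast path: a "map" tag check, a
// bounds check, then a direct typed load.
struct FastDoubleObjectSketch {
  int map_tag;
  std::vector<double> elements;
};

std::optional<double> LoadKeyedMonomorphic(const FastDoubleObjectSketch& obj,
                                           std::size_t key,
                                           int expected_tag) {
  if (obj.map_tag != expected_tag) return std::nullopt;  // map check fails
  if (key >= obj.elements.size()) return std::nullopt;   // bounds check fails
  return obj.elements[key];  // typed access, no per-access dispatch
}
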
+HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
+ HValue* key,
+ HValue* val,
+ Expression* prop,
+ int ast_id,
+ int position,
+ bool is_store,
+ bool* has_side_effects) {
+ *has_side_effects = false;
+ AddInstruction(new(zone()) HCheckNonSmi(object));
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(object));
+ ZoneMapList* maps = prop->GetReceiverTypes();
+ bool todo_external_array = false;
+
+ static const int kNumElementTypes = JSObject::kElementsKindCount;
+ bool type_todo[kNumElementTypes];
+ for (int i = 0; i < kNumElementTypes; ++i) {
+ type_todo[i] = false;
+ }
+
+ for (int i = 0; i < maps->length(); ++i) {
+ ASSERT(maps->at(i)->IsMap());
+ type_todo[maps->at(i)->elements_kind()] = true;
+ if (maps->at(i)->elements_kind()
+ >= JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
+ todo_external_array = true;
+ }
+ }
+
+ HBasicBlock* join = graph()->CreateBasicBlock();
+
+ HInstruction* elements_kind_instr =
+ AddInstruction(new(zone()) HElementsKind(object));
+ HInstruction* elements = NULL;
+ HLoadExternalArrayPointer* external_elements = NULL;
+ HInstruction* checked_key = NULL;
+
+ // FAST_ELEMENTS is assumed to be the first case.
+ STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
+
+ for (JSObject::ElementsKind elements_kind = JSObject::FAST_ELEMENTS;
+ elements_kind <= JSObject::LAST_ELEMENTS_KIND;
+ elements_kind = JSObject::ElementsKind(elements_kind + 1)) {
+ // After having handled FAST_ELEMENTS and DICTIONARY_ELEMENTS, we
+ // need to add some code that's executed for all external array cases.
+ STATIC_ASSERT(JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
+ JSObject::LAST_ELEMENTS_KIND);
+ if (elements_kind == JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
+ && todo_external_array) {
+ elements = AddInstruction(new(zone()) HLoadElements(object));
+ // We need to forcibly prevent some ElementsKind-dependent instructions
+ // from being hoisted out of any loops they might occur in, because
+ // the current loop-invariant-code-motion algorithm isn't clever enough
+ // to deal with them properly.
+ // There's some performance to be gained by developing a smarter
+ // solution for this.
+ elements->ClearFlag(HValue::kUseGVN);
+ HInstruction* length =
+ AddInstruction(new(zone()) HExternalArrayLength(elements));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ external_elements = new(zone()) HLoadExternalArrayPointer(elements);
+ AddInstruction(external_elements);
+ }
+ if (type_todo[elements_kind]) {
+ HBasicBlock* if_true = graph()->CreateBasicBlock();
+ HBasicBlock* if_false = graph()->CreateBasicBlock();
+ HCompareConstantEqAndBranch* compare =
+ new(zone()) HCompareConstantEqAndBranch(elements_kind_instr,
+ elements_kind,
+ Token::EQ_STRICT);
+ compare->SetSuccessorAt(0, if_true);
+ compare->SetSuccessorAt(1, if_false);
+ current_block()->Finish(compare);
+
+ set_current_block(if_true);
+ HInstruction* access;
+ if (elements_kind == JSObject::FAST_ELEMENTS ||
+ elements_kind == JSObject::FAST_DOUBLE_ELEMENTS) {
+ HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
+ HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
+ HHasInstanceTypeAndBranch* typecheck =
+ new(zone()) HHasInstanceTypeAndBranch(object, JS_ARRAY_TYPE);
+ typecheck->SetSuccessorAt(0, if_jsarray);
+ typecheck->SetSuccessorAt(1, if_fastobject);
+ current_block()->Finish(typecheck);
+
+ set_current_block(if_jsarray);
+ HInstruction* length = new(zone()) HJSArrayLength(object);
+ AddInstruction(length);
+ length->ClearFlag(HValue::kUseGVN);
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ elements = AddInstruction(new(zone()) HLoadElements(object));
+ elements->ClearFlag(HValue::kUseGVN);
+ bool fast_double_elements =
+ elements_kind == JSObject::FAST_DOUBLE_ELEMENTS;
+ if (is_store) {
+ if (fast_double_elements) {
+ access = AddInstruction(
+ new(zone()) HStoreKeyedFastDoubleElement(elements,
+ checked_key,
+ val));
+ } else {
+ AddInstruction(new(zone()) HCheckMap(
+ elements, isolate()->factory()->fixed_array_map()));
+ access = AddInstruction(
+ new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
+ }
+ } else {
+ if (fast_double_elements) {
+ access = AddInstruction(
+ new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
+ } else {
+ access = AddInstruction(
+ new(zone()) HLoadKeyedFastElement(elements, checked_key));
+ }
+ Push(access);
+ }
+ *has_side_effects |= access->HasSideEffects();
+ if (position != -1) {
+ access->set_position(position);
+ }
+ if_jsarray->Goto(join);
+
+ set_current_block(if_fastobject);
+ elements = AddInstruction(new(zone()) HLoadElements(object));
+ elements->ClearFlag(HValue::kUseGVN);
+ if (is_store && !fast_double_elements) {
+ AddInstruction(new(zone()) HCheckMap(
+ elements, isolate()->factory()->fixed_array_map()));
+ }
+ length = AddInstruction(new(zone()) HFixedArrayLength(elements));
+ checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ if (is_store) {
+ if (fast_double_elements) {
+ access = AddInstruction(
+ new(zone()) HStoreKeyedFastDoubleElement(elements,
+ checked_key,
+ val));
+ } else {
+ access = AddInstruction(
+ new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
+ }
+ } else {
+ if (fast_double_elements) {
+ access = AddInstruction(
+ new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
+ } else {
+ access = AddInstruction(
+ new(zone()) HLoadKeyedFastElement(elements, checked_key));
+ }
+ }
+ } else if (elements_kind == JSObject::DICTIONARY_ELEMENTS) {
+ if (is_store) {
+ access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
+ } else {
+ access = AddInstruction(BuildLoadKeyedGeneric(object, key));
+ }
+ } else { // External array elements.
+ access = AddInstruction(BuildExternalArrayElementAccess(
+ external_elements, checked_key, val, elements_kind, is_store));
+ }
+ *has_side_effects |= access->HasSideEffects();
+ access->set_position(position);
+ if (!is_store) {
+ Push(access);
+ }
+ current_block()->Goto(join);
+ set_current_block(if_false);
+ }
+ }
+
+ // Deopt if none of the cases matched.
+ current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
+ join->SetJoinId(ast_id);
+ set_current_block(join);
+ return is_store ? NULL : Pop();
+}
+
+
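The control flow HandlePolymorphicElementAccess emits can be modeled outside Hydrogen: one strict-equality test per elements kind seen in type feedback, a per-kind access on the taken edge, and a deoptimizing exit when nothing matches. A minimal standalone C++ sketch under those assumptions follows; the enum and names are illustrative, not the real JSObject kinds.

    #include <cstdio>
    #include <stdexcept>
    #include <vector>

    // Illustrative kinds only; the real list is JSObject::ElementsKind.
    enum ElementsKind {
      FAST_ELEMENTS = 0,        // the builder assumes this comes first
      FAST_DOUBLE_ELEMENTS,
      DICTIONARY_ELEMENTS,
      EXTERNAL_BYTE_ELEMENTS,   // stands in for the external-array range
      LAST_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS
    };

    // Interpreter-level model of the emitted graph: one equality test per
    // kind marked in type_todo, a per-kind access on the taken edge, and a
    // deopt (here an exception) when no case matches.
    double AccessElement(ElementsKind actual, const bool type_todo[],
                         size_t index, const std::vector<double>& backing) {
      for (int k = FAST_ELEMENTS; k <= LAST_ELEMENTS_KIND; ++k) {
        if (!type_todo[k]) continue;  // no branch emitted for unseen kinds
        if (actual != static_cast<ElementsKind>(k)) continue;  // if_false edge
        // if_true edge: the per-kind access; every arm then jumps to join.
        switch (k) {
          case FAST_ELEMENTS:
          case FAST_DOUBLE_ELEMENTS:
            return backing.at(index);  // bounds-checked fast path
          case DICTIONARY_ELEMENTS:
            return backing.at(index);  // models the generic stub call
          default:
            return backing.at(index);  // models external-array access
        }
      }
      // Fall-through: FinishExitWithDeoptimization in the real builder.
      throw std::runtime_error("deopt: unexpected elements kind");
    }

    int main() {
      bool todo[LAST_ELEMENTS_KIND + 1] = {false};
      todo[FAST_ELEMENTS] = true;  // feedback saw only fast arrays
      std::vector<double> elems = {1.0, 2.0, 3.0};
      std::printf("%g\n", AccessElement(FAST_ELEMENTS, todo, 1, elems));
      return 0;
    }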
+HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
+ HValue* key,
+ HValue* val,
+ Expression* expr,
+ int ast_id,
+ int position,
+ bool is_store,
+ bool* has_side_effects) {
+ ASSERT(!expr->IsPropertyName());
+ HInstruction* instr = NULL;
+ if (expr->IsMonomorphic()) {
+ instr = BuildMonomorphicElementAccess(obj, key, val, expr, is_store);
+ } else if (expr->GetReceiverTypes() != NULL &&
+ !expr->GetReceiverTypes()->is_empty()) {
+ return HandlePolymorphicElementAccess(
+ obj, key, val, expr, ast_id, position, is_store, has_side_effects);
+ } else {
+ if (is_store) {
+ instr = BuildStoreKeyedGeneric(obj, key, val);
+ } else {
+ instr = BuildLoadKeyedGeneric(obj, key);
+ }
+ }
+ instr->set_position(position);
+ AddInstruction(instr);
+ *has_side_effects = instr->HasSideEffects();
+ return instr;
}
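The routing logic in HandleKeyedElementAccess itself is small: type feedback picks one of three strategies. A toy sketch of that three-way split, with tags standing in for the instruction-building paths:

    #include <cstdio>

    enum AccessStrategy { kMonomorphic, kPolymorphic, kGeneric };

    // Mirrors the split above: exactly one receiver map means specialized
    // access; several maps mean the per-kind dispatch; no usable feedback
    // means the generic keyed load/store stub.
    AccessStrategy ChooseStrategy(bool is_monomorphic, int receiver_map_count) {
      if (is_monomorphic) return kMonomorphic;
      if (receiver_map_count > 0) return kPolymorphic;
      return kGeneric;
    }

    int main() {
      std::printf("%d %d %d\n",
                  ChooseStrategy(true, 1),    // kMonomorphic
                  ChooseStrategy(false, 3),   // kPolymorphic
                  ChooseStrategy(false, 0));  // kGeneric
      return 0;
    }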
@@ -3840,87 +4217,6 @@
function_strict_mode());
}
-
-HInstruction* HGraphBuilder::BuildStoreKeyedFastElement(HValue* object,
- HValue* key,
- HValue* val,
- Expression* expr) {
- ASSERT(expr->IsMonomorphic());
- AddInstruction(new(zone()) HCheckNonSmi(object));
- Handle<Map> map = expr->GetMonomorphicReceiverType();
- ASSERT(map->has_fast_elements());
- AddInstruction(new(zone()) HCheckMap(object, map));
- HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
- AddInstruction(new(zone()) HCheckMap(
- elements, isolate()->factory()->fixed_array_map()));
- bool is_array = (map->instance_type() == JS_ARRAY_TYPE);
- HInstruction* length = NULL;
- if (is_array) {
- length = AddInstruction(new(zone()) HJSArrayLength(object));
- } else {
- length = AddInstruction(new(zone()) HFixedArrayLength(elements));
- }
- HInstruction* checked_key =
- AddInstruction(new(zone()) HBoundsCheck(key, length));
- return new(zone()) HStoreKeyedFastElement(elements, checked_key, val);
-}
-
-
-HInstruction* HGraphBuilder::BuildStoreKeyedSpecializedArrayElement(
- HValue* object,
- HValue* key,
- HValue* val,
- Expression* expr) {
- ASSERT(expr->IsMonomorphic());
- AddInstruction(new(zone()) HCheckNonSmi(object));
- Handle<Map> map = expr->GetMonomorphicReceiverType();
- ASSERT(!map->has_fast_elements());
- ASSERT(map->has_external_array_elements());
- AddInstruction(new(zone()) HCheckMap(object, map));
- HLoadElements* elements = new(zone()) HLoadElements(object);
- AddInstruction(elements);
- HInstruction* length = AddInstruction(
- new(zone()) HExternalArrayLength(elements));
- HInstruction* checked_key =
- AddInstruction(new(zone()) HBoundsCheck(key, length));
- HLoadExternalArrayPointer* external_elements =
- new(zone()) HLoadExternalArrayPointer(elements);
- AddInstruction(external_elements);
- if (expr->external_array_type() == kExternalPixelArray) {
- HClampToUint8* clamp = new(zone()) HClampToUint8(val);
- AddInstruction(clamp);
- val = clamp;
- }
- return new(zone()) HStoreKeyedSpecializedArrayElement(
- external_elements,
- checked_key,
- val,
- expr->external_array_type());
-}
-
-
-HInstruction* HGraphBuilder::BuildStoreKeyed(HValue* object,
- HValue* key,
- HValue* value,
- Expression* expr) {
- if (expr->IsMonomorphic()) {
- Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
- // An object has either fast elements or external array elements, but
- // never both. Pixel array maps that are assigned to pixel array elements
- // are always created with the fast elements flag cleared.
- if (receiver_type->has_external_array_elements()) {
- return BuildStoreKeyedSpecializedArrayElement(object,
- key,
- value,
- expr);
- } else if (receiver_type->has_fast_elements()) {
- return BuildStoreKeyedFastElement(object, key, value, expr);
- }
- }
- return BuildStoreKeyedGeneric(object, key, value);
-}
-
-
bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
VariableProxy* proxy = expr->obj()->AsVariableProxy();
if (proxy == NULL) return false;
@@ -3986,9 +4282,11 @@
CHECK_ALIVE(VisitForValue(expr->key()));
HValue* index = Pop();
HValue* string = Pop();
- HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
+ HValue* context = environment()->LookupContext();
+ HStringCharCodeAt* char_code =
+ BuildStringCharCodeAt(context, string, index);
AddInstruction(char_code);
- instr = new(zone()) HStringCharFromCode(char_code);
+ instr = new(zone()) HStringCharFromCode(context, char_code);
} else if (expr->IsFunctionPrototype()) {
HValue* function = Pop();
@@ -4004,7 +4302,8 @@
instr = BuildLoadNamed(obj, expr, types->first(), name);
} else if (types != NULL && types->length() > 1) {
AddInstruction(new(zone()) HCheckNonSmi(obj));
- instr = new(zone()) HLoadNamedFieldPolymorphic(obj, types, name);
+ HValue* context = environment()->LookupContext();
+ instr = new(zone()) HLoadNamedFieldPolymorphic(context, obj, types, name);
} else {
instr = BuildLoadNamedGeneric(obj, expr);
}
@@ -4014,10 +4313,25 @@
HValue* key = Pop();
HValue* obj = Pop();
- instr = BuildLoadKeyed(obj, key, expr);
+
+ bool has_side_effects = false;
+ HValue* load = HandleKeyedElementAccess(
+ obj, key, NULL, expr, expr->id(), expr->position(),
+ false, // is_store
+ &has_side_effects);
+ if (has_side_effects) {
+ if (ast_context()->IsEffect()) {
+ AddSimulate(expr->id());
+ } else {
+ Push(load);
+ AddSimulate(expr->id());
+ Drop(1);
+ }
+ }
+ return ast_context()->ReturnValue(load);
}
instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
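The push/AddSimulate/Drop pattern above keeps a side-effecting load's result live in the deopt environment exactly while the simulate snapshots the expression stack. A tiny stack model of that discipline; Environment here is a hypothetical stand-in, not the Hydrogen class:

    #include <cassert>
    #include <vector>

    // Toy expression-stack environment: a simulate "snapshots" whatever is
    // live on the stack, so a value needed after a possible deopt must be
    // pushed before the simulate and dropped afterwards.
    struct Environment {
      std::vector<int> stack;
      std::vector<std::vector<int> > snapshots;
      void Push(int v) { stack.push_back(v); }
      void Drop(int n) { stack.resize(stack.size() - n); }
      void Simulate() { snapshots.push_back(stack); }
    };

    int LoadKeyed(Environment* env, bool has_side_effects,
                  bool effect_context) {
      int load = 42;  // stands in for the HLoadKeyed* result
      if (has_side_effects) {
        if (effect_context) {
          env->Simulate();  // value unused: nothing to keep live
        } else {
          env->Push(load);  // keep the result live across the simulate
          env->Simulate();
          env->Drop(1);
        }
      }
      return load;
    }

    int main() {
      Environment env;
      LoadKeyed(&env, /*has_side_effects=*/true, /*effect_context=*/false);
      assert(env.snapshots.size() == 1 && env.snapshots[0].back() == 42);
      return 0;
    }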
@@ -4105,8 +4419,7 @@
if (!ast_context()->IsEffect()) Push(call);
current_block()->Goto(join);
} else {
- ast_context()->ReturnInstruction(call, expr->id());
- return;
+ return ast_context()->ReturnInstruction(call, expr->id());
}
}
@@ -4117,7 +4430,7 @@
if (join->HasPredecessor()) {
set_current_block(join);
join->SetJoinId(expr->id());
- if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+ if (!ast_context()->IsEffect()) return ast_context()->ReturnValue(Pop());
} else {
set_current_block(NULL);
}
@@ -4229,14 +4542,6 @@
return false;
}
- // Check if we can handle all declarations in the inlined functions.
- VisitDeclarations(target_info.scope()->declarations());
- if (HasStackOverflow()) {
- TraceInline(target, caller, "target has non-trivial declaration");
- ClearStackOverflow();
- return false;
- }
-
// Don't inline functions that use the arguments object or that
// have a mismatching number of parameters.
int arity = expr->arguments()->length();
@@ -4246,6 +4551,15 @@
return false;
}
+ // All declarations must be inlineable.
+ ZoneList<Declaration*>* decls = target_info.scope()->declarations();
+ int decl_count = decls->length();
+ for (int i = 0; i < decl_count; ++i) {
+ if (!decls->at(i)->IsInlineable()) {
+ TraceInline(target, caller, "target has non-trivial declaration");
+ return false;
+ }
+ }
// All statements in the body must be inlineable.
for (int i = 0, count = function->body()->length(); i < count; ++i) {
if (!function->body()->at(i)->IsInlineable()) {
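The inlineability checks are now plain early-out scans over the declarations (and, as before, the body statements) rather than a full visit that probes HasStackOverflow afterwards. A sketch of the scan under an assumed Declaration type:

    #include <cstdio>
    #include <vector>

    struct Declaration {
      bool inlineable;
      bool IsInlineable() const { return inlineable; }
    };

    // Reject the inlining candidate on the first declaration the optimizer
    // cannot handle; the real code then reports "target has non-trivial
    // declaration" via TraceInline.
    bool AllInlineable(const std::vector<Declaration>& decls) {
      for (size_t i = 0; i < decls.size(); ++i) {
        if (!decls[i].IsInlineable()) return false;
      }
      return true;
    }

    int main() {
      std::vector<Declaration> decls = {{true}, {false}};
      std::printf("%d\n", AllInlineable(decls));  // 0: inlining is rejected
      return 0;
    }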
@@ -4264,6 +4578,13 @@
TraceInline(target, caller, "could not generate deoptimization info");
return false;
}
+ if (target_shared->scope_info() == SerializedScopeInfo::Empty()) {
+ // The scope info might not have been set if a lazily compiled
+ // function is inlined before being called for the first time.
+ Handle<SerializedScopeInfo> target_scope_info =
+ SerializedScopeInfo::Create(target_info.scope());
+ target_shared->set_scope_info(*target_scope_info);
+ }
target_shared->EnableDeoptimizationSupport(*target_info.code());
Compiler::RecordFunctionCompilation(Logger::FUNCTION_TAG,
&target_info,
@@ -4271,6 +4592,9 @@
}
// ----------------------------------------------------------------
+ // After this point, we've made a decision to inline this function (so
+ // TryInline should always return true).
+
// Save the pending call context and type feedback oracle. Set up new ones
// for the inlined function.
ASSERT(target_shared->has_deoptimization_support());
@@ -4283,17 +4607,16 @@
HEnvironment* inner_env =
environment()->CopyForInlining(target,
function,
- HEnvironment::HYDROGEN,
undefined,
call_kind);
HBasicBlock* body_entry = CreateBasicBlock(inner_env);
current_block()->Goto(body_entry);
-
body_entry->SetJoinId(expr->ReturnId());
set_current_block(body_entry);
AddInstruction(new(zone()) HEnterInlined(target,
function,
call_kind));
+ VisitDeclarations(target_info.scope()->declarations());
VisitStatements(function->body());
if (HasStackOverflow()) {
// Bail out if the inline function did, as we cannot residualize a call
@@ -4316,7 +4639,7 @@
ASSERT(function_return() != NULL);
ASSERT(call_context()->IsEffect() || call_context()->IsValue());
if (call_context()->IsEffect()) {
- current_block()->Goto(function_return(), false);
+ current_block()->Goto(function_return());
} else {
current_block()->AddLeaveInlined(undefined, function_return());
}
@@ -4328,11 +4651,11 @@
// TODO(3168478): refactor to avoid this.
HBasicBlock* empty_true = graph()->CreateBasicBlock();
HBasicBlock* empty_false = graph()->CreateBasicBlock();
- HTest* test = new(zone()) HTest(undefined, empty_true, empty_false);
+ HBranch* test = new(zone()) HBranch(undefined, empty_true, empty_false);
current_block()->Finish(test);
- empty_true->Goto(inlined_test_context()->if_true(), false);
- empty_false->Goto(inlined_test_context()->if_false(), false);
+ empty_true->Goto(inlined_test_context()->if_true());
+ empty_false->Goto(inlined_test_context()->if_false());
}
}
@@ -4340,25 +4663,22 @@
if (inlined_test_context() != NULL) {
HBasicBlock* if_true = inlined_test_context()->if_true();
HBasicBlock* if_false = inlined_test_context()->if_false();
- ASSERT(ast_context() == inlined_test_context());
+
// Pop the return test context from the expression context stack.
+ ASSERT(ast_context() == inlined_test_context());
ClearInlinedTestContext();
// Forward to the real test context.
if (if_true->HasPredecessor()) {
if_true->SetJoinId(expr->id());
HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
- if_true->Goto(true_target, false);
+ if_true->Goto(true_target);
}
if (if_false->HasPredecessor()) {
if_false->SetJoinId(expr->id());
HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
- if_false->Goto(false_target, false);
+ if_false->Goto(false_target);
}
-
- // TODO(kmillikin): Come up with a better way to handle this. It is too
- // subtle. NULL here indicates that the enclosing context has no control
- // flow to handle.
set_current_block(NULL);
} else if (function_return()->HasPredecessor()) {
@@ -4387,18 +4707,20 @@
if (argument_count == 2 && check_type == STRING_CHECK) {
HValue* index = Pop();
HValue* string = Pop();
+ HValue* context = environment()->LookupContext();
ASSERT(!expr->holder().is_null());
AddInstruction(new(zone()) HCheckPrototypeMaps(
oracle()->GetPrototypeForPrimitiveCheck(STRING_CHECK),
expr->holder()));
- HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
+ HStringCharCodeAt* char_code =
+ BuildStringCharCodeAt(context, string, index);
if (id == kStringCharCodeAt) {
ast_context()->ReturnInstruction(char_code, expr->id());
return true;
}
AddInstruction(char_code);
HStringCharFromCode* result =
- new(zone()) HStringCharFromCode(char_code);
+ new(zone()) HStringCharFromCode(context, char_code);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -4413,8 +4735,10 @@
if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
AddCheckConstantFunction(expr, receiver, receiver_map, true);
HValue* argument = Pop();
+ HValue* context = environment()->LookupContext();
Drop(1); // Receiver.
- HUnaryMathOperation* op = new(zone()) HUnaryMathOperation(argument, id);
+ HUnaryMathOperation* op =
+ new(zone()) HUnaryMathOperation(context, argument, id);
op->set_position(expr->position());
ast_context()->ReturnInstruction(op, expr->id());
return true;
@@ -4426,31 +4750,33 @@
HValue* right = Pop();
HValue* left = Pop();
Pop(); // Pop receiver.
+ HValue* context = environment()->LookupContext();
HInstruction* result = NULL;
// Use sqrt() if exponent is 0.5 or -0.5.
if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
double exponent = HConstant::cast(right)->DoubleValue();
if (exponent == 0.5) {
- result = new(zone()) HUnaryMathOperation(left, kMathPowHalf);
+ result =
+ new(zone()) HUnaryMathOperation(context, left, kMathPowHalf);
} else if (exponent == -0.5) {
HConstant* double_one =
new(zone()) HConstant(Handle<Object>(Smi::FromInt(1)),
Representation::Double());
AddInstruction(double_one);
HUnaryMathOperation* square_root =
- new(zone()) HUnaryMathOperation(left, kMathPowHalf);
+ new(zone()) HUnaryMathOperation(context, left, kMathPowHalf);
AddInstruction(square_root);
// MathPowHalf doesn't have side effects so there's no need for
// an environment simulation here.
ASSERT(!square_root->HasSideEffects());
- result = new(zone()) HDiv(double_one, square_root);
+ result = new(zone()) HDiv(context, double_one, square_root);
} else if (exponent == 2.0) {
- result = new(zone()) HMul(left, left);
+ result = new(zone()) HMul(context, left, left);
}
} else if (right->IsConstant() &&
HConstant::cast(right)->HasInteger32Value() &&
HConstant::cast(right)->Integer32Value() == 2) {
- result = new(zone()) HMul(left, left);
+ result = new(zone()) HMul(context, left, left);
}
if (result == NULL) {
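The constant-exponent cases above amount to classic strength reduction for Math.pow. A numeric sketch of the same rewrites; note that the real kMathPowHalf also matches JavaScript's Math.pow(-Infinity, 0.5), which a plain sqrt does not, so this sketch ignores that edge:

    #include <cmath>
    #include <cstdio>

    // Strength-reduced forms chosen by the builder for constant exponents.
    double FastPow(double base, double exponent) {
      if (exponent == 0.5)  return std::sqrt(base);        // kMathPowHalf
      if (exponent == -0.5) return 1.0 / std::sqrt(base);  // HDiv(1, sqrt)
      if (exponent == 2.0)  return base * base;            // HMul(x, x)
      return std::pow(base, exponent);                     // generic HPower
    }

    int main() {
      std::printf("%g %g %g\n", FastPow(9.0, 0.5), FastPow(4.0, -0.5),
                  FastPow(3.0, 2.0));  // 3 0.5 9
      return 0;
    }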
@@ -4532,7 +4858,7 @@
if (prop != NULL) {
if (!prop->key()->IsPropertyName()) {
// Keyed function call.
- CHECK_ALIVE(VisitForValue(prop->obj()));
+ CHECK_ALIVE(VisitArgument(prop->obj()));
CHECK_ALIVE(VisitForValue(prop->key()));
// Push receiver and key like the non-optimized code generator expects them.

@@ -4541,15 +4867,13 @@
Push(key);
Push(receiver);
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ CHECK_ALIVE(VisitArgumentList(expr->arguments()));
HValue* context = environment()->LookupContext();
- call = PreProcessCall(
- new(zone()) HCallKeyed(context, key, argument_count));
+ call = new(zone()) HCallKeyed(context, key, argument_count);
call->set_position(expr->position());
- Drop(1); // Key.
- ast_context()->ReturnInstruction(call, expr->id());
- return;
+ Drop(argument_count + 1); // 1 is the key.
+ return ast_context()->ReturnInstruction(call, expr->id());
}
// Named function call.
@@ -4607,11 +4931,6 @@
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
bool global_call = (var != NULL) && var->is_global() && !var->is_this();
- if (!global_call) {
- ++argument_count;
- CHECK_ALIVE(VisitForValue(expr->expression()));
- }
-
if (global_call) {
bool known_global_function = false;
// If there is a global property cell for the name at compile time and
@@ -4651,27 +4970,34 @@
argument_count));
} else {
HValue* context = environment()->LookupContext();
- PushAndAdd(new(zone()) HGlobalObject(context));
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ HGlobalObject* receiver = new(zone()) HGlobalObject(context);
+ AddInstruction(receiver);
+ PushAndAdd(new(zone()) HPushArgument(receiver));
+ CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- call = PreProcessCall(new(zone()) HCallGlobal(context,
- var->name(),
- argument_count));
+ call = new(zone()) HCallGlobal(context, var->name(), argument_count);
+ Drop(argument_count);
}
} else {
+ CHECK_ALIVE(VisitArgument(expr->expression()));
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
+ HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
AddInstruction(global_object);
- PushAndAdd(new(zone()) HGlobalReceiver(global_object));
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ AddInstruction(receiver);
+ PushAndAdd(new(zone()) HPushArgument(receiver));
+ CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- call = PreProcessCall(new(zone()) HCallFunction(context, argument_count));
+ // The function to call is treated as an argument to the call function
+ // stub.
+ call = new(zone()) HCallFunction(context, argument_count + 1);
+ Drop(argument_count + 1);
}
}
call->set_position(expr->position());
- ast_context()->ReturnInstruction(call, expr->id());
+ return ast_context()->ReturnInstruction(call, expr->id());
}
@@ -4681,19 +5007,19 @@
ASSERT(current_block()->HasPredecessor());
// The constructor function is also used as the receiver argument to the
// JS construct call builtin.
- CHECK_ALIVE(VisitForValue(expr->expression()));
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ HValue* constructor = NULL;
+ CHECK_ALIVE(constructor = VisitArgument(expr->expression()));
+ CHECK_ALIVE(VisitArgumentList(expr->arguments()));
HValue* context = environment()->LookupContext();
// The constructor is both an operand to the instruction and an argument
// to the construct call.
int arg_count = expr->arguments()->length() + 1; // Plus constructor.
- HValue* constructor = environment()->ExpressionStackAt(arg_count - 1);
HCallNew* call = new(zone()) HCallNew(context, constructor, arg_count);
call->set_position(expr->position());
- PreProcessCall(call);
- ast_context()->ReturnInstruction(call, expr->id());
+ Drop(arg_count);
+ return ast_context()->ReturnInstruction(call, expr->id());
}
@@ -4739,13 +5065,14 @@
ASSERT(function->intrinsic_type == Runtime::RUNTIME);
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
+ HValue* context = environment()->LookupContext();
Handle<String> name = expr->name();
int argument_count = expr->arguments()->length();
HCallRuntime* call =
- new(zone()) HCallRuntime(name, function, argument_count);
+ new(zone()) HCallRuntime(context, name, function, argument_count);
call->set_position(RelocInfo::kNoPosition);
Drop(argument_count);
- ast_context()->ReturnInstruction(call, expr->id());
+ return ast_context()->ReturnInstruction(call, expr->id());
}
}
@@ -4773,26 +5100,27 @@
// Result of deleting non-property, non-variable reference is true.
// Evaluate the subexpression for side effects.
CHECK_ALIVE(VisitForEffect(expr->expression()));
- ast_context()->ReturnValue(graph()->GetConstantTrue());
+ return ast_context()->ReturnValue(graph()->GetConstantTrue());
} else if (var != NULL &&
!var->is_global() &&
var->AsSlot() != NULL &&
var->AsSlot()->type() != Slot::LOOKUP) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
- ast_context()->ReturnValue(graph()->GetConstantFalse());
+ return ast_context()->ReturnValue(graph()->GetConstantFalse());
} else if (prop != NULL) {
if (prop->is_synthetic()) {
// Result of deleting parameters is false, even when they rewrite
// to accesses on the arguments object.
- ast_context()->ReturnValue(graph()->GetConstantFalse());
- } else {
+ return ast_context()->ReturnValue(graph()->GetConstantFalse());
+ } else {
CHECK_ALIVE(VisitForValue(prop->obj()));
CHECK_ALIVE(VisitForValue(prop->key()));
HValue* key = Pop();
HValue* obj = Pop();
- HDeleteProperty* instr = new(zone()) HDeleteProperty(obj, key);
- ast_context()->ReturnInstruction(instr, expr->id());
+ HValue* context = environment()->LookupContext();
+ HDeleteProperty* instr = new(zone()) HDeleteProperty(context, obj, key);
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
} else if (var->is_global()) {
Bailout("delete with global variable");
@@ -4804,42 +5132,58 @@
void HGraphBuilder::VisitVoid(UnaryOperation* expr) {
CHECK_ALIVE(VisitForEffect(expr->expression()));
- ast_context()->ReturnValue(graph()->GetConstantUndefined());
+ return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
void HGraphBuilder::VisitTypeof(UnaryOperation* expr) {
CHECK_ALIVE(VisitForTypeOf(expr->expression()));
HValue* value = Pop();
- ast_context()->ReturnInstruction(new(zone()) HTypeof(value), expr->id());
+ HValue* context = environment()->LookupContext();
+ HInstruction* instr = new(zone()) HTypeof(context, value);
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
void HGraphBuilder::VisitAdd(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
- HInstruction* instr = new(zone()) HMul(value, graph_->GetConstant1());
- ast_context()->ReturnInstruction(instr, expr->id());
+ HValue* context = environment()->LookupContext();
+ HInstruction* instr =
+ new(zone()) HMul(context, value, graph_->GetConstant1());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
void HGraphBuilder::VisitSub(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
- HInstruction* instr = new(zone()) HMul(value, graph_->GetConstantMinus1());
+ HValue* context = environment()->LookupContext();
+ HInstruction* instr =
+ new(zone()) HMul(context, value, graph_->GetConstantMinus1());
TypeInfo info = oracle()->UnaryType(expr);
+ if (info.IsUninitialized()) {
+ AddInstruction(new(zone()) HSoftDeoptimize);
+ current_block()->MarkAsDeoptimizing();
+ info = TypeInfo::Unknown();
+ }
Representation rep = ToRepresentation(info);
TraceRepresentation(expr->op(), info, instr, rep);
instr->AssumeRepresentation(rep);
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
void HGraphBuilder::VisitBitNot(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
+ TypeInfo info = oracle()->UnaryType(expr);
+ if (info.IsUninitialized()) {
+ AddInstruction(new(zone()) HSoftDeoptimize);
+ current_block()->MarkAsDeoptimizing();
+ }
HInstruction* instr = new(zone()) HBitNot(value);
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -4884,7 +5228,7 @@
HBasicBlock* join =
CreateJoin(materialize_false, materialize_true, expr->id());
set_current_block(join);
- if (join != NULL) ast_context()->ReturnValue(Pop());
+ if (join != NULL) return ast_context()->ReturnValue(Pop());
}
@@ -4913,7 +5257,8 @@
HConstant* delta = (expr->op() == Token::INC)
? graph_->GetConstant1()
: graph_->GetConstantMinus1();
- HInstruction* instr = new(zone()) HAdd(Top(), delta);
+ HValue* context = environment()->LookupContext();
+ HInstruction* instr = new(zone()) HAdd(context, Top(), delta);
TraceRepresentation(expr->op(), info, instr, rep);
instr->AssumeRepresentation(rep);
AddInstruction(instr);
@@ -4942,6 +5287,9 @@
HValue* after = NULL; // The result after incrementing or decrementing.
if (var != NULL) {
+ if (var->mode() == Variable::CONST) {
+ return Bailout("unsupported count operation with const");
+ }
// Argument of the count operation is a variable, not a property.
ASSERT(prop == NULL);
CHECK_ALIVE(VisitForValue(target));
@@ -4958,6 +5306,20 @@
} else if (var->IsStackAllocated()) {
Bind(var, after);
} else if (var->IsContextSlot()) {
+ // Bail out if we try to mutate a parameter value in a function using
+ // the arguments object. We do not (yet) correctly handle the
+ // arguments property of the function.
+ if (info()->scope()->arguments() != NULL) {
+ // Parameters will rewrite to context slots. We have no direct way
+ // to detect that the variable is a parameter.
+ int count = info()->scope()->num_parameters();
+ for (int i = 0; i < count; ++i) {
+ if (var == info()->scope()->parameter(i)) {
+ Bailout("assignment to parameter, function uses arguments object");
+ }
+ }
+ }
+
HValue* context = BuildContextChainWalk(var);
int index = var->AsSlot()->index();
HStoreContextSlot* instr =
@@ -5013,16 +5375,22 @@
HValue* obj = environment()->ExpressionStackAt(1);
HValue* key = environment()->ExpressionStackAt(0);
- HInstruction* load = BuildLoadKeyed(obj, key, prop);
- PushAndAdd(load);
- if (load->HasSideEffects()) AddSimulate(expr->CountId());
+ bool has_side_effects = false;
+ HValue* load = HandleKeyedElementAccess(
+ obj, key, NULL, prop, expr->CountId(), RelocInfo::kNoPosition,
+ false, // is_store
+ &has_side_effects);
+ Push(load);
+ if (has_side_effects) AddSimulate(expr->CountId());
after = BuildIncrement(returns_original_input, expr);
input = Pop();
expr->RecordTypeFeedback(oracle());
- HInstruction* store = BuildStoreKeyed(obj, key, after, expr);
- AddInstruction(store);
+ HandleKeyedElementAccess(obj, key, after, expr, expr->AssignmentId(),
+ RelocInfo::kNoPosition,
+ true, // is_store
+ &has_side_effects);
// Drop the key from the bailout environment. Overwrite the receiver
// with the result of the operation, and the placeholder with the
@@ -5030,28 +5398,18 @@
Drop(1);
environment()->SetExpressionStackAt(0, after);
if (returns_original_input) environment()->SetExpressionStackAt(1, input);
- if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ ASSERT(has_side_effects); // Stores always have side effects.
+ AddSimulate(expr->AssignmentId());
}
}
Drop(returns_original_input ? 2 : 1);
- ast_context()->ReturnValue(expr->is_postfix() ? input : after);
+ return ast_context()->ReturnValue(expr->is_postfix() ? input : after);
}
-HCompareSymbolEq* HGraphBuilder::BuildSymbolCompare(HValue* left,
- HValue* right,
- Token::Value op) {
- ASSERT(op == Token::EQ || op == Token::EQ_STRICT);
- AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsSymbol(left));
- AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsSymbol(right));
- return new(zone()) HCompareSymbolEq(left, right, op);
-}
-
-
-HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* string,
+HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* context,
+ HValue* string,
HValue* index) {
AddInstruction(new(zone()) HCheckNonSmi(string));
AddInstruction(HCheckInstanceType::NewIsString(string));
@@ -5059,15 +5417,67 @@
AddInstruction(length);
HInstruction* checked_index =
AddInstruction(new(zone()) HBoundsCheck(index, length));
- return new(zone()) HStringCharCodeAt(string, checked_index);
+ return new(zone()) HStringCharCodeAt(context, string, checked_index);
}
HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
HValue* left,
HValue* right) {
+ HValue* context = environment()->LookupContext();
TypeInfo info = oracle()->BinaryType(expr);
- HInstruction* instr = BuildBinaryOperation(expr->op(), left, right, info);
+ if (info.IsUninitialized()) {
+ AddInstruction(new(zone()) HSoftDeoptimize);
+ current_block()->MarkAsDeoptimizing();
+ info = TypeInfo::Unknown();
+ }
+ HInstruction* instr = NULL;
+ switch (expr->op()) {
+ case Token::ADD:
+ if (info.IsString()) {
+ AddInstruction(new(zone()) HCheckNonSmi(left));
+ AddInstruction(HCheckInstanceType::NewIsString(left));
+ AddInstruction(new(zone()) HCheckNonSmi(right));
+ AddInstruction(HCheckInstanceType::NewIsString(right));
+ instr = new(zone()) HStringAdd(context, left, right);
+ } else {
+ instr = new(zone()) HAdd(context, left, right);
+ }
+ break;
+ case Token::SUB:
+ instr = new(zone()) HSub(context, left, right);
+ break;
+ case Token::MUL:
+ instr = new(zone()) HMul(context, left, right);
+ break;
+ case Token::MOD:
+ instr = new(zone()) HMod(context, left, right);
+ break;
+ case Token::DIV:
+ instr = new(zone()) HDiv(context, left, right);
+ break;
+ case Token::BIT_XOR:
+ instr = new(zone()) HBitXor(context, left, right);
+ break;
+ case Token::BIT_AND:
+ instr = new(zone()) HBitAnd(context, left, right);
+ break;
+ case Token::BIT_OR:
+ instr = new(zone()) HBitOr(context, left, right);
+ break;
+ case Token::SAR:
+ instr = new(zone()) HSar(context, left, right);
+ break;
+ case Token::SHR:
+ instr = new(zone()) HShr(context, left, right);
+ break;
+ case Token::SHL:
+ instr = new(zone()) HShl(context, left, right);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
// If we hit an uninitialized binary op stub we will get type info
// for a smi operation. If one of the operands is a constant string,
// do not generate code assuming it is a smi operation.
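Inlining the token switch keeps one constructor call per operator so each can receive the context operand. The shape reduces to a plain dispatch, sketched here over int operands (MOD and DIV are omitted to sidestep divide-by-zero; the token set is illustrative):

    #include <cstdio>
    #include <cstdlib>

    enum Token { ADD, SUB, MUL, BIT_XOR, BIT_AND, BIT_OR, SAR, SHR, SHL };

    // One case per token, mirroring the HAdd/HSub/... switch above;
    // unknown tokens hit UNREACHABLE() in the original.
    int BuildBinaryOperation(Token op, int left, int right) {
      switch (op) {
        case ADD:     return left + right;  // HStringAdd if feedback says string
        case SUB:     return left - right;
        case MUL:     return left * right;
        case BIT_XOR: return left ^ right;
        case BIT_AND: return left & right;
        case BIT_OR:  return left | right;
        case SAR:     return left >> (right & 31);  // arithmetic shift
        case SHR:     return (int)((unsigned)left >> (right & 31));
        case SHL:     return left << (right & 31);
      }
      std::abort();  // UNREACHABLE()
    }

    int main() {
      std::printf("%d %d\n", BuildBinaryOperation(ADD, 2, 3),
                  BuildBinaryOperation(SHR, 8, 1));  // 5 4
      return 0;
    }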
@@ -5087,36 +5497,6 @@
}
-HInstruction* HGraphBuilder::BuildBinaryOperation(
- Token::Value op, HValue* left, HValue* right, TypeInfo info) {
- switch (op) {
- case Token::ADD:
- if (info.IsString()) {
- AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsString(left));
- AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsString(right));
- return new(zone()) HStringAdd(left, right);
- } else {
- return new(zone()) HAdd(left, right);
- }
- case Token::SUB: return new(zone()) HSub(left, right);
- case Token::MUL: return new(zone()) HMul(left, right);
- case Token::MOD: return new(zone()) HMod(left, right);
- case Token::DIV: return new(zone()) HDiv(left, right);
- case Token::BIT_XOR: return new(zone()) HBitXor(left, right);
- case Token::BIT_AND: return new(zone()) HBitAnd(left, right);
- case Token::BIT_OR: return new(zone()) HBitOr(left, right);
- case Token::SAR: return new(zone()) HSar(left, right);
- case Token::SHR: return new(zone()) HShr(left, right);
- case Token::SHL: return new(zone()) HShl(left, right);
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
// Check for the form (%_ClassOf(foo) === 'BarClass').
static bool IsClassOfTest(CompareOperation* expr) {
if (expr->op() != Token::EQ_STRICT) return false;
@@ -5136,10 +5516,13 @@
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
switch (expr->op()) {
- case Token::COMMA: return VisitComma(expr);
- case Token::OR: return VisitAndOr(expr, false);
- case Token::AND: return VisitAndOr(expr, true);
- default: return VisitCommon(expr);
+ case Token::COMMA:
+ return VisitComma(expr);
+ case Token::OR:
+ case Token::AND:
+ return VisitLogicalExpression(expr);
+ default:
+ return VisitArithmeticExpression(expr);
}
}
@@ -5152,7 +5535,8 @@
}
-void HGraphBuilder::VisitAndOr(BinaryOperation* expr, bool is_logical_and) {
+void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
+ bool is_logical_and = expr->op() == Token::AND;
if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
// Translate left subexpression.
@@ -5182,9 +5566,9 @@
// We need an extra block to maintain edge-split form.
HBasicBlock* empty_block = graph()->CreateBasicBlock();
HBasicBlock* eval_right = graph()->CreateBasicBlock();
- HTest* test = is_logical_and
- ? new(zone()) HTest(Top(), eval_right, empty_block)
- : new(zone()) HTest(Top(), empty_block, eval_right);
+ HBranch* test = is_logical_and
+ ? new(zone()) HBranch(Top(), eval_right, empty_block)
+ : new(zone()) HBranch(Top(), empty_block, eval_right);
current_block()->Finish(test);
set_current_block(eval_right);
@@ -5194,7 +5578,7 @@
HBasicBlock* join_block =
CreateJoin(empty_block, current_block(), expr->id());
set_current_block(join_block);
- ast_context()->ReturnValue(Pop());
+ return ast_context()->ReturnValue(Pop());
} else {
ASSERT(ast_context()->IsEffect());
@@ -5239,14 +5623,14 @@
}
-void HGraphBuilder::VisitCommon(BinaryOperation* expr) {
+void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
HValue* right = Pop();
HValue* left = Pop();
HInstruction* instr = BuildBinaryOperation(expr, left, right);
instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -5279,46 +5663,75 @@
}
+void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* compare_expr,
+ Expression* expr,
+ Handle<String> check) {
+ CHECK_ALIVE(VisitForTypeOf(expr));
+ HValue* expr_value = Pop();
+ HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(expr_value, check);
+ instr->set_position(compare_expr->position());
+ return ast_context()->ReturnControl(instr, compare_expr->id());
+}
+
+
+void HGraphBuilder::HandleLiteralCompareUndefined(
+ CompareOperation* compare_expr, Expression* expr) {
+ CHECK_ALIVE(VisitForValue(expr));
+ HValue* lhs = Pop();
+ HValue* rhs = graph()->GetConstantUndefined();
+ HCompareObjectEqAndBranch* instr =
+ new(zone()) HCompareObjectEqAndBranch(lhs, rhs);
+ instr->set_position(compare_expr->position());
+ return ast_context()->ReturnControl(instr, compare_expr->id());
+}
+
+
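The two new fast paths fire when a comparison syntactically matches `typeof <expr> == "<string>"` or `<expr> === undefined`, handing them to the dedicated branch builders above. A toy pattern test over an assumed flattened AST node; the real predicates live on CompareOperation:

    #include <cstdio>
    #include <string>

    // Toy comparison node: op, plus flags describing the operand shapes.
    struct Compare {
      std::string op;            // "==", "===", ...
      bool left_is_typeof;       // left subtree is `typeof <expr>`
      bool right_is_string_lit;  // right subtree is a string literal
      bool right_is_undefined;   // right subtree is the undefined literal
    };

    // Mirrors IsLiteralCompareTypeof: equality against a string literal
    // where the other side is a typeof expression.
    bool IsLiteralCompareTypeof(const Compare& c) {
      return (c.op == "==" || c.op == "===") &&
             c.left_is_typeof && c.right_is_string_lit;
    }

    // Mirrors IsLiteralCompareUndefined: strict equality against undefined.
    bool IsLiteralCompareUndefined(const Compare& c) {
      return c.op == "===" && c.right_is_undefined;
    }

    int main() {
      Compare a = {"==", true, true, false};    // typeof x == "number"
      Compare b = {"===", false, false, true};  // x === undefined
      std::printf("%d %d\n", IsLiteralCompareTypeof(a),
                  IsLiteralCompareUndefined(b));  // 1 1
      return 0;
    }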
void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (IsClassOfTest(expr)) {
CallRuntime* call = expr->left()->AsCallRuntime();
+ ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
Literal* literal = expr->right()->AsLiteral();
Handle<String> rhs = Handle<String>::cast(literal->handle());
- HInstruction* instr = new(zone()) HClassOfTest(value, rhs);
+ HClassOfTestAndBranch* instr =
+ new(zone()) HClassOfTestAndBranch(value, rhs);
instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
+ return ast_context()->ReturnControl(instr, expr->id());
+ }
+
+ // Check for special cases that compare against literals.
+ Expression* sub_expr;
+ Handle<String> check;
+ if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+ HandleLiteralCompareTypeof(expr, sub_expr, check);
return;
}
- // Check for the pattern: typeof <expression> == <string literal>.
- UnaryOperation* left_unary = expr->left()->AsUnaryOperation();
- Literal* right_literal = expr->right()->AsLiteral();
- if ((expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT) &&
- left_unary != NULL && left_unary->op() == Token::TYPEOF &&
- right_literal != NULL && right_literal->handle()->IsString()) {
- CHECK_ALIVE(VisitForTypeOf(left_unary->expression()));
- HValue* left = Pop();
- HInstruction* instr = new(zone()) HTypeofIs(left,
- Handle<String>::cast(right_literal->handle()));
- instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
+ if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+ HandleLiteralCompareUndefined(expr, sub_expr);
return;
}
+ TypeInfo type_info = oracle()->CompareType(expr);
+ // Check if this expression was ever executed according to type feedback.
+ if (type_info.IsUninitialized()) {
+ AddInstruction(new(zone()) HSoftDeoptimize);
+ current_block()->MarkAsDeoptimizing();
+ type_info = TypeInfo::Unknown();
+ }
+
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
+ HValue* context = environment()->LookupContext();
HValue* right = Pop();
HValue* left = Pop();
Token::Value op = expr->op();
- TypeInfo type_info = oracle()->CompareType(expr);
- HInstruction* instr = NULL;
if (op == Token::INSTANCEOF) {
// Check to see if the rhs of the instanceof is a global function not
// residing in new space. If it is we assume that the function will stay the
@@ -5348,40 +5761,61 @@
// If the target is not null we have found a known global function that is
// assumed to stay the same for this instanceof.
if (target.is_null()) {
- HValue* context = environment()->LookupContext();
- instr = new(zone()) HInstanceOf(context, left, right);
+ HInstanceOf* result = new(zone()) HInstanceOf(context, left, right);
+ result->set_position(expr->position());
+ return ast_context()->ReturnInstruction(result, expr->id());
} else {
AddInstruction(new(zone()) HCheckFunction(right, target));
- instr = new(zone()) HInstanceOfKnownGlobal(left, target);
+ HInstanceOfKnownGlobal* result =
+ new(zone()) HInstanceOfKnownGlobal(context, left, target);
+ result->set_position(expr->position());
+ return ast_context()->ReturnInstruction(result, expr->id());
}
} else if (op == Token::IN) {
- instr = new(zone()) HIn(left, right);
+ HIn* result = new(zone()) HIn(context, left, right);
+ result->set_position(expr->position());
+ return ast_context()->ReturnInstruction(result, expr->id());
} else if (type_info.IsNonPrimitive()) {
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: {
AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(left));
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(right));
- instr = new(zone()) HCompareJSObjectEq(left, right);
- break;
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
+ HCompareObjectEqAndBranch* result =
+ new(zone()) HCompareObjectEqAndBranch(left, right);
+ result->set_position(expr->position());
+ return ast_context()->ReturnControl(result, expr->id());
}
default:
return Bailout("Unsupported non-primitive compare");
- break;
}
} else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) &&
(op == Token::EQ || op == Token::EQ_STRICT)) {
- instr = BuildSymbolCompare(left, right, op);
+ AddInstruction(new(zone()) HCheckNonSmi(left));
+ AddInstruction(HCheckInstanceType::NewIsSymbol(left));
+ AddInstruction(new(zone()) HCheckNonSmi(right));
+ AddInstruction(HCheckInstanceType::NewIsSymbol(right));
+ HCompareObjectEqAndBranch* result =
+ new(zone()) HCompareObjectEqAndBranch(left, right);
+ result->set_position(expr->position());
+ return ast_context()->ReturnControl(result, expr->id());
} else {
- HCompare* compare = new(zone()) HCompare(left, right, op);
Representation r = ToRepresentation(type_info);
- compare->SetInputRepresentation(r);
- instr = compare;
+ if (r.IsTagged()) {
+ HCompareGeneric* result =
+ new(zone()) HCompareGeneric(context, left, right, op);
+ result->set_position(expr->position());
+ return ast_context()->ReturnInstruction(result, expr->id());
+ } else {
+ HCompareIDAndBranch* result =
+ new(zone()) HCompareIDAndBranch(left, right, op);
+ result->set_position(expr->position());
+ result->SetInputRepresentation(r);
+ return ast_context()->ReturnControl(result, expr->id());
+ }
}
- instr->set_position(expr->position());
- ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -5390,10 +5824,10 @@
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
CHECK_ALIVE(VisitForValue(expr->expression()));
-
HValue* value = Pop();
- HIsNull* compare = new(zone()) HIsNull(value, expr->is_strict());
- ast_context()->ReturnInstruction(compare, expr->id());
+ HIsNullAndBranch* instr =
+ new(zone()) HIsNullAndBranch(value, expr->is_strict());
+ return ast_context()->ReturnControl(instr, expr->id());
}
@@ -5401,23 +5835,22 @@
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
- return Bailout("ThisFunction");
+ HThisFunction* self = new(zone()) HThisFunction;
+ return ast_context()->ReturnInstruction(self, expr->id());
}
void HGraphBuilder::VisitDeclaration(Declaration* decl) {
- // We allow only declarations that do not require code generation.
- // The following all require code generation: global variables and
- // functions, variables with slot type LOOKUP, declarations with
- // mode CONST, and functions.
+ // We support only declarations that do not require code generation.
Variable* var = decl->proxy()->var();
- Slot* slot = var->AsSlot();
- if (var->is_global() ||
- (slot != NULL && slot->type() == Slot::LOOKUP) ||
- decl->mode() == Variable::CONST ||
- decl->fun() != NULL) {
+ if (!var->IsStackAllocated() || decl->fun() != NULL) {
return Bailout("unsupported declaration");
}
+
+ if (decl->mode() == Variable::CONST) {
+ ASSERT(var->IsStackAllocated());
+ environment()->Bind(var, graph()->GetConstantHole());
+ }
}
@@ -5427,8 +5860,8 @@
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsSmi* result = new(zone()) HIsSmi(value);
- ast_context()->ReturnInstruction(result, call->id());
+ HIsSmiAndBranch* result = new(zone()) HIsSmiAndBranch(value);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5436,9 +5869,11 @@
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasInstanceType* result =
- new(zone()) HHasInstanceType(value, FIRST_JS_OBJECT_TYPE, LAST_TYPE);
- ast_context()->ReturnInstruction(result, call->id());
+ HHasInstanceTypeAndBranch* result =
+ new(zone()) HHasInstanceTypeAndBranch(value,
+ FIRST_SPEC_OBJECT_TYPE,
+ LAST_SPEC_OBJECT_TYPE);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5446,9 +5881,9 @@
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasInstanceType* result =
- new(zone()) HHasInstanceType(value, JS_FUNCTION_TYPE);
- ast_context()->ReturnInstruction(result, call->id());
+ HHasInstanceTypeAndBranch* result =
+ new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5456,8 +5891,9 @@
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasCachedArrayIndex* result = new(zone()) HHasCachedArrayIndex(value);
- ast_context()->ReturnInstruction(result, call->id());
+ HHasCachedArrayIndexAndBranch* result =
+ new(zone()) HHasCachedArrayIndexAndBranch(value);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5465,8 +5901,9 @@
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasInstanceType* result = new(zone()) HHasInstanceType(value, JS_ARRAY_TYPE);
- ast_context()->ReturnInstruction(result, call->id());
+ HHasInstanceTypeAndBranch* result =
+ new(zone()) HHasInstanceTypeAndBranch(value, JS_ARRAY_TYPE);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5474,9 +5911,9 @@
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasInstanceType* result =
- new(zone()) HHasInstanceType(value, JS_REGEXP_TYPE);
- ast_context()->ReturnInstruction(result, call->id());
+ HHasInstanceTypeAndBranch* result =
+ new(zone()) HHasInstanceTypeAndBranch(value, JS_REGEXP_TYPE);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5484,8 +5921,8 @@
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsObject* test = new(zone()) HIsObject(value);
- ast_context()->ReturnInstruction(test, call->id());
+ HIsObjectAndBranch* result = new(zone()) HIsObjectAndBranch(value);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5498,8 +5935,9 @@
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- ast_context()->ReturnInstruction(new(zone()) HIsUndetectable(value),
- call->id());
+ HIsUndetectableAndBranch* result =
+ new(zone()) HIsUndetectableAndBranch(value);
+ return ast_context()->ReturnControl(result, call->id());
}
@@ -5517,9 +5955,10 @@
// We are generating the graph for an inlined function. Currently
// constructor inlining is not supported and we can just return
// false from %_IsConstructCall().
- ast_context()->ReturnValue(graph()->GetConstantFalse());
+ return ast_context()->ReturnValue(graph()->GetConstantFalse());
} else {
- ast_context()->ReturnInstruction(new(zone()) HIsConstructCall, call->id());
+ return ast_context()->ReturnControl(new(zone()) HIsConstructCallAndBranch,
+ call->id());
}
}
@@ -5533,7 +5972,7 @@
ASSERT(call->arguments()->length() == 0);
HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
HArgumentsLength* result = new(zone()) HArgumentsLength(elements);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5549,7 +5988,7 @@
HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
HAccessArgumentsAt* result =
new(zone()) HAccessArgumentsAt(elements, length, index);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5566,7 +6005,7 @@
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HValueOf* result = new(zone()) HValueOf(value);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5582,8 +6021,9 @@
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* index = Pop();
HValue* string = Pop();
- HStringCharCodeAt* result = BuildStringCharCodeAt(string, index);
- ast_context()->ReturnInstruction(result, call->id());
+ HValue* context = environment()->LookupContext();
+ HStringCharCodeAt* result = BuildStringCharCodeAt(context, string, index);
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5592,8 +6032,10 @@
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* char_code = Pop();
- HStringCharFromCode* result = new(zone()) HStringCharFromCode(char_code);
- ast_context()->ReturnInstruction(result, call->id());
+ HValue* context = environment()->LookupContext();
+ HStringCharFromCode* result =
+ new(zone()) HStringCharFromCode(context, char_code);
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5604,10 +6046,12 @@
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* index = Pop();
HValue* string = Pop();
- HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
+ HValue* context = environment()->LookupContext();
+ HStringCharCodeAt* char_code = BuildStringCharCodeAt(context, string, index);
AddInstruction(char_code);
- HStringCharFromCode* result = new(zone()) HStringCharFromCode(char_code);
- ast_context()->ReturnInstruction(result, call->id());
+ HStringCharFromCode* result =
+ new(zone()) HStringCharFromCode(context, char_code);
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5618,14 +6062,15 @@
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
- HCompareJSObjectEq* result = new(zone()) HCompareJSObjectEq(left, right);
- ast_context()->ReturnInstruction(result, call->id());
+ HCompareObjectEqAndBranch* result =
+ new(zone()) HCompareObjectEqAndBranch(left, right);
+ return ast_context()->ReturnControl(result, call->id());
}
void HGraphBuilder::GenerateLog(CallRuntime* call) {
// %_Log is ignored in optimized code.
- ast_context()->ReturnValue(graph()->GetConstantUndefined());
+ return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
@@ -5642,7 +6087,7 @@
HValue* context = environment()->LookupContext();
HCallStub* result = new(zone()) HCallStub(context, CodeStub::StringAdd, 2);
Drop(2);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5653,7 +6098,7 @@
HValue* context = environment()->LookupContext();
HCallStub* result = new(zone()) HCallStub(context, CodeStub::SubString, 3);
Drop(3);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5665,7 +6110,7 @@
HCallStub* result =
new(zone()) HCallStub(context, CodeStub::StringCompare, 2);
Drop(2);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5676,7 +6121,7 @@
HValue* context = environment()->LookupContext();
HCallStub* result = new(zone()) HCallStub(context, CodeStub::RegExpExec, 4);
Drop(4);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5688,7 +6133,7 @@
HCallStub* result =
new(zone()) HCallStub(context, CodeStub::RegExpConstructResult, 3);
Drop(3);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5706,7 +6151,7 @@
HCallStub* result =
new(zone()) HCallStub(context, CodeStub::NumberToString, 1);
Drop(1);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5733,7 +6178,7 @@
HInvokeFunction* result =
new(zone()) HInvokeFunction(context, function, arg_count);
Drop(arg_count);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5745,7 +6190,7 @@
HValue* right = Pop();
HValue* left = Pop();
HPower* result = new(zone()) HPower(left, right);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5757,7 +6202,7 @@
new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::SIN);
Drop(1);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5769,7 +6214,7 @@
new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::COS);
Drop(1);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5781,7 +6226,7 @@
new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
result->set_transcendental_type(TranscendentalCache::LOG);
Drop(1);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5801,7 +6246,7 @@
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HGetCachedArrayIndex* result = new(zone()) HGetCachedArrayIndex(value);
- ast_context()->ReturnInstruction(result, call->id());
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -5810,6 +6255,11 @@
}
+void HGraphBuilder::GenerateIsNativeOrStrictMode(CallRuntime* call) {
+ return Bailout("inlined runtime function: IsNativeOrStrictMode");
+}
+
+
#undef CHECK_BAILOUT
#undef CHECK_ALIVE
@@ -5972,7 +6422,6 @@
HEnvironment* HEnvironment::CopyForInlining(
Handle<JSFunction> target,
FunctionLiteral* function,
- CompilationPhase compilation_phase,
HConstant* undefined,
CallKind call_kind) const {
// Outer environment is a copy of this one without the arguments.
@@ -5984,22 +6433,15 @@
HEnvironment* inner =
new(zone) HEnvironment(outer, function->scope(), target);
// Get the argument values from the original environment.
- if (compilation_phase == HYDROGEN) {
- for (int i = 0; i <= arity; ++i) { // Include receiver.
- HValue* push = ExpressionStackAt(arity - i);
- inner->SetValueAt(i, push);
- }
- } else {
- ASSERT(compilation_phase == LITHIUM);
- for (int i = 0; i <= arity; ++i) { // Include receiver.
- HValue* push = ExpressionStackAt(arity - i);
- inner->SetValueAt(i, push);
- }
+ for (int i = 0; i <= arity; ++i) { // Include receiver.
+ HValue* push = ExpressionStackAt(arity - i);
+ inner->SetValueAt(i, push);
}
- // If the function we are inlining is a strict mode function, pass
- // undefined as the receiver for function calls (instead of the
- // global receiver).
- if (function->strict_mode() && call_kind == CALL_AS_FUNCTION) {
+ // If the function we are inlining is a strict mode function or a
+ // builtin function, pass undefined as the receiver for function
+ // calls (instead of the global receiver).
+ if ((target->shared()->native() || function->strict_mode()) &&
+ call_kind == CALL_AS_FUNCTION) {
inner->SetValueAt(0, undefined);
}
inner->SetValueAt(arity + 1, outer->LookupContext());
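The receiver fixup is a one-line predicate: strict-mode and native (builtin) callees invoked as plain functions get undefined instead of the global receiver. Sketched standalone with assumed flags:

    #include <cstdio>
    #include <string>

    enum CallKind { CALL_AS_FUNCTION, CALL_AS_METHOD };

    // Mirrors the inlining-time receiver selection above.
    std::string ReceiverForInlinedCall(bool is_native, bool is_strict,
                                       CallKind kind) {
      if ((is_native || is_strict) && kind == CALL_AS_FUNCTION) {
        return "undefined";
      }
      return "global receiver";
    }

    int main() {
      std::printf("%s\n",
                  ReceiverForInlinedCall(true, false,
                                         CALL_AS_FUNCTION).c_str());
      return 0;
    }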
@@ -6081,15 +6523,15 @@
PrintEmptyProperty("predecessors");
}
- if (current->end() == NULL || current->end()->FirstSuccessor() == NULL) {
+ if (current->end()->SuccessorCount() == 0) {
PrintEmptyProperty("successors");
- } else if (current->end()->SecondSuccessor() == NULL) {
- PrintBlockProperty("successors",
- current->end()->FirstSuccessor()->block_id());
- } else {
- PrintBlockProperty("successors",
- current->end()->FirstSuccessor()->block_id(),
- current->end()->SecondSuccessor()->block_id());
+ } else {
+ PrintIndent();
+ trace_.Add("successors");
+ for (HSuccessorIterator it(current->end()); !it.Done(); it.Advance()) {
+ trace_.Add(" \"B%d\"", it.Current()->block_id());
+ }
+ trace_.Add("\n");
}
PrintEmptyProperty("xhandlers");
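The tracer change generalizes the output from at most two successors to any number. The printing loop reduces to the following, with block ids standing in for HBasicBlock pointers:

    #include <cstdio>
    #include <vector>

    // The old tracer printed one or two hard-coded successors; the new one
    // iterates, so any successor count (e.g. multi-way dispatch) works.
    void PrintSuccessors(const std::vector<int>& successor_block_ids) {
      if (successor_block_ids.empty()) {
        std::printf("successors\n");  // empty property
        return;
      }
      std::printf("successors");
      for (size_t i = 0; i < successor_block_ids.size(); ++i) {
        std::printf(" \"B%d\"", successor_block_ids[i]);
      }
      std::printf("\n");
    }

    int main() {
      PrintSuccessors(std::vector<int>{2, 5, 7});
      return 0;
    }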
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 4d8a153..ffa8aa7 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -116,14 +116,14 @@
bool HasParentLoopHeader() const { return parent_loop_header_ != NULL; }
- void SetJoinId(int id);
+ void SetJoinId(int ast_id);
void Finish(HControlInstruction* last);
void FinishExit(HControlInstruction* instruction);
- void Goto(HBasicBlock* block, bool include_stack_check = false);
+ void Goto(HBasicBlock* block);
int PredecessorIndexOf(HBasicBlock* predecessor) const;
- void AddSimulate(int id) { AddInstruction(CreateSimulate(id)); }
+ void AddSimulate(int ast_id) { AddInstruction(CreateSimulate(ast_id)); }
void AssignCommonDominator(HBasicBlock* other);
void FinishExitWithDeoptimization(HDeoptimize::UseEnvironment has_uses) {
@@ -143,6 +143,9 @@
bool IsInlineReturnTarget() const { return is_inline_return_target_; }
void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
+ bool IsDeoptimizing() const { return is_deoptimizing_; }
+ void MarkAsDeoptimizing() { is_deoptimizing_ = true; }
+
inline Zone* zone();
#ifdef DEBUG
@@ -153,7 +156,7 @@
void RegisterPredecessor(HBasicBlock* pred);
void AddDominatedBlock(HBasicBlock* block);
- HSimulate* CreateSimulate(int id);
+ HSimulate* CreateSimulate(int ast_id);
HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
int block_id_;
@@ -175,13 +178,17 @@
ZoneList<int> deleted_phis_;
HBasicBlock* parent_loop_header_;
bool is_inline_return_target_;
+ bool is_deoptimizing_;
};
class HLoopInformation: public ZoneObject {
public:
explicit HLoopInformation(HBasicBlock* loop_header)
- : back_edges_(4), loop_header_(loop_header), blocks_(8) {
+ : back_edges_(4),
+ loop_header_(loop_header),
+ blocks_(8),
+ stack_check_(NULL) {
blocks_.Add(loop_header);
}
virtual ~HLoopInformation() {}
@@ -192,12 +199,18 @@
HBasicBlock* GetLastBackEdge() const;
void RegisterBackEdge(HBasicBlock* block);
+ HStackCheck* stack_check() const { return stack_check_; }
+ void set_stack_check(HStackCheck* stack_check) {
+ stack_check_ = stack_check;
+ }
+
private:
void AddBlock(HBasicBlock* block);
ZoneList<HBasicBlock*> back_edges_;
HBasicBlock* loop_header_;
ZoneList<HBasicBlock*> blocks_;
+ HStackCheck* stack_check_;
};
@@ -225,14 +238,17 @@
void OrderBlocks();
void AssignDominators();
void ReplaceCheckedValues();
+ void MarkAsDeoptimizingRecursively(HBasicBlock* block);
// Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler.
- bool CheckPhis();
+ bool CheckArgumentsPhiUses();
- // Returns false if there are phi-uses of hole values comming
- // from uninitialized consts.
- bool CollectPhis();
+ // Returns false if there are phi-uses of an uninitialized const
+ // which are not supported by the optimizing compiler.
+ bool CheckConstPhiUses();
+
+ void CollectPhis();
Handle<Code> Compile(CompilationInfo* info);
@@ -244,6 +260,7 @@
HConstant* GetConstantMinus1();
HConstant* GetConstantTrue();
HConstant* GetConstantFalse();
+ HConstant* GetConstantHole();
HBasicBlock* CreateBasicBlock();
HArgumentsObject* GetArgumentsObject() const {
@@ -306,6 +323,7 @@
SetOncePointer<HConstant> constant_minus1_;
SetOncePointer<HConstant> constant_true_;
SetOncePointer<HConstant> constant_false_;
+ SetOncePointer<HConstant> constant_hole_;
SetOncePointer<HArgumentsObject> arguments_object_;
DISALLOW_COPY_AND_ASSIGN(HGraph);
@@ -317,8 +335,6 @@
class HEnvironment: public ZoneObject {
public:
- enum CompilationPhase { HYDROGEN, LITHIUM };
-
HEnvironment(HEnvironment* outer,
Scope* scope,
Handle<JSFunction> closure);
@@ -405,12 +421,9 @@
// Create an "inlined version" of this environment, where the original
// environment is the outer environment but the top expression stack
- // elements are moved to an inner environment as parameters. If
- // is_speculative, the argument values are expected to be PushArgument
- // instructions, otherwise they are the actual values.
+ // elements are moved to an inner environment as parameters.
HEnvironment* CopyForInlining(Handle<JSFunction> target,
FunctionLiteral* function,
- CompilationPhase compilation_phase,
HConstant* undefined,
CallKind call_kind) const;
@@ -491,6 +504,12 @@
// the instruction as value.
virtual void ReturnInstruction(HInstruction* instr, int ast_id) = 0;
+ // Finishes the current basic block and materializes a boolean for a
+ // value context, nothing for an effect context, and a branch for a
+ // test context. Call this function in tail position in the Visit
+ // functions for expressions.
+ virtual void ReturnControl(HControlInstruction* instr, int ast_id) = 0;
+
void set_for_typeof(bool for_typeof) { for_typeof_ = for_typeof; }
bool is_for_typeof() { return for_typeof_; }
@@ -525,6 +544,7 @@
virtual void ReturnValue(HValue* value);
virtual void ReturnInstruction(HInstruction* instr, int ast_id);
+ virtual void ReturnControl(HControlInstruction* instr, int ast_id);
};
@@ -537,6 +557,7 @@
virtual void ReturnValue(HValue* value);
virtual void ReturnInstruction(HInstruction* instr, int ast_id);
+ virtual void ReturnControl(HControlInstruction* instr, int ast_id);
bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
@@ -548,21 +569,25 @@
class TestContext: public AstContext {
public:
TestContext(HGraphBuilder* owner,
+ Expression* condition,
HBasicBlock* if_true,
HBasicBlock* if_false)
: AstContext(owner, Expression::kTest),
+ condition_(condition),
if_true_(if_true),
if_false_(if_false) {
}
virtual void ReturnValue(HValue* value);
virtual void ReturnInstruction(HInstruction* instr, int ast_id);
+ virtual void ReturnControl(HControlInstruction* instr, int ast_id);
static TestContext* cast(AstContext* context) {
ASSERT(context->IsTest());
return reinterpret_cast<TestContext*>(context);
}
+ Expression* condition() const { return condition_; }
HBasicBlock* if_true() const { return if_true_; }
HBasicBlock* if_false() const { return if_false_; }
@@ -571,6 +596,7 @@
// control flow.
void BuildBranch(HValue* value);
+ Expression* condition_;
HBasicBlock* if_true_;
HBasicBlock* if_false_;
};
@@ -687,7 +713,7 @@
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
- void AddSimulate(int id);
+ void AddSimulate(int ast_id);
// Bailout environment manipulation.
void Push(HValue* value) { environment()->Push(value); }
@@ -695,6 +721,10 @@
void Bailout(const char* reason);
+ HBasicBlock* CreateJoin(HBasicBlock* first,
+ HBasicBlock* second,
+ int join_id);
+
private:
// Type of a member function that generates inline code for a native function.
typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
@@ -758,16 +788,15 @@
void VisitNot(UnaryOperation* expr);
void VisitComma(BinaryOperation* expr);
- void VisitAndOr(BinaryOperation* expr, bool is_logical_and);
- void VisitCommon(BinaryOperation* expr);
+ void VisitLogicalExpression(BinaryOperation* expr);
+ void VisitArithmeticExpression(BinaryOperation* expr);
void PreProcessOsrEntry(IterationStatement* statement);
// True iff we are compiling for OSR and the statement is the entry.
bool HasOsrEntryAt(IterationStatement* statement);
-
- HBasicBlock* CreateJoin(HBasicBlock* first,
- HBasicBlock* second,
- int join_id);
+ void VisitLoopBody(IterationStatement* stmt,
+ HBasicBlock* loop_entry,
+ BreakAndContinueInfo* break_info);
// Create a back edge in the flow graph. body_exit is the predecessor
// block and loop_entry is the successor block. loop_successor is the
@@ -802,8 +831,9 @@
HBasicBlock* false_block);
// Visit an argument subexpression and emit a push to the outgoing
- // arguments.
- void VisitArgument(Expression* expr);
+ // arguments. Returns the hydrogen value that was pushed.
+ HValue* VisitArgument(Expression* expr);
+
void VisitArgumentList(ZoneList<Expression*>* arguments);
// Visit a list of expressions from left to right, each in a value context.
@@ -877,19 +907,18 @@
HValue* receiver,
ZoneMapList* types,
Handle<String> name);
+ void HandleLiteralCompareTypeof(CompareOperation* compare_expr,
+ Expression* expr,
+ Handle<String> check);
+ void HandleLiteralCompareUndefined(CompareOperation* compare_expr,
+ Expression* expr);
- HCompareSymbolEq* BuildSymbolCompare(HValue* left,
- HValue* right,
- Token::Value op);
- HStringCharCodeAt* BuildStringCharCodeAt(HValue* string,
+ HStringCharCodeAt* BuildStringCharCodeAt(HValue* context,
+ HValue* string,
HValue* index);
HInstruction* BuildBinaryOperation(BinaryOperation* expr,
HValue* left,
HValue* right);
- HInstruction* BuildBinaryOperation(Token::Value op,
- HValue* left,
- HValue* right,
- TypeInfo info);
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
HLoadNamedField* BuildLoadNamedField(HValue* object,
@@ -898,18 +927,37 @@
LookupResult* result,
bool smi_and_map_check);
HInstruction* BuildLoadNamedGeneric(HValue* object, Property* expr);
- HInstruction* BuildLoadKeyedFastElement(HValue* object,
- HValue* key,
- Property* expr);
- HInstruction* BuildLoadKeyedSpecializedArrayElement(HValue* object,
- HValue* key,
- Property* expr);
HInstruction* BuildLoadKeyedGeneric(HValue* object,
HValue* key);
+ HInstruction* BuildExternalArrayElementAccess(
+ HValue* external_elements,
+ HValue* checked_key,
+ HValue* val,
+ JSObject::ElementsKind elements_kind,
+ bool is_store);
- HInstruction* BuildLoadKeyed(HValue* obj,
- HValue* key,
- Property* prop);
+ HInstruction* BuildMonomorphicElementAccess(HValue* object,
+ HValue* key,
+ HValue* val,
+ Expression* expr,
+ bool is_store);
+ HValue* HandlePolymorphicElementAccess(HValue* object,
+ HValue* key,
+ HValue* val,
+ Expression* prop,
+ int ast_id,
+ int position,
+ bool is_store,
+ bool* has_side_effects);
+
+ HValue* HandleKeyedElementAccess(HValue* obj,
+ HValue* key,
+ HValue* val,
+ Expression* expr,
+ int ast_id,
+ int position,
+ bool is_store,
+ bool* has_side_effects);
HInstruction* BuildLoadNamed(HValue* object,
Property* prop,
@@ -931,22 +979,6 @@
HValue* key,
HValue* value);
- HInstruction* BuildStoreKeyedFastElement(HValue* object,
- HValue* key,
- HValue* val,
- Expression* expr);
-
- HInstruction* BuildStoreKeyedSpecializedArrayElement(
- HValue* object,
- HValue* key,
- HValue* val,
- Expression* expr);
-
- HInstruction* BuildStoreKeyed(HValue* object,
- HValue* key,
- HValue* value,
- Expression* assignment);
-
HValue* BuildContextChainWalk(Variable* var);
void AddCheckConstantFunction(Call* expr,
@@ -1060,7 +1092,6 @@
}
private:
-
HStatistics()
: timing_(5),
names_(5),
@@ -1184,11 +1215,6 @@
trace_.Add("%s \"B%d\"\n", name, block_id);
}
- void PrintBlockProperty(const char* name, int block_id1, int block_id2) {
- PrintIndent();
- trace_.Add("%s \"B%d\" \"B%d\"\n", name, block_id1, block_id2);
- }
-
void PrintIntProperty(const char* name, int value) {
PrintIndent();
trace_.Add("%s %d\n", name, value);
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index a7602e7..0dc5194 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -449,6 +449,13 @@
}
+void Assembler::push(Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x68);
+ emit(handle);
+}
+
+
void Assembler::pop(Register dst) {
ASSERT(reloc_info_writer.last_pc() != NULL);
EnsureSpace ensure_space(this);
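The new push(Handle<Object>) above emits 0x68, the x86 PUSH imm32 opcode, followed by the handle as a 32-bit immediate (the real emit(handle) also records relocation info so the GC can update the embedded pointer). A hedged sketch of just the byte-level encoding:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

void EmitPushImm32(std::vector<uint8_t>* code, uint32_t imm) {
  code->push_back(0x68);        // PUSH imm32 opcode
  uint8_t bytes[4];
  std::memcpy(bytes, &imm, 4);  // little-endian immediate, as on ia32
  code->insert(code->end(), bytes, bytes + 4);
}

int main() {
  std::vector<uint8_t> code;
  EmitPushImm32(&code, 0xDEADBEEF);
  for (size_t i = 0; i < code.size(); ++i) std::printf("%02x ", code[i]);
  std::printf("\n");  // 68 ef be ad de
  return 0;
}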
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index e933102..da38e13 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -659,6 +659,7 @@
void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
+ void push(Handle<Object> handle);
void pop(Register dst);
void pop(const Operand& dst);
@@ -834,7 +835,7 @@
void call(const Operand& adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code,
- RelocInfo::Mode rmode,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId);
// Jumps
@@ -989,7 +990,9 @@
void Print();
// Check the code size generated from label to here.
- int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 1212566..f8a85de 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -82,8 +82,7 @@
Label non_function_call;
// Check that function is not a smi.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &non_function_call);
+ __ JumpIfSmi(edi, &non_function_call);
// Check that function is a JSFunction.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function_call);
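This hunk is the first of many below that replace the open-coded pair (test against kSmiTagMask, then a conditional jump) with the JumpIfSmi/JumpIfNotSmi macro-assembler helpers. The predicate itself is a single low-bit test; a self-contained sketch with the ia32 tag values:

#include <cassert>
#include <cstdint>

const intptr_t kSmiTag = 0;      // smis have a clear low bit
const intptr_t kSmiTagMask = 1;  // one tag bit on ia32

bool IsSmi(intptr_t tagged) {
  return (tagged & kSmiTagMask) == kSmiTag;
}

int main() {
  intptr_t smi = 42 << 1;         // the smi payload lives in the upper bits
  intptr_t heap_object = 0x1001;  // heap pointers carry a set low tag bit
  assert(IsSmi(smi));
  assert(!IsSmi(heap_object));
  return 0;
}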
@@ -140,8 +139,7 @@
// edi: constructor
__ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will catch both a NULL and a Smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &rt_call);
+ __ JumpIfSmi(eax, &rt_call);
// edi: constructor
// eax: initial map (if proven valid below)
__ CmpObjectType(eax, MAP_TYPE, ebx);
@@ -357,12 +355,11 @@
Label use_receiver, exit;
// If the result is a smi, it is *not* an object in the ECMA sense.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &use_receiver);
+ __ JumpIfSmi(eax, &use_receiver);
// If the type of the result (stored in its map) is less than
- // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &exit);
// Throw away the result of the constructor invocation and use the
@@ -596,8 +593,7 @@
Label non_function;
// 1 ~ return address.
__ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &non_function);
+ __ JumpIfSmi(edi, &non_function);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function);
@@ -615,8 +611,8 @@
__ j(not_equal, &shift_arguments);
// Do not transform the receiver for natives (shared already in ebx).
- __ test_b(FieldOperand(ebx, SharedFunctionInfo::kES5NativeByteOffset),
- 1 << SharedFunctionInfo::kES5NativeBitWithinByte);
+ __ test_b(FieldOperand(ebx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
__ j(not_equal, &shift_arguments);
// Compute the receiver in non-strict mode.
@@ -624,15 +620,13 @@
// Call ToObject on the receiver if it is not an object, or use the
// global object if it is null or undefined.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &convert_to_object);
+ __ JumpIfSmi(ebx, &convert_to_object);
__ cmp(ebx, factory->null_value());
__ j(equal, &use_global_receiver);
__ cmp(ebx, factory->undefined_value());
__ j(equal, &use_global_receiver);
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CmpObjectType(ebx, FIRST_JS_OBJECT_TYPE, ecx);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &shift_arguments);
__ bind(&convert_to_object);
@@ -780,22 +774,20 @@
Factory* factory = masm->isolate()->factory();
// Do not transform the receiver for natives (shared already in ecx).
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kES5NativeByteOffset),
- 1 << SharedFunctionInfo::kES5NativeBitWithinByte);
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
__ j(not_equal, &push_receiver);
// Compute the receiver in non-strict mode.
// Call ToObject on the receiver if it is not an object, or use the
// global object if it is null or undefined.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &call_to_object);
+ __ JumpIfSmi(ebx, &call_to_object);
__ cmp(ebx, factory->null_value());
__ j(equal, &use_global_receiver);
__ cmp(ebx, factory->undefined_value());
__ j(equal, &use_global_receiver);
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CmpObjectType(ebx, FIRST_JS_OBJECT_TYPE, ecx);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &push_receiver);
__ bind(&call_to_object);
@@ -1392,8 +1384,7 @@
Label convert_argument;
__ bind(¬_cached);
STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &convert_argument);
+ __ JumpIfSmi(eax, &convert_argument);
Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
__ j(NegateCondition(is_string), &convert_argument);
__ mov(ebx, eax);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 8bf2dd4..71aacf9 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -43,8 +43,7 @@
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in eax.
Label check_heap_number, call_builtin;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &check_heap_number, Label::kNear);
+ __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
__ ret(0);
__ bind(&check_heap_number);
@@ -129,22 +128,19 @@
// Setup the object header.
Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), factory->context_map());
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+ factory->function_context_map());
__ mov(FieldOperand(eax, Context::kLengthOffset),
Immediate(Smi::FromInt(length)));
// Setup the fixed slots.
__ Set(ebx, Immediate(0)); // Set to NULL.
__ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
- __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
- __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
+ __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
__ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
- // Copy the global object from the surrounding context. We go through the
- // context in the function (ecx) to match the allocation behavior we have
- // in the runtime system (see Heap::AllocateFunctionContext).
- __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset));
- __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Copy the global object from the previous context.
+ __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
// Initialize the rest of the slots to undefined.
@@ -159,7 +155,7 @@
// Need to collect. Call into runtime system.
__ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+ __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}
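The context stub above switches to function_context_map, links PREVIOUS directly to the allocating context in esi (the FCONTEXT slot is gone), and copies the global object from that previous context instead of detouring through the closure. A hedged sketch of the slot initialization, with illustrative indices standing in for Context::*_INDEX:

#include <cstdio>
#include <vector>

enum Slot { CLOSURE = 0, PREVIOUS = 1, EXTENSION = 2, GLOBAL = 3, MIN_SLOTS = 4 };

std::vector<void*> NewFunctionContext(int length, void* closure,
                                      std::vector<void*>* previous) {
  std::vector<void*> context(length, nullptr);  // rest starts out undefined
  context[CLOSURE] = closure;
  context[PREVIOUS] = previous;                 // link to the current context
  context[EXTENSION] = nullptr;
  context[GLOBAL] = (*previous)[GLOBAL];        // copy global from previous
  return context;
}

int main() {
  std::vector<void*> outer(MIN_SLOTS, nullptr);
  int global_object = 0;
  outer[GLOBAL] = &global_object;
  std::vector<void*> inner = NewFunctionContext(8, nullptr, &outer);
  std::printf("global shared: %d\n", inner[GLOBAL] == &global_object);
  return 0;
}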
@@ -240,56 +236,55 @@
}
-// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
+// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result, true_result, not_string;
- __ mov(eax, Operand(esp, 1 * kPointerSize));
Factory* factory = masm->isolate()->factory();
+ const Register map = edx;
+
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
// undefined -> false
__ cmp(eax, factory->undefined_value());
__ j(equal, &false_result);
// Boolean -> its value
- __ cmp(eax, factory->true_value());
- __ j(equal, &true_result);
__ cmp(eax, factory->false_value());
__ j(equal, &false_result);
+ __ cmp(eax, factory->true_value());
+ __ j(equal, &true_result);
// Smis: 0 -> false, all others -> true
__ test(eax, Operand(eax));
__ j(zero, &false_result);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &true_result);
+ __ JumpIfSmi(eax, &true_result);
- // 'null' => false.
+ // 'null' -> false.
__ cmp(eax, factory->null_value());
__ j(equal, &false_result, Label::kNear);
- // Get the map and type of the heap object.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ // Get the map of the heap object.
+ __ mov(map, FieldOperand(eax, HeapObject::kMapOffset));
- // Undetectable => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ // Undetectable -> false.
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
__ j(not_zero, &false_result, Label::kNear);
- // JavaScript object => true.
- __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
+ // JavaScript object -> true.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
__ j(above_equal, &true_result, Label::kNear);
- // String value => false iff empty.
- __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
+ // String value -> false iff empty.
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, ¬_string, Label::kNear);
- STATIC_ASSERT(kSmiTag == 0);
__ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
__ j(zero, &false_result, Label::kNear);
__ jmp(&true_result, Label::kNear);
__ bind(¬_string);
- // HeapNumber => false iff +0, -0, or NaN.
- __ cmp(edx, factory->heap_number_map());
+ // HeapNumber -> false iff +0, -0, or NaN.
+ __ cmp(map, factory->heap_number_map());
__ j(not_equal, &true_result, Label::kNear);
__ fldz();
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
@@ -297,19 +292,18 @@
__ j(zero, &false_result, Label::kNear);
// Fall through to |true_result|.
- // Return 1/0 for true/false in eax.
+ // Return 1/0 for true/false in tos_.
__ bind(&true_result);
- __ mov(eax, 1);
+ __ mov(tos_, 1);
__ ret(1 * kPointerSize);
__ bind(&false_result);
- __ mov(eax, 0);
+ __ mov(tos_, 0);
__ ret(1 * kPointerSize);
}
class FloatingPointHelper : public AllStatic {
public:
-
enum ArgLocation {
ARGS_ON_STACK,
ARGS_IN_REGISTERS
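The rewritten ToBooleanStub tail above returns its 1/0 result in a configurable tos_ register instead of hard-coded eax. The ladder it walks follows ECMA ToBoolean; a hedged, self-contained sketch of the same rules (hypothetical value model, not the real tagged representation):

#include <cassert>
#include <string>

struct Value {  // illustration only
  enum Kind { UNDEFINED, NULL_VALUE, BOOLEAN, SMI, STRING, HEAP_NUMBER,
              OBJECT } kind;
  bool boolean;
  int smi;
  std::string string;
  double number;
  bool undetectable;  // corresponds to Map::kIsUndetectable in the stub
};

bool ToBoolean(const Value& v) {
  switch (v.kind) {
    case Value::UNDEFINED:
    case Value::NULL_VALUE:  return false;
    case Value::BOOLEAN:     return v.boolean;
    case Value::SMI:         return v.smi != 0;
    case Value::STRING:      return !v.string.empty();    // false iff empty
    case Value::HEAP_NUMBER: return v.number == v.number  // false for NaN
                                 && v.number != 0;        // and for +0/-0
    case Value::OBJECT:      return !v.undetectable;
  }
  return true;
}

int main() {
  Value s = Value();
  s.kind = Value::STRING;  // empty string -> false
  assert(!ToBoolean(s));
  Value n = Value();
  n.kind = Value::HEAP_NUMBER;
  n.number = -0.0;         // -0 -> false
  assert(!ToBoolean(n));
  return 0;
}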
@@ -517,31 +511,17 @@
}
-Handle<Code> GetUnaryOpStub(int key, UnaryOpIC::TypeInfo type_info) {
- UnaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-const char* UnaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
+void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
switch (mode_) {
case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
}
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
- return name_;
+ stream->Add("UnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ UnaryOpIC::GetName(operand_type_));
}
@@ -566,12 +546,10 @@
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
- __ push(eax);
- // the argument is now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ push(Immediate(Smi::FromInt(MinorKey())));
+
+ __ push(eax); // the operand
__ push(Immediate(Smi::FromInt(op_)));
+ __ push(Immediate(Smi::FromInt(mode_)));
__ push(Immediate(Smi::FromInt(operand_type_)));
__ push(ecx); // Push return address.
@@ -579,8 +557,7 @@
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
- masm->isolate()), 4, 1);
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}
@@ -627,8 +604,7 @@
Label::Distance undo_near,
Label::Distance slow_near) {
// Check whether the value is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, non_smi, non_smi_near);
+ __ JumpIfNotSmi(eax, non_smi, non_smi_near);
// We can't handle -0 with smis, so use a type transition for that case.
__ test(eax, Operand(eax));
@@ -648,8 +624,7 @@
Label* non_smi,
Label::Distance non_smi_near) {
// Check whether the value is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, non_smi, non_smi_near);
+ __ JumpIfNotSmi(eax, non_smi, non_smi_near);
// Flip bits and revert inverted smi-tag.
__ not_(eax);
@@ -853,14 +828,6 @@
}
-Handle<Code> GetBinaryOpStub(int key,
- BinaryOpIC::TypeInfo type_info,
- BinaryOpIC::TypeInfo result_type_info) {
- BinaryOpStub stub(key, type_info, result_type_info);
- return stub.GetCode();
-}
-
-
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
__ push(edx);
@@ -939,12 +906,7 @@
}
-const char* BinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
+void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
@@ -953,13 +915,10 @@
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
- return name_;
+ stream->Add("BinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(operands_type_));
}
@@ -1023,8 +982,7 @@
// 3. Perform the smi check of the operands.
STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ test(combined, Immediate(kSmiTagMask));
- __ j(not_zero, ¬_smis);
+ __ JumpIfNotSmi(combined, &not_smis);
// 4. Operands are both smis, perform the operation leaving the result in
// eax and check the result if necessary.
@@ -1412,14 +1370,12 @@
Register right = eax;
// Test if left operand is a string.
- __ test(left, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime);
+ __ JumpIfSmi(left, &call_runtime);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &call_runtime);
// Test if right operand is a string.
- __ test(right, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime);
+ __ JumpIfSmi(right, &call_runtime);
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &call_runtime);
@@ -1555,8 +1511,7 @@
// allocation of a heap number.
__ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, Label::kNear);
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
// Fall through!
case NO_OVERWRITE:
__ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -1770,8 +1725,7 @@
// allocation of a heap number.
__ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, Label::kNear);
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
// Fall through!
case NO_OVERWRITE:
__ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -1970,8 +1924,7 @@
// allocation of a heap number.
__ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, Label::kNear);
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
// Fall through!
case NO_OVERWRITE:
__ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -2054,8 +2007,7 @@
Register right = eax;
// Test if left operand is a string.
- __ test(left, Immediate(kSmiTagMask));
- __ j(zero, &left_not_string, Label::kNear);
+ __ JumpIfSmi(left, &left_not_string, Label::kNear);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &left_not_string, Label::kNear);
@@ -2065,8 +2017,7 @@
// Left operand is not a string, test right.
__ bind(&left_not_string);
- __ test(right, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime, Label::kNear);
+ __ JumpIfSmi(right, &call_runtime, Label::kNear);
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &call_runtime, Label::kNear);
@@ -2088,8 +2039,7 @@
case OVERWRITE_LEFT: {
// If the argument in edx is already an object, we skip the
// allocation of a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation);
+ __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
// Allocate a heap number for the result. Keep eax and edx intact
// for the possible runtime call.
__ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
@@ -2104,8 +2054,7 @@
case OVERWRITE_RIGHT:
// If the argument in eax is already an object, we skip the
// allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation);
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
// Fall through!
case NO_OVERWRITE:
// Allocate a heap number for the result. Keep eax and edx intact
@@ -2152,8 +2101,7 @@
Label input_not_smi;
Label loaded;
__ mov(eax, Operand(esp, kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &input_not_smi, Label::kNear);
+ __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
// Input is a smi. Untag and load it onto the FPU stack.
// Then load the low and high words of the double into ebx, edx.
STATIC_ASSERT(kSmiTagSize == 1);
@@ -2431,8 +2379,7 @@
Label load_arg2, done;
// Test if arg1 is a Smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &arg1_is_object);
+ __ JumpIfNotSmi(edx, &arg1_is_object);
__ SmiUntag(edx);
__ jmp(&load_arg2);
@@ -2458,8 +2405,7 @@
__ bind(&load_arg2);
// Test if arg2 is a Smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &arg2_is_object);
+ __ JumpIfNotSmi(eax, &arg2_is_object);
__ SmiUntag(eax);
__ mov(ecx, eax);
@@ -2495,8 +2441,7 @@
Register number) {
Label load_smi, done;
- __ test(number, Immediate(kSmiTagMask));
- __ j(zero, &load_smi, Label::kNear);
+ __ JumpIfSmi(number, &load_smi, Label::kNear);
__ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
__ jmp(&done, Label::kNear);
@@ -2513,16 +2458,12 @@
void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
Label load_smi_edx, load_eax, load_smi_eax, done;
// Load operand in edx into xmm0.
- __ test(edx, Immediate(kSmiTagMask));
- // Argument in edx is a smi.
- __ j(zero, &load_smi_edx, Label::kNear);
+ __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ bind(&load_eax);
// Load operand in eax into xmm1.
- __ test(eax, Immediate(kSmiTagMask));
- // Argument in eax is a smi.
- __ j(zero, &load_smi_eax, Label::kNear);
+ __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ jmp(&done, Label::kNear);
@@ -2545,18 +2486,14 @@
Label* not_numbers) {
Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
// Load operand in edx into xmm0, or branch to not_numbers.
- __ test(edx, Immediate(kSmiTagMask));
- // Argument in edx is a smi.
- __ j(zero, &load_smi_edx, Label::kNear);
+ __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
Factory* factory = masm->isolate()->factory();
__ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
__ j(not_equal, not_numbers); // Argument in edx is not a number.
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ bind(&load_eax);
// Load operand in eax into xmm1, or branch to not_numbers.
- __ test(eax, Immediate(kSmiTagMask));
- // Argument in eax is a smi.
- __ j(zero, &load_smi_eax, Label::kNear);
+ __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
__ j(equal, &load_float_eax, Label::kNear);
__ jmp(not_numbers); // Argument in eax is not a number.
@@ -2616,8 +2553,7 @@
} else {
__ mov(scratch, Operand(esp, 2 * kPointerSize));
}
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_1, Label::kNear);
+ __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
__ bind(&done_load_1);
@@ -2626,8 +2562,7 @@
} else {
__ mov(scratch, Operand(esp, 1 * kPointerSize));
}
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_2, Label::kNear);
+ __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
__ jmp(&done, Label::kNear);
@@ -2672,16 +2607,14 @@
Label test_other, done;
// Test if both operands are floats or smi -> scratch=k_is_float;
// Otherwise scratch = k_not_float.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &test_other, Label::kNear); // argument in edx is OK
+ __ JumpIfSmi(edx, &test_other, Label::kNear);
__ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
Factory* factory = masm->isolate()->factory();
__ cmp(scratch, factory->heap_number_map());
__ j(not_equal, non_float); // argument in edx is not a number -> NaN
__ bind(&test_other);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done, Label::kNear); // argument in eax is OK
+ __ JumpIfSmi(eax, &done, Label::kNear);
__ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(scratch, factory->heap_number_map());
__ j(not_equal, non_float); // argument in eax is not a number -> NaN
@@ -2717,10 +2650,8 @@
Label exponent_nonsmi;
Label base_nonsmi;
// If the exponent is a heap number go to that specific case.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &exponent_nonsmi);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &base_nonsmi);
+ __ JumpIfNotSmi(eax, &exponent_nonsmi);
+ __ JumpIfNotSmi(edx, &base_nonsmi);
// Optimized version when both exponent and base are smis.
Label powi;
@@ -2792,8 +2723,7 @@
Label base_not_smi;
Label handle_special_cases;
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &base_not_smi, Label::kNear);
+ __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
__ SmiUntag(edx);
__ cvtsi2sd(xmm0, Operand(edx));
__ jmp(&handle_special_cases, Label::kNear);
@@ -2865,8 +2795,7 @@
// Check that the key is a smi.
Label slow;
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow);
+ __ JumpIfNotSmi(edx, &slow);
// Check if the calling frame is an arguments adaptor frame.
Label adaptor;
@@ -2915,16 +2844,259 @@
}
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
// esp[0] : return address
// esp[4] : number of parameters
// esp[8] : receiver displacement
- // esp[16] : function
+ // esp[12] : function
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &runtime, Label::kNear);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(Operand(esp, 1 * kPointerSize), ecx);
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters (tagged)
+ // esp[8] : receiver displacement
+ // esp[12] : function
+
+ // ebx = parameter count (tagged)
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ // TODO(rossberg): Factor out some of the bits that are shared with the other
+ // Generate* functions.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor_frame, Label::kNear);
+
+ // No adaptor, parameter count = argument count.
+ __ mov(ecx, ebx);
+ __ jmp(&try_allocate, Label::kNear);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ // ebx = parameter count (tagged)
+ // ecx = argument count (tagged)
+ // esp[4] = parameter count (tagged)
+ // esp[8] = address of receiver argument
+ // Compute the mapped parameter count = min(ebx, ecx) in ebx.
+ __ cmp(ebx, Operand(ecx));
+ __ j(less_equal, &try_allocate, Label::kNear);
+ __ mov(ebx, ecx);
+
+ __ bind(&try_allocate);
+
+ // Save mapped parameter count.
+ __ push(ebx);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, which has 2 extra words containing the context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ Label no_parameter_map;
+ __ test(ebx, Operand(ebx));
+ __ j(zero, &no_parameter_map, Label::kNear);
+ __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
+ __ bind(&no_parameter_map);
+
+ // 2. Backing store.
+ __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
+
+ // eax = address of new object(s) (tagged)
+ // ecx = argument count (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[8] = parameter count (tagged)
+ // esp[12] = address of receiver argument
+ // Get the arguments boilerplate from the current (global) context into edi.
+ Label has_mapped_parameters, copy;
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
+ __ mov(ebx, Operand(esp, 0 * kPointerSize));
+ __ test(ebx, Operand(ebx));
+ __ j(not_zero, &has_mapped_parameters, Label::kNear);
+ __ mov(edi, Operand(edi,
+ Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
+ __ jmp(©, Label::kNear);
+
+ __ bind(&has_mapped_parameters);
+ __ mov(edi, Operand(edi,
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
+ __ bind(©);
+
+ // eax = address of new object (tagged)
+ // ebx = mapped parameter count (tagged)
+ // ecx = argument count (tagged)
+ // edi = address of boilerplate object (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[8] = parameter count (tagged)
+ // esp[12] = address of receiver argument
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ mov(edx, FieldOperand(edi, i));
+ __ mov(FieldOperand(eax, i), edx);
+ }
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ mov(edx, Operand(esp, 4 * kPointerSize));
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize),
+ edx);
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ ecx);
+
+ // Setup the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, edi will point there, otherwise to the
+ // backing store.
+ __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+
+ // eax = address of new object (tagged)
+ // ebx = mapped parameter count (tagged)
+ // ecx = argument count (tagged)
+ // edi = address of parameter map or backing store (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[8] = parameter count (tagged)
+ // esp[12] = address of receiver argument
+ // Free a register.
+ __ push(eax);
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ test(ebx, Operand(ebx));
+ __ j(zero, &skip_parameter_map);
+
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(FACTORY->non_strict_arguments_elements_map()));
+ __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
+ __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ push(ecx);
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ add(ebx, Operand(esp, 4 * kPointerSize));
+ __ sub(ebx, Operand(eax));
+ __ mov(ecx, FACTORY->the_hole_value());
+ __ mov(edx, edi);
+ __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
+ // eax = loop variable (tagged)
+ // ebx = mapping index (tagged)
+ // ecx = the hole value
+ // edx = address of parameter map (tagged)
+ // edi = address of backing store (tagged)
+ // esp[0] = argument count (tagged)
+ // esp[4] = address of new object (tagged)
+ // esp[8] = mapped parameter count (tagged)
+ // esp[16] = parameter count (tagged)
+ // esp[20] = address of receiver argument
+ __ jmp(¶meters_test, Label::kNear);
+
+ __ bind(¶meters_loop);
+ __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
+ __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
+ __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+ __ bind(¶meters_test);
+ __ test(eax, Operand(eax));
+ __ j(not_zero, ¶meters_loop, Label::kNear);
+ __ pop(ecx);
+
+ __ bind(&skip_parameter_map);
+
+ // ecx = argument count (tagged)
+ // edi = address of backing store (tagged)
+ // esp[0] = address of new object (tagged)
+ // esp[4] = mapped parameter count (tagged)
+ // esp[12] = parameter count (tagged)
+ // esp[16] = address of receiver argument
+ // Copy arguments header and remaining slots (if there are any).
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(FACTORY->fixed_array_map()));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+
+ Label arguments_loop, arguments_test;
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 4 * kPointerSize));
+ __ sub(Operand(edx), ebx); // Is there a smarter way to do negative scaling?
+ __ sub(Operand(edx), ebx);
+ __ jmp(&arguments_test, Label::kNear);
+
+ __ bind(&arguments_loop);
+ __ sub(Operand(edx), Immediate(kPointerSize));
+ __ mov(eax, Operand(edx, 0));
+ __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
+ __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ cmp(ebx, Operand(ecx));
+ __ j(less, &arguments_loop, Label::kNear);
+
+ // Restore.
+ __ pop(eax); // Address of arguments object.
+ __ pop(ebx); // Parameter count.
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ pop(eax); // Remove saved parameter count.
+ __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters
+ // esp[8] : receiver displacement
+ // esp[12] : function
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
@@ -2941,7 +3113,8 @@
__ bind(&adaptor_frame);
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
__ mov(Operand(esp, 2 * kPointerSize), edx);
// Try the new space allocation. Start out with computing the size of
@@ -2952,7 +3125,7 @@
__ j(zero, &add_arguments_object, Label::kNear);
__ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ add(Operand(ecx), Immediate(GetArgumentsObjectSize()));
+ __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict));
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
@@ -2960,8 +3133,9 @@
// Get the arguments boilerplate from the current (global) context.
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
- __ mov(edi, Operand(edi,
- Context::SlotOffset(GetArgumentsBoilerplateIndex())));
+ const int offset =
+ Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
+ __ mov(edi, Operand(edi, offset));
// Copy the JS object part.
for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
@@ -2969,20 +3143,11 @@
__ mov(FieldOperand(eax, i), ebx);
}
- if (type_ == NEW_NON_STRICT) {
- // Setup the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ mov(ebx, Operand(esp, 3 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize),
- ebx);
- }
-
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ mov(ecx, Operand(esp, 1 * kPointerSize));
__ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
+ Heap::kArgumentsLengthIndex * kPointerSize),
ecx);
// If there are no actual arguments, we're done.
@@ -2995,10 +3160,10 @@
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ lea(edi, Operand(eax, GetArgumentsObjectSize()));
+ __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
+ Immediate(FACTORY->fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
// Untag the length for the loop below.
@@ -3020,7 +3185,7 @@
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
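GenerateNewNonStrictFast above sizes a single new-space allocation for up to three objects: an optional parameter map (a FixedArray header plus two extra words for the context and backing-store links), the backing store holding every argument, and the arguments object itself. A sketch of that arithmetic with illustrative constants (the real values come from FixedArray::kHeaderSize and Heap::kArgumentsObjectSize):

#include <algorithm>
#include <cstdio>

const int kPointerSize = 4;                          // ia32
const int kFixedArrayHeaderSize = 2 * kPointerSize;  // assumed: map + length
const int kArgumentsObjectSize = 5 * kPointerSize;   // assumed boilerplate size

int ArgumentsAllocationSize(int parameter_count, int argument_count) {
  int mapped = std::min(parameter_count, argument_count);
  int size = 0;
  if (mapped > 0) {
    // 1. Parameter map: header plus two extra words (context, backing store).
    size += kFixedArrayHeaderSize + 2 * kPointerSize + mapped * kPointerSize;
  }
  // 2. Backing store for all arguments.
  size += kFixedArrayHeaderSize + argument_count * kPointerSize;
  // 3. The arguments object itself.
  size += kArgumentsObjectSize;
  return size;
}

int main() {
  std::printf("%d bytes\n", ArgumentsAllocationSize(2, 3));  // 64 bytes
  return 0;
}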
@@ -3063,8 +3228,7 @@
// Check that the first argument is a JSRegExp object.
__ mov(eax, Operand(esp, kJSRegExpOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
+ __ JumpIfSmi(eax, &runtime);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
__ j(not_equal, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
@@ -3098,8 +3262,7 @@
// edx: Number of capture registers
// Check that the second argument is a string.
__ mov(eax, Operand(esp, kSubjectOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
+ __ JumpIfSmi(eax, &runtime);
Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
__ j(NegateCondition(is_string), &runtime);
// Get the length of the string to ebx.
@@ -3111,8 +3274,7 @@
// Check that the third argument is a positive smi less than the subject
// string length. A negative value will be greater (unsigned comparison).
__ mov(eax, Operand(esp, kPreviousIndexOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
+ __ JumpIfNotSmi(eax, &runtime);
__ cmp(eax, Operand(ebx));
__ j(above_equal, &runtime);
@@ -3120,8 +3282,7 @@
// edx: Number of capture registers
// Check that the fourth object is a JSArray object.
__ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
+ __ JumpIfSmi(eax, &runtime);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
__ j(not_equal, &runtime);
// Check that the JSArray is in fast case.
@@ -3392,8 +3553,7 @@
Label slowcase;
Label done;
__ mov(ebx, Operand(esp, kPointerSize * 3));
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(not_zero, &slowcase);
+ __ JumpIfNotSmi(ebx, &slowcase);
__ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
__ j(above, &slowcase);
// Smi-tagging is equivalent to multiplying by 2.
@@ -3505,8 +3665,7 @@
} else {
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
- __ test(object, Immediate(kSmiTagMask));
- __ j(not_zero, ¬_smi, Label::kNear);
+ __ JumpIfNotSmi(object, ¬_smi, Label::kNear);
__ mov(scratch, object);
__ SmiUntag(scratch);
__ jmp(&smi_hash_calculated, Label::kNear);
@@ -3526,8 +3685,7 @@
index,
times_twice_pointer_size,
FixedArray::kHeaderSize));
- __ test(probe, Immediate(kSmiTagMask));
- __ j(zero, not_found);
+ __ JumpIfSmi(probe, not_found);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
@@ -3599,8 +3757,7 @@
Label non_smi, smi_done;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &non_smi);
+ __ JumpIfNotSmi(ecx, &non_smi);
__ sub(edx, Operand(eax)); // Return on the result of the subtraction.
__ j(no_overflow, &smi_done);
__ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
@@ -3649,7 +3806,7 @@
__ j(equal, &heap_number, Label::kNear);
if (cc_ != equal) {
// Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, ¬_identical);
}
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
@@ -3737,8 +3894,8 @@
// Get the type of the first operand.
// If the first object is a JS object, we have done pointer comparison.
Label first_non_object;
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(below, &first_non_object, Label::kNear);
// Return non-zero (eax is not zero)
@@ -3752,7 +3909,7 @@
__ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
+ __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -3876,9 +4033,9 @@
__ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, ¬_both_objects, Label::kNear);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(below, ¬_both_objects, Label::kNear);
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
+ __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
__ j(below, ¬_both_objects, Label::kNear);
// We do not bail out after this point. Both are JSObjects, and
// they are equal if and only if both are undetectable.
@@ -3926,8 +4083,7 @@
Label* label,
Register object,
Register scratch) {
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, label);
+ __ JumpIfSmi(object, label);
__ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, kIsSymbolMask | kIsNotStringMask);
@@ -3967,8 +4123,7 @@
__ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
// Check that the function really is a JavaScript function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &slow);
+ __ JumpIfSmi(edi, &slow);
// Goto slow case if we do not have a function.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &slow);
@@ -4003,6 +4158,7 @@
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
Handle<Code> adaptor =
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ SetCallKind(ecx, CALL_AS_METHOD);
__ jmp(adaptor, RelocInfo::CODE_TARGET);
}
@@ -4209,9 +4365,7 @@
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, exit;
-#ifdef ENABLE_LOGGING_AND_PROFILING
Label not_outermost_js, not_outermost_js_2;
-#endif
// Setup frame.
__ push(ebp);
@@ -4230,7 +4384,6 @@
ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address, masm->isolate());
__ push(Operand::StaticVariable(c_entry_fp));
-#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
masm->isolate());
@@ -4243,7 +4396,6 @@
__ bind(¬_outermost_js);
__ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
__ bind(&cont);
-#endif
// Call a faked try-block that does the invoke.
__ call(&invoke);
@@ -4291,7 +4443,6 @@
__ PopTryHandler();
__ bind(&exit);
-#ifdef ENABLE_LOGGING_AND_PROFILING
// Check if the current stack frame is marked as the outermost JS frame.
__ pop(ebx);
__ cmp(Operand(ebx),
@@ -4299,7 +4450,6 @@
__ j(not_equal, ¬_outermost_js_2);
__ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ bind(¬_outermost_js_2);
-#endif
// Restore the top frame descriptor from the stack.
__ pop(Operand::StaticVariable(ExternalReference(
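With the ENABLE_LOGGING_AND_PROFILING guards gone, the outermost-frame bookkeeping in JSEntryStub always runs. A hedged sketch of the protocol the entry/exit code above implements (OUTERMOST_JSENTRY_FRAME is the assumed counterpart of the INNER_JSENTRY_FRAME marker pushed above): set js_entry_sp on the outermost entry, tag each frame, and clear it only when the matching outermost frame exits.

#include <cassert>
#include <cstdint>
#include <stack>

enum FrameMarker { INNER_JSENTRY_FRAME, OUTERMOST_JSENTRY_FRAME };

static intptr_t js_entry_sp = 0;         // stand-in for the isolate slot
static std::stack<FrameMarker> markers;  // stand-in for the on-stack marker

void EnterJS(intptr_t sp) {
  if (js_entry_sp == 0) {
    js_entry_sp = sp;                    // this is the outermost JS entry
    markers.push(OUTERMOST_JSENTRY_FRAME);
  } else {
    markers.push(INNER_JSENTRY_FRAME);
  }
}

void ExitJS() {
  FrameMarker marker = markers.top();
  markers.pop();
  if (marker == OUTERMOST_JSENTRY_FRAME) js_entry_sp = 0;
}

int main() {
  EnterJS(0x1000);
  EnterJS(0x2000);  // nested entry leaves js_entry_sp untouched
  ExitJS();
  assert(js_entry_sp == 0x1000);
  ExitJS();
  assert(js_entry_sp == 0);
  return 0;
}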
@@ -4367,8 +4517,7 @@
}
// Check that the left hand is a JS object.
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, ¬_js_object);
+ __ JumpIfSmi(object, ¬_js_object);
__ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
// If there is a call site cache don't look in the global cache, but do the
@@ -4395,8 +4544,7 @@
__ TryGetFunctionPrototype(function, prototype, scratch, &slow);
// Check that the function prototype is a JS object.
- __ test(prototype, Immediate(kSmiTagMask));
- __ j(zero, &slow);
+ __ JumpIfSmi(prototype, &slow);
__ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
// Update the global instanceof or call site inlined cache with the current
@@ -4485,8 +4633,7 @@
__ bind(¬_js_object);
// Before null, smi and string value checks, check that the rhs is a function
// as for a non-function rhs an exception needs to be thrown.
- __ test(function, Immediate(kSmiTagMask));
- __ j(zero, &slow);
+ __ JumpIfSmi(function, &slow);
__ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
__ j(not_equal, &slow);
@@ -4498,8 +4645,7 @@
__ bind(&object_not_null);
// Smi values is not instance of anything.
- __ test(object, Immediate(kSmiTagMask));
- __ j(not_zero, &object_not_null_or_smi);
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
__ Set(eax, Immediate(Smi::FromInt(1)));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
@@ -4565,15 +4711,8 @@
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
+void CompareStub::PrintName(StringStream* stream) {
ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
const char* cc_name;
switch (cc_) {
case less: cc_name = "LT"; break;
@@ -4584,35 +4723,12 @@
case not_equal: cc_name = "NE"; break;
default: cc_name = "UnknownCondition"; break;
}
-
- const char* strict_name = "";
- if (strict_ && (cc_ == equal || cc_ == not_equal)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- const char* include_smi_compare_name = "";
- if (!include_smi_compare_) {
- include_smi_compare_name = "_NO_SMI";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s%s",
- cc_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name,
- include_smi_compare_name);
- return name_;
+ bool is_equality = cc_ == equal || cc_ == not_equal;
+ stream->Add("CompareStub_%s", cc_name);
+ if (strict_ && is_equality) stream->Add("_STRICT");
+ if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
+ if (!include_number_compare_) stream->Add("_NO_NUMBER");
+ if (!include_smi_compare_) stream->Add("_NO_SMI");
}
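CompareStub joins UnaryOpStub and BinaryOpStub in replacing the lazily allocated name buffer (and its OOM fallback) by streaming into the caller's StringStream. A stand-in using std::ostringstream that reproduces the flag-suffix composition above:

#include <iostream>
#include <sstream>
#include <string>

std::string CompareStubName(const char* cc_name, bool is_equality, bool strict,
                            bool never_nan_nan, bool include_number_compare,
                            bool include_smi_compare) {
  std::ostringstream stream;
  stream << "CompareStub_" << cc_name;
  if (strict && is_equality) stream << "_STRICT";
  if (never_nan_nan && is_equality) stream << "_NO_NAN";
  if (!include_number_compare) stream << "_NO_NUMBER";
  if (!include_smi_compare) stream << "_NO_SMI";
  return stream.str();
}

int main() {
  // Strict equality, numbers included, smi fast path omitted:
  std::cout << CompareStubName("EQ", true, true, false, true, false) << "\n";
  // prints: CompareStub_EQ_STRICT_NO_SMI
  return 0;
}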
@@ -4626,8 +4742,7 @@
// If the receiver is a smi trigger the non-string case.
STATIC_ASSERT(kSmiTag == 0);
- __ test(object_, Immediate(kSmiTagMask));
- __ j(zero, receiver_not_string_);
+ __ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
__ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
@@ -4638,8 +4753,7 @@
// If the index is non-smi trigger the non-smi case.
STATIC_ASSERT(kSmiTag == 0);
- __ test(index_, Immediate(kSmiTagMask));
- __ j(not_zero, &index_not_smi_);
+ __ JumpIfNotSmi(index_, &index_not_smi_);
// Put smi-tagged index into scratch register.
__ mov(scratch_, index_);
@@ -4737,8 +4851,7 @@
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
STATIC_ASSERT(kSmiTag == 0);
- __ test(scratch_, Immediate(kSmiTagMask));
- __ j(not_zero, index_out_of_range_);
+ __ JumpIfNotSmi(scratch_, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
@@ -4832,14 +4945,12 @@
// Make sure that both arguments are strings if not known in advance.
if (flags_ == NO_STRING_ADD_FLAGS) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &string_add_runtime);
+ __ JumpIfSmi(eax, &string_add_runtime);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
__ j(above_equal, &string_add_runtime);
// First argument is a a string, test second.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &string_add_runtime);
+ __ JumpIfSmi(edx, &string_add_runtime);
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
__ j(above_equal, &string_add_runtime);
} else {
@@ -5107,8 +5218,7 @@
Label* slow) {
// First check if the argument is already a string.
Label not_string, done;
- __ test(arg, Immediate(kSmiTagMask));
- __ j(zero, &not_string);
+ __ JumpIfSmi(arg, &not_string);
__ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
__ j(below, &done);
@@ -5129,8 +5239,7 @@
// Check if the argument is a safe string wrapper.
__ bind(&not_cached);
- __ test(arg, Immediate(kSmiTagMask));
- __ j(zero, slow);
+ __ JumpIfSmi(arg, slow);
__ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
__ j(not_equal, slow);
__ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
@@ -5424,8 +5533,7 @@
// Make sure first argument is a string.
__ mov(eax, Operand(esp, 3 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
+ __ JumpIfSmi(eax, &runtime);
Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
__ j(NegateCondition(is_string), &runtime);
@@ -5435,11 +5543,9 @@
// Calculate length of sub string using the smi values.
Label result_longer_than_two;
__ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
+ __ JumpIfNotSmi(ecx, &runtime);
__ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
+ __ JumpIfNotSmi(edx, &runtime);
__ sub(ecx, Operand(edx));
__ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
Label return_eax;
@@ -5731,8 +5837,7 @@
Label miss;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &miss, Label::kNear);
+ __ JumpIfNotSmi(ecx, &miss, Label::kNear);
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
@@ -5761,8 +5866,7 @@
Label miss;
__ mov(ecx, Operand(edx));
__ and_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, &generic_stub, Label::kNear);
+ __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
__ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
__ j(not_equal, &miss, Label::kNear);
@@ -5821,8 +5925,7 @@
__ mov(tmp1, Operand(left));
STATIC_ASSERT(kSmiTag == 0);
__ and_(tmp1, Operand(right));
- __ test(tmp1, Immediate(kSmiTagMask));
- __ j(zero, &miss, Label::kNear);
+ __ JumpIfSmi(tmp1, &miss, Label::kNear);
// Check that both operands are symbols.
__ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
@@ -5868,8 +5971,7 @@
__ mov(tmp1, Operand(left));
STATIC_ASSERT(kSmiTag == 0);
__ and_(tmp1, Operand(right));
- __ test(tmp1, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(tmp1, &miss);
// Check that both operands are strings. This leaves the instance
// types loaded in tmp1 and tmp2.
@@ -5934,8 +6036,7 @@
Label miss;
__ mov(ecx, Operand(edx));
__ and_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, &miss, Label::kNear);
+ __ JumpIfSmi(ecx, &miss, Label::kNear);
__ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
__ j(not_equal, &miss, Label::kNear);
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index ead7761..fa255da 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -60,32 +60,14 @@
};
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
class UnaryOpStub: public CodeStub {
public:
- UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
+ UnaryOpStub(Token::Value op,
+ UnaryOverwriteMode mode,
+ UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
- operand_type_(UnaryOpIC::UNINITIALIZED),
- name_(NULL) {
- }
-
- UnaryOpStub(int key, UnaryOpIC::TypeInfo operand_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- operand_type_(operand_type),
- name_(NULL) {
+ operand_type_(operand_type) {
}
private:
@@ -95,20 +77,7 @@
// Operand type information determined at runtime.
UnaryOpIC::TypeInfo operand_type_;
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("TypeRecordingUnaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- UnaryOpIC::GetName(operand_type_));
- }
-#endif
+ virtual void PrintName(StringStream* stream);
class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
class OpBits: public BitField<Token::Value, 1, 7> {};
@@ -171,8 +140,7 @@
: op_(op),
mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED),
- name_(NULL) {
+ result_type_(BinaryOpIC::UNINITIALIZED) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -185,8 +153,7 @@
mode_(ModeBits::decode(key)),
use_sse3_(SSE3Bits::decode(key)),
operands_type_(operands_type),
- result_type_(result_type),
- name_(NULL) { }
+ result_type_(result_type) { }
private:
enum SmiCodeGenerateHeapNumberResults {
@@ -202,20 +169,7 @@
BinaryOpIC::TypeInfo operands_type_;
BinaryOpIC::TypeInfo result_type_;
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("BinaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- BinaryOpIC::GetName(operands_type_));
- }
-#endif
+ virtual void PrintName(StringStream* stream);
// Minor key encoding in 16 bits RRRTTTSOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
@@ -433,14 +387,6 @@
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("NumberToStringStub\n");
- }
-#endif
};
@@ -484,13 +430,6 @@
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
-
-#ifdef DEBUG
- void Print() {
- PrintF("StringDictionaryLookupStub\n");
- }
-#endif
-
Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 572c36c..3a657bd 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -255,6 +255,7 @@
ASSERT(desc.reloc_size == 0);
CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
}
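The added OS::ProtectCode call drops write permission from the generated memcpy stub after its instruction cache is flushed, so the code page cannot be overwritten afterwards. On POSIX platforms this is plausibly just an mprotect to read+execute (sketch, assuming the platform-linux.cc implementation):

    #include <sys/mman.h>

    // Make generated code read+execute once it is finalized.
    void OS::ProtectCode(void* address, const size_t size) {
      mprotect(address, size, PROT_READ | PROT_EXEC);
    }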
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 8f090b1..c85fa83 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -53,9 +53,7 @@
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
-#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
-#endif
static bool RecordPositions(MacroAssembler* masm,
int pos,
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index 615dbfe..57e66df 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -67,7 +67,8 @@
// solution is to run valgrind with --smc-check=all, but this comes at a big
// performance cost. We can notify valgrind to invalidate its cache.
#ifdef VALGRIND_DISCARD_TRANSLATIONS
- VALGRIND_DISCARD_TRANSLATIONS(start, size);
+ unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
+ USE(res);
#endif
}
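Capturing and USE()-ing the valgrind result silences unused-result warnings without changing behavior; USE is the trivial template from src/globals.h:

    // Consumes a value to silence compiler warnings about unused variables.
    template <typename T>
    static inline void USE(T) { }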
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 72fdac8..4ff1bfc 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -348,6 +348,9 @@
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
+#ifdef DEBUG
+ output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -461,6 +464,9 @@
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
+#ifdef DEBUG
+ output_frame->SetKind(Code::FUNCTION);
+#endif
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
@@ -587,7 +593,7 @@
output_frame->SetState(Smi::FromInt(state));
// Set the continuation for the topmost frame.
- if (is_topmost) {
+ if (is_topmost && bailout_type_ != DEBUGGER) {
Builtins* builtins = isolate_->builtins();
Code* continuation = (bailout_type_ == EAGER)
? builtins->builtin(Builtins::kNotifyDeoptimized)
@@ -600,6 +606,27 @@
}
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers ebp and esp are set to the correct values though.
+
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ input_->SetRegister(i, i * 4);
+ }
+ input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+ }
+}
+
+
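FillInputFrame copies the live frame verbatim: Memory::uint32_at (paraphrased here from src/v8memory.h) is a raw typed load, so the final loop is effectively a word-by-word memcpy from the top-of-stack address upward:

    static uint32_t& uint32_at(Address addr) {
      return *reinterpret_cast<uint32_t*>(addr);
    }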
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 5f0a0b6..7633856 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -78,16 +78,18 @@
}
void EmitPatchInfo() {
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- ASSERT(is_int8(delta_to_patch_site));
- __ test(eax, Immediate(delta_to_patch_site));
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+ ASSERT(is_int8(delta_to_patch_site));
+ __ test(eax, Immediate(delta_to_patch_site));
#ifdef DEBUG
- info_emitted_ = true;
+ info_emitted_ = true;
#endif
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
}
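A note on the marker (sketch, following the inlined-smi-code patching in ic-ia32.cc): the byte after an IC call is either 0xA9 — the `test eax, imm32` opcode, whose immediate encodes the distance back to the patch site — or 0x90, a nop meaning nothing was inlined:

    // True iff inlined smi code follows the IC call site (assumed encodings).
    static bool HasInlinedSmiCode(const uint8_t* byte_after_call) {
      return *byte_after_call == 0xA9;  // test eax, imm32; 0x90 would be a nop
    }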
- bool is_bound() const { return patch_site_.is_bound(); }
-
private:
// jc will be patched with jz, jnc will become jnz.
void EmitJump(Condition cc, Label* target, Label::Distance distance) {
@@ -121,6 +123,7 @@
void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
+ scope_ = info->scope();
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -131,16 +134,16 @@
}
#endif
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). ecx is zero for method calls and non-zero for function
- // calls.
- if (info->is_strict_mode()) {
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). ecx is zero for method calls and non-zero for
+ // function calls.
+ if (info->is_strict_mode() || info->is_native()) {
Label ok;
__ test(ecx, Operand(ecx));
__ j(zero, &ok, Label::kNear);
// +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ mov(Operand(esp, receiver_offset),
Immediate(isolate()->factory()->undefined_value()));
__ bind(&ok);
@@ -152,7 +155,7 @@
__ push(edi); // Callee's JS Function.
{ Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = scope()->num_stack_slots();
+ int locals_count = info->scope()->num_stack_slots();
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
@@ -166,7 +169,7 @@
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in edi.
@@ -175,7 +178,7 @@
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in both eax and esi. It replaces the context
@@ -183,7 +186,7 @@
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
// Copy parameters into context if necessary.
- int num_parameters = scope()->num_parameters();
+ int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
@@ -213,25 +216,27 @@
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
// Receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
__ lea(edx,
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
- __ SafePush(Immediate(Smi::FromInt(scope()->num_parameters())));
- // Arguments to ArgumentsAccessStub:
+ __ SafePush(Immediate(Smi::FromInt(num_parameters)));
+ // Arguments to ArgumentsAccessStub and/or New...:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(
- is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT);
+ ArgumentsAccessStub::Type type;
+ if (is_strict_mode()) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ }
+ ArgumentsAccessStub stub(type);
__ CallStub(&stub);
- Variable* arguments_shadow = scope()->arguments_shadow();
- if (arguments_shadow != NULL) {
- __ mov(ecx, eax); // Duplicate result.
- Move(arguments_shadow->AsSlot(), ecx, ebx, edx);
- }
Move(arguments->AsSlot(), eax, ebx, edx);
}
@@ -341,7 +346,7 @@
__ mov(esp, ebp);
__ pop(ebp);
- int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
+ int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, ecx);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check that the size of the code used for returning is large enough
@@ -374,7 +379,7 @@
// For simplicity we always test the accumulator register.
codegen()->Move(result_register(), slot);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -448,7 +453,7 @@
} else {
// For simplicity we always test the accumulator register.
__ mov(result_register(), lit);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -484,7 +489,7 @@
__ Drop(count);
__ Move(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -561,13 +566,14 @@
}
-void FullCodeGenerator::DoTest(Label* if_true,
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
Label* if_false,
Label* fall_through) {
- ToBooleanStub stub;
+ ToBooleanStub stub(result_register());
__ push(result_register());
__ CallStub(&stub);
- __ test(eax, Operand(eax));
+ __ test(result_register(), Operand(result_register()));
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
@@ -685,10 +691,12 @@
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check that we're not inside a 'with'.
- __ mov(ebx, ContextOperand(esi, Context::FCONTEXT_INDEX));
- __ cmp(ebx, Operand(esi));
- __ Check(equal, "Unexpected declaration in current context.");
+ // Check that we're not inside a with or catch context.
+ __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
+ __ cmp(ebx, isolate()->factory()->with_context_map());
+ __ Check(not_equal, "Declaration in with context.");
+ __ cmp(ebx, isolate()->factory()->catch_context_map());
+ __ Check(not_equal, "Declaration in catch context.");
}
if (mode == Variable::CONST) {
__ mov(ContextOperand(esi, slot->index()),
@@ -736,7 +744,7 @@
// IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ EmitVariableLoad(prop->obj()->AsVariableProxy());
}
__ push(eax);
@@ -750,7 +758,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
}
}
}
@@ -823,7 +831,8 @@
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site, clause->CompareId());
+ __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ patch_site.EmitPatchInfo();
__ test(eax, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
@@ -873,9 +882,8 @@
// Convert the object to a JS object.
Label convert, done_convert;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &convert, Label::kNear);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ JumpIfSmi(eax, &convert, Label::kNear);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &done_convert, Label::kNear);
__ bind(&convert);
__ push(eax);
@@ -908,8 +916,7 @@
// descriptors (edx). This is the case if the next enumeration
// index field does not contain a smi.
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime);
+ __ JumpIfSmi(edx, &call_runtime);
// For all objects but the receiver, check that the cache is empty.
Label check_prototype;
@@ -1057,7 +1064,7 @@
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var());
+ EmitVariableLoad(expr);
}
@@ -1078,8 +1085,7 @@
__ j(not_equal, slow);
}
// Load next context in chain.
- __ mov(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering esi.
context = temp;
}
@@ -1106,8 +1112,7 @@
__ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// Load next context in chain.
- __ mov(temp, ContextOperand(temp, Context::CLOSURE_INDEX));
- __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
__ bind(&fast);
}
@@ -1120,7 +1125,7 @@
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- EmitCallIC(ic, mode, AstNode::kNoNumber);
+ __ call(ic, mode);
}
@@ -1139,8 +1144,7 @@
Immediate(0));
__ j(not_equal, slow);
}
- __ mov(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering esi.
context = temp;
}
@@ -1201,7 +1205,7 @@
__ SafeSet(eax, Immediate(key_literal->handle()));
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ jmp(done);
}
}
@@ -1210,24 +1214,27 @@
}
-void FullCodeGenerator::EmitVariableLoad(Variable* var) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
- Slot* slot = var->AsSlot();
- Property* property = var->AsProperty();
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+ // Record position before possible IC call.
+ SetSourcePosition(proxy->position());
+ Variable* var = proxy->var();
- if (var->is_global() && !var->is_this()) {
+ // Three cases: non-this global variables, lookup slots, and all other
+ // types of slots.
+ Slot* slot = var->AsSlot();
+ ASSERT((var->is_global() && !var->is_this()) == (slot == NULL));
+
+ if (slot == NULL) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object on the stack.
__ mov(eax, GlobalObjectOperand());
__ mov(ecx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(eax);
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ } else if (slot->type() == Slot::LOOKUP) {
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -1243,7 +1250,7 @@
context()->Plug(eax);
- } else if (slot != NULL) {
+ } else {
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
? "Context slot"
: "Stack slot");
@@ -1261,36 +1268,6 @@
} else {
context()->Plug(slot);
}
-
- } else {
- Comment cmnt(masm_, "Rewritten parameter");
- ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->AsSlot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- MemOperand object_loc = EmitSlotSearch(object_slot, eax);
- __ mov(edx, object_loc);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ SafeSet(eax, Immediate(key_literal->handle()));
-
- // Do a keyed property load.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
-
- // Drop key and object left on the stack by IC.
- context()->Plug(eax);
}
}
@@ -1400,7 +1377,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ __ call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1523,7 +1500,7 @@
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
@@ -1549,29 +1526,13 @@
break;
case KEYED_PROPERTY: {
if (expr->is_compound()) {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
- __ push(slot_operand);
- __ SafeSet(eax, Immediate(property->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
} else {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
- __ push(slot_operand);
- __ SafePush(Immediate(property->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
}
break;
}
@@ -1583,7 +1544,7 @@
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ EmitVariableLoad(expr->target()->AsVariableProxy());
PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
@@ -1649,14 +1610,14 @@
ASSERT(!key->handle()->IsSmi());
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1677,7 +1638,8 @@
__ bind(&stub_call);
__ mov(eax, ecx);
BinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
// Smi case.
@@ -1760,8 +1722,9 @@
OverwriteMode mode) {
__ pop(edx);
BinaryOpStub stub(op, mode);
- // NULL signals no inlined smi code.
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
context()->Plug(eax);
}
@@ -1775,7 +1738,7 @@
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
@@ -1801,7 +1764,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
break;
}
case KEYED_PROPERTY: {
@@ -1810,7 +1773,7 @@
ASSERT(prop->obj()->AsVariableProxy() != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);
{ AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ EmitVariableLoad(prop->obj()->AsVariableProxy());
}
__ mov(edx, eax);
__ SafeSet(ecx, Immediate(prop->key()->AsLiteral()->handle()));
@@ -1824,7 +1787,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
break;
}
}
@@ -1835,8 +1798,6 @@
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
- // Left-hand sides that rewrite to explicit property accesses do not reach
- // here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->AsSlot() != NULL);
@@ -1850,7 +1811,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1870,17 +1831,7 @@
__ j(not_equal, &skip);
__ mov(Operand(ebp, SlotOffset(slot)), eax);
break;
- case Slot::CONTEXT: {
- __ mov(ecx, ContextOperand(esi, Context::FCONTEXT_INDEX));
- __ mov(edx, ContextOperand(ecx, slot->index()));
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &skip);
- __ mov(ContextOperand(ecx, slot->index()), eax);
- int offset = Context::SlotOffset(slot->index());
- __ mov(edx, eax); // Preserve the stored value in eax.
- __ RecordWrite(ecx, offset, edx, ebx);
- break;
- }
+ case Slot::CONTEXT:
case Slot::LOOKUP:
__ push(eax);
__ push(esi);
@@ -1953,7 +1904,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1993,7 +1944,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2044,7 +1995,7 @@
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
- EmitCallIC(ic, mode, expr->id());
+ __ call(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2077,7 +2028,7 @@
Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
arg_count, in_loop);
__ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2116,7 +2067,7 @@
}
// Push the receiver of the enclosing function.
- __ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));
+ __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
// Push the strict mode flag.
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
@@ -2230,9 +2181,9 @@
__ bind(&done);
// Push function.
__ push(eax);
- // Push global receiver.
- __ mov(ebx, GlobalObjectOperand());
- __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing the hole to the call function stub.
+ __ push(Immediate(isolate()->factory()->the_hole_value()));
__ bind(&call);
}
@@ -2253,7 +2204,7 @@
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed EmitCallIC.
+ // for a regular property use EmitKeyedCallWithIC.
if (prop->is_synthetic()) {
// Do not visit the object and key subexpressions (they are shared
// by all occurrences of the same rewritten parameter).
@@ -2271,7 +2222,7 @@
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
// Push result (function).
__ push(eax);
// Push Global receiver.
@@ -2388,8 +2339,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
+ __ JumpIfSmi(eax, if_false);
__ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -2398,9 +2348,9 @@
__ test(ecx, Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, if_false);
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ j(below, if_false);
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+ __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
@@ -2420,9 +2370,8 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(equal, if_false);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
@@ -2442,8 +2391,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
+ __ JumpIfSmi(eax, if_false);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsUndetectable));
@@ -2517,8 +2465,7 @@
// If a valueOf property is not found on the object, check that its
// prototype is the unmodified String prototype. If not, the result is false.
__ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, if_false);
+ __ JumpIfSmi(ecx, if_false);
__ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
__ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(edx,
@@ -2550,8 +2497,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
+ __ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -2572,8 +2518,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(equal, if_false);
+ __ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -2594,8 +2539,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(equal, if_false);
+ __ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -2666,7 +2610,7 @@
// parameter count in eax.
VisitForAccumulatorValue(args->at(0));
__ mov(edx, eax);
- __ SafeSet(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+ __ SafeSet(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(eax);
@@ -2678,7 +2622,7 @@
Label exit;
// Get the number of formal parameters.
- __ SafeSet(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+ __ SafeSet(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2703,21 +2647,22 @@
VisitForAccumulatorValue(args->at(0));
// If the object is a smi, we return null.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &null);
+ __ JumpIfSmi(eax, &null);
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, eax); // Map is now in eax.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
+ // Map is now in eax.
__ j(below, &null);
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
- __ j(equal, &function);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ CmpInstanceType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &function);
// Check if the constructor in the map is a function.
__ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
@@ -2760,13 +2705,11 @@
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
}
-#endif
// Finally, we're expected to leave a value on the top of the stack.
__ mov(eax, isolate()->factory()->undefined_value());
context()->Plug(eax);
@@ -2855,8 +2798,7 @@
Label done;
// If the object is a smi return the object.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done, Label::kNear);
+ __ JumpIfSmi(eax, &done, Label::kNear);
// If the object is not a value type, return the object.
__ CmpObjectType(eax, JS_VALUE_TYPE, ebx);
__ j(not_equal, &done, Label::kNear);
@@ -2892,8 +2834,7 @@
Label done;
// If the object is a smi, return the value.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &done, Label::kNear);
+ __ JumpIfSmi(ebx, &done, Label::kNear);
// If the object is not a value type, return the value.
__ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
@@ -3167,8 +3108,7 @@
__ mov(index_2, Operand(esp, 0));
__ mov(temp, index_1);
__ or_(temp, Operand(index_2));
- __ test(temp, Immediate(kSmiTagMask));
- __ j(not_zero, &slow_case);
+ __ JumpIfNotSmi(temp, &slow_case);
// Check that both indices are valid.
__ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
@@ -3273,8 +3213,7 @@
// Fail if either is a non-HeapObject.
__ mov(tmp, left);
__ and_(Operand(tmp), right);
- __ test(Operand(tmp), Immediate(kSmiTagMask));
- __ j(zero, &fail);
+ __ JumpIfSmi(tmp, &fail);
__ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
__ CmpInstanceType(tmp, JS_REGEXP_TYPE);
__ j(not_equal, &fail);
@@ -3366,15 +3305,12 @@
__ sub(Operand(esp), Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
- __ test(array, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
+ __ JumpIfSmi(array, &bailout);
__ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
__ j(not_equal, &bailout);
// Check that the array has fast elements.
- __ test_b(FieldOperand(scratch, Map::kBitField2Offset),
- 1 << Map::kHasFastElements);
- __ j(zero, &bailout);
+ __ CheckFastElements(scratch, &bailout);
// If the array has length zero, return the empty string.
__ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
@@ -3410,8 +3346,7 @@
index,
times_pointer_size,
FixedArray::kHeaderSize));
- __ test(string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
+ __ JumpIfSmi(string, &bailout);
__ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
@@ -3444,8 +3379,7 @@
// Check that the separator is a flat ASCII string.
__ mov(string, separator_operand);
- __ test(string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
+ __ JumpIfSmi(string, &bailout);
__ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
@@ -3602,6 +3536,39 @@
}
+void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // Load the function into eax.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Prepare for the test.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Test for strict mode function.
+ __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, if_true);
+
+ // Test for native function.
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, if_true);
+
+ // Not native or strict-mode function.
+ __ jmp(if_false);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -3632,7 +3599,7 @@
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
arg_count, in_loop, mode);
- EmitCallIC(ic, mode, expr->id());
+ __ call(ic, mode, expr->id());
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
} else {
@@ -3737,8 +3704,7 @@
Comment cmt(masm_, "[ UnaryOperation (ADD)");
VisitForAccumulatorValue(expr->expression());
Label no_conversion;
- __ test(result_register(), Immediate(kSmiTagMask));
- __ j(zero, &no_conversion);
+ __ JumpIfSmi(result_register(), &no_conversion);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
@@ -3772,7 +3738,7 @@
// accumulator register eax.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(eax);
}
@@ -3789,7 +3755,7 @@
}
// Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->expression()->AsProperty();
@@ -3804,7 +3770,7 @@
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
@@ -3816,16 +3782,8 @@
__ push(eax);
EmitNamedPropertyLoad(prop);
} else {
- if (prop->is_arguments_access()) {
- VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
- __ push(slot_operand);
- __ SafeSet(eax, Immediate(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
EmitKeyedPropertyLoad(prop);
@@ -3843,8 +3801,7 @@
// Call ToNumber only if operand is not a smi.
Label no_conversion;
if (ShouldInlineSmiCase(expr->op())) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &no_conversion, Label::kNear);
+ __ JumpIfSmi(eax, &no_conversion, Label::kNear);
}
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
@@ -3901,7 +3858,8 @@
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ patch_site.EmitPatchInfo();
__ bind(&done);
// Store the value returned in eax.
@@ -3934,7 +3892,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3951,7 +3909,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -3979,7 +3937,7 @@
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(eax);
} else if (proxy != NULL &&
@@ -4002,30 +3960,18 @@
context()->Plug(eax);
} else {
// This expression cannot throw a reference error at the top level.
- context()->HandleExpression(expr);
+ VisitInCurrentContext(expr);
}
}
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
- // Check for the pattern: typeof <expression> == <string literal>.
- Literal* right_literal = right->AsLiteral();
- if (right_literal == NULL) return false;
- Handle<Object> right_literal_value = right_literal->handle();
- if (!right_literal_value->IsString()) return false;
- UnaryOperation* left_unary = left->AsUnaryOperation();
- if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
- Handle<String> check = Handle<String>::cast(right_literal_value);
-
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(left_unary->expression());
+ VisitForTypeofValue(expr);
}
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
@@ -4058,16 +4004,16 @@
Split(not_zero, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_FUNCTION_CLASS_TYPE, edx);
+ __ CmpObjectType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, edx);
Split(above_equal, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(eax, if_false);
__ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edx);
+ __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
__ j(below, if_false);
- __ CmpInstanceType(edx, FIRST_FUNCTION_CLASS_TYPE);
- __ j(above_equal, if_false);
+ __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, if_false);
// Check for undetectable objects => false.
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
@@ -4075,8 +4021,18 @@
} else {
if (if_false != fall_through) __ jmp(if_false);
}
+}
- return true;
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+ __ cmp(eax, isolate()->factory()->undefined_value());
+ Split(equal, if_true, if_false, fall_through);
}
@@ -4096,14 +4052,12 @@
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
- if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
context()->Plug(if_true, if_false);
return;
}
+ Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (expr->op()) {
case Token::IN:
@@ -4128,11 +4082,8 @@
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = no_condition;
- bool strict = false;
switch (op) {
case Token::EQ_STRICT:
- strict = true;
- // Fall through
case Token::EQ:
cc = equal;
__ pop(edx);
@@ -4178,7 +4129,8 @@
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, Operand(eax));
@@ -4210,8 +4162,7 @@
__ j(equal, if_true);
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, if_true);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
+ __ JumpIfSmi(eax, if_false);
// It can be an undetectable object.
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(edx, FieldOperand(edx, Map::kBitFieldOffset));
@@ -4238,58 +4189,6 @@
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- RelocInfo::Mode mode,
- unsigned ast_id) {
- ASSERT(mode == RelocInfo::CODE_TARGET ||
- mode == RelocInfo::CODE_TARGET_CONTEXT);
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(isolate()->counters()->named_load_full(), 1);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(isolate()->counters()->keyed_load_full(), 1);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(isolate()->counters()->named_store_full(), 1);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(isolate()->counters()->keyed_store_full(), 1);
- default:
- break;
- }
- __ call(ic, mode, ast_id);
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- JumpPatchSite* patch_site,
- unsigned ast_id) {
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1);
- default:
- break;
- }
- __ call(ic, RelocInfo::CODE_TARGET, ast_id);
- if (patch_site != NULL && patch_site->is_bound()) {
- patch_site->EmitPatchInfo();
- } else {
- __ nop(); // Signals no inlined code.
- }
-}
-
-
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ mov(Operand(ebp, frame_offset), value);
@@ -4301,18 +4200,38 @@
}
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope()) {
+ // Contexts nested in the global context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ push(Immediate(Smi::FromInt(0)));
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts nested inside eval code have the same closure as the context
+ // calling eval, not the anonymous closure containing the eval code.
+ // Fetch it from the context.
+ __ push(ContextOperand(esi, Context::CLOSURE_INDEX));
+ } else {
+ ASSERT(declaration_scope->is_function_scope());
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+}
+
+
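The case analysis above, restated as a decision table (hypothetical C++, for illustration):

    enum ClosureSource {
      kEmptyFunctionSentinel,   // global scope: push Smi 0, runtime fixes up
      kCallingContextClosure,   // eval scope: reuse the caller's CLOSURE_INDEX
      kCurrentFrameFunction     // function scope: function in the current frame
    };

    ClosureSource ClosureFor(bool is_global_scope, bool is_eval_scope) {
      if (is_global_scope) return kEmptyFunctionSentinel;
      if (is_eval_scope) return kCallingContextClosure;
      return kCurrentFrameFunction;
    }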
// ----------------------------------------------------------------------------
// Non-local control flow support.
void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
ASSERT(!result_register().is(edx));
- __ mov(edx, Operand(esp, 0));
+ __ pop(edx);
__ sub(Operand(edx), Immediate(masm_->CodeObject()));
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
ASSERT_EQ(0, kSmiTag);
- __ add(edx, Operand(edx)); // Convert to smi.
- __ mov(Operand(esp, 0), edx);
+ __ SmiTag(edx);
+ __ push(edx);
// Store result register while executing finally block.
__ push(result_register());
}
@@ -4320,15 +4239,12 @@
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(edx));
- // Restore result register from stack.
__ pop(result_register());
// Uncook return address.
- __ mov(edx, Operand(esp, 0));
- __ sar(edx, 1); // Convert smi to int.
+ __ pop(edx);
+ __ SmiUntag(edx);
__ add(Operand(edx), Immediate(masm_->CodeObject()));
- __ mov(Operand(esp, 0), edx);
- // And return.
- __ ret(0);
+ __ jmp(Operand(edx));
}
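The finally-block entry/exit now pop and push instead of rewriting the stack slot in place, and exit jumps rather than using ret, but the cooking itself is unchanged. In plain terms (sketch, assuming kSmiTagSize == 1):

    // Store the return address as a smi-tagged offset from the code object
    // so the GC never sees a raw code pointer while the finally block runs.
    intptr_t CookReturnAddress(uintptr_t ret, uintptr_t code_start) {
      return static_cast<intptr_t>(ret - code_start) << 1;
    }

    uintptr_t UncookReturnAddress(intptr_t cooked, uintptr_t code_start) {
      return code_start + static_cast<uintptr_t>(cooked >> 1);
    }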
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 3941cfc..5f143b1 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -72,17 +72,16 @@
// r1: used to hold receivers map.
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the receiver is a valid JS object.
__ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
__ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
- __ cmp(r0, FIRST_JS_OBJECT_TYPE);
+ __ cmp(r0, FIRST_SPEC_OBJECT_TYPE);
__ j(below, miss);
// If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
GenerateGlobalInstanceTypeCheck(masm, r0, miss);
@@ -217,105 +216,6 @@
}
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver and is unchanged.
- //
- // key - holds the smi key on entry and is unchanged.
- //
- // Scratch registers:
- //
- // r0 - holds the untagged key on entry and holds the hash once computed.
- //
- // r1 - used to hold the capacity mask of the dictionary
- //
- // r2 - used for the index into the dictionary.
- //
- // result - holds the result on exit if the load succeeds and we fall through.
-
- Label done;
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- __ mov(r1, r0);
- __ not_(r0);
- __ shl(r1, 15);
- __ add(r0, Operand(r1));
- // hash = hash ^ (hash >> 12);
- __ mov(r1, r0);
- __ shr(r1, 12);
- __ xor_(r0, Operand(r1));
- // hash = hash + (hash << 2);
- __ lea(r0, Operand(r0, r0, times_4, 0));
- // hash = hash ^ (hash >> 4);
- __ mov(r1, r0);
- __ shr(r1, 4);
- __ xor_(r0, Operand(r1));
- // hash = hash * 2057;
- __ imul(r0, r0, 2057);
- // hash = hash ^ (hash >> 16);
- __ mov(r1, r0);
- __ shr(r1, 16);
- __ xor_(r0, Operand(r1));
-
- // Compute capacity mask.
- __ mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
- __ shr(r1, kSmiTagSize); // convert smi to int
- __ dec(r1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use r2 for index calculations and keep the hash intact in r0.
- __ mov(r2, r0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- __ add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
- }
- __ and_(r2, Operand(r1));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(NumberDictionary::kEntrySize == 3);
- __ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
-
- // Check if the key matches.
- __ cmp(key, FieldOperand(elements,
- r2,
- times_pointer_size,
- NumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
- __ j(equal, &done);
- } else {
- __ j(not_equal, miss);
- }
- }
-
- __ bind(&done);
- // Check that the value is a normal property.
- const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ASSERT_EQ(NORMAL, 0);
- __ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
- Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
- __ j(not_zero, miss);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
- __ mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
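For reference, the C form of the hash the deleted assembly computed (its comments note it must stay in sync with ComputeIntegerHash in utils.h):

    static inline uint32_t ComputeIntegerHash(uint32_t key) {
      uint32_t hash = key;
      hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11);
      hash = hash ^ (hash >> 16);
      return hash;
    }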
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : receiver
@@ -373,8 +273,7 @@
// map - used to hold the map of the receiver.
// Check that the object isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, slow);
+ __ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
__ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
@@ -465,6 +364,83 @@
}
+static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+ Factory* factory = masm->isolate()->factory();
+
+ // Check that the receiver is a JSObject. Because of the elements
+ // map check later, we do not need to check for interceptors or
+ // whether it requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
+ __ j(below, slow_case);
+
+ // Check that the key is a positive smi.
+  __ test(key, Immediate(0x80000001));
+ __ j(not_zero, slow_case);
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+ __ sub(Operand(scratch2), Immediate(Smi::FromInt(2)));
+ __ cmp(key, Operand(scratch2));
+ __ j(greater_equal, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
+ __ mov(scratch2, FieldOperand(scratch1,
+ key,
+ times_half_pointer_size,
+ kHeaderSize));
+ __ cmp(scratch2, factory->the_hole_value());
+ __ j(equal, unmapped_case);
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ const int kContextOffset = FixedArray::kHeaderSize;
+ __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
+ return FieldOperand(scratch1,
+ scratch2,
+ times_half_pointer_size,
+ Context::kHeaderSize);
+}
+
+
+static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+ __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmp(key, Operand(scratch));
+ __ j(greater_equal, slow_case);
+ return FieldOperand(backing_store,
+ key,
+ times_half_pointer_size,
+ FixedArray::kHeaderSize);
+}
+
+
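
The two helpers above walk V8's aliased (non-strict) arguments structure: in the parameter map, slot 0 holds the context, slot 1 the backing-store FixedArray, and the remaining slots hold a context index per argument, or the hole when that argument is unmapped. An illustrative C++ sketch of the lookup they implement (stand-in types, not V8 API):

    // Returns false where the stubs jump to the slow case.
    struct ArgumentsMapSketch {
      static const int kHole = -1;
      const int* mapped;         // context index per argument, or kHole
      int mapped_count;          // FixedArray length - 2, as computed above
      const int* context_slots;  // stand-in for the context's slot array
      const int* backing_store;  // stand-in for the backing FixedArray
      int backing_length;
    };

    static bool LoadArgumentSketch(const ArgumentsMapSketch& m, int key,
                                   int* result) {
      if (key < 0) return false;  // the stub tests the smi sign bit
      if (key < m.mapped_count && m.mapped[key] != ArgumentsMapSketch::kHole) {
        *result = m.context_slots[m.mapped[key]];  // mapped: read the context
        return true;
      }
      if (key >= m.backing_length) return false;   // unmapped, out of range
      *result = m.backing_store[key];              // unmapped: backing store
      return true;
    }
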
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
@@ -475,8 +451,7 @@
Label probe_dictionary, check_number_dictionary;
// Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &check_string);
+ __ JumpIfNotSmi(eax, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from
// where a numeric string is converted to a smi.
@@ -484,11 +459,8 @@
GenerateKeyedLoadReceiverCheck(
masm, edx, ecx, Map::kHasIndexedInterceptor, &slow);
- // Check the "has fast elements" bit in the receiver's map which is
- // now in ecx.
- __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
- 1 << Map::kHasFastElements);
- __ j(zero, &check_number_dictionary);
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(ecx, &check_number_dictionary);
GenerateFastArrayLoad(masm,
edx,
@@ -520,14 +492,13 @@
// Push receiver on the stack to free up a register for the dictionary
// probing.
__ push(edx);
- GenerateNumberDictionaryLoad(masm,
- &slow_pop_receiver,
- ecx,
- eax,
- ebx,
- edx,
- edi,
- eax);
+ __ LoadFromNumberDictionary(&slow_pop_receiver,
+ ecx,
+ eax,
+ ebx,
+ edx,
+ edi,
+ eax);
// Pop receiver before returning.
__ pop(edx);
__ ret(0);
@@ -668,8 +639,7 @@
Label slow;
// Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow);
+ __ JumpIfSmi(edx, &slow);
// Check that the key is an array index, that is Uint32.
__ test(eax, Immediate(kSmiTagMask | kSmiSignMask));
@@ -702,6 +672,60 @@
}
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Factory* factory = masm->isolate()->factory();
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(masm, edx, eax, ebx, ecx, ¬in, &slow);
+ __ mov(eax, mapped_location);
+ __ Ret();
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in ebx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, eax, ebx, ecx, &slow);
+ __ cmp(unmapped_location, factory->the_hole_value());
+ __ j(equal, &slow);
+ __ mov(eax, unmapped_location);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, edi, ¬in, &slow);
+ __ mov(mapped_location, eax);
+ __ lea(ecx, mapped_location);
+ __ mov(edx, eax);
+ __ RecordWrite(ebx, ecx, edx);
+ __ Ret();
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in ebx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, ecx, ebx, edi, &slow);
+ __ mov(unmapped_location, eax);
+ __ lea(edi, unmapped_location);
+ __ mov(edx, eax);
+ __ RecordWrite(ebx, edi, edx);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
@@ -713,8 +737,7 @@
Label slow, fast, array, extra;
// Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow);
+ __ JumpIfSmi(edx, &slow);
// Get the map from the receiver.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
@@ -723,13 +746,16 @@
1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow);
// Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow);
+ __ JumpIfNotSmi(ecx, &slow);
__ CmpInstanceType(edi, JS_ARRAY_TYPE);
__ j(equal, &array);
- // Check that the object is some kind of JS object.
- __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
+ // Check that the object is some kind of JSObject.
+ __ CmpInstanceType(edi, FIRST_JS_RECEIVER_TYPE);
__ j(below, &slow);
+ __ CmpInstanceType(edi, JS_PROXY_TYPE);
+ __ j(equal, &slow);
+ __ CmpInstanceType(edi, JS_FUNCTION_PROXY_TYPE);
+ __ j(equal, &slow);
// Object case: Check key against length in the elements array.
// eax: value
@@ -820,8 +846,7 @@
// to probe.
//
// Check for number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &number);
+ __ JumpIfSmi(edx, &number);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ebx);
__ j(not_equal, &non_number);
__ bind(&number);
@@ -868,8 +893,7 @@
// -----------------------------------
// Check that the result is not a smi.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(edi, miss);
// Check that the value is a JavaScript function, fetching its map into eax.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
@@ -950,8 +974,7 @@
if (id == IC::kCallIC_Miss) {
Label invoke, global;
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &invoke, Label::kNear);
+ __ JumpIfSmi(edx, &invoke, Label::kNear);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
@@ -1044,8 +1067,7 @@
Label index_smi, index_string;
// Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &check_string);
+ __ JumpIfNotSmi(ecx, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from
@@ -1078,8 +1100,8 @@
__ SmiUntag(ebx);
// ebx: untagged index
// Receiver in edx will be clobbered, need to reload it on miss.
- GenerateNumberDictionaryLoad(
- masm, &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
+ __ LoadFromNumberDictionary(
+ &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
__ jmp(&do_call);
@@ -1145,6 +1167,35 @@
}
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+ Label slow, notin;
+ Factory* factory = masm->isolate()->factory();
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, ¬in, &slow);
+ __ mov(edi, mapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow);
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in ebx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
+ __ cmp(unmapped_location, factory->the_hole_value());
+ __ j(equal, &slow);
+ __ mov(edi, unmapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow);
+ __ bind(&slow);
+ GenerateMiss(masm, argc);
+}
+
+
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- ecx : name
@@ -1156,8 +1207,7 @@
// Check if the name is a string.
Label miss;
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(ecx, &miss);
Condition cond = masm->IsObjectStringType(ecx, eax, eax);
__ j(NegateCondition(cond), &miss);
GenerateCallNormal(masm, argc);
@@ -1342,8 +1392,7 @@
Register scratch = ebx;
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(receiver, &miss);
// Check that the object is a JS array.
__ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
@@ -1357,8 +1406,7 @@
__ j(not_equal, &miss);
// Check that value is a smi.
- __ test(value, Immediate(kSmiTagMask));
- __ j(not_zero, &miss);
+ __ JumpIfNotSmi(value, &miss);
// Prepare tail call to StoreIC_ArrayLength.
__ pop(scratch);
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 3e95867..982eddb 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -128,11 +128,11 @@
}
#endif
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). ecx is zero for method calls and non-zero for function
- // calls.
- if (info_->is_strict_mode()) {
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). ecx is zero for method calls and non-zero for
+ // function calls.
+ if (info_->is_strict_mode() || info_->is_native()) {
Label ok;
__ test(ecx, Operand(ecx));
__ j(zero, &ok, Label::kNear);
@@ -184,7 +184,7 @@
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
// Context is returned in both eax and esi. It replaces the context
@@ -255,11 +255,20 @@
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- code->Generate();
- __ jmp(code->exit());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+ LDeferredCode* code = deferred_[i];
+ __ bind(code->entry());
+ code->Generate();
+ __ jmp(code->exit());
+ }
+
+    // Pad code to ensure that the last piece of deferred code has
+ // room for lazy bailout.
+ while ((masm()->pc_offset() - LastSafepointEnd())
+ < Deoptimizer::patch_size()) {
+ __ nop();
+ }
}
// Deferred code is the last part of the instruction sequence. Mark
@@ -428,15 +437,11 @@
void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
- ContextMode context_mode,
SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- if (context_mode == RESTORE_CONTEXT) {
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
__ call(code, mode);
RegisterLazyDeoptimization(instr, safepoint_mode);
@@ -452,24 +457,19 @@
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
- LInstruction* instr,
- ContextMode context_mode) {
- CallCodeGeneric(code, mode, instr, context_mode, RECORD_SIMPLE_SAFEPOINT);
+ LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}
void LCodeGen::CallRuntime(const Runtime::Function* fun,
int argc,
- LInstruction* instr,
- ContextMode context_mode) {
+ LInstruction* instr) {
ASSERT(instr != NULL);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- if (context_mode == RESTORE_CONTEXT) {
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
__ CallRuntime(fun, argc);
RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
@@ -478,8 +478,18 @@
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr) {
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ LInstruction* instr,
+ LOperand* context) {
+ ASSERT(context->IsRegister() || context->IsStackSlot());
+ if (context->IsRegister()) {
+ if (!ToRegister(context).is(esi)) {
+ __ mov(esi, ToRegister(context));
+ }
+ } else {
+ // Context is stack slot.
+ __ mov(esi, ToOperand(context));
+ }
+
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
@@ -693,7 +703,7 @@
void LCodeGen::RecordPosition(int position) {
- if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+ if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
}
@@ -748,38 +758,38 @@
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::RegExpExec: {
RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::NumberToString: {
NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringAdd: {
StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -1215,6 +1225,21 @@
}
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+
+ // Load map into |result|.
+ __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ mov(result, FieldOperand(result, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(result, Map::kElementsKindMask);
+ __ shr(result, Map::kElementsKindShift);
+}
+
+
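
The new DoElementsKind is a plain mask-and-shift decode of the map's second bit field. In sketch form, with illustrative stand-ins for Map::kElementsKindMask and Map::kElementsKindShift (the real layout is version-specific):

    #include <stdint.h>

    static const int kElementsKindShiftSketch = 3;               // assumed
    static const uint8_t kElementsKindMaskSketch =
        0x7 << kElementsKindShiftSketch;                         // assumed

    static int DecodeElementsKindSketch(uint8_t bit_field2) {
      return (bit_field2 & kElementsKindMaskSketch) >> kElementsKindShiftSketch;
    }
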
void LCodeGen::DoValueOf(LValueOf* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -1222,8 +1247,7 @@
ASSERT(input.is(result));
Label done;
// If the object is a smi return the object.
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, &done, Label::kNear);
+ __ JumpIfSmi(input, &done, Label::kNear);
// If the object is not a value type, return the object.
__ CmpObjectType(input, JS_VALUE_TYPE, map);
@@ -1242,8 +1266,9 @@
void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToOperand(instr->InputAt(0)));
- CallRuntime(Runtime::kThrow, 1, instr, RESTORE_CONTEXT);
+ __ push(ToOperand(instr->value()));
+ ASSERT(ToRegister(instr->context()).is(esi));
+ CallRuntime(Runtime::kThrow, 1, instr);
if (FLAG_debug_code) {
Comment("Unreachable code.");
@@ -1313,12 +1338,13 @@
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(edx));
- ASSERT(ToRegister(instr->InputAt(1)).is(eax));
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->left()).is(edx));
+ ASSERT(ToRegister(instr->right()).is(eax));
ASSERT(ToRegister(instr->result()).is(eax));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
@@ -1354,7 +1380,7 @@
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Representation r = instr->hydrogen()->representation();
+ Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0));
__ test(reg, Operand(reg));
@@ -1367,7 +1393,7 @@
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
- if (instr->hydrogen()->type().IsBoolean()) {
+ if (instr->hydrogen()->value()->type().IsBoolean()) {
__ cmp(reg, factory()->true_value());
EmitBranch(true_block, false_block, equal);
} else {
@@ -1382,8 +1408,7 @@
__ j(equal, false_label);
__ test(reg, Operand(reg));
__ j(equal, false_label);
- __ test(reg, Immediate(kSmiTagMask));
- __ j(zero, true_label);
+ __ JumpIfSmi(reg, true_label);
// Test for double values. Zero is false.
Label call_stub;
@@ -1399,7 +1424,7 @@
// The conversion stub doesn't cause garbage collections so it's
// safe to not record a safepoint after the call.
__ bind(&call_stub);
- ToBooleanStub stub;
+ ToBooleanStub stub(eax);
__ pushad();
__ push(reg);
__ CallStub(&stub);
@@ -1411,45 +1436,17 @@
}
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+void LCodeGen::EmitGoto(int block) {
block = chunk_->LookupDestination(block);
int next_block = GetNextEmittedBlock(current_block_);
if (block != next_block) {
- // Perform stack overflow check if this goto needs it before jumping.
- if (deferred_stack_check != NULL) {
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, chunk_->GetAssemblyLabel(block));
- __ jmp(deferred_stack_check->entry());
- deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
- } else {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
+ __ jmp(chunk_->GetAssemblyLabel(block));
}
}
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- PushSafepointRegistersScope scope(this);
- CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
-}
-
void LCodeGen::DoGoto(LGoto* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- private:
- LGoto* instr_;
- };
-
- DeferredStackCheck* deferred = NULL;
- if (instr->include_stack_check()) {
- deferred = new DeferredStackCheck(this, instr);
- }
- EmitGoto(instr->block_id(), deferred);
+ EmitGoto(instr->block_id());
}
@@ -1490,32 +1487,6 @@
}
-void LCodeGen::DoCmpID(LCmpID* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- LOperand* result = instr->result();
-
- Label unordered;
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the unordered case, which produces a false value.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, &unordered, Label::kNear);
- } else {
- EmitCmpI(left, right);
- }
-
- Label done;
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- __ mov(ToRegister(result), factory()->true_value());
- __ j(cc, &done, Label::kNear);
-
- __ bind(&unordered);
- __ mov(ToRegister(result), factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
@@ -1536,23 +1507,9 @@
}
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- __ cmp(left, Operand(right));
- __ mov(result, factory()->true_value());
- Label done;
- __ j(equal, &done, Label::kNear);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
+ Operand right = ToOperand(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1561,69 +1518,16 @@
}
-void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- Label done;
- __ cmp(left, Operand(right));
- __ mov(result, factory()->false_value());
- __ j(not_equal, &done, Label::kNear);
- __ mov(result, factory()->true_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
- __ cmp(left, Operand(right));
+ __ cmp(left, instr->hydrogen()->right());
EmitBranch(true_block, false_block, equal);
}
-void LCodeGen::DoIsNull(LIsNull* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- // TODO(fsc): If the expression is known to be a smi, then it's
- // definitely not null. Materialize false.
-
- __ cmp(reg, factory()->null_value());
- if (instr->is_strict()) {
- __ mov(result, factory()->true_value());
- Label done;
- __ j(equal, &done, Label::kNear);
- __ mov(result, factory()->false_value());
- __ bind(&done);
- } else {
- Label true_value, false_value, done;
- __ j(equal, &true_value, Label::kNear);
- __ cmp(reg, factory()->undefined_value());
- __ j(equal, &true_value, Label::kNear);
- __ test(reg, Immediate(kSmiTagMask));
- __ j(zero, &false_value, Label::kNear);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = result;
- __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ test(scratch, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &true_value, Label::kNear);
- __ bind(&false_value);
- __ mov(result, factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ mov(result, factory()->true_value());
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
@@ -1642,8 +1546,7 @@
__ j(equal, true_label);
__ cmp(reg, factory()->undefined_value());
__ j(equal, true_label);
- __ test(reg, Immediate(kSmiTagMask));
- __ j(zero, false_label);
+ __ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = ToRegister(instr->TempAt(0));
@@ -1657,83 +1560,42 @@
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object) {
- ASSERT(!input.is(temp1));
- ASSERT(!input.is(temp2));
- ASSERT(!temp1.is(temp2));
-
- __ test(input, Immediate(kSmiTagMask));
- __ j(equal, is_not_object);
+ __ JumpIfSmi(input, is_not_object);
__ cmp(input, isolate()->factory()->null_value());
__ j(equal, is_object);
__ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
// Undetectable objects behave like undefined.
- __ movzx_b(temp2, FieldOperand(temp1, Map::kBitFieldOffset));
- __ test(temp2, Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
__ j(not_zero, is_not_object);
- __ movzx_b(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
- __ cmp(temp2, FIRST_JS_OBJECT_TYPE);
+ __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
+ __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ j(below, is_not_object);
- __ cmp(temp2, LAST_JS_OBJECT_TYPE);
+ __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
return below_equal;
}
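
EmitIsObject now gets by with a single temp because both remaining tests run on the map it loads: the undetectable-bit test and the noncallable instance-type range check. The predicate it computes, sketched with illustrative bounds standing in for FIRST_/LAST_NONCALLABLE_SPEC_OBJECT_TYPE:

    static bool IsObjectSketch(bool is_smi, bool is_null, bool is_undetectable,
                               int instance_type, int first_noncallable,
                               int last_noncallable) {
      if (is_smi) return false;
      if (is_null) return true;           // null counts as an object here
      if (is_undetectable) return false;  // behaves like undefined
      return instance_type >= first_noncallable &&
             instance_type <= last_noncallable;
    }
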
-void LCodeGen::DoIsObject(LIsObject* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->TempAt(0));
- Label is_false, is_true, done;
-
- Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
- __ j(true_cond, &is_true);
-
- __ bind(&is_false);
- __ mov(result, factory()->false_value());
- __ jmp(&done);
-
- __ bind(&is_true);
- __ mov(result, factory()->true_value());
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
+ Condition true_cond = EmitIsObject(reg, temp, false_label, true_label);
EmitBranch(true_block, false_block, true_cond);
}
-void LCodeGen::DoIsSmi(LIsSmi* instr) {
- Operand input = ToOperand(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ test(input, Immediate(kSmiTagMask));
- __ mov(result, factory()->true_value());
- Label done;
- __ j(zero, &done, Label::kNear);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
Operand input = ToOperand(instr->InputAt(0));
@@ -1745,27 +1607,6 @@
}
-void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Label false_label, done;
- STATIC_ASSERT(kSmiTag == 0);
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, &false_label, Label::kNear);
- __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
- __ test_b(FieldOperand(result, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &false_label, Label::kNear);
- __ mov(result, factory()->true_value());
- __ jmp(&done);
- __ bind(&false_label);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1774,8 +1615,7 @@
int false_block = chunk_->LookupDestination(instr->false_block_id());
STATIC_ASSERT(kSmiTag == 0);
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, chunk_->GetAssemblyLabel(false_block));
+ __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(temp, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
@@ -1783,7 +1623,7 @@
}
-static InstanceType TestType(HHasInstanceType* instr) {
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
@@ -1792,7 +1632,7 @@
}
-static Condition BranchCondition(HHasInstanceType* instr) {
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == to) return equal;
@@ -1803,25 +1643,6 @@
}
-void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ test(input, Immediate(kSmiTagMask));
- Label done, is_false;
- __ j(zero, &is_false, Label::kNear);
- __ CmpObjectType(input, TestType(instr->hydrogen()), result);
- __ j(NegateCondition(BranchCondition(instr->hydrogen())),
- &is_false, Label::kNear);
- __ mov(result, factory()->true_value());
- __ jmp(&done, Label::kNear);
- __ bind(&is_false);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1831,8 +1652,7 @@
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, false_label);
+ __ JumpIfSmi(input, false_label);
__ CmpObjectType(input, TestType(instr->hydrogen()), temp);
EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
@@ -1852,21 +1672,6 @@
}
-void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ mov(result, factory()->true_value());
- __ test(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- Label done;
- __ j(zero, &done, Label::kNear);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
@@ -1890,28 +1695,28 @@
Register temp2) {
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, is_false);
- __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
+ __ JumpIfSmi(input, is_false);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
__ j(below, is_false);
// Map is now in temp.
// Functions have class 'Function'.
- __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
+ __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ j(equal, is_true);
+ __ j(above_equal, is_true);
} else {
- __ j(equal, is_false);
+ __ j(above_equal, is_false);
}
// Check if the constructor in the map is a function.
__ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
@@ -1937,29 +1742,6 @@
}
-void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Register temp = ToRegister(instr->TempAt(0));
- Handle<String> class_name = instr->hydrogen()->class_name();
- Label done;
- Label is_true, is_false;
-
- EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);
-
- __ j(not_equal, &is_false, Label::kNear);
-
- __ bind(&is_true);
- __ mov(result, factory()->true_value());
- __ jmp(&done, Label::kNear);
-
- __ bind(&is_false);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1998,7 +1780,7 @@
// Object and function are in fixed registers defined by the stub.
ASSERT(ToRegister(instr->context()).is(esi));
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
__ test(eax, Operand(eax));
@@ -2011,18 +1793,6 @@
}
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
- __ test(eax, Operand(eax));
- EmitBranch(true_block, false_block, zero);
-}
-
-
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal: public LDeferredCode {
public:
@@ -2044,12 +1814,11 @@
deferred = new DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->InputAt(1));
Register temp = ToRegister(instr->TempAt(0));
// A Smi is not an instance of anything.
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, &false_result);
+ __ JumpIfSmi(object, &false_result);
  // This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
@@ -2107,14 +1876,13 @@
Register temp = ToRegister(instr->TempAt(0));
ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
__ mov(InstanceofStub::right(), Immediate(instr->function()));
- static const int kAdditionalDelta = 16;
+ static const int kAdditionalDelta = 13;
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
__ mov(temp, Immediate(delta));
__ StoreToSafepointRegisterSlot(temp, temp);
CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
- RESTORE_CONTEXT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
// Put the result value into the eax slot and restore all registers.
__ StoreToSafepointRegisterSlot(eax, eax);
@@ -2145,7 +1913,7 @@
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
@@ -2162,25 +1930,6 @@
}
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
-
- // The compare stub expects compare condition and the input operands
- // reversed for GT and LTE.
- Condition condition = ComputeCompareCondition(op);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
- __ test(eax, Operand(eax));
- EmitBranch(true_block, false_block, condition);
-}
-
-
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace) {
// Preserve the return value on the stack and rely on the runtime call
@@ -2216,7 +1965,7 @@
RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, mode, instr);
}
@@ -2247,7 +1996,7 @@
Handle<Code> ic = instr->strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}
@@ -2319,7 +2068,7 @@
ASSERT(instr->hydrogen()->need_generic());
__ mov(ecx, name);
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
Label done;
for (int i = 0; i < map_count - 1; ++i) {
@@ -2341,7 +2090,7 @@
__ bind(&generic);
__ mov(ecx, name);
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
DeoptimizeIf(not_equal, instr->environment());
EmitLoadFieldOrConstantFunction(result, object, map, name);
@@ -2358,7 +2107,7 @@
__ mov(ecx, instr->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2409,7 +2158,7 @@
Register input = ToRegister(instr->InputAt(0));
__ mov(result, FieldOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
- Label done;
+ Label done, ok, fail;
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
Immediate(factory()->fixed_array_map()));
__ j(equal, &done, Label::kNear);
@@ -2419,11 +2168,19 @@
Register temp((result.is(eax)) ? ebx : eax);
__ push(temp);
__ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ sub(Operand(temp), Immediate(FIRST_EXTERNAL_ARRAY_TYPE));
- __ cmp(Operand(temp), Immediate(kExternalArrayTypeCount));
+ __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
+ __ and_(temp, Map::kElementsKindMask);
+ __ shr(temp, Map::kElementsKindShift);
+ __ cmp(temp, JSObject::FAST_ELEMENTS);
+ __ j(equal, &ok, Label::kNear);
+ __ cmp(temp, JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ __ j(less, &fail, Label::kNear);
+ __ cmp(temp, JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ __ j(less_equal, &ok, Label::kNear);
+ __ bind(&fail);
+ __ Abort("Check for fast or external elements failed.");
+ __ bind(&ok);
__ pop(temp);
- __ Check(below, "Check for fast elements or pixel array failed.");
__ bind(&done);
}
}
@@ -2473,55 +2230,80 @@
}
-Operand LCodeGen::BuildExternalArrayOperand(LOperand* external_pointer,
- LOperand* key,
- ExternalArrayType array_type) {
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+ LLoadKeyedFastDoubleElement* instr) {
+ XMMRegister result = ToDoubleRegister(instr->result());
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ sizeof(kHoleNanLower32);
+ Operand hole_check_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(),
+ JSObject::FAST_DOUBLE_ELEMENTS,
+ offset);
+ __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ Operand double_load_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(), JSObject::FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ movdbl(result, double_load_operand);
+}
+
+
+Operand LCodeGen::BuildFastArrayOperand(
+ LOperand* external_pointer,
+ LOperand* key,
+ JSObject::ElementsKind elements_kind,
+ uint32_t offset) {
Register external_pointer_reg = ToRegister(external_pointer);
- int shift_size = ExternalArrayTypeToShiftSize(array_type);
+ int shift_size = ElementsKindToShiftSize(elements_kind);
if (key->IsConstantOperand()) {
int constant_value = ToInteger32(LConstantOperand::cast(key));
if (constant_value & 0xF0000000) {
Abort("array index constant value too big");
}
- return Operand(external_pointer_reg, constant_value * (1 << shift_size));
+ return Operand(external_pointer_reg,
+ constant_value * (1 << shift_size) + offset);
} else {
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
- return Operand(external_pointer_reg, ToRegister(key), scale_factor, 0);
+ return Operand(external_pointer_reg, ToRegister(key), scale_factor, offset);
}
}
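
BuildFastArrayOperand folds the per-kind element size and a header offset into a single x86 addressing mode. For both the constant-key and register-key paths, the effective address it encodes is the same (a sketch, not V8 API; shift_size is ElementsKindToShiftSize, i.e. log2 of the element size in bytes):

    #include <stdint.h>

    static uintptr_t FastArrayAddressSketch(uintptr_t base, uintptr_t key,
                                            int shift_size, uint32_t offset) {
      return base + (key << shift_size) + offset;
    }
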
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
- ExternalArrayType array_type = instr->array_type();
- Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
- instr->key(), array_type));
- if (array_type == kExternalFloatArray) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Operand operand(BuildFastArrayOperand(instr->external_pointer(),
+ instr->key(), elements_kind, 0));
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
__ cvtss2sd(result, result);
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
__ movdbl(ToDoubleRegister(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
- switch (array_type) {
- case kExternalByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ movsx_b(result, operand);
break;
- case kExternalUnsignedByteArray:
- case kExternalPixelArray:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ movzx_b(result, operand);
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
__ movsx_w(result, operand);
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ movzx_w(result, operand);
break;
- case kExternalIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
__ mov(result, operand);
break;
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ mov(result, operand);
__ test(result, Operand(result));
// TODO(danno): we could be more clever here, perhaps having a special
@@ -2529,8 +2311,12 @@
// happens, and generate code that returns a double rather than int.
DeoptimizeIf(negative, instr->environment());
break;
- case kExternalFloatArray:
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -2544,7 +2330,7 @@
ASSERT(ToRegister(instr->key()).is(eax));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2605,12 +2391,25 @@
ASSERT(function.is(edi)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(eax));
- // TODO(1412): This is not correct if the called function is a
- // strict mode function or a native.
- //
- // If the receiver is null or undefined, we have to pass the global object
- // as a receiver.
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
Label global_object, receiver_ok;
+
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ mov(scratch,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &receiver_ok, Label::kNear);
+
+ // Do not transform the receiver to object for builtins.
+ __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &receiver_ok, Label::kNear);
+
+ // Normal function. Replace undefined or null with global receiver.
__ cmp(receiver, factory()->null_value());
__ j(equal, &global_object, Label::kNear);
__ cmp(receiver, factory()->undefined_value());
@@ -2619,7 +2418,7 @@
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
DeoptimizeIf(equal, instr->environment());
- __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, scratch);
+ __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
DeoptimizeIf(below, instr->environment());
__ jmp(&receiver_ok, Label::kNear);
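
The receiver rewrite now has three outcomes: strict-mode and native (builtin) callees see the receiver unchanged, normal callees get null/undefined replaced by the global receiver, and any other non-object receiver deoptimizes. As a decision sketch (flag names are illustrative, not V8 API):

    enum ReceiverActionSketch { kKeepAsIs, kUseGlobalReceiver, kDeoptimize };

    static ReceiverActionSketch ClassifyReceiverSketch(bool is_strict_mode,
                                                       bool is_native,
                                                       bool is_null_or_undefined,
                                                       bool is_spec_object) {
      if (is_strict_mode || is_native) return kKeepAsIs;
      if (is_null_or_undefined) return kUseGlobalReceiver;
      return is_spec_object ? kKeepAsIs : kDeoptimize;
    }
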
@@ -2679,6 +2478,12 @@
}
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
__ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2688,8 +2493,8 @@
void LCodeGen::DoOuterContext(LOuterContext* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ mov(result, Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ mov(result, FieldOperand(result, JSFunction::kContextOffset));
+ __ mov(result,
+ Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
@@ -2755,7 +2560,7 @@
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
DeoptimizeIf(not_equal, instr->environment());
@@ -2786,7 +2591,8 @@
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
+ instr, instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp.is(eax)) __ mov(tmp, eax);
@@ -2807,7 +2613,7 @@
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
__ test(input_reg, Operand(input_reg));
Label is_positive;
__ j(not_sign, &is_positive);
@@ -2832,12 +2638,12 @@
LUnaryMathOperation* instr_;
};
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ ASSERT(instr->value()->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
XMMRegister scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
__ pand(input_reg, scratch);
@@ -2846,10 +2652,9 @@
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
new DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
// Smi check.
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(not_zero, deferred->entry());
+ __ JumpIfNotSmi(input_reg, deferred->entry());
EmitIntegerMathAbs(instr);
__ bind(deferred->exit());
}
@@ -2859,7 +2664,7 @@
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
@@ -2881,7 +2686,7 @@
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
Label below_half, done;
// xmm_scratch = 0.5
@@ -2926,7 +2731,7 @@
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ sqrtsd(input_reg, input_reg);
}
@@ -2934,7 +2739,7 @@
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ xorps(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
@@ -2971,8 +2776,7 @@
Register right_reg = ToRegister(right);
Label non_smi, call;
- __ test(right_reg, Immediate(kSmiTagMask));
- __ j(not_zero, &non_smi);
+ __ JumpIfNotSmi(right_reg, &non_smi);
__ SmiUntag(right_reg);
__ cvtsi2sd(result_reg, Operand(right_reg));
__ jmp(&call);
@@ -3003,14 +2807,15 @@
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ ASSERT(instr->value()->Equals(instr->result()));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
Label positive, done, zero;
__ xorps(xmm0, xmm0);
__ ucomisd(input_reg, xmm0);
__ j(above, &positive, Label::kNear);
__ j(equal, &zero, Label::kNear);
- ExternalReference nan = ExternalReference::address_of_nan();
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
__ movdbl(input_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
__ bind(&zero);
@@ -3036,7 +2841,7 @@
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -3044,7 +2849,7 @@
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -3104,7 +2909,7 @@
int arity = instr->arity();
Handle<Code> ic = isolate()->stub_cache()->
ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3117,7 +2922,7 @@
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
__ mov(ecx, instr->name());
- CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, mode, instr);
}
@@ -3127,7 +2932,7 @@
int arity = instr->arity();
CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_IMPLICIT);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Drop(1);
}
@@ -3141,7 +2946,7 @@
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
__ mov(ecx, instr->name());
- CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, mode, instr);
}
@@ -3159,12 +2964,12 @@
Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
__ Set(eax, Immediate(instr->arity()));
- CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr, CONTEXT_ADJUSTED);
+ CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr, RESTORE_CONTEXT);
+ CallRuntime(instr->function(), instr->arity(), instr);
}
@@ -3207,7 +3012,7 @@
Handle<Code> ic = instr->strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3219,32 +3024,36 @@
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
LStoreKeyedSpecializedArrayElement* instr) {
- ExternalArrayType array_type = instr->array_type();
- Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
- instr->key(), array_type));
- if (array_type == kExternalFloatArray) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Operand operand(BuildFastArrayOperand(instr->external_pointer(),
+ instr->key(), elements_kind, 0));
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
__ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
__ movss(operand, xmm0);
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
__ movdbl(operand, ToDoubleRegister(instr->value()));
} else {
Register value = ToRegister(instr->value());
- switch (array_type) {
- case kExternalPixelArray:
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ mov_b(operand, value);
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ mov_w(operand, value);
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ mov(operand, value);
break;
- case kExternalFloatArray:
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3284,6 +3093,26 @@
}
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+ LStoreKeyedFastDoubleElement* instr) {
+ XMMRegister value = ToDoubleRegister(instr->value());
+ Label have_value;
+
+ __ ucomisd(value, value);
+  __ j(parity_odd, &have_value);  // Jump if not NaN.
+
+ ExternalReference canonical_nan_reference =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+ __ bind(&have_value);
+
+ Operand double_store_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(), JSObject::FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ movdbl(double_store_operand, value);
+}
+
+
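
ucomisd compares the value with itself and raises the parity flag only for unordered (NaN) inputs, so the parity_odd jump skips canonicalization for ordinary doubles, and every NaN is rewritten to one non-hole bit pattern before it can land in a FixedDoubleArray. In sketch form (the concrete pattern here is illustrative; V8 loads its own canonical_non_hole_nan constant):

    #include <limits>

    static double CanonicalizeForStoreSketch(double value) {
      if (value != value) {  // true exactly for NaN, like the parity check above
        return std::numeric_limits<double>::quiet_NaN();
      }
      return value;
    }
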
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
@@ -3293,7 +3122,7 @@
Handle<Code> ic = instr->strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3424,7 +3253,8 @@
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
+ instr, instr->context());
if (FLAG_debug_code) {
__ AbortIfNotSmi(eax);
}
@@ -3475,7 +3305,7 @@
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
@@ -3499,7 +3329,7 @@
__ push(ToOperand(instr->right()));
}
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -3560,8 +3390,15 @@
// register is stored, as this register is in the pointer map, but contains an
// integer value.
__ StoreToSafepointRegisterSlot(reg, Immediate(0));
-
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
if (!reg.is(eax)) __ mov(reg, eax);
// Done. Put the value in xmm0 into the value of the allocated heap
@@ -3605,7 +3442,15 @@
__ Set(reg, Immediate(0));
PushSafepointRegistersScope scope(this);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kNoDeoptimizationIndex);
__ StoreToSafepointRegisterSlot(reg, eax);
}
@@ -3636,8 +3481,7 @@
Label load_smi, done;
// Smi check.
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(zero, &load_smi, Label::kNear);
+ __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3652,7 +3496,8 @@
DeoptimizeIf(not_equal, env);
// Convert undefined to NaN.
- ExternalReference nan = ExternalReference::address_of_nan();
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
__ movdbl(result_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
@@ -3771,8 +3616,7 @@
DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
// Smi check.
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(not_zero, deferred->entry());
+ __ JumpIfNotSmi(input_reg, deferred->entry());
// Smi to int32 conversion
__ SmiUntag(input_reg); // Untag smi.
@@ -3915,14 +3759,14 @@
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->InputAt(0);
- __ test(ToRegister(input), Immediate(kSmiTagMask));
+ __ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
LOperand* input = instr->InputAt(0);
- __ test(ToRegister(input), Immediate(kSmiTagMask));
+ __ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
}
@@ -3974,8 +3818,8 @@
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
ASSERT(instr->InputAt(0)->IsRegister());
- Register reg = ToRegister(instr->InputAt(0));
- __ cmp(reg, instr->hydrogen()->target());
+ Operand operand = ToOperand(instr->InputAt(0));
+ __ cmp(operand, instr->hydrogen()->target());
DeoptimizeIf(not_equal, instr->environment());
}
@@ -4077,6 +3921,7 @@
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
// Setup the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
@@ -4090,16 +3935,16 @@
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, RESTORE_CONTEXT);
+ CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, RESTORE_CONTEXT);
+ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4121,12 +3966,9 @@
// Pick the right runtime function to call.
if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr, CONTEXT_ADJUSTED);
+ CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else {
- CallRuntime(Runtime::kCreateObjectLiteralShallow,
- 4,
- instr,
- CONTEXT_ADJUSTED);
+ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
}
}
@@ -4134,17 +3976,19 @@
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
ASSERT(ToRegister(instr->InputAt(0)).is(eax));
__ push(eax);
- CallRuntime(Runtime::kToFastProperties, 1, instr, CONTEXT_ADJUSTED);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
}
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
Label materialized;
// Registers will be used as follows:
// edi = JS function.
// ecx = literals array.
// ebx = regexp literal.
// eax = regexp literal clone.
+ // esi = context.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
int literal_offset = FixedArray::kHeaderSize +
@@ -4159,7 +4003,7 @@
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(instr->hydrogen()->pattern()));
__ push(Immediate(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, RESTORE_CONTEXT);
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
__ mov(ebx, eax);
__ bind(&materialized);
@@ -4171,7 +4015,7 @@
__ bind(&runtime_allocate);
__ push(ebx);
__ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, RESTORE_CONTEXT);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
__ pop(ebx);
__ bind(&allocated);
@@ -4191,6 +4035,7 @@
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
@@ -4199,49 +4044,26 @@
FastNewClosureStub stub(
shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
__ push(Immediate(shared_info));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
__ push(Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(shared_info));
__ push(Immediate(pretenure
? factory()->true_value()
: factory()->false_value()));
- CallRuntime(Runtime::kNewClosure, 3, instr, RESTORE_CONTEXT);
+ CallRuntime(Runtime::kNewClosure, 3, instr);
}
}
void LCodeGen::DoTypeof(LTypeof* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->InputAt(1);
if (input->IsConstantOperand()) {
__ push(ToImmediate(input));
} else {
__ push(ToOperand(input));
}
- CallRuntime(Runtime::kTypeof, 1, instr, RESTORE_CONTEXT);
-}
-
-
-void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label true_label;
- Label false_label;
- Label done;
-
- Condition final_branch_condition = EmitTypeofIs(&true_label,
- &false_label,
- input,
- instr->type_literal());
- __ j(final_branch_condition, &true_label, Label::kNear);
- __ bind(&false_label);
- __ mov(result, factory()->false_value());
- __ jmp(&done, Label::kNear);
-
- __ bind(&true_label);
- __ mov(result, factory()->true_value());
-
- __ bind(&done);
+ CallRuntime(Runtime::kTypeof, 1, instr);
}
@@ -4297,22 +4119,19 @@
final_branch_condition = not_zero;
} else if (type_name->Equals(heap()->function_symbol())) {
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
__ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label);
- // Regular expressions => 'function' (they are callable).
- __ CmpInstanceType(input, JS_REGEXP_TYPE);
- final_branch_condition = equal;
+ __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
+ final_branch_condition = above_equal;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
__ cmp(input, factory()->null_value());
__ j(equal, true_label);
- // Regular expressions => 'function', not 'object'.
- __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, input);
+ __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
__ j(below, false_label);
- __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
- __ j(above_equal, false_label);
+ __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, false_label);
// Check for undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
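// The 'function' branch above now relies on instance-type ordering: all
// callable spec-object types sit at the top of the enum, so one unsigned
// >= compare replaces the two equality tests against JS_FUNCTION_TYPE and
// JS_REGEXP_TYPE. A stand-in sketch with illustrative constants (not V8's
// real numbering):
#include <cstdio>

enum InstanceType { kString = 0, kOddball = 5, kJSObject = 170,
                    kFirstCallable = 180, kJSRegExp = 180, kJSFunction = 181,
                    kLastType = 181 };
static_assert(kLastType == kJSFunction, "callables must end the enum");

static bool TypeofIsFunction(InstanceType t) {
  return t >= kFirstCallable;  // one branch covers every callable type
}

int main() { std::printf("regexp: %d\n", TypeofIsFunction(kJSRegExp)); }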
@@ -4328,24 +4147,6 @@
}
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
- Register result = ToRegister(instr->result());
- Label true_label;
- Label done;
-
- EmitIsConstructCall(result);
- __ j(equal, &true_label, Label::kNear);
-
- __ mov(result, factory()->false_value());
- __ jmp(&done, Label::kNear);
-
- __ bind(&true_label);
- __ mov(result, factory()->true_value());
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -4405,23 +4206,61 @@
SafepointGenerator safepoint_generator(this,
pointers,
env->deoptimization_index());
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- // Perform stack overflow check.
- Label done;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &done, Label::kNear);
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ {
+ PushSafepointRegistersScope scope(this);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RegisterLazyDeoptimization(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ }
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
- __ bind(&done);
+ // The gap code includes the restoring of the safepoint registers.
+ int pc = masm()->pc_offset();
+ safepoints_.SetPcAfterGap(pc);
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ private:
+ LStackCheck* instr_;
+ };
+
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &done, Label::kNear);
+
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(esi));
+ StackCheckStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new DeferredStackCheck(this, instr);
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(below, deferred_stack_check->entry());
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ }
}
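// A sketch of the two stack-check flavors DoStackCheck now emits (shape
// assumed for illustration, not V8's API): at function entry the check calls
// the stub inline, while at a backwards branch it only jumps to an
// out-of-line deferred block, keeping the loop body free of the call.
#include <cstdint>
#include <cstdio>

static uintptr_t g_stack_limit = 0;  // stand-in for the isolate's limit

static void CallStackGuard() { std::puts("deferred: stack guard"); }

static void LoopBackEdgeCheck() {
  char probe;
  // Equivalent of: cmp esp, [stack_limit]; j(below, deferred->entry()).
  if (reinterpret_cast<uintptr_t>(&probe) < g_stack_limit) {
    CallStackGuard();  // runs only on overflow or a pending interrupt
  }
}

int main() { LoopBackEdgeCheck(); }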
@@ -4466,7 +4305,6 @@
SafepointGenerator safepoint_generator(this,
pointers,
env->deoptimization_index());
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 1a98d8d..c568bef 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -97,7 +97,7 @@
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LGoto* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
@@ -166,11 +166,6 @@
bool GenerateRelocPadding();
bool GenerateSafepointTable();
- enum ContextMode {
- RESTORE_CONTEXT,
- CONTEXT_ADJUSTED
- };
-
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
@@ -178,31 +173,28 @@
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
- LInstruction* instr,
- ContextMode context_mode);
+ LInstruction* instr);
void CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
- ContextMode context_mode,
SafepointMode safepoint_mode);
void CallRuntime(const Runtime::Function* fun,
int argc,
- LInstruction* instr,
- ContextMode context_mode);
+ LInstruction* instr);
void CallRuntime(Runtime::FunctionId id,
int argc,
- LInstruction* instr,
- ContextMode context_mode) {
+ LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, argc, instr, context_mode);
+ CallRuntime(function, argc, instr);
}
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ LOperand* context);
// Generate a direct call to a known function. Expects the function
// to be in edi.
@@ -230,9 +222,10 @@
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
int ToInteger32(LConstantOperand* op) const;
- Operand BuildExternalArrayOperand(LOperand* external_pointer,
- LOperand* key,
- ExternalArrayType array_type);
+ Operand BuildFastArrayOperand(LOperand* external_pointer,
+ LOperand* key,
+ JSObject::ElementsKind elements_kind,
+ uint32_t offset);
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -256,9 +249,12 @@
int arguments,
int deoptimization_index);
void RecordPosition(int position);
+ int LastSafepointEnd() {
+ return static_cast<int>(safepoints_.GetPcAfterGap());
+ }
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+ void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
@@ -277,7 +273,6 @@
// true and false label should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object);
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index 9d91c61..fcf1f91 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -305,8 +305,13 @@
} else if (source->IsConstantOperand()) {
ASSERT(destination->IsRegister() || destination->IsStackSlot());
Immediate src = cgen_->ToImmediate(source);
- Operand dst = cgen_->ToOperand(destination);
- __ mov(dst, src);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ __ Set(dst, src);
+ } else {
+ Operand dst = cgen_->ToOperand(destination);
+ __ Set(dst, src);
+ }
} else if (source->IsDoubleRegister()) {
XMMRegister src = cgen_->ToDoubleRegister(source);
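// Why the gap resolver now splits on the destination type: with a Register
// destination the macro assembler's Set() can emit the short xor reg,reg
// form for zero, which is impossible for a memory Operand. Rough IA-32
// encoding sizes (illustration only, not authoritative):
#include <cstdio>

static int RegisterSetBytes(int imm) {
  if (imm == 0) return 2;  // xor r32, r32 (note: clobbers EFLAGS)
  return 5;                // mov r32, imm32
}

static int MemorySetBytes() {
  return 6;                // mov [mem], imm32: opcode + ModRM + imm32 or more
}

int main() {
  std::printf("reg zero: %dB, mem: %dB\n", RegisterSetBytes(0), MemorySetBytes());
}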
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 91606ce..f0615ef 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -78,13 +78,13 @@
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); it.HasNext(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Next());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
ASSERT(operand->HasFixedPolicy() ||
operand->IsUsedAtStart());
}
- for (TempIterator it(this); it.HasNext(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Next());
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
}
}
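// The loops above moved from HasNext()/Next() to Done()/Current()/Advance(),
// separating "test", "read" and "step" so Current() can be read repeatedly
// without consuming the element. A minimal stand-in of that protocol:
#include <cstdio>
#include <vector>

class IntIterator {
 public:
  explicit IntIterator(const std::vector<int>& v) : v_(v), i_(0) {}
  bool Done() const { return i_ >= v_.size(); }
  int Current() const { return v_[i_]; }  // no side effects, unlike Next()
  void Advance() { ++i_; }
 private:
  const std::vector<int>& v_;
  size_t i_;
};

int main() {
  std::vector<int> xs = {1, 2, 3};
  for (IntIterator it(xs); !it.Done(); it.Advance())
    std::printf("%d ", it.Current());
}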
@@ -113,21 +113,18 @@
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- inputs_.PrintOperandsTo(stream);
+ for (int i = 0; i < inputs_.length(); i++) {
+ if (i > 0) stream->Add(" ");
+ inputs_[i]->PrintTo(stream);
+ }
}
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
- results_.PrintOperandsTo(stream);
-}
-
-
-template<typename T, int N>
-void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
- for (int i = 0; i < N; i++) {
+ for (int i = 0; i < results_.length(); i++) {
if (i > 0) stream->Add(" ");
- elems_[i]->PrintTo(stream);
+ results_[i]->PrintTo(stream);
}
}
@@ -270,12 +267,6 @@
}
-void LTypeofIs::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
-}
-
-
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
InputAt(0)->PrintTo(stream);
@@ -347,13 +338,6 @@
}
-void LClassOfTest::PrintDataTo(StringStream* stream) {
- stream->Add("= class_of_test(");
- InputAt(0)->PrintTo(stream);
- stream->Add(", \"%o\")", *hydrogen()->class_name());
-}
-
-
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
@@ -394,8 +378,7 @@
LLabel* label = LLabel::cast(first_instr);
if (last_instr->IsGoto()) {
LGoto* goto_instr = LGoto::cast(last_instr);
- if (!goto_instr->include_stack_check() &&
- label->IsRedundant() &&
+ if (label->IsRedundant() &&
!label->is_loop_header()) {
bool can_eliminate = true;
for (int i = first + 1; i < last && can_eliminate; ++i) {
@@ -446,6 +429,15 @@
}
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
@@ -714,7 +706,9 @@
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- instr->set_environment(CreateEnvironment(hydrogen_env));
+ int argument_index_accumulator = 0;
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator));
return instr;
}
@@ -804,6 +798,11 @@
}
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+ return AssignEnvironment(new LDeoptimize);
+}
+
+
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
return AssignEnvironment(new LDeoptimize);
}
@@ -823,9 +822,10 @@
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new LArithmeticT(op, left, right);
+ LArithmeticT* result = new LArithmeticT(op, context, left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
}
@@ -837,18 +837,19 @@
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new LArithmeticT(op, left, right);
+ LArithmeticT* result = new LArithmeticT(op, context, left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->OperandAt(0)->representation().IsInteger32());
- ASSERT(instr->OperandAt(1)->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_value = instr->OperandAt(1);
+ HValue* right_value = instr->right();
LOperand* right = NULL;
int constant_value = 0;
if (right_value->IsConstant()) {
@@ -902,12 +903,15 @@
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* left_operand = UseFixed(left, edx);
LOperand* right_operand = UseFixed(right, eax);
- LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
+ LArithmeticT* result =
+ new LArithmeticT(op, context, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, eax), instr);
}
+
void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
ASSERT(is_building());
current_block_ = block;
@@ -984,28 +988,20 @@
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsTest() && !instr->IsGoto()) {
- ASSERT(instr->IsControl());
- HTest* test = HTest::cast(current);
- instr->set_hydrogen_value(test->value());
- HBasicBlock* first = test->FirstSuccessor();
- HBasicBlock* second = test->SecondSuccessor();
- ASSERT(first != NULL && second != NULL);
- instr->SetBranchTargets(first->block_id(), second->block_id());
- } else {
- instr->set_hydrogen_value(current);
- }
-
+ instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
}
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+ HEnvironment* hydrogen_env,
+ int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@@ -1015,7 +1011,6 @@
argument_count_,
value_count,
outer);
- int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1024,7 +1019,7 @@
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument(argument_index++);
+ op = new LArgument((*argument_index_accumulator)++);
} else {
op = UseAny(value);
}
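// What the argument_index_accumulator change fixes: a single counter is now
// threaded through the recursion over outer environments, so pushed arguments
// get globally unique indices instead of every inlined frame restarting at
// zero. A stand-in sketch (types hypothetical):
#include <cstdio>
#include <vector>

struct Env {
  Env* outer;
  int pushed_args;             // arguments pushed by this frame
  std::vector<int> arg_index;  // filled in below
};

static void Number(Env* env, int* counter) {
  if (env == nullptr) return;
  Number(env->outer, counter);               // outer frame is numbered first
  for (int i = 0; i < env->pushed_args; ++i)
    env->arg_index.push_back((*counter)++);  // shared counter, never reset
}

int main() {
  Env outer{nullptr, 2, {}};
  Env inner{&outer, 2, {}};
  int counter = 0;
  Number(&inner, &counter);
  std::printf("inner args start at %d\n", inner.arg_index[0]);  // prints 2
}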
@@ -1036,112 +1031,21 @@
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- LGoto* result = new LGoto(instr->FirstSuccessor()->block_id(),
- instr->include_stack_check());
- return (instr->include_stack_check())
- ? AssignPointerMap(result)
- : result;
+ return new LGoto(instr->FirstSuccessor()->block_id());
}
-LInstruction* LChunkBuilder::DoTest(HTest* instr) {
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
- if (!v->EmitAtUses()) {
- return new LBranch(UseRegisterAtStart(v));
- } else if (v->IsClassOfTest()) {
- HClassOfTest* compare = HClassOfTest::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister(),
- TempRegister());
- } else if (v->IsCompare()) {
- HCompare* compare = HCompare::cast(v);
- Token::Value op = compare->token();
- HValue* left = compare->left();
- HValue* right = compare->right();
- Representation r = compare->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseOrConstantAtStart(right));
- } else if (r.IsDouble()) {
- ASSERT(left->representation().IsDouble());
- ASSERT(right->representation().IsDouble());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else {
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- bool reversed = op == Token::GT || op == Token::LTE;
- LOperand* left_operand = UseFixed(left, reversed ? eax : edx);
- LOperand* right_operand = UseFixed(right, reversed ? edx : eax);
- LCmpTAndBranch* result = new LCmpTAndBranch(left_operand, right_operand);
- return MarkAsCall(result, instr);
- }
- } else if (v->IsIsSmi()) {
- HIsSmi* compare = HIsSmi::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsSmiAndBranch(Use(compare->value()));
- } else if (v->IsIsUndetectable()) {
- HIsUndetectable* compare = HIsUndetectable::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
- TempRegister());
- } else if (v->IsHasInstanceType()) {
- HHasInstanceType* compare = HHasInstanceType::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
- TempRegister());
- } else if (v->IsHasCachedArrayIndex()) {
- HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsIsNull()) {
- HIsNull* compare = HIsNull::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- // We only need a temp register for non-strict compare.
- LOperand* temp = compare->is_strict() ? NULL : TempRegister();
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()), temp);
- } else if (v->IsIsObject()) {
- HIsObject* compare = HIsObject::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- return new LIsObjectAndBranch(UseRegister(compare->value()),
- temp1,
- temp2);
- } else if (v->IsCompareJSObjectEq()) {
- HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
- return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsCompareSymbolEq()) {
- HCompareSymbolEq* compare = HCompareSymbolEq::cast(v);
- return new LCmpSymbolEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsInstanceOf()) {
- HInstanceOf* instance_of = HInstanceOf::cast(v);
- LOperand* left = UseFixed(instance_of->left(), InstanceofStub::left());
- LOperand* right = UseFixed(instance_of->right(), InstanceofStub::right());
- LOperand* context = UseFixed(instance_of->context(), esi);
- LInstanceOfAndBranch* result =
- new LInstanceOfAndBranch(context, left, right);
- return MarkAsCall(result, instr);
- } else if (v->IsTypeofIs()) {
- HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
- } else if (v->IsIsConstructCall()) {
- return new LIsConstructCallAndBranch(TempRegister());
- } else if (v->IsConstant()) {
+ if (v->EmitAtUses()) {
+ ASSERT(v->IsConstant());
+ ASSERT(!v->representation().IsDouble());
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
- } else {
- Abort("Undefined compare before branch");
- return NULL;
}
+ return new LBranch(UseRegisterAtStart(v));
}
@@ -1175,7 +1079,8 @@
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
new LInstanceOfKnownGlobal(
- UseFixed(instr->value(), InstanceofStub::left()),
+ UseFixed(instr->context(), esi),
+ UseFixed(instr->left(), InstanceofStub::left()),
FixedTemp(edi));
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1203,6 +1108,11 @@
}
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
}
@@ -1247,16 +1157,19 @@
if (op == kMathLog) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
+ LOperand* context = UseAny(instr->context()); // Not actually used.
LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new LUnaryMathOperation(input);
+ LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
return DefineSameAsFirst(result);
} else if (op == kMathSin || op == kMathCos) {
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LUnaryMathOperation* result = new LUnaryMathOperation(input);
+ LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new LUnaryMathOperation(input);
+ LOperand* context = UseAny(instr->context()); // Deferred use by MathAbs.
+ LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
switch (op) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1327,7 +1240,8 @@
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallRuntime, eax), instr);
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(DefineFixed(new LCallRuntime(context), eax), instr);
}
@@ -1507,88 +1421,86 @@
}
-LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
Token::Value op = instr->token();
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ bool reversed = (op == Token::GT || op == Token::LTE);
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
+ LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
+ LCmpT* result = new LCmpT(context, left, right);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+ HCompareIDAndBranch* instr) {
Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else if (r.IsDouble()) {
+ return new LCmpIDAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
- LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
- LCmpT* result = new LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
+ return new LCmpIDAndBranch(left, right);
}
}
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
- HCompareJSObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
- return DefineAsRegister(result);
+ LOperand* right = UseAtStart(instr->right());
+ return new LCmpObjectEqAndBranch(left, right);
}
-LInstruction* LChunkBuilder::DoCompareSymbolEq(
- HCompareSymbolEq* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LCmpSymbolEq* result = new LCmpSymbolEq(left, right);
- return DefineAsRegister(result);
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+ HCompareConstantEqAndBranch* instr) {
+ return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+ // We only need a temp register for non-strict compare.
+ LOperand* temp = instr->is_strict() ? NULL : TempRegister();
+ return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsNull(value));
+ LOperand* temp = TempRegister();
+ return new LIsObjectAndBranch(UseRegister(instr->value()), temp);
}
-LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
-
- return DefineAsRegister(new LIsObject(value, TempRegister()));
+ return new LIsSmiAndBranch(Use(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseAtStart(instr->value());
-
- return DefineAsRegister(new LIsSmi(value));
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+ TempRegister());
}
-LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsUndetectable(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LHasInstanceType(value));
+ return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()),
+ TempRegister());
}
@@ -1601,20 +1513,20 @@
}
-LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
- HHasCachedArrayIndex* instr) {
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
-
- return DefineAsRegister(new LHasCachedArrayIndex(value));
+ return new LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseTempRegister(instr->value());
-
- return DefineSameAsFirst(new LClassOfTest(value, TempRegister()));
+ return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ TempRegister(),
+ TempRegister());
}
@@ -1637,6 +1549,12 @@
}
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+ LOperand* object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LElementsKind(object));
+}
+
+
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new LValueOf(object, TempRegister());
@@ -1646,7 +1564,7 @@
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
- Use(instr->length())));
+ UseAtStart(instr->length())));
}
@@ -1658,8 +1576,14 @@
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseFixed(instr->value(), eax);
- return MarkAsCall(new LThrow(value), instr);
+ return MarkAsCall(new LThrow(context, value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
}
@@ -1684,8 +1608,9 @@
LOperand* value = UseRegister(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
if (needs_check) {
+ bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp =
- (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
+ (truncating && CpuFeatures::IsSupported(SSE3))
? NULL
: FixedTemp(xmm1);
LTaggedToI* res = new LTaggedToI(value, xmm_temp);
@@ -1705,8 +1630,8 @@
return AssignPointerMap(Define(result, result_temp));
} else {
ASSERT(to.IsInteger32());
- bool needs_temp = instr->CanTruncateToInt32() &&
- !CpuFeatures::IsSupported(SSE3);
+ bool truncating = instr->CanTruncateToInt32();
+ bool needs_temp = truncating && !CpuFeatures::IsSupported(SSE3);
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;
@@ -1733,7 +1658,7 @@
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new LCheckNonSmi(value));
}
@@ -1754,13 +1679,13 @@
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new LCheckSmi(value));
}
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new LCheckFunction(value));
}
@@ -1793,6 +1718,34 @@
}
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+
+ LInstruction* result;
+ if (input_rep.IsDouble()) {
+ LOperand* reg = UseRegister(value);
+ LOperand* temp_reg =
+ CpuFeatures::IsSupported(SSE3) ? NULL : TempRegister();
+ result = DefineAsRegister(new LDoubleToI(reg, temp_reg));
+ } else if (input_rep.IsInteger32()) {
+ // Canonicalization should already have removed the hydrogen instruction in
+ // this case, since it is a noop.
+ UNREACHABLE();
+ return NULL;
+ } else {
+ ASSERT(input_rep.IsTagged());
+ LOperand* reg = UseRegister(value);
+ // Register allocator doesn't (yet) support allocation of double
+ // temps. Reserve xmm1 explicitly.
+ LOperand* xmm_temp =
+ CpuFeatures::IsSupported(SSE3) ? NULL : FixedTemp(xmm1);
+ result = DefineSameAsFirst(new LTaggedToI(reg, xmm_temp));
+ }
+ return AssignEnvironment(result);
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
return new LReturn(UseFixed(instr->value(), eax));
}
@@ -1883,13 +1836,16 @@
LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
HLoadNamedFieldPolymorphic* instr) {
ASSERT(instr->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), esi);
if (instr->need_generic()) {
LOperand* obj = UseFixed(instr->object(), eax);
- LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ LLoadNamedFieldPolymorphic* result =
+ new LLoadNamedFieldPolymorphic(context, obj);
return MarkAsCall(DefineFixed(result, eax), instr);
} else {
LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ LLoadNamedFieldPolymorphic* result =
+ new LLoadNamedFieldPolymorphic(context, obj);
return AssignEnvironment(DefineAsRegister(result));
}
}
@@ -1935,15 +1891,29 @@
}
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+ HLoadKeyedFastDoubleElement* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedFastDoubleElement* result =
+ new LLoadKeyedFastDoubleElement(elements, key);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
- ExternalArrayType array_type = instr->array_type();
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
Representation representation(instr->representation());
ASSERT(
- (representation.IsInteger32() && (array_type != kExternalFloatArray &&
- array_type != kExternalDoubleArray)) ||
- (representation.IsDouble() && (array_type == kExternalFloatArray ||
- array_type == kExternalDoubleArray)));
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
@@ -1953,7 +1923,7 @@
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
- return (array_type == kExternalUnsignedIntArray)
+ return (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS)
? AssignEnvironment(load_instr)
: load_instr;
}
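// Why an unsigned-int load needs an environment: a uint32 read can exceed
// INT32_MAX, and the Integer32 representation then cannot hold it, so the
// code must be able to deoptimize and box the value as a heap number.
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t loaded = 0x80000000u;  // 2147483648 > INT32_MAX
  bool needs_deopt = loaded > static_cast<uint32_t>(INT32_MAX);
  // Reinterpreting as int32 would flip the sign on two's-complement targets.
  std::printf("%u fits int32: %s\n", loaded, needs_deopt ? "no -> deopt" : "yes");
}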
@@ -1988,24 +1958,40 @@
}
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+ HStoreKeyedFastDoubleElement* instr) {
+ ASSERT(instr->value()->representation().IsDouble());
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* val = UseTempRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+ return new LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
Representation representation(instr->value()->representation());
- ExternalArrayType array_type = instr->array_type();
- ASSERT(
- (representation.IsInteger32() && (array_type != kExternalFloatArray &&
- array_type != kExternalDoubleArray)) ||
- (representation.IsDouble() && (array_type == kExternalFloatArray ||
- array_type == kExternalDoubleArray)));
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LOperand* val = NULL;
- if (array_type == kExternalByteArray ||
- array_type == kExternalUnsignedByteArray ||
- array_type == kExternalPixelArray) {
+ if (elements_kind == JSObject::EXTERNAL_BYTE_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) {
// We need a byte register in this case for the value.
val = UseFixed(instr->value(), eax);
} else {
@@ -2066,23 +2052,27 @@
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return MarkAsCall(DefineFixed(new LStringAdd(left, right), eax), instr);
+ LStringAdd* string_add = new LStringAdd(context, left, right);
+ return MarkAsCall(DefineFixed(string_add, eax), instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegisterOrConstant(instr->index());
- LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result = new LStringCharCodeAt(context, string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new LStringCharFromCode(char_code);
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result = new LStringCharFromCode(context, char_code);
return AssignPointerMap(DefineAsRegister(result));
}
@@ -2094,7 +2084,8 @@
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new LArrayLiteral, eax), instr);
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(DefineFixed(new LArrayLiteral(context), eax), instr);
}
@@ -2105,19 +2096,22 @@
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new LRegExpLiteral, eax), instr);
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(DefineFixed(new LRegExpLiteral(context), eax), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new LFunctionLiteral, eax), instr);
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(DefineFixed(new LFunctionLiteral(context), eax), instr);
}
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LDeleteProperty* result =
- new LDeleteProperty(UseAtStart(instr->object()),
- UseOrConstantAtStart(instr->key()));
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = UseAtStart(instr->object());
+ LOperand* key = UseOrConstantAtStart(instr->key());
+ LDeleteProperty* result = new LDeleteProperty(context, object, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2179,18 +2173,21 @@
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new LTypeof(UseAtStart(instr->value()));
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* value = UseAtStart(instr->value());
+ LTypeof* result = new LTypeof(context, value);
return MarkAsCall(DefineFixed(result, eax), instr);
}
-LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
- return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
- return DefineAsRegister(new LIsConstructCall);
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new LIsConstructCallAndBranch(TempRegister());
}
@@ -2227,7 +2224,14 @@
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- return MarkAsCall(new LStackCheck, instr);
+ if (instr->is_function_entry()) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(new LStackCheck(context), instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(AssignPointerMap(new LStackCheck(context)));
+ }
}
@@ -2236,7 +2240,6 @@
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->function(),
- HEnvironment::LITHIUM,
undefined,
instr->call_kind());
current_block_->UpdateEnvironment(inner);
@@ -2253,9 +2256,10 @@
LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* key = UseOrConstantAtStart(instr->key());
LOperand* object = UseOrConstantAtStart(instr->object());
- LIn* result = new LIn(key, object);
+ LIn* result = new LIn(context, key, object);
return MarkAsCall(DefineFixed(result, eax), instr);
}
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 979c494..0ea7c6b 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -32,6 +32,7 @@
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
+#include "utils.h"
namespace v8 {
namespace internal {
@@ -70,17 +71,12 @@
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
- V(ClassOfTest) \
V(ClassOfTestAndBranch) \
- V(CmpID) \
V(CmpIDAndBranch) \
- V(CmpJSObjectEq) \
- V(CmpJSObjectEqAndBranch) \
- V(CmpSymbolEq) \
- V(CmpSymbolEqAndBranch) \
+ V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
- V(CmpTAndBranch) \
+ V(CmpConstantEqAndBranch) \
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
@@ -89,6 +85,7 @@
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(ElementsKind) \
V(ExternalArrayLength) \
V(FixedArrayLength) \
V(FunctionLiteral) \
@@ -96,26 +93,18 @@
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
- V(HasCachedArrayIndex) \
V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
- V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCall) \
V(IsConstructCallAndBranch) \
- V(IsNull) \
V(IsNullAndBranch) \
- V(IsObject) \
V(IsObjectAndBranch) \
- V(IsSmi) \
V(IsSmiAndBranch) \
- V(IsUndetectable) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
@@ -127,6 +116,7 @@
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyedFastElement) \
+ V(LoadKeyedFastDoubleElement) \
V(LoadKeyedGeneric) \
V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
@@ -152,6 +142,7 @@
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
+ V(StoreKeyedFastDoubleElement) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreKeyedSpecializedArrayElement) \
@@ -163,10 +154,10 @@
V(StringLength) \
V(SubI) \
V(TaggedToI) \
+ V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(Typeof) \
- V(TypeofIs) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
@@ -225,7 +216,6 @@
virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
- virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
@@ -281,37 +271,6 @@
};
-template<typename ElementType, int NumElements>
-class OperandContainer {
- public:
- OperandContainer() {
- for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
- }
- int length() { return NumElements; }
- ElementType& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
- }
- void PrintOperandsTo(StringStream* stream);
-
- private:
- ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class OperandContainer<ElementType, 0> {
- public:
- int length() { return 0; }
- void PrintOperandsTo(StringStream* stream) { }
- ElementType& operator[](int i) {
- UNREACHABLE();
- static ElementType t = 0;
- return t;
- }
-};
-
-
// R = number of result operands (0 or 1).
// I = number of input operands.
// T = number of temporary operands.
@@ -334,9 +293,9 @@
virtual void PrintOutputOperandTo(StringStream* stream);
protected:
- OperandContainer<LOperand*, R> results_;
- OperandContainer<LOperand*, I> inputs_;
- OperandContainer<LOperand*, T> temps_;
+ EmbeddedContainer<LOperand*, R> results_;
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
};
@@ -395,19 +354,16 @@
class LGoto: public LTemplateInstruction<0, 0, 0> {
public:
- LGoto(int block_id, bool include_stack_check = false)
- : block_id_(block_id), include_stack_check_(include_stack_check) { }
+ explicit LGoto(int block_id) : block_id_(block_id) { }
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int block_id() const { return block_id_; }
- bool include_stack_check() const { return include_stack_check_; }
private:
int block_id_;
- bool include_stack_check_;
};
@@ -489,16 +445,15 @@
public:
virtual bool IsControl() const { return true; }
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
- void SetBranchTargets(int true_block_id, int false_block_id) {
- true_block_id_ = true_block_id;
- false_block_id_ = false_block_id;
- }
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+ int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+ int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
private:
- int true_block_id_;
- int false_block_id_;
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
};
@@ -600,23 +555,6 @@
};
-class LCmpID: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpID(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
- }
-};
-
-
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -625,7 +563,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@@ -636,12 +574,16 @@
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
+class LUnaryMathOperation: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LUnaryMathOperation(LOperand* value) {
+ LUnaryMathOperation(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
inputs_[0] = value;
}
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
@@ -650,61 +592,27 @@
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+ "cmp-object-eq-and-branch")
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
public:
- LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ explicit LCmpConstantEqAndBranch(LOperand* left) {
inputs_[0] = left;
- inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
- "cmp-jsobject-eq-and-branch")
-};
-
-
-class LCmpSymbolEq: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpSymbolEq(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEq, "cmp-symbol-eq")
-};
-
-
-class LCmpSymbolEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpSymbolEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEqAndBranch, "cmp-symbol-eq-and-branch")
-};
-
-
-class LIsNull: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsNull(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
- bool is_strict() const { return hydrogen()->is_strict(); }
+ DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+ "cmp-constant-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
};
@@ -716,7 +624,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
+ DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
bool is_strict() const { return hydrogen()->is_strict(); }
@@ -724,42 +632,19 @@
};
-class LIsObject: public LTemplateInstruction<1, 1, 1> {
+class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
- LIsObject(LOperand* value, LOperand* temp) {
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 2> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
virtual void PrintDataTo(StringStream* stream);
};
-class LIsSmi: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
- DECLARE_HYDROGEN_ACCESSOR(IsSmi)
-};
-
-
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@@ -767,25 +652,15 @@
}
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsUndetectable(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
-};
-
-
class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
public:
- explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -797,17 +672,6 @@
};
-class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-};
-
-
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
public:
LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
@@ -817,7 +681,7 @@
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -834,17 +698,6 @@
};
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
-};
-
-
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -857,13 +710,6 @@
};
-class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
- DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
-};
-
-
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
@@ -875,20 +721,6 @@
};
-class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
- public:
- LClassOfTest(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
@@ -899,35 +731,22 @@
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
+class LCmpT: public LTemplateInstruction<1, 3, 0> {
public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LCmpTAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpTAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Token::Value op() const { return hydrogen()->token(); }
};
@@ -947,24 +766,11 @@
};
-class LInstanceOfAndBranch: public LControlInstruction<3, 0> {
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
public:
- LInstanceOfAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-
- LOperand* context() { return inputs_[0]; }
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
- public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ inputs_[1] = value;
temps_[0] = temp;
}
@@ -1077,7 +883,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Value)
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -1137,6 +943,17 @@
};
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LElementsKind(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
+};
+
+
class LValueOf: public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
@@ -1149,12 +966,16 @@
};
-class LThrow: public LTemplateInstruction<0, 1, 0> {
+class LThrow: public LTemplateInstruction<0, 2, 0> {
public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
+ LThrow(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
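LThrow shows the other pattern repeated through this header: instructions that call out of the current frame now take the context as an explicit first input instead of assuming it is live in esi, which makes the use visible to the register allocator. A reduced model of the shape these classes share (the real LTemplateInstruction also carries result and temp arrays):

#include <cassert>

template <int I>
struct LTemplateInstruction {
  void* inputs_[I];
};

struct LThrow : public LTemplateInstruction<2> {
  LThrow(void* context, void* value) {
    inputs_[0] = context;
    inputs_[1] = value;
  }
  void* context() { return inputs_[0]; }
  void* value() { return inputs_[1]; }
};

int main() {
  int ctx = 0;
  int val = 0;
  LThrow instr(&ctx, &val);
  assert(instr.context() == &ctx);  // context is a first-class operand now
  assert(instr.value() == &val);
  return 0;
}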
@@ -1212,12 +1033,16 @@
};
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
: op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
@@ -1225,6 +1050,9 @@
virtual const char* Mnemonic() const;
Token::Value op() const { return op_; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
private:
Token::Value op_;
@@ -1254,16 +1082,18 @@
};
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
+ LLoadNamedFieldPolymorphic(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
};
@@ -1333,6 +1163,23 @@
};
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedFastDoubleElement(LOperand* elements,
+ LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+ "load-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
@@ -1347,8 +1194,8 @@
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
};
@@ -1406,9 +1253,9 @@
class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
public:
- explicit LStoreGlobalGeneric(LOperand* context,
- LOperand* global_object,
- LOperand* value) {
+ LStoreGlobalGeneric(LOperand* context,
+ LOperand* global_object,
+ LOperand* value) {
inputs_[0] = context;
inputs_[1] = global_object;
inputs_[2] = value;
@@ -1471,6 +1318,11 @@
};
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+};
+
+
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
@@ -1641,11 +1493,15 @@
};
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ LOperand* context() { return inputs_[0]; }
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
};
@@ -1691,7 +1547,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1706,7 +1562,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1814,6 +1670,28 @@
};
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedFastDoubleElement(LOperand* elements,
+ LOperand* key,
+ LOperand* val) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+ "store-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+};
+
+
class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
@@ -1831,8 +1709,8 @@
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
};
@@ -1862,46 +1740,52 @@
};
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+class LStringAdd: public LTemplateInstruction<1, 3, 0> {
public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
DECLARE_HYDROGEN_ACCESSOR(StringAdd)
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
};
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt: public LTemplateInstruction<1, 3, 0> {
public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
}
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
};
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
+ LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
}
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
- LOperand* char_code() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
};
@@ -2023,8 +1907,14 @@
};
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
+class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LArrayLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
};
@@ -2036,22 +1926,34 @@
inputs_[0] = context;
}
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-
- LOperand* context() { return inputs_[0]; }
};
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
@@ -2070,31 +1972,17 @@
};
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
+class LTypeof: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
-class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeofIs(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
@@ -2102,7 +1990,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
@@ -2110,17 +1998,19 @@
};
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
+class LDeleteProperty: public LTemplateInstruction<1, 3, 0> {
public:
- LDeleteProperty(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
+ LDeleteProperty(LOperand* context, LOperand* obj, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
}
DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
};
@@ -2147,21 +2037,35 @@
};
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+class LStackCheck: public LTemplateInstruction<0, 1, 0> {
public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
};
-class LIn: public LTemplateInstruction<1, 2, 0> {
+class LIn: public LTemplateInstruction<1, 3, 0> {
public:
- LIn(LOperand* key, LOperand* object) {
- inputs_[0] = key;
- inputs_[1] = object;
+ LIn(LOperand* context, LOperand* key, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = key;
+ inputs_[2] = object;
}
- LOperand* key() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* object() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(In, "in")
};
@@ -2170,7 +2074,7 @@
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
- explicit LChunk(CompilationInfo* info, HGraph* graph)
+ LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
info_(info),
graph_(graph),
@@ -2361,7 +2265,8 @@
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+ int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 6e66b6e..3e037d7 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -149,8 +149,7 @@
// Skip barrier if writing a smi.
ASSERT_EQ(0, kSmiTag);
- test(value, Immediate(kSmiTagMask));
- j(zero, &done, Label::kNear);
+ JumpIfSmi(value, &done, Label::kNear);
InNewSpace(object, value, equal, &done, Label::kNear);
@@ -195,8 +194,7 @@
// Skip barrier if writing a smi.
ASSERT_EQ(0, kSmiTag);
- test(value, Immediate(kSmiTagMask));
- j(zero, &done);
+ JumpIfSmi(value, &done, Label::kNear);
InNewSpace(object, value, equal, &done);
@@ -279,6 +277,16 @@
}
+void MacroAssembler::CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Map::kMaximumBitField2FastElementValue);
+ j(above, fail, distance);
+}
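The new helper relies on the elements-kind encoding being ordered so that the fast kind is lowest (the STATIC_ASSERT pins FAST_ELEMENTS to 0) and stored in Map::bit_field2; one unsigned byte compare then classifies the map. A hedged sketch of the predicate, with an assumed bound value:

#include <cassert>
#include <cstdint>

// Assumed bound: everything at or below it encodes a fast-elements kind.
const uint8_t kMaximumBitField2FastElementValue = 0x0F;

bool HasFastElements(uint8_t bit_field2) {
  // Mirrors: cmpb(FieldOperand(map, kBitField2Offset), bound); j(above, fail);
  return bit_field2 <= kMaximumBitField2FastElementValue;
}

int main() {
  assert(HasFastElements(0x00));   // FAST_ELEMENTS == 0
  assert(!HasFastElements(0x40));  // some non-fast kind
  return 0;
}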
+
+
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
@@ -330,8 +338,9 @@
Register scratch,
Label* fail) {
movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
- sub(Operand(scratch), Immediate(FIRST_JS_OBJECT_TYPE));
- cmp(scratch, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
+ sub(Operand(scratch), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ cmp(scratch,
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
j(above, fail);
}
@@ -353,8 +362,7 @@
void MacroAssembler::AbortIfNotNumber(Register object) {
Label ok;
- test(object, Immediate(kSmiTagMask));
- j(zero, &ok);
+ JumpIfSmi(object, &ok);
cmp(FieldOperand(object, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
Assert(equal, "Operand not a number");
@@ -726,6 +734,104 @@
}
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register r0,
+ Register r1,
+ Register r2,
+ Register result) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver and is unchanged.
+ //
+ // key - holds the smi key on entry and is unchanged.
+ //
+ // Scratch registers:
+ //
+ // r0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // r1 - used to hold the capacity mask of the dictionary.
+ //
+ // r2 - used for the index into the dictionary.
+ //
+ // result - holds the result on exit if the load succeeds and we fall through.
+
+ Label done;
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ mov(r1, r0);
+ not_(r0);
+ shl(r1, 15);
+ add(r0, Operand(r1));
+ // hash = hash ^ (hash >> 12);
+ mov(r1, r0);
+ shr(r1, 12);
+ xor_(r0, Operand(r1));
+ // hash = hash + (hash << 2);
+ lea(r0, Operand(r0, r0, times_4, 0));
+ // hash = hash ^ (hash >> 4);
+ mov(r1, r0);
+ shr(r1, 4);
+ xor_(r0, Operand(r1));
+ // hash = hash * 2057;
+ imul(r0, r0, 2057);
+ // hash = hash ^ (hash >> 16);
+ mov(r1, r0);
+ shr(r1, 16);
+ xor_(r0, Operand(r1));
+
+ // Compute capacity mask.
+ mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
+ shr(r1, kSmiTagSize); // convert smi to int
+ dec(r1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ const int kProbes = 4;
+ for (int i = 0; i < kProbes; i++) {
+ // Use r2 for index calculations and keep the hash intact in r0.
+ mov(r2, r0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
+ }
+ and_(r2, Operand(r1));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NumberDictionary::kEntrySize == 3);
+ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
+
+ // Check if the key matches.
+ cmp(key, FieldOperand(elements,
+ r2,
+ times_pointer_size,
+ NumberDictionary::kElementsStartOffset));
+ if (i != (kProbes - 1)) {
+ j(equal, &done);
+ } else {
+ j(not_equal, miss);
+ }
+ }
+
+ bind(&done);
+ // Check that the value is a normal property.
+ const int kDetailsOffset =
+ NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ ASSERT_EQ(NORMAL, 0);
+ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
+ Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
+ j(not_zero, miss);
+
+ // Get the value at the masked, scaled index.
+ const int kValueOffset =
+ NumberDictionary::kElementsStartOffset + kPointerSize;
+ mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+}
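The hash sequence above is a hand-scheduled transcription of the integer hash it references; per its own comment it must stay in sync with the C++ version in utils.h (called ComputeIntegerHash there; the exact signature is assumed below), since the dictionary being probed was built with the C++ hash. A direct C++ rendering:

#include <cstdint>
#include <cstdio>

uint32_t ComputeIntegerHash(uint32_t key) {
  uint32_t hash = key;
  hash = ~hash + (hash << 15);  // mov/not_/shl/add
  hash = hash ^ (hash >> 12);   // mov/shr/xor_
  hash = hash + (hash << 2);    // lea(r0, Operand(r0, r0, times_4, 0))
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // imul(r0, r0, 2057)
  hash = hash ^ (hash >> 16);
  return hash;
}

int main() {
  printf("hash(42) = %u\n", ComputeIntegerHash(42u));
  return 0;
}

The unrolled loop then probes at most four slots at (hash + probe offset) & mask before bailing out to the miss label.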
+
+
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
@@ -1165,8 +1271,7 @@
Register scratch,
Label* miss) {
// Check that the receiver isn't a smi.
- test(function, Immediate(kSmiTagMask));
- j(zero, miss);
+ JumpIfSmi(function, miss);
// Check that the function really is a function.
CmpObjectType(function, JS_FUNCTION_TYPE, result);
@@ -1405,32 +1510,30 @@
}
-void MacroAssembler::PrepareCallApiFunction(int argc, Register scratch) {
+void MacroAssembler::PrepareCallApiFunction(int argc) {
if (kReturnHandlesDirectly) {
EnterApiExitFrame(argc);
// When handles are returned directly we don't have to allocate extra
// space for and pass an out parameter.
+ if (emit_debug_code()) {
+ mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
+ }
} else {
// We allocate two additional slots: return value and pointer to it.
EnterApiExitFrame(argc + 2);
// The argument slots are filled as follows:
//
- // n + 1: output cell
+ // n + 1: output slot
// n: arg n
// ...
// 1: arg1
- // 0: pointer to the output cell
- //
- // Note that this is one more "argument" than the function expects
- // so the out cell will have to be popped explicitly after returning
- // from the function. The out cell contains Handle.
+ // 0: pointer to the output slot
- // pointer to out cell.
- lea(scratch, Operand(esp, (argc + 1) * kPointerSize));
- mov(Operand(esp, 0 * kPointerSize), scratch); // output.
+ lea(esi, Operand(esp, (argc + 1) * kPointerSize));
+ mov(Operand(esp, 0 * kPointerSize), esi);
if (emit_debug_code()) {
- mov(Operand(esp, (argc + 1) * kPointerSize), Immediate(0)); // out cell.
+ mov(Operand(esi, 0), Immediate(0));
}
}
}
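When handles are not returned directly, the convention the comments describe is: reserve one extra stack slot for the result, pass a pointer to that slot as argument 0, and read the result back through the pointer after the call; the rewrite parks that pointer in callee-saved esi so it survives the call. A hedged illustration of the indirect-return shape (names are illustrative, not V8's API):

#include <cassert>
#include <cstdint>

typedef intptr_t ObjectHandle;

// Stand-in for an API function that writes its result through an out slot.
static void ApiFunction(ObjectHandle* out, int arg) {
  *out = static_cast<ObjectHandle>(arg);
}

int main() {
  ObjectHandle result_slot = 0;      // the reserved "n + 1" slot
  ObjectHandle* out = &result_slot;  // the pointer kept in esi
  ApiFunction(out, 42);              // callee fills the slot
  assert(*out == 42);                // mov(eax, Operand(esi, 0))
  return 0;
}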
@@ -1454,9 +1557,9 @@
call(function->address(), RelocInfo::RUNTIME_ENTRY);
if (!kReturnHandlesDirectly) {
- // The returned value is a pointer to the handle holding the result.
- // Dereference this to get to the location.
- mov(eax, Operand(eax, 0));
+ // PrepareCallApiFunction saved pointer to the output slot into
+ // callee-save register esi.
+ mov(eax, Operand(esi, 0));
}
Label empty_handle;
@@ -1748,12 +1851,9 @@
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- mov(dst, Operand(esi, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- mov(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
- mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
} else {
// Slot is in the current function context. Move it into the
@@ -1762,14 +1862,14 @@
mov(dst, esi);
}
- // We should not have found a 'with' context by walking the context chain
+ // We should not have found a with context by walking the context chain
// (i.e., the static scope chain and runtime context chain do not agree).
// A variable occurring in such a scope should have slot type LOOKUP and
// not CONTEXT.
if (emit_debug_code()) {
- cmp(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- Check(equal, "Yo dawg, I heard you liked function contexts "
- "so I put function contexts in all your contexts");
+ cmp(FieldOperand(dst, HeapObject::kMapOffset),
+ isolate()->factory()->with_context_map());
+ Check(not_equal, "Variable resolved to with context.");
}
}
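LoadContext gets simpler because each context now links directly to its previous context (PREVIOUS_INDEX), so walking N levels is one load per hop instead of alternating closure and context loads. A toy model of the walk, with an assumed field layout:

#include <cassert>
#include <cstddef>

struct Context {
  Context* previous;  // stand-in for the PREVIOUS_INDEX slot
};

Context* LoadContext(Context* current, int context_chain_length) {
  Context* dst = current;
  for (int i = 0; i < context_chain_length; i++) {
    dst = dst->previous;  // mov(dst, Operand(dst, SlotOffset(PREVIOUS_INDEX)))
  }
  return dst;
}

int main() {
  Context outer = { NULL };
  Context middle = { &outer };
  Context inner = { &middle };
  assert(LoadContext(&inner, 2) == &outer);
  return 0;
}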
@@ -1944,6 +2044,9 @@
Immediate(factory->fixed_array_map()));
j(equal, &ok);
cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Immediate(factory->fixed_double_array_map()));
+ j(equal, &ok);
+ cmp(FieldOperand(elements, HeapObject::kMapOffset),
Immediate(factory->fixed_cow_array_map()));
j(equal, &ok);
Abort("JSObject with fast elements map has slow elements");
@@ -2048,8 +2151,7 @@
ASSERT_EQ(0, kSmiTag);
mov(scratch1, Operand(object1));
and_(scratch1, Operand(object2));
- test(scratch1, Immediate(kSmiTagMask));
- j(zero, failure);
+ JumpIfSmi(scratch1, failure);
// Load instance type for both strings.
mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 2ab98c5..dac2273 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -216,6 +216,12 @@
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object)
@@ -283,14 +289,25 @@
}
// Jump if the register contains a smi.
- inline void JumpIfSmi(Register value, Label* smi_label) {
+ inline void JumpIfSmi(Register value,
+ Label* smi_label,
+ Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
- j(zero, smi_label);
+ j(zero, smi_label, distance);
+ }
+ // Jump if the operand is a smi.
+ inline void JumpIfSmi(Operand value,
+ Label* smi_label,
+ Label::Distance distance = Label::kFar) {
+ test(value, Immediate(kSmiTagMask));
+ j(zero, smi_label, distance);
}
// Jump if the register contains a non-smi.
- inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
+ inline void JumpIfNotSmi(Register value,
+ Label* not_smi_label,
+ Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
- j(not_zero, not_smi_label);
+ j(not_zero, not_smi_label, distance);
}
void LoadInstanceDescriptors(Register map, Register descriptors);
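Every test(value, Immediate(kSmiTagMask)) / j(zero, ...) pair replaced throughout this patch is this predicate: on ia32 smis use a one-bit tag with kSmiTag == 0, so a value is a smi exactly when its low bit is clear, and a single TEST plus a conditional jump classifies it. A self-contained sketch of the encoding (constants mirror the ia32 values):

#include <cassert>
#include <cstdint>

const intptr_t kSmiTag = 0;
const intptr_t kSmiTagMask = 1;
const int kSmiTagSize = 1;

bool IsSmi(intptr_t value) {
  // test(value, Immediate(kSmiTagMask)); j(zero, smi_label);
  return (value & kSmiTagMask) == kSmiTag;
}

intptr_t SmiTag(int32_t value) {
  return static_cast<intptr_t>(value) << kSmiTagSize;
}

int32_t SmiUntag(intptr_t smi) {  // shr(reg, kSmiTagSize) in the stubs
  return static_cast<int32_t>(smi >> kSmiTagSize);
}

int main() {
  assert(IsSmi(SmiTag(7)));
  assert(SmiUntag(SmiTag(-3)) == -3);
  assert(!IsSmi(0x08000001));  // heap-object pointers carry tag bit 1
  return 0;
}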
@@ -335,6 +352,15 @@
Label* miss);
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register r0,
+ Register r1,
+ Register r2,
+ Register result);
+
+
// ---------------------------------------------------------------------------
// Allocation support
@@ -541,10 +567,10 @@
// Prepares stack to put arguments (aligns and so on). Reserves
// space for return value if needed (assumes the return value is a handle).
- // Uses callee-saved esi to restore stack state after call. Arguments must be
- // stored in ApiParameterOperand(0), ApiParameterOperand(1) etc. Saves
- // context (esi).
- void PrepareCallApiFunction(int argc, Register scratch);
+ // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
+ // etc. Saves context (esi). If space was reserved for return value then
+ // stores the pointer to the reserved slot into esi.
+ void PrepareCallApiFunction(int argc);
// Calls an API function. Allocates HandleScope, extracts
// returned value from handle and propagates exceptions.
@@ -585,6 +611,9 @@
void Move(Register target, Handle<Object> value);
+ // Push a handle value.
+ void Push(Handle<Object> handle) { push(handle); }
+
Handle<Object> CodeObject() {
ASSERT(!code_object_.is_null());
return code_object_;
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index 21c86d0..d504470 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -28,6 +28,9 @@
#ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
#define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
+#include "ia32/assembler-ia32.h"
+#include "ia32/assembler-ia32-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 550a6ff..9a690d7 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -129,7 +129,7 @@
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
__ j(below, miss_label);
// Load properties array.
@@ -188,8 +188,7 @@
ASSERT(extra2.is(no_reg));
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
__ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
@@ -249,8 +248,7 @@
Register scratch,
Label* miss_label) {
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss_label);
+ __ JumpIfSmi(receiver, miss_label);
// Check that the object is a JS array.
__ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
@@ -270,8 +268,7 @@
Label* smi,
Label* non_string_object) {
// Check that the object isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, smi);
+ __ JumpIfSmi(receiver, smi);
// Check that the object is a string.
__ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
@@ -462,7 +459,7 @@
// it's not controlled by GC.
const int kApiStackSpace = 4;
- __ PrepareCallApiFunction(kApiArgc + kApiStackSpace, ebx);
+ __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
__ mov(ApiParameterOperand(1), eax); // v8::Arguments::implicit_args_.
__ add(Operand(eax), Immediate(argc * kPointerSize));
@@ -509,8 +506,7 @@
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
CallOptimization optimization(lookup);
@@ -738,8 +734,7 @@
Register scratch,
Label* miss_label) {
// Check that the object isn't a smi.
- __ test(receiver_reg, Immediate(kSmiTagMask));
- __ j(zero, miss_label);
+ __ JumpIfSmi(receiver_reg, miss_label);
// Check that the map of the object hasn't changed.
__ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
@@ -1020,8 +1015,7 @@
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
// Check the prototype chain.
Register reg =
@@ -1045,8 +1039,7 @@
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
@@ -1089,7 +1082,7 @@
const int kStackSpace = 5;
const int kApiArgc = 2;
- __ PrepareCallApiFunction(kApiArgc, eax);
+ __ PrepareCallApiFunction(kApiArgc);
__ mov(ApiParameterOperand(0), ebx); // name.
__ add(Operand(ebx), Immediate(kPointerSize));
__ mov(ApiParameterOperand(1), ebx); // arguments pointer.
@@ -1112,8 +1105,7 @@
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
CheckPrototypes(object, receiver, holder,
@@ -1139,8 +1131,7 @@
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
@@ -1290,8 +1281,7 @@
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(edx, miss);
}
// Check that the maps haven't changed.
@@ -1317,8 +1307,7 @@
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(edi, miss);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, miss);
@@ -1366,8 +1355,7 @@
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
// Do the right check and compute the holder register.
Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
@@ -1376,8 +1364,7 @@
GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
// Check that the function really is a function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edi, &miss);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, &miss);
@@ -1432,8 +1419,7 @@
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
@@ -1481,8 +1467,7 @@
__ mov(Operand(edx, 0), ecx);
// Check if value is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &with_write_barrier);
+ __ JumpIfNotSmi(ecx, &with_write_barrier);
__ bind(&exit);
__ ret((argc + 1) * kPointerSize);
@@ -1585,8 +1570,7 @@
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
eax, edi, name, &miss);
@@ -1845,8 +1829,7 @@
__ mov(edx, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
&miss);
@@ -1863,8 +1846,7 @@
// Check the code is a smi.
Label slow;
STATIC_ASSERT(kSmiTag == 0);
- __ test(code, Immediate(kSmiTagMask));
- __ j(not_zero, &slow);
+ __ JumpIfNotSmi(code, &slow);
// Convert the smi code to uint16.
__ and_(code, Immediate(Smi::FromInt(0xffff)));
@@ -1929,8 +1911,7 @@
__ mov(edx, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
&miss);
@@ -1946,8 +1927,7 @@
// Check if the argument is a smi.
Label smi;
STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &smi);
+ __ JumpIfSmi(eax, &smi);
// Check if the argument is a heap number and load its value into xmm0.
Label slow;
@@ -2054,8 +2034,7 @@
__ mov(edx, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
&miss);
@@ -2071,8 +2050,7 @@
// Check if the argument is a smi.
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smi);
+ __ JumpIfNotSmi(eax, &not_smi);
// Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
// otherwise.
@@ -2158,8 +2136,7 @@
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss_before_stack_reserved);
+ __ JumpIfSmi(edx, &miss_before_stack_reserved);
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->call_const(), 1);
@@ -2227,8 +2204,7 @@
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
}
// Make sure that it's okay not to patch the on stack receiver
@@ -2277,8 +2253,7 @@
} else {
Label fast;
// Check that the object is a smi or a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &fast);
+ __ JumpIfSmi(edx, &fast);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
__ j(not_equal, &miss);
__ bind(&fast);
@@ -2373,8 +2348,7 @@
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the function really is a function.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(eax, &miss);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, &miss);
@@ -2522,8 +2496,7 @@
Label miss;
// Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
// Check that the map of the object hasn't changed.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
@@ -2572,8 +2545,7 @@
Label miss;
// Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
// Check that the map of the object hasn't changed.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
@@ -2699,18 +2671,18 @@
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreFastElement(
- Map* receiver_map) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- MaybeObject* maybe_stub =
- KeyedStoreFastElementStub(is_js_array).TryGetCode();
Code* stub;
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ MaybeObject* maybe_stub =
+ KeyedStoreElementStub(is_jsarray, elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(edx,
Handle<Map>(receiver_map),
@@ -2765,8 +2737,7 @@
Label miss;
// Check that the receiver isn't a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(eax, &miss);
ASSERT(last->IsGlobalObject() || last->HasFastProperties());
@@ -2918,8 +2889,7 @@
// object which can only happen for contextual loads. In this case,
// the receiver cannot be a smi.
if (object != holder) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(eax, &miss);
}
// Check that the maps haven't changed.
@@ -3163,14 +3133,15 @@
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFastElement(Map* receiver_map) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- MaybeObject* maybe_stub = KeyedLoadFastElementStub().TryGetCode();
Code* stub;
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(edx,
Handle<Map>(receiver_map),
@@ -3235,8 +3206,7 @@
// Load the initial map and verify that it is in fact a map.
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// This catches both a NULL and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &generic_stub_call);
+ __ JumpIfSmi(ebx, &generic_stub_call);
__ CmpObjectType(ebx, MAP_TYPE, ecx);
__ j(not_equal, &generic_stub_call);
@@ -3351,61 +3321,71 @@
}
-MaybeObject* ExternalArrayLoadStubCompiler::CompileLoad(
- JSObject*receiver, ExternalArrayType array_type) {
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- MaybeObject* maybe_stub =
- KeyedLoadExternalArrayStub(array_type).TryGetCode();
- Code* stub;
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(edx,
- Handle<Map>(receiver->map()),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Label slow, miss_force_generic;
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
+ __ JumpIfNotSmi(eax, &miss_force_generic);
+ __ mov(ebx, eax);
+ __ SmiUntag(ebx);
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
- // Return the generated code.
- return GetCode();
-}
+ // Push receiver on the stack to free up a register for the dictionary
+ // probing.
+ __ push(edx);
+ __ LoadFromNumberDictionary(&slow,
+ ecx,
+ eax,
+ ebx,
+ edx,
+ edi,
+ eax);
+ // Pop receiver before returning.
+ __ pop(edx);
+ __ ret(0);
+ __ bind(&slow);
+ __ pop(edx);
-MaybeObject* ExternalArrayStoreStubCompiler::CompileStore(
- JSObject* receiver, ExternalArrayType array_type) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- MaybeObject* maybe_stub =
- KeyedStoreExternalArrayStub(array_type).TryGetCode();
- Code* stub;
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(edx,
- Handle<Map>(receiver->map()),
- Handle<Code>(stub),
- DO_SMI_CHECK);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ jmp(slow_ic, RelocInfo::CODE_TARGET);
- return GetCode();
+ __ bind(&miss_force_generic);
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ Handle<Code> miss_force_generic_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ jmp(miss_force_generic_ic, RelocInfo::CODE_TARGET);
}
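GenerateLoadDictionaryElement covers the dictionary-backed slow case; the external-array stubs that follow instead switch on the receiver's elements kind, which this patch threads through in place of ExternalArrayType. The kind determines the access width and address scale, roughly as follows (enumerators abridged; sizes as the stubs below use them):

#include <cassert>
#include <cstddef>

enum ElementsKind {
  EXTERNAL_BYTE_ELEMENTS,
  EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
  EXTERNAL_SHORT_ELEMENTS,
  EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
  EXTERNAL_INT_ELEMENTS,
  EXTERNAL_UNSIGNED_INT_ELEMENTS,
  EXTERNAL_FLOAT_ELEMENTS,
  EXTERNAL_DOUBLE_ELEMENTS
};

size_t ElementSize(ElementsKind kind) {
  switch (kind) {
    case EXTERNAL_BYTE_ELEMENTS:
    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: return 1;   // movsx_b / movzx_b
    case EXTERNAL_SHORT_ELEMENTS:
    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: return 2;  // movsx_w / movzx_w
    case EXTERNAL_INT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
    case EXTERNAL_FLOAT_ELEMENTS: return 4;           // mov / fld_s
    case EXTERNAL_DOUBLE_ELEMENTS: return 8;          // fld_d
  }
  return 0;
}

int main() {
  assert(ElementSize(EXTERNAL_DOUBLE_ELEMENTS) == 8);
  assert(ElementSize(EXTERNAL_SHORT_ELEMENTS) == 2);
  return 0;
}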
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm,
- ExternalArrayType array_type) {
+ JSObject::ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
@@ -3417,8 +3397,7 @@
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &miss_force_generic);
+ __ JumpIfNotSmi(eax, &miss_force_generic);
// Check that the index is in range.
__ mov(ecx, eax);
@@ -3429,28 +3408,28 @@
__ j(above_equal, &miss_force_generic);
__ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
// ebx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
break;
- case kExternalUnsignedByteArray:
- case kExternalPixelArray:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
__ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
__ movsx_w(eax, Operand(ebx, ecx, times_2, 0));
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ movzx_w(eax, Operand(ebx, ecx, times_2, 0));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
__ mov(ecx, Operand(ebx, ecx, times_4, 0));
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
__ fld_s(Operand(ebx, ecx, times_4, 0));
break;
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
__ fld_d(Operand(ebx, ecx, times_8, 0));
break;
default:
@@ -3463,17 +3442,17 @@
// For floating-point array type:
// FP(0): value
- if (array_type == kExternalIntArray ||
- array_type == kExternalUnsignedIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
- if (array_type == kExternalIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS) {
__ cmp(ecx, 0xC0000000);
__ j(sign, &box_int);
} else {
- ASSERT_EQ(array_type, kExternalUnsignedIntArray);
+ ASSERT_EQ(JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
@@ -3489,12 +3468,12 @@
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
- if (array_type == kExternalIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS) {
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
} else {
- ASSERT(array_type == kExternalUnsignedIntArray);
+ ASSERT_EQ(JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
// Need to zero-extend the value.
// There's no fild variant for unsigned values, so zero-extend
// to a 64-bit int manually.
@@ -3510,8 +3489,8 @@
__ mov(eax, ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
- } else if (array_type == kExternalFloatArray ||
- array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
@@ -3561,7 +3540,7 @@
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
- ExternalArrayType array_type) {
+ JSObject::ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
@@ -3573,8 +3552,7 @@
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &miss_force_generic);
+ __ JumpIfNotSmi(ecx, &miss_force_generic);
// Check that the index is in range.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
@@ -3591,19 +3569,19 @@
// ecx: key
// edi: elements array
// ebx: untagged index
- __ test(eax, Immediate(kSmiTagMask));
- if (array_type == kExternalPixelArray)
- __ j(not_equal, &slow);
- else
- __ j(not_equal, &check_heap_number);
+ if (elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) {
+ __ JumpIfNotSmi(eax, &slow);
+ } else {
+ __ JumpIfNotSmi(eax, &check_heap_number);
+ }
// smi case
__ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed.
__ SmiUntag(ecx);
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ecx: base pointer of external storage
- switch (array_type) {
- case kExternalPixelArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
{ // Clamp the value to [0..255].
Label done;
__ test(ecx, Immediate(0xFFFFFF00));
@@ -3614,27 +3592,27 @@
}
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ mov(Operand(edi, ebx, times_4, 0), ecx);
break;
- case kExternalFloatArray:
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
// Need to perform int-to-float conversion.
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
- if (array_type == kExternalFloatArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
__ fstp_s(Operand(edi, ebx, times_4, 0));
- } else { // array_type == kExternalDoubleArray.
+ } else { // elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS.
__ fstp_d(Operand(edi, ebx, times_8, 0));
}
break;
@@ -3645,7 +3623,7 @@
__ ret(0); // Return the original value.
// TODO(danno): handle heap number -> pixel array conversion
- if (array_type != kExternalPixelArray) {
+ if (elements_kind != JSObject::EXTERNAL_PIXEL_ELEMENTS) {
__ bind(&check_heap_number);
// eax: value
// edx: receiver
@@ -3662,11 +3640,11 @@
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ebx: untagged index
// edi: base pointer of external storage
- if (array_type == kExternalFloatArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ fstp_s(Operand(edi, ebx, times_4, 0));
__ ret(0);
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ fstp_d(Operand(edi, ebx, times_8, 0));
__ ret(0);
@@ -3679,14 +3657,14 @@
// (code-stubs-ia32.cc) is roughly what is needed here though the
// conversion failure case does not need to be handled.
if (CpuFeatures::IsSupported(SSE2)) {
- if (array_type != kExternalIntArray &&
- array_type != kExternalUnsignedIntArray) {
+ if (elements_kind != JSObject::EXTERNAL_INT_ELEMENTS &&
+ elements_kind != JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) {
ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
__ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
// ecx: untagged integer value
- switch (array_type) {
- case kExternalPixelArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
{ // Clamp the value to [0..255].
Label done;
__ test(ecx, Immediate(0xFFFFFF00));
@@ -3697,12 +3675,12 @@
}
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
default:
@@ -3775,8 +3753,6 @@
}
-
-
void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
@@ -3789,8 +3765,7 @@
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &miss_force_generic);
+ __ JumpIfNotSmi(eax, &miss_force_generic);
// Get the elements array.
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
@@ -3815,10 +3790,76 @@
}
+void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss_force_generic, slow_allocate_heapnumber;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(eax, &miss_force_generic);
+
+ // Get the elements array.
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ AssertFastElements(ecx);
+
+ // Check that the key is within bounds.
+ __ cmp(eax, FieldOperand(ecx, FixedDoubleArray::kLengthOffset));
+ __ j(above_equal, &miss_force_generic);
+
+ // Check for the hole.
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ cmp(FieldOperand(ecx, eax, times_4, offset), Immediate(kHoleNanUpper32));
+ __ j(equal, &miss_force_generic);
+
+ // Always allocate a heap number for the result.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ movdbl(xmm0, FieldOperand(ecx, eax, times_4,
+ FixedDoubleArray::kHeaderSize));
+ } else {
+ __ fld_d(FieldOperand(ecx, eax, times_4, FixedDoubleArray::kHeaderSize));
+ }
+ __ AllocateHeapNumber(ecx, ebx, edi, &slow_allocate_heapnumber);
+ // Set the value.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+ }
+ __ mov(eax, ecx);
+ __ ret(0);
+
+ __ bind(&slow_allocate_heapnumber);
+ // A value was pushed on the floating-point stack before the allocation;
+ // if the allocation fails it needs to be removed.
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ __ ffree();
+ __ fincstp();
+ }
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ jmp(slow_ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&miss_force_generic);
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
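The hole check in GenerateLoadFastDoubleElement works because the hole marker in a FixedDoubleArray is a NaN with a reserved bit pattern, so comparing only the upper 32 bits of the boxed double is sufficient. A standalone sketch of that encoding, with illustrative constants standing in for kHoleNanUpper32/kHoleNanLower32 (the actual V8 values may differ):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Hypothetical hole bit pattern; a NaN payload no canonical double uses.
    const uint32_t kSketchHoleUpper32 = 0x7FF7FFFF;
    const uint32_t kSketchHoleLower32 = 0xFFFFFFFF;

    double MakeHole() {
      uint64_t bits = (static_cast<uint64_t>(kSketchHoleUpper32) << 32) |
                      kSketchHoleLower32;
      double d;
      std::memcpy(&d, &bits, sizeof(d));
      return d;
    }

    bool IsHole(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      // One 32-bit compare on the upper word suffices, which is exactly
      // what the cmp against kHoleNanUpper32 above exploits.
      return static_cast<uint32_t>(bits >> 32) == kSketchHoleUpper32;
    }

    int main() {
      assert(IsHole(MakeHole()));
      assert(!IsHole(1.5));
      return 0;
    }

This also explains why the field offset is kHeaderSize + sizeof(kHoleNanLower32): on a little-endian target that addresses the upper word of the stored double.
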
void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
- // -- eax : key
+ // -- eax : value
+ // -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
@@ -3828,8 +3869,7 @@
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &miss_force_generic);
+ __ JumpIfNotSmi(ecx, &miss_force_generic);
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
@@ -3864,6 +3904,98 @@
}
+void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
+ MacroAssembler* masm,
+ bool is_js_array) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss_force_generic, smi_value, is_nan, maybe_nan;
+ Label have_double_value, not_nan;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(ecx, &miss_force_generic);
+
+ // Get the elements array.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ AssertFastElements(edi);
+
+ if (is_js_array) {
+ // Check that the key is within bounds.
+ __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
+ } else {
+ // Check that the key is within bounds.
+ __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
+ }
+ __ j(above_equal, &miss_force_generic);
+
+ __ JumpIfSmi(eax, &smi_value, Label::kNear);
+
+ __ CheckMap(eax,
+ masm->isolate()->factory()->heap_number_map(),
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ // Double value, canonicalize NaN.
+ uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+ __ cmp(FieldOperand(eax, offset), Immediate(kNaNOrInfinityLowerBoundUpper32));
+ __ j(greater_equal, &maybe_nan, Label::kNear);
+
+ __ bind(&not_nan);
+ ExternalReference canonical_nan_reference =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ bind(&have_double_value);
+ __ movdbl(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize),
+ xmm0);
+ __ ret(0);
+ } else {
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ bind(&have_double_value);
+ __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
+ __ ret(0);
+ }
+
+ __ bind(&maybe_nan);
+ // Could be NaN or Infinity. If the fraction is not zero, it's NaN;
+ // otherwise it's Infinity, and the non-NaN code path applies.
+ __ j(greater, &is_nan, Label::kNear);
+ __ cmp(FieldOperand(eax, HeapNumber::kValueOffset), Immediate(0));
+ __ j(zero, &not_nan);
+ __ bind(&is_nan);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ movdbl(xmm0, Operand::StaticVariable(canonical_nan_reference));
+ } else {
+ __ fld_d(Operand::StaticVariable(canonical_nan_reference));
+ }
+ __ jmp(&have_double_value, Label::kNear);
+
+ __ bind(&smi_value);
+ // Value is a smi. Convert it to a double and store.
+ __ SmiUntag(eax);
+ __ push(eax);
+ __ fild_s(Operand(esp, 0));
+ __ pop(eax);
+ __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
+ __ ret(0);
+
+ // Handle store cache miss, replacing the ic with the generic stub.
+ __ bind(&miss_force_generic);
+ Handle<Code> ic_force_generic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+}
+
+
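The store stub canonicalizes NaNs before writing, so the reserved hole pattern can never end up in the array as an ordinary value. A portable restatement of the NaN-vs-Infinity classification the maybe_nan/is_nan labels implement; this is a sketch with an illustrative bound constant, and it masks the sign bit for clarity where the assembly operates on the raw upper word:

    #include <cstdint>
    #include <cstring>

    // All-ones exponent, sign cleared: the boundary where Inf/NaN begin.
    const uint32_t kSketchNanOrInfUpper32 = 0x7FF00000;

    double CanonicalizeNan(double d, double canonical_nan) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      uint32_t upper = static_cast<uint32_t>(bits >> 32) & 0x7FFFFFFF;
      uint32_t lower = static_cast<uint32_t>(bits);
      // NaN iff any fraction bit is set; upper == bound with lower == 0
      // is +/-Infinity and passes through unchanged.
      bool is_nan = upper > kSketchNanOrInfUpper32 ||
                    (upper == kSketchNanOrInfUpper32 && lower != 0);
      return is_nan ? canonical_nan : d;
    }
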
#undef __
} } // namespace v8::internal
diff --git a/src/ic.cc b/src/ic.cc
index 0e87e51..f70f75a 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -578,13 +578,7 @@
// out of bounds, update the state to record this fact.
if (StringStubState::decode(*extra_ic_state) == DEFAULT_STRING_STUB &&
argc >= 1 && args[1]->IsNumber()) {
- double index;
- if (args[1]->IsSmi()) {
- index = Smi::cast(args[1])->value();
- } else {
- ASSERT(args[1]->IsHeapNumber());
- index = DoubleToInteger(HeapNumber::cast(args[1])->value());
- }
+ double index = DoubleToInteger(args.number_at(1));
if (index < 0 || index >= string->length()) {
*extra_ic_state =
StringStubState::update(*extra_ic_state,
@@ -799,18 +793,35 @@
return TypeError("non_object_property_call", object, key);
}
- if (FLAG_use_ic && state != MEGAMORPHIC && !object->IsAccessCheckNeeded()) {
+ if (FLAG_use_ic && state != MEGAMORPHIC && object->IsHeapObject()) {
int argc = target()->arguments_count();
InLoopFlag in_loop = target()->ic_in_loop();
- MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(
- argc, in_loop, Code::KEYED_CALL_IC, Code::kNoExtraICState);
- Object* code;
- if (maybe_code->ToObject(&code)) {
- set_target(Code::cast(code));
+ Heap* heap = Handle<HeapObject>::cast(object)->GetHeap();
+ Map* map = heap->non_strict_arguments_elements_map();
+ if (object->IsJSObject() &&
+ Handle<JSObject>::cast(object)->elements()->map() == map) {
+ MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallArguments(
+ argc, in_loop, Code::KEYED_CALL_IC);
+ Object* code;
+ if (maybe_code->ToObject(&code)) {
+ set_target(Code::cast(code));
#ifdef DEBUG
- TraceIC(
- "KeyedCallIC", key, state, target(), in_loop ? " (in-loop)" : "");
+ TraceIC(
+ "KeyedCallIC", key, state, target(), in_loop ? " (in-loop)" : "");
#endif
+ }
+ } else if (FLAG_use_ic && state != MEGAMORPHIC &&
+ !object->IsAccessCheckNeeded()) {
+ MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(
+ argc, in_loop, Code::KEYED_CALL_IC, Code::kNoExtraICState);
+ Object* code;
+ if (maybe_code->ToObject(&code)) {
+ set_target(Code::cast(code));
+#ifdef DEBUG
+ TraceIC(
+ "KeyedCallIC", key, state, target(), in_loop ? " (in-loop)" : "");
+#endif
+ }
}
}
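The rewritten KeyedCallIC update gives receivers backed by the non-strict arguments elements map their own stub before falling back to the usual megamorphic path. A sketch of the resulting decision order, with hypothetical flag names rather than the V8 API:

    #include <cassert>

    enum CallStubKind { kArgumentsStub, kMegamorphicStub, kNoNewStub };

    CallStubKind ChooseKeyedCallStub(bool use_ic, bool megamorphic,
                                     bool is_heap_object,
                                     bool has_arguments_elements,
                                     bool needs_access_check) {
      if (!use_ic || megamorphic || !is_heap_object) return kNoNewStub;
      if (has_arguments_elements) return kArgumentsStub;  // new fast path
      if (!needs_access_check) return kMegamorphicStub;   // prior behavior
      return kNoNewStub;
    }

    int main() {
      assert(ChooseKeyedCallStub(true, false, true, true, true) ==
             kArgumentsStub);
      return 0;
    }
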
@@ -849,37 +860,40 @@
}
if (FLAG_use_ic) {
- Code* non_monomorphic_stub =
- (state == UNINITIALIZED) ? pre_monomorphic_stub() : megamorphic_stub();
-
// Use specialized code for getting the length of strings and
// string wrapper objects. The length property of string wrapper
// objects is read-only and therefore always returns the length of
// the underlying string value. See ECMA-262 15.5.5.1.
if ((object->IsString() || object->IsStringWrapper()) &&
name->Equals(isolate()->heap()->length_symbol())) {
- HandleScope scope(isolate());
-#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
-#endif
- if (state == PREMONOMORPHIC) {
+ AssertNoAllocation no_allocation;
+ Code* stub = NULL;
+ if (state == UNINITIALIZED) {
+ stub = pre_monomorphic_stub();
+ } else if (state == PREMONOMORPHIC) {
if (object->IsString()) {
- set_target(isolate()->builtins()->builtin(
- Builtins::kLoadIC_StringLength));
+ stub = isolate()->builtins()->builtin(
+ Builtins::kLoadIC_StringLength);
} else {
- set_target(isolate()->builtins()->builtin(
- Builtins::kLoadIC_StringWrapperLength));
+ stub = isolate()->builtins()->builtin(
+ Builtins::kLoadIC_StringWrapperLength);
}
} else if (state == MONOMORPHIC && object->IsStringWrapper()) {
- set_target(isolate()->builtins()->builtin(
- Builtins::kLoadIC_StringWrapperLength));
- } else {
- set_target(non_monomorphic_stub);
+ stub = isolate()->builtins()->builtin(
+ Builtins::kLoadIC_StringWrapperLength);
+ } else if (state != MEGAMORPHIC) {
+ stub = megamorphic_stub();
+ }
+ if (stub != NULL) {
+ set_target(stub);
+#ifdef DEBUG
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
+#endif
}
// Get the string if we have a string wrapper object.
if (object->IsJSValue()) {
- object = Handle<Object>(Handle<JSValue>::cast(object)->value(),
- isolate());
+ return Smi::FromInt(
+ String::cast(Handle<JSValue>::cast(object)->value())->length());
}
return Smi::FromInt(String::cast(*object)->length());
}
@@ -887,14 +901,21 @@
// Use specialized code for getting the length of arrays.
if (object->IsJSArray() &&
name->Equals(isolate()->heap()->length_symbol())) {
+ AssertNoAllocation no_allocation;
+ Code* stub = NULL;
+ if (state == UNINITIALIZED) {
+ stub = pre_monomorphic_stub();
+ } else if (state == PREMONOMORPHIC) {
+ stub = isolate()->builtins()->builtin(
+ Builtins::kLoadIC_ArrayLength);
+ } else if (state != MEGAMORPHIC) {
+ stub = megamorphic_stub();
+ }
+ if (stub != NULL) {
+ set_target(stub);
#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
#endif
- if (state == PREMONOMORPHIC) {
- set_target(isolate()->builtins()->builtin(
- Builtins::kLoadIC_ArrayLength));
- } else {
- set_target(non_monomorphic_stub);
}
return JSArray::cast(*object)->length();
}
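The rewritten string-length and array-length handling above (and the function-prototype case that follows) share one pattern: select the stub from the IC state under AssertNoAllocation, then call set_target at most once, leaving the target alone when already megamorphic. A minimal restatement with stand-in names, not the V8 enums or builtins:

    enum SketchState { kUninitialized, kPremonomorphic, kMonomorphic,
                       kMegamorphic };

    const char* ChooseLengthStub(SketchState state) {
      if (state == kUninitialized) return "pre_monomorphic_stub";
      if (state == kPremonomorphic) return "specialized_length_stub";
      if (state != kMegamorphic) return "megamorphic_stub";
      return nullptr;  // already megamorphic: leave the target untouched
    }

    int main() {
      return ChooseLengthStub(kMegamorphic) == nullptr ? 0 : 1;
    }
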
@@ -903,14 +924,22 @@
if (object->IsJSFunction() &&
name->Equals(isolate()->heap()->prototype_symbol()) &&
JSFunction::cast(*object)->should_have_prototype()) {
+ { AssertNoAllocation no_allocation;
+ Code* stub = NULL;
+ if (state == UNINITIALIZED) {
+ stub = pre_monomorphic_stub();
+ } else if (state == PREMONOMORPHIC) {
+ stub = isolate()->builtins()->builtin(
+ Builtins::kLoadIC_FunctionPrototype);
+ } else if (state != MEGAMORPHIC) {
+ stub = megamorphic_stub();
+ }
+ if (stub != NULL) {
+ set_target(stub);
#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
#endif
- if (state == PREMONOMORPHIC) {
- set_target(isolate()->builtins()->builtin(
- Builtins::kLoadIC_FunctionPrototype));
- } else {
- set_target(non_monomorphic_stub);
+ }
}
return Accessors::FunctionGetPrototype(*object, 0);
}
@@ -927,7 +956,7 @@
// If we did not find a property, check if we need to throw an exception.
if (!lookup.IsProperty()) {
- if (FLAG_strict || IsContextual(object)) {
+ if (IsContextual(object)) {
return ReferenceError("not_defined", name);
}
LOG(isolate(), SuspectReadEvent(*name, *object));
@@ -1068,25 +1097,10 @@
}
-String* KeyedLoadIC::GetStubNameForCache(IC::State ic_state) {
- if (ic_state == MONOMORPHIC) {
- return isolate()->heap()->KeyedLoadSpecializedMonomorphic_symbol();
- } else {
- ASSERT(ic_state == MEGAMORPHIC);
- return isolate()->heap()->KeyedLoadSpecializedPolymorphic_symbol();
- }
-}
-
-
-MaybeObject* KeyedLoadIC::GetFastElementStubWithoutMapCheck(
- bool is_js_array) {
- return KeyedLoadFastElementStub().TryGetCode();
-}
-
-
-MaybeObject* KeyedLoadIC::GetExternalArrayStubWithoutMapCheck(
- ExternalArrayType array_type) {
- return KeyedLoadExternalArrayStub(array_type).TryGetCode();
+MaybeObject* KeyedLoadIC::GetElementStubWithoutMapCheck(
+ bool is_js_array,
+ JSObject::ElementsKind elements_kind) {
+ return KeyedLoadElementStub(elements_kind).TryGetCode();
}
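GetElementStubWithoutMapCheck collapses the old per-backing-store entry points (fast elements vs. external arrays) into a single stub constructor keyed on elements kind. A toy restatement of the new key, with illustrative names:

    #include <string>

    enum SketchElementsKind { kFast, kFastDouble, kDictionary, kExternalInt };

    // One (is_js_array, elements_kind) pair now identifies the stub where
    // two separate virtual entry points used to.
    std::string ElementStubName(bool is_js_array, SketchElementsKind kind) {
      return std::string(is_js_array ? "array:" : "object:") +
             std::to_string(static_cast<int>(kind));
    }
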
@@ -1201,10 +1215,8 @@
LookupForRead(*object, *name, &lookup);
// If we did not find a property, check if we need to throw an exception.
- if (!lookup.IsProperty()) {
- if (FLAG_strict || IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
+ if (!lookup.IsProperty() && IsContextual(object)) {
+ return ReferenceError("not_defined", name);
}
if (FLAG_use_ic) {
@@ -1243,9 +1255,13 @@
}
} else if (object->IsJSObject()) {
JSObject* receiver = JSObject::cast(*object);
- if (receiver->HasIndexedInterceptor()) {
+ Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
+ Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
+ if (elements_map == heap->non_strict_arguments_elements_map()) {
+ stub = non_strict_arguments_stub();
+ } else if (receiver->HasIndexedInterceptor()) {
stub = indexed_interceptor_stub();
- } else if (key->IsSmi()) {
+ } else if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
MaybeObject* maybe_stub = ComputeStub(receiver,
false,
kNonStrictMode,
@@ -1355,15 +1371,16 @@
}
-static bool LookupForWrite(JSObject* object,
+static bool LookupForWrite(JSReceiver* receiver,
String* name,
LookupResult* lookup) {
- object->LocalLookup(name, lookup);
+ receiver->LocalLookup(name, lookup);
if (!StoreICableLookup(lookup)) {
return false;
}
if (lookup->type() == INTERCEPTOR) {
+ JSObject* object = JSObject::cast(receiver);
if (object->GetNamedInterceptor()->setter()->IsUndefined()) {
object->LocalLookupRealNamedProperty(name, lookup);
return StoreICableLookup(lookup);
@@ -1385,7 +1402,7 @@
return TypeError("non_object_property_store", object, name);
}
- if (!object->IsJSObject()) {
+ if (!object->IsJSReceiver()) {
// The length property of string values is read-only. Throw in strict mode.
if (strict_mode == kStrictMode && object->IsString() &&
name->Equals(isolate()->heap()->length_symbol())) {
@@ -1395,6 +1412,12 @@
return *value;
}
+ // Handle proxies.
+ if (object->IsJSProxy()) {
+ return JSReceiver::cast(*object)->
+ SetProperty(*name, *value, NONE, strict_mode);
+ }
+
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
// Check if the given name is an array index.
@@ -1409,7 +1432,7 @@
// Use specialized code for setting the length of arrays.
if (receiver->IsJSArray()
&& name->Equals(isolate()->heap()->length_symbol())
- && receiver->AllowsSetElementsLength()) {
+ && JSArray::cast(*receiver)->AllowsSetElementsLength()) {
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
#endif
@@ -1596,18 +1619,14 @@
StrictModeFlag strict_mode,
Code* generic_stub) {
State ic_state = target()->ic_state();
- Code* monomorphic_stub;
- // Always compute the MONOMORPHIC stub, even if the MEGAMORPHIC stub ends up
- // being used. This is necessary because the megamorphic stub needs to have
- // access to more information than what is stored in the receiver map in some
- // cases (external arrays need the array type from the MONOMORPHIC stub).
- MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver,
- is_store,
- strict_mode,
- generic_stub);
- if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub;
-
if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+ Code* monomorphic_stub;
+ MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver,
+ is_store,
+ strict_mode,
+ generic_stub);
+ if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub;
+
return monomorphic_stub;
}
ASSERT(target() != generic_stub);
@@ -1629,79 +1648,40 @@
return generic_stub;
}
- // TODO(1385): Currently MEGAMORPHIC stubs are cached in the receiver map stub
- // cache, but that can put receiver types together from unrelated call sites
- // into the same stub--they always handle the union of all receiver maps seen
- // at all call sites involving the receiver map. This is only an
- // approximation: ideally, there would be a global cache that mapped sets of
- // receiver maps to MEGAMORPHIC stubs. The complexity of the MEGAMORPHIC stub
- // computation also leads to direct manipulation of the stub cache from the IC
- // code, which the global cache solution would avoid.
- Code::Kind kind = this->kind();
- Code::Flags flags = Code::ComputeFlags(kind,
- NOT_IN_LOOP,
- MEGAMORPHIC,
- strict_mode);
- String* megamorphic_name = GetStubNameForCache(MEGAMORPHIC);
- Object* maybe_cached_stub = receiver->map()->FindInCodeCache(megamorphic_name,
- flags);
-
- // Create a set of all receiver maps that have been seen at the IC call site
- // and those seen by the MEGAMORPHIC cached stub, if that's the stub that's
- // been selected.
- MapList receiver_maps;
- if (!maybe_cached_stub->IsUndefined()) {
- GetReceiverMapsForStub(Code::cast(maybe_cached_stub), &receiver_maps);
- }
- bool added_map = false;
- for (int i = 0; i < target_receiver_maps.length(); ++i) {
- if (AddOneReceiverMapIfMissing(&receiver_maps,
- target_receiver_maps.at(i))) {
- added_map = true;
- }
- }
- ASSERT(receiver_maps.length() > 0);
-
- // If the maximum number of receiver maps has been exceeded, use the Generic
+ // If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
- if (receiver_maps.length() > KeyedIC::kMaxKeyedPolymorphism) {
+ if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
return generic_stub;
}
- // If no maps have been seen at the call site that aren't in the cached
- // stub, then use it.
- if (!added_map) {
- ASSERT(!maybe_cached_stub->IsUndefined());
+ PolymorphicCodeCache* cache = isolate()->heap()->polymorphic_code_cache();
+ Code::Flags flags = Code::ComputeFlags(this->kind(),
+ NOT_IN_LOOP,
+ MEGAMORPHIC,
+ strict_mode);
+ Object* maybe_cached_stub = cache->Lookup(&target_receiver_maps, flags);
+ // If there is a cached stub, use it.
+ if (!maybe_cached_stub->IsUndefined()) {
ASSERT(maybe_cached_stub->IsCode());
return Code::cast(maybe_cached_stub);
}
-
- // Lookup all of the receiver maps in the cache, they should all already
- // have MONOMORPHIC stubs.
- CodeList handler_ics(KeyedIC::kMaxKeyedPolymorphism);
- for (int current = 0; current < receiver_maps.length(); ++current) {
- Map* receiver_map(receiver_maps.at(current));
+ // Collect MONOMORPHIC stubs for all target_receiver_maps.
+ CodeList handler_ics(target_receiver_maps.length());
+ for (int i = 0; i < target_receiver_maps.length(); ++i) {
+ Map* receiver_map(target_receiver_maps.at(i));
MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck(
- receiver_map,
- strict_mode,
- generic_stub);
+ receiver_map, strict_mode);
Code* cached_stub;
- if (!maybe_cached_stub->To(&cached_stub)) {
- return maybe_cached_stub;
- }
+ if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub;
handler_ics.Add(cached_stub);
}
-
- Code* stub;
// Build the MEGAMORPHIC stub.
- maybe_stub = ConstructMegamorphicStub(&receiver_maps,
- &handler_ics,
- strict_mode);
+ Code* stub;
+ MaybeObject* maybe_stub = ConstructMegamorphicStub(&target_receiver_maps,
+ &handler_ics,
+ strict_mode);
if (!maybe_stub->To(&stub)) return maybe_stub;
-
- MaybeObject* maybe_update = receiver->UpdateMapCodeCache(
- megamorphic_name,
- stub);
+ MaybeObject* maybe_update = cache->Update(&target_receiver_maps, flags, stub);
if (maybe_update->IsFailure()) return maybe_update;
return stub;
}
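ComputeStub now consults a heap-wide PolymorphicCodeCache keyed on the set of receiver maps plus code flags, replacing the per-receiver-map code cache and the TODO(1385) caveats it carried. A toy model of the lookup/update contract (illustrative types, not the V8 implementation):

    #include <map>
    #include <set>
    #include <string>
    #include <utility>

    using MapId = int;
    using Flags = int;
    using Stub = std::string;

    // The key is the *set* of receiver maps, so unrelated call sites that
    // observed the same map combination share one megamorphic stub.
    static std::map<std::pair<std::set<MapId>, Flags>, Stub> cache;

    const Stub* Lookup(const std::set<MapId>& maps, Flags flags) {
      auto it = cache.find(std::make_pair(maps, flags));
      return it == cache.end() ? nullptr : &it->second;
    }

    void Update(const std::set<MapId>& maps, Flags flags, Stub stub) {
      cache[std::make_pair(maps, flags)] = std::move(stub);
    }
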
@@ -1709,33 +1689,18 @@
MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
Map* receiver_map,
- StrictModeFlag strict_mode,
- Code* generic_stub) {
+ StrictModeFlag strict_mode) {
if ((receiver_map->instance_type() & kNotStringTag) == 0) {
ASSERT(string_stub() != NULL);
return string_stub();
- } else if (receiver_map->has_external_array_elements()) {
- // Determine the array type from the default MONOMORPHIC already generated
- // stub. There is no other way to determine the type of the external array
- // directly from the receiver type.
- Code::Kind kind = this->kind();
- Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
- NORMAL,
- strict_mode);
- String* monomorphic_name = GetStubNameForCache(MONOMORPHIC);
- Object* maybe_default_stub = receiver_map->FindInCodeCache(monomorphic_name,
- flags);
- if (maybe_default_stub->IsUndefined()) {
- return generic_stub;
- }
- Code* default_stub = Code::cast(maybe_default_stub);
- return GetExternalArrayStubWithoutMapCheck(
- default_stub->external_array_type());
- } else if (receiver_map->has_fast_elements()) {
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- return GetFastElementStubWithoutMapCheck(is_js_array);
} else {
- return generic_stub;
+ ASSERT(receiver_map->has_dictionary_elements() ||
+ receiver_map->has_fast_elements() ||
+ receiver_map->has_fast_double_elements() ||
+ receiver_map->has_external_array_elements());
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ return GetElementStubWithoutMapCheck(is_js_array,
+ receiver_map->elements_kind());
}
}
@@ -1745,14 +1710,12 @@
StrictModeFlag strict_mode,
Code* generic_stub) {
Code* result = NULL;
- if (receiver->HasExternalArrayElements()) {
+ if (receiver->HasFastElements() ||
+ receiver->HasExternalArrayElements() ||
+ receiver->HasFastDoubleElements() ||
+ receiver->HasDictionaryElements()) {
MaybeObject* maybe_stub =
- isolate()->stub_cache()->ComputeKeyedLoadOrStoreExternalArray(
- receiver, is_store, strict_mode);
- if (!maybe_stub->To(&result)) return maybe_stub;
- } else if (receiver->map()->has_fast_elements()) {
- MaybeObject* maybe_stub =
- isolate()->stub_cache()->ComputeKeyedLoadOrStoreFastElement(
+ isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
receiver, is_store, strict_mode);
if (!maybe_stub->To(&result)) return maybe_stub;
} else {
@@ -1762,25 +1725,10 @@
}
-String* KeyedStoreIC::GetStubNameForCache(IC::State ic_state) {
- if (ic_state == MONOMORPHIC) {
- return isolate()->heap()->KeyedStoreSpecializedMonomorphic_symbol();
- } else {
- ASSERT(ic_state == MEGAMORPHIC);
- return isolate()->heap()->KeyedStoreSpecializedPolymorphic_symbol();
- }
-}
-
-
-MaybeObject* KeyedStoreIC::GetFastElementStubWithoutMapCheck(
- bool is_js_array) {
- return KeyedStoreFastElementStub(is_js_array).TryGetCode();
-}
-
-
-MaybeObject* KeyedStoreIC::GetExternalArrayStubWithoutMapCheck(
- ExternalArrayType array_type) {
- return KeyedStoreExternalArrayStub(array_type).TryGetCode();
+MaybeObject* KeyedStoreIC::GetElementStubWithoutMapCheck(
+ bool is_js_array,
+ JSObject::ElementsKind elements_kind) {
+ return KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
}
@@ -1851,15 +1799,22 @@
Code* stub = (strict_mode == kStrictMode)
? generic_stub_strict()
: generic_stub();
- if (!force_generic) {
- if (object->IsJSObject() && key->IsSmi()) {
- JSObject* receiver = JSObject::cast(*object);
- MaybeObject* maybe_stub = ComputeStub(receiver,
- true,
- strict_mode,
- stub);
- stub = maybe_stub->IsFailure() ?
- NULL : Code::cast(maybe_stub->ToObjectUnchecked());
+ if (object->IsJSObject()) {
+ JSObject* receiver = JSObject::cast(*object);
+ Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
+ Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
+ if (elements_map == heap->non_strict_arguments_elements_map()) {
+ stub = non_strict_arguments_stub();
+ } else if (!force_generic) {
+ if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
+ HandleScope scope(isolate());
+ MaybeObject* maybe_stub = ComputeStub(receiver,
+ true,
+ strict_mode,
+ stub);
+ stub = maybe_stub->IsFailure() ?
+ NULL : Code::cast(maybe_stub->ToObjectUnchecked());
+ }
}
}
if (stub != NULL) set_target(stub);
@@ -2325,25 +2280,21 @@
}
-// defined in code-stubs-<arch>.cc
-// Only needed to remove dependency of ic.cc on code-stubs-<arch>.h.
-Handle<Code> GetUnaryOpStub(int key, UnaryOpIC::TypeInfo type_info);
-
-
RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
ASSERT(args.length() == 4);
HandleScope scope(isolate);
Handle<Object> operand = args.at<Object>(0);
- int key = Smi::cast(args[1])->value();
- Token::Value op = static_cast<Token::Value>(Smi::cast(args[2])->value());
+ Token::Value op = static_cast<Token::Value>(args.smi_at(1));
+ UnaryOverwriteMode mode = static_cast<UnaryOverwriteMode>(args.smi_at(2));
UnaryOpIC::TypeInfo previous_type =
- static_cast<UnaryOpIC::TypeInfo>(Smi::cast(args[3])->value());
+ static_cast<UnaryOpIC::TypeInfo>(args.smi_at(3));
UnaryOpIC::TypeInfo type = UnaryOpIC::GetTypeInfo(operand);
type = UnaryOpIC::ComputeNewType(type, previous_type);
- Handle<Code> code = GetUnaryOpStub(key, type);
+ UnaryOpStub stub(op, mode, type);
+ Handle<Code> code = stub.GetCode();
if (!code.is_null()) {
if (FLAG_trace_ic) {
PrintF("[UnaryOpIC (%s->%s)#%s]\n",
@@ -2380,23 +2331,16 @@
return *result;
}
-// defined in code-stubs-<arch>.cc
-// Only needed to remove dependency of ic.cc on code-stubs-<arch>.h.
-Handle<Code> GetBinaryOpStub(int key,
- BinaryOpIC::TypeInfo type_info,
- BinaryOpIC::TypeInfo result_type);
-
-
RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
ASSERT(args.length() == 5);
HandleScope scope(isolate);
Handle<Object> left = args.at<Object>(0);
Handle<Object> right = args.at<Object>(1);
- int key = Smi::cast(args[2])->value();
- Token::Value op = static_cast<Token::Value>(Smi::cast(args[3])->value());
+ int key = args.smi_at(2);
+ Token::Value op = static_cast<Token::Value>(args.smi_at(3));
BinaryOpIC::TypeInfo previous_type =
- static_cast<BinaryOpIC::TypeInfo>(Smi::cast(args[4])->value());
+ static_cast<BinaryOpIC::TypeInfo>(args.smi_at(4));
BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(left, right);
type = BinaryOpIC::JoinTypes(type, previous_type);
@@ -2425,7 +2369,8 @@
result_type = BinaryOpIC::HEAP_NUMBER;
}
- Handle<Code> code = GetBinaryOpStub(key, type, result_type);
+ BinaryOpStub stub(key, type, result_type);
+ Handle<Code> code = stub.GetCode();
if (!code.is_null()) {
if (FLAG_trace_ic) {
PrintF("[BinaryOpIC (%s->(%s->%s))#%s]\n",
@@ -2555,7 +2500,7 @@
RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
NoHandleAllocation na;
ASSERT(args.length() == 3);
- CompareIC ic(isolate, static_cast<Token::Value>(Smi::cast(args[2])->value()));
+ CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
return ic.target();
}
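The Smi::cast(args[n])->value() sequences throughout this file are replaced with an args.smi_at(n) accessor. A toy stand-in showing the shape of such an accessor; this is hypothetical and stores plain ints where the real Arguments object would cast and untag a Smi:

    #include <cassert>
    #include <vector>

    struct SketchArguments {
      std::vector<int> slots;  // pretend each slot holds an untagged smi
      int smi_at(int i) const {
        assert(i >= 0 && i < static_cast<int>(slots.size()));
        return slots[i];  // real code would Smi::cast and ->value() here
      }
    };

    int main() {
      SketchArguments args{{7, 42}};
      assert(args.smi_at(1) == 42);
      return 0;
    }
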
diff --git a/src/ic.h b/src/ic.h
index 25d0986..11c2e3a 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -66,7 +66,6 @@
//
class IC {
public:
-
// The ids for utility called from the generated code.
enum UtilityId {
#define CONST_NAME(name) k##name,
@@ -284,6 +283,7 @@
static void GenerateMiss(MacroAssembler* masm, int argc);
static void GenerateMegamorphic(MacroAssembler* masm, int argc);
static void GenerateNormal(MacroAssembler* masm, int argc);
+ static void GenerateNonStrictArguments(MacroAssembler* masm, int argc);
};
@@ -345,13 +345,9 @@
explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
virtual ~KeyedIC() {}
- static const int kMaxKeyedPolymorphism = 4;
-
- virtual MaybeObject* GetFastElementStubWithoutMapCheck(
- bool is_js_array) = 0;
-
- virtual MaybeObject* GetExternalArrayStubWithoutMapCheck(
- ExternalArrayType array_type) = 0;
+ virtual MaybeObject* GetElementStubWithoutMapCheck(
+ bool is_js_array,
+ JSObject::ElementsKind elements_kind) = 0;
protected:
virtual Code* string_stub() {
@@ -360,8 +356,6 @@
virtual Code::Kind kind() const = 0;
- virtual String* GetStubNameForCache(IC::State ic_state) = 0;
-
MaybeObject* ComputeStub(JSObject* receiver,
bool is_store,
StrictModeFlag strict_mode,
@@ -377,8 +371,7 @@
MaybeObject* ComputeMonomorphicStubWithoutMapCheck(
Map* receiver_map,
- StrictModeFlag strict_mode,
- Code* generic_stub);
+ StrictModeFlag strict_mode);
MaybeObject* ComputeMonomorphicStub(JSObject* receiver,
bool is_store,
@@ -409,8 +402,8 @@
}
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
-
static void GenerateIndexedInterceptor(MacroAssembler* masm);
+ static void GenerateNonStrictArguments(MacroAssembler* masm);
// Bit mask to be tested against bit field for the cases when
// generic stub should go into slow case.
@@ -419,17 +412,13 @@
static const int kSlowCaseBitFieldMask =
(1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
- virtual MaybeObject* GetFastElementStubWithoutMapCheck(
- bool is_js_array);
-
- virtual MaybeObject* GetExternalArrayStubWithoutMapCheck(
- ExternalArrayType array_type);
+ virtual MaybeObject* GetElementStubWithoutMapCheck(
+ bool is_js_array,
+ JSObject::ElementsKind elements_kind);
protected:
virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
- virtual String* GetStubNameForCache(IC::State ic_state);
-
virtual MaybeObject* ConstructMegamorphicStub(
MapList* receiver_maps,
CodeList* targets,
@@ -468,6 +457,10 @@
return isolate()->builtins()->builtin(
Builtins::kKeyedLoadIC_IndexedInterceptor);
}
+ Code* non_strict_arguments_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::kKeyedLoadIC_NonStrictArguments);
+ }
static void Clear(Address address, Code* target);
@@ -568,18 +561,15 @@
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode);
static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
+ static void GenerateNonStrictArguments(MacroAssembler* masm);
- virtual MaybeObject* GetFastElementStubWithoutMapCheck(
- bool is_js_array);
-
- virtual MaybeObject* GetExternalArrayStubWithoutMapCheck(
- ExternalArrayType array_type);
+ virtual MaybeObject* GetElementStubWithoutMapCheck(
+ bool is_js_array,
+ JSObject::ElementsKind elements_kind);
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
- virtual String* GetStubNameForCache(IC::State ic_state);
-
virtual MaybeObject* ConstructMegamorphicStub(
MapList* receiver_maps,
CodeList* targets,
@@ -626,6 +616,10 @@
return isolate()->builtins()->builtin(
Builtins::kKeyedStoreIC_Generic_Strict);
}
+ Code* non_strict_arguments_stub() {
+ return isolate()->builtins()->builtin(
+ Builtins::kKeyedStoreIC_NonStrictArguments);
+ }
static void Clear(Address address, Code* target);
@@ -635,7 +629,6 @@
class UnaryOpIC: public IC {
public:
-
// sorted: increasingly more unspecific (ignoring UNINITIALIZED)
// TODO(svenpanne) Using enums+switch is an antipattern, use a class instead.
enum TypeInfo {
@@ -662,7 +655,6 @@
// Type Recording BinaryOpIC, that records the types of the inputs and outputs.
class BinaryOpIC: public IC {
public:
-
enum TypeInfo {
UNINITIALIZED,
SMI,
diff --git a/src/isolate.cc b/src/isolate.cc
index a7bf7d9..8a30e79 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -85,13 +85,9 @@
#ifdef USE_SIMULATOR
simulator_ = NULL;
#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
js_entry_sp_ = NULL;
external_callback_ = NULL;
-#endif
-#ifdef ENABLE_VMSTATE_TRACKING
current_vm_state_ = EXTERNAL;
-#endif
try_catch_handler_address_ = NULL;
context_ = NULL;
thread_id_ = ThreadId::Invalid();
@@ -190,8 +186,8 @@
private:
- explicit PreallocatedMemoryThread(Isolate* isolate)
- : Thread(isolate, "v8:PreallocMem"),
+ PreallocatedMemoryThread()
+ : Thread("v8:PreallocMem"),
keep_running_(true),
wait_for_ever_semaphore_(OS::CreateSemaphore(0)),
data_ready_semaphore_(OS::CreateSemaphore(0)),
@@ -219,7 +215,7 @@
void Isolate::PreallocatedMemoryThreadStart() {
if (preallocated_memory_thread_ != NULL) return;
- preallocated_memory_thread_ = new PreallocatedMemoryThread(this);
+ preallocated_memory_thread_ = new PreallocatedMemoryThread();
preallocated_memory_thread_->Start();
}
@@ -1304,6 +1300,7 @@
if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
RuntimeProfiler::IsolateEnteredJS(this);
}
+ ASSERT(context() == NULL || context()->IsContext());
return from + sizeof(ThreadLocalTop);
}
@@ -1346,6 +1343,16 @@
}
+void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
+ PerIsolateThreadData* data = list_;
+ while (data != NULL) {
+ PerIsolateThreadData* next = data->next_;
+ if (data->isolate() == isolate) Remove(data);
+ data = next;
+ }
+}
+
+
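RemoveAllThreads walks the thread-data list while removing matching entries; reading next_ before calling Remove is what keeps the traversal valid. The same pattern in a generic, self-contained form:

    // Capture the successor before unlinking the current node, so removal
    // cannot invalidate the iteration.
    struct Node { int owner; Node* next; };

    Node* RemoveAllWithOwner(Node* head, int owner) {
      Node** link = &head;
      while (*link != nullptr) {
        Node* current = *link;
        if (current->owner == owner) {
          *link = current->next;  // unlink first...
          delete current;         // ...then it is safe to free the node
        } else {
          link = &current->next;
        }
      }
      return head;
    }
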
#ifdef DEBUG
#define TRACE_ISOLATE(tag) \
do { \
@@ -1396,8 +1403,6 @@
ast_sentinels_(NULL),
string_tracker_(NULL),
regexp_stack_(NULL),
- frame_element_constant_list_(0),
- result_constant_list_(0),
embedder_data_(NULL) {
TRACE_ISOLATE(constructor);
@@ -1432,10 +1437,6 @@
debugger_ = NULL;
#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
- producer_heap_profile_ = NULL;
-#endif
-
handle_scope_data_.Initialize();
#define ISOLATE_INIT_EXECUTE(type, name, initial_value) \
@@ -1462,6 +1463,10 @@
Deinit();
+ { ScopedLock lock(process_wide_mutex_);
+ thread_data_table_->RemoveAllThreads(this);
+ }
+
if (!IsDefaultIsolate()) {
delete this;
}
@@ -1520,11 +1525,6 @@
Isolate::~Isolate() {
TRACE_ISOLATE(destructor);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- delete producer_heap_profile_;
- producer_heap_profile_ = NULL;
-#endif
-
delete unicode_cache_;
unicode_cache_ = NULL;
@@ -1600,8 +1600,7 @@
ASSERT(Isolate::Current() == this);
#ifdef ENABLE_DEBUGGER_SUPPORT
debug_ = new Debug(this);
- debugger_ = new Debugger();
- debugger_->isolate_ = this;
+ debugger_ = new Debugger(this);
#endif
memory_allocator_ = new MemoryAllocator();
@@ -1620,7 +1619,6 @@
#define C(name) isolate_addresses_[Isolate::k_##name] = \
reinterpret_cast<Address>(name());
ISOLATE_ADDRESS_LIST(C)
- ISOLATE_ADDRESS_LIST_PROF(C)
#undef C
string_tracker_ = new StringTracker();
@@ -1641,11 +1639,6 @@
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
-#ifdef ENABLE_LOGGING_AND_PROFILING
- producer_heap_profile_ = new ProducerHeapProfile();
- producer_heap_profile_->isolate_ = this;
-#endif
-
state_ = PREINITIALIZED;
return true;
}
@@ -1729,11 +1722,11 @@
return false;
}
+ InitializeThreadLocal();
+
bootstrapper_->Initialize(create_heap_objects);
builtins_.Setup(create_heap_objects);
- InitializeThreadLocal();
-
// Only preallocate on the first initialization.
if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
// Start the thread which will set aside some memory.
@@ -1853,11 +1846,6 @@
}
-void Isolate::ResetEagerOptimizingData() {
- compilation_cache_->ResetEagerOptimizingData();
-}
-
-
#ifdef DEBUG
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
diff --git a/src/isolate.h b/src/isolate.h
index 0d36b3f..f2281aa 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -69,7 +69,6 @@
class NoAllocationStringAllocator;
class PcToCodeCache;
class PreallocatedMemoryThread;
-class ProducerHeapProfile;
class RegExpStack;
class SaveContext;
class UnicodeCache;
@@ -126,14 +125,8 @@
C(c_entry_fp_address) \
C(context_address) \
C(pending_exception_address) \
- C(external_caught_exception_address)
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-#define ISOLATE_ADDRESS_LIST_PROF(C) \
+ C(external_caught_exception_address) \
C(js_entry_sp_address)
-#else
-#define ISOLATE_ADDRESS_LIST_PROF(C)
-#endif
// Platform-independent, reliable thread identifier.
@@ -253,14 +246,9 @@
#endif
#endif // USE_SIMULATOR
-#ifdef ENABLE_LOGGING_AND_PROFILING
Address js_entry_sp_; // the stack pointer of the bottom js entry frame
Address external_callback_; // the external callback we're currently in
-#endif
-
-#ifdef ENABLE_VMSTATE_TRACKING
StateTag current_vm_state_;
-#endif
// Generated code scratch locations.
int32_t formal_count_;
@@ -314,18 +302,6 @@
#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-#define ISOLATE_LOGGING_INIT_LIST(V) \
- V(CpuProfiler*, cpu_profiler, NULL) \
- V(HeapProfiler*, heap_profiler, NULL)
-
-#else
-
-#define ISOLATE_LOGGING_INIT_LIST(V)
-
-#endif
-
#define ISOLATE_INIT_ARRAY_LIST(V) \
/* SerializerDeserializer state. */ \
V(Object*, serialize_partial_snapshot_cache, kPartialSnapshotCacheCapacity) \
@@ -333,6 +309,8 @@
V(int, bad_char_shift_table, kUC16AlphabetSize) \
V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
V(int, suffix_table, (kBMMaxShift + 1)) \
+ V(uint32_t, random_seed, 2) \
+ V(uint32_t, private_random_seed, 2) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
@@ -372,8 +350,9 @@
/* SafeStackFrameIterator activations count. */ \
V(int, safe_stack_iterator_counter, 0) \
V(uint64_t, enabled_cpu_features, 0) \
+ V(CpuProfiler*, cpu_profiler, NULL) \
+ V(HeapProfiler*, heap_profiler, NULL) \
ISOLATE_PLATFORM_INIT_LIST(V) \
- ISOLATE_LOGGING_INIT_LIST(V) \
ISOLATE_DEBUGGER_INIT_LIST(V)
class Isolate {
@@ -444,7 +423,6 @@
enum AddressId {
#define C(name) k_##name,
ISOLATE_ADDRESS_LIST(C)
- ISOLATE_ADDRESS_LIST_PROF(C)
#undef C
k_isolate_address_count
};
@@ -529,6 +507,7 @@
// Access to top context (where the current function object was created).
Context* context() { return thread_local_top_.context_; }
void set_context(Context* context) {
+ ASSERT(context == NULL || context->IsContext());
thread_local_top_.context_ = context;
}
Context** context_address() { return &thread_local_top_.context_; }
@@ -594,7 +573,7 @@
return thread_local_top_.scheduled_exception_;
}
bool has_scheduled_exception() {
- return !thread_local_top_.scheduled_exception_->IsTheHole();
+ return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
}
void clear_scheduled_exception() {
thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
@@ -618,7 +597,6 @@
}
inline Address* handler_address() { return &thread_local_top_.handler_; }
-#ifdef ENABLE_LOGGING_AND_PROFILING
// Bottom JS entry (see StackTracer::Trace in log.cc).
static Address js_entry_sp(ThreadLocalTop* thread) {
return thread->js_entry_sp_;
@@ -626,7 +604,6 @@
inline Address* js_entry_sp_address() {
return &thread_local_top_.js_entry_sp_;
}
-#endif
// Generated code scratch locations.
void* formal_count_address() { return &thread_local_top_.formal_count_; }
@@ -895,14 +872,6 @@
return &interp_canonicalize_mapping_;
}
- ZoneObjectList* frame_element_constant_list() {
- return &frame_element_constant_list_;
- }
-
- ZoneObjectList* result_constant_list() {
- return &result_constant_list_;
- }
-
void* PreallocatedStorageNew(size_t size);
void PreallocatedStorageDelete(void* p);
void PreallocatedStorageInit(size_t size);
@@ -914,12 +883,6 @@
inline bool DebuggerHasBreakPoints();
-#ifdef ENABLE_LOGGING_AND_PROFILING
- ProducerHeapProfile* producer_heap_profile() {
- return producer_heap_profile_;
- }
-#endif
-
#ifdef DEBUG
HistogramInfo* heap_histograms() { return heap_histograms_; }
@@ -957,22 +920,21 @@
static const int kJSRegexpStaticOffsetsVectorSize = 50;
-#ifdef ENABLE_LOGGING_AND_PROFILING
Address external_callback() {
return thread_local_top_.external_callback_;
}
void set_external_callback(Address callback) {
thread_local_top_.external_callback_ = callback;
}
-#endif
-#ifdef ENABLE_VMSTATE_TRACKING
StateTag current_vm_state() {
return thread_local_top_.current_vm_state_;
}
void SetCurrentVMState(StateTag state) {
if (RuntimeProfiler::IsEnabled()) {
+ // Make sure thread local top is initialized.
+ ASSERT(thread_local_top_.isolate_ == this);
StateTag current_state = thread_local_top_.current_vm_state_;
if (current_state != JS && state == JS) {
// Non-JS -> JS transition.
@@ -990,9 +952,6 @@
}
thread_local_top_.current_vm_state_ = state;
}
-#endif
-
- void ResetEagerOptimizingData();
void SetData(void* data) { embedder_data_ = data; }
void* GetData() { return embedder_data_; }
@@ -1011,6 +970,7 @@
void Insert(PerIsolateThreadData* data);
void Remove(Isolate* isolate, ThreadId thread_id);
void Remove(PerIsolateThreadData* data);
+ void RemoveAllThreads(Isolate* isolate);
private:
PerIsolateThreadData* list_;
@@ -1157,8 +1117,6 @@
regexp_macro_assembler_canonicalize_;
RegExpStack* regexp_stack_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
- ZoneObjectList frame_element_constant_list_;
- ZoneObjectList result_constant_list_;
void* embedder_data_;
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
@@ -1180,10 +1138,6 @@
Debug* debug_;
#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
- ProducerHeapProfile* producer_heap_profile_;
-#endif
-
#define GLOBAL_BACKING_STORE(type, name, initialvalue) \
type name##_;
ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
@@ -1373,10 +1327,4 @@
} } // namespace v8::internal
-// TODO(isolates): Get rid of these -inl.h includes and place them only where
-// they're needed.
-#include "allocation-inl.h"
-#include "zone-inl.h"
-#include "frames-inl.h"
-
#endif // V8_ISOLATE_H_
diff --git a/src/json-parser.cc b/src/json-parser.cc
deleted file mode 100644
index b7f57c2..0000000
--- a/src/json-parser.cc
+++ /dev/null
@@ -1,513 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "char-predicates-inl.h"
-#include "conversions.h"
-#include "json-parser.h"
-#include "messages.h"
-#include "spaces.h"
-
-namespace v8 {
-namespace internal {
-
-
-Handle<Object> JsonParser::ParseJson(Handle<String> source) {
- isolate_ = source->map()->isolate();
- source_ = Handle<String>(source->TryFlattenGetString());
- source_length_ = source_->length() - 1;
-
- // Optimized fast case where we only have ascii characters.
- if (source_->IsSeqAsciiString()) {
- is_sequential_ascii_ = true;
- seq_source_ = Handle<SeqAsciiString>::cast(source_);
- } else {
- is_sequential_ascii_ = false;
- }
-
- // Set initial position right before the string.
- position_ = -1;
- // Advance to the first character (posibly EOS)
- Advance();
- Next();
- Handle<Object> result = ParseJsonValue();
- if (result.is_null() || Next() != Token::EOS) {
- // Parse failed. Scanner's current token is the unexpected token.
- Token::Value token = current_.token;
-
- const char* message;
- const char* name_opt = NULL;
-
- switch (token) {
- case Token::EOS:
- message = "unexpected_eos";
- break;
- case Token::NUMBER:
- message = "unexpected_token_number";
- break;
- case Token::STRING:
- message = "unexpected_token_string";
- break;
- case Token::IDENTIFIER:
- case Token::FUTURE_RESERVED_WORD:
- message = "unexpected_token_identifier";
- break;
- default:
- message = "unexpected_token";
- name_opt = Token::String(token);
- ASSERT(name_opt != NULL);
- break;
- }
-
- Factory* factory = isolate()->factory();
- MessageLocation location(factory->NewScript(source),
- current_.beg_pos,
- current_.end_pos);
- Handle<JSArray> array;
- if (name_opt == NULL) {
- array = factory->NewJSArray(0);
- } else {
- Handle<String> name = factory->NewStringFromUtf8(CStrVector(name_opt));
- Handle<FixedArray> element = factory->NewFixedArray(1);
- element->set(0, *name);
- array = factory->NewJSArrayWithElements(element);
- }
- Handle<Object> result = factory->NewSyntaxError(message, array);
- isolate()->Throw(*result, &location);
- return Handle<Object>::null();
- }
- return result;
-}
-
-
-// Parse any JSON value.
-Handle<Object> JsonParser::ParseJsonValue() {
- Token::Value token = Next();
- switch (token) {
- case Token::STRING:
- return GetString(false);
- case Token::NUMBER:
- return isolate()->factory()->NewNumber(number_);
- case Token::FALSE_LITERAL:
- return isolate()->factory()->false_value();
- case Token::TRUE_LITERAL:
- return isolate()->factory()->true_value();
- case Token::NULL_LITERAL:
- return isolate()->factory()->null_value();
- case Token::LBRACE:
- return ParseJsonObject();
- case Token::LBRACK:
- return ParseJsonArray();
- default:
- return ReportUnexpectedToken();
- }
-}
-
-
-// Parse a JSON object. Scanner must be right after '{' token.
-Handle<Object> JsonParser::ParseJsonObject() {
- Handle<JSFunction> object_constructor(
- isolate()->global_context()->object_function());
- Handle<JSObject> json_object =
- isolate()->factory()->NewJSObject(object_constructor);
-
- if (Peek() == Token::RBRACE) {
- Next();
- } else {
- do {
- if (Next() != Token::STRING) {
- return ReportUnexpectedToken();
- }
- Handle<String> key = GetString(true);
- if (Next() != Token::COLON) {
- return ReportUnexpectedToken();
- }
-
- Handle<Object> value = ParseJsonValue();
- if (value.is_null()) return Handle<Object>::null();
-
- uint32_t index;
- if (key->AsArrayIndex(&index)) {
- SetOwnElement(json_object, index, value, kNonStrictMode);
- } else if (key->Equals(isolate()->heap()->Proto_symbol())) {
- SetPrototype(json_object, value);
- } else {
- SetLocalPropertyIgnoreAttributes(json_object, key, value, NONE);
- }
- } while (Next() == Token::COMMA);
- if (current_.token != Token::RBRACE) {
- return ReportUnexpectedToken();
- }
- }
- return json_object;
-}
-
-// Parse a JSON array. Scanner must be right after '[' token.
-Handle<Object> JsonParser::ParseJsonArray() {
- ZoneScope zone_scope(isolate(), DELETE_ON_EXIT);
- ZoneList<Handle<Object> > elements(4);
-
- Token::Value token = Peek();
- if (token == Token::RBRACK) {
- Next();
- } else {
- do {
- Handle<Object> element = ParseJsonValue();
- if (element.is_null()) return Handle<Object>::null();
- elements.Add(element);
- token = Next();
- } while (token == Token::COMMA);
- if (token != Token::RBRACK) {
- return ReportUnexpectedToken();
- }
- }
-
- // Allocate a fixed array with all the elements.
- Handle<FixedArray> fast_elements =
- isolate()->factory()->NewFixedArray(elements.length());
-
- for (int i = 0, n = elements.length(); i < n; i++) {
- fast_elements->set(i, *elements[i]);
- }
-
- return isolate()->factory()->NewJSArrayWithElements(fast_elements);
-}
-
-
-Token::Value JsonParser::Next() {
- current_ = next_;
- ScanJson();
- return current_.token;
-}
-
-void JsonParser::ScanJson() {
- if (source_->IsSeqAsciiString()) {
- is_sequential_ascii_ = true;
- } else {
- is_sequential_ascii_ = false;
- }
-
- Token::Value token;
- do {
- // Remember the position of the next token
- next_.beg_pos = position_;
- switch (c0_) {
- case '\t':
- case '\r':
- case '\n':
- case ' ':
- Advance();
- token = Token::WHITESPACE;
- break;
- case '{':
- Advance();
- token = Token::LBRACE;
- break;
- case '}':
- Advance();
- token = Token::RBRACE;
- break;
- case '[':
- Advance();
- token = Token::LBRACK;
- break;
- case ']':
- Advance();
- token = Token::RBRACK;
- break;
- case ':':
- Advance();
- token = Token::COLON;
- break;
- case ',':
- Advance();
- token = Token::COMMA;
- break;
- case '"':
- token = ScanJsonString();
- break;
- case '-':
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- token = ScanJsonNumber();
- break;
- case 't':
- token = ScanJsonIdentifier("true", Token::TRUE_LITERAL);
- break;
- case 'f':
- token = ScanJsonIdentifier("false", Token::FALSE_LITERAL);
- break;
- case 'n':
- token = ScanJsonIdentifier("null", Token::NULL_LITERAL);
- break;
- default:
- if (c0_ < 0) {
- Advance();
- token = Token::EOS;
- } else {
- Advance();
- token = Token::ILLEGAL;
- }
- }
- } while (token == Token::WHITESPACE);
-
- next_.end_pos = position_;
- next_.token = token;
-}
-
-
-Token::Value JsonParser::ScanJsonIdentifier(const char* text,
- Token::Value token) {
- while (*text != '\0') {
- if (c0_ != *text) return Token::ILLEGAL;
- Advance();
- text++;
- }
- return token;
-}
-
-
-Token::Value JsonParser::ScanJsonNumber() {
- bool negative = false;
-
- if (c0_ == '-') {
- Advance();
- negative = true;
- }
- if (c0_ == '0') {
- Advance();
- // Prefix zero is only allowed if it's the only digit before
- // a decimal point or exponent.
- if ('0' <= c0_ && c0_ <= '9') return Token::ILLEGAL;
- } else {
- int i = 0;
- int digits = 0;
- if (c0_ < '1' || c0_ > '9') return Token::ILLEGAL;
- do {
- i = i * 10 + c0_ - '0';
- digits++;
- Advance();
- } while (c0_ >= '0' && c0_ <= '9');
- if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
- number_ = (negative ? -i : i);
- return Token::NUMBER;
- }
- }
- if (c0_ == '.') {
- Advance();
- if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
- do {
- Advance();
- } while (c0_ >= '0' && c0_ <= '9');
- }
- if (AsciiAlphaToLower(c0_) == 'e') {
- Advance();
- if (c0_ == '-' || c0_ == '+') Advance();
- if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
- do {
- Advance();
- } while (c0_ >= '0' && c0_ <= '9');
- }
- if (is_sequential_ascii_) {
- Vector<const char> chars(seq_source_->GetChars() + next_.beg_pos,
- position_ - next_.beg_pos);
- number_ = StringToDouble(isolate()->unicode_cache(),
- chars,
- NO_FLAGS, // Hex, octal or trailing junk.
- OS::nan_value());
- } else {
- Vector<char> buffer = Vector<char>::New(position_ - next_.beg_pos);
- String::WriteToFlat(*source_, buffer.start(), next_.beg_pos, position_);
- Vector<const char> result =
- Vector<const char>(reinterpret_cast<const char*>(buffer.start()),
- position_ - next_.beg_pos);
- number_ = StringToDouble(isolate()->unicode_cache(),
- result,
- NO_FLAGS, // Hex, octal or trailing junk.
- 0.0);
- buffer.Dispose();
- }
- return Token::NUMBER;
-}
-
-Token::Value JsonParser::SlowScanJsonString() {
- // The currently scanned ascii characters.
- Handle<String> ascii(isolate()->factory()->NewSubString(source_,
- next_.beg_pos + 1,
- position_));
- Handle<String> two_byte =
- isolate()->factory()->NewRawTwoByteString(kInitialSpecialStringSize,
- NOT_TENURED);
- Handle<SeqTwoByteString> seq_two_byte =
- Handle<SeqTwoByteString>::cast(two_byte);
-
- int allocation_count = 1;
- int count = 0;
-
- while (c0_ != '"') {
- // Create new seq string
- if (count >= kInitialSpecialStringSize * allocation_count) {
- allocation_count = allocation_count * 2;
- int new_size = allocation_count * kInitialSpecialStringSize;
- Handle<String> new_two_byte =
- isolate()->factory()->NewRawTwoByteString(new_size,
- NOT_TENURED);
- uc16* char_start =
- Handle<SeqTwoByteString>::cast(new_two_byte)->GetChars();
- String::WriteToFlat(*seq_two_byte, char_start, 0, count);
- seq_two_byte = Handle<SeqTwoByteString>::cast(new_two_byte);
- }
-
- // Check for control character (0x00-0x1f) or unterminated string (<0).
- if (c0_ < 0x20) return Token::ILLEGAL;
- if (c0_ != '\\') {
- seq_two_byte->SeqTwoByteStringSet(count++, c0_);
- Advance();
- } else {
- Advance();
- switch (c0_) {
- case '"':
- case '\\':
- case '/':
- seq_two_byte->SeqTwoByteStringSet(count++, c0_);
- break;
- case 'b':
- seq_two_byte->SeqTwoByteStringSet(count++, '\x08');
- break;
- case 'f':
- seq_two_byte->SeqTwoByteStringSet(count++, '\x0c');
- break;
- case 'n':
- seq_two_byte->SeqTwoByteStringSet(count++, '\x0a');
- break;
- case 'r':
- seq_two_byte->SeqTwoByteStringSet(count++, '\x0d');
- break;
- case 't':
- seq_two_byte->SeqTwoByteStringSet(count++, '\x09');
- break;
- case 'u': {
- uc32 value = 0;
- for (int i = 0; i < 4; i++) {
- Advance();
- int digit = HexValue(c0_);
- if (digit < 0) {
- return Token::ILLEGAL;
- }
- value = value * 16 + digit;
- }
- seq_two_byte->SeqTwoByteStringSet(count++, value);
- break;
- }
- default:
- return Token::ILLEGAL;
- }
- Advance();
- }
- }
- // Advance past the last '"'.
- ASSERT_EQ('"', c0_);
- Advance();
-
- // Shrink the the string to our length.
- if (isolate()->heap()->InNewSpace(*seq_two_byte)) {
- isolate()->heap()->new_space()->
- ShrinkStringAtAllocationBoundary<SeqTwoByteString>(*seq_two_byte,
- count);
- } else {
- int string_size = SeqTwoByteString::SizeFor(count);
- int allocated_string_size =
- SeqTwoByteString::SizeFor(kInitialSpecialStringSize * allocation_count);
- int delta = allocated_string_size - string_size;
- Address start_filler_object = seq_two_byte->address() + string_size;
- seq_two_byte->set_length(count);
- isolate()->heap()->CreateFillerObjectAt(start_filler_object, delta);
- }
- string_val_ = isolate()->factory()->NewConsString(ascii, seq_two_byte);
- return Token::STRING;
-}
-
-
-Token::Value JsonParser::ScanJsonString() {
- ASSERT_EQ('"', c0_);
- // Set string_val to null. If string_val is not set we assume an
- // ascii string begining at next_.beg_pos + 1 to next_.end_pos - 1.
- string_val_ = Handle<String>::null();
- Advance();
- // Fast case for ascii only without escape characters.
- while (c0_ != '"') {
- // Check for control character (0x00-0x1f) or unterminated string (<0).
- if (c0_ < 0x20) return Token::ILLEGAL;
- if (c0_ != '\\' && c0_ < kMaxAsciiCharCode) {
- Advance();
- } else {
- return SlowScanJsonString();
- }
- }
- ASSERT_EQ('"', c0_);
- // Advance past the last '"'.
- Advance();
- return Token::STRING;
-}
-
-Handle<String> JsonParser::GetString() {
- return GetString(false);
-}
-
-Handle<String> JsonParser::GetSymbol() {
- Handle<String> result = GetString(true);
- if (result->IsSymbol()) return result;
- return isolate()->factory()->LookupSymbol(result);
-}
-
-Handle<String> JsonParser::GetString(bool hint_symbol) {
- // We have a non ascii string, return that.
- if (!string_val_.is_null()) return string_val_;
-
- if (is_sequential_ascii_ && hint_symbol) {
- Handle<SeqAsciiString> seq = Handle<SeqAsciiString>::cast(source_);
- // The current token includes the '"' in both ends.
- int length = current_.end_pos - current_.beg_pos - 2;
- return isolate()->factory()->LookupAsciiSymbol(seq_source_,
- current_.beg_pos + 1,
- length);
- }
- // The current token includes the '"' in both ends.
- return isolate()->factory()->NewSubString(
- source_, current_.beg_pos + 1, current_.end_pos - 1);
-}
-
-} } // namespace v8::internal
diff --git a/src/json-parser.h b/src/json-parser.h
index 5903d21..0c01461 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -28,12 +28,19 @@
#ifndef V8_JSON_PARSER_H_
#define V8_JSON_PARSER_H_
+#include "v8.h"
+
+#include "char-predicates-inl.h"
+#include "v8conversions.h"
+#include "messages.h"
+#include "spaces-inl.h"
#include "token.h"
namespace v8 {
namespace internal {
// A simple json parser.
+template <bool seq_ascii>
class JsonParser BASE_EMBEDDED {
public:
static Handle<Object> Parse(Handle<String> source) {
@@ -47,41 +54,64 @@
Handle<Object> ParseJson(Handle<String> source);
inline void Advance() {
+ position_++;
if (position_ >= source_length_) {
- position_++;
c0_ = kEndOfString;
- } else if (is_sequential_ascii_) {
- position_++;
+ } else if (seq_ascii) {
c0_ = seq_source_->SeqAsciiStringGet(position_);
} else {
- position_++;
c0_ = source_->Get(position_);
}
}
- inline Isolate* isolate() { return isolate_; }
+ // The JSON lexical grammar is specified in the ECMAScript 5 standard,
+ // section 15.12.1.1. The only allowed whitespace characters between tokens
+ // are tab, carriage-return, newline and space.
- // Get the string for the current string token.
- Handle<String> GetString(bool hint_symbol);
- Handle<String> GetString();
- Handle<String> GetSymbol();
+ inline void AdvanceSkipWhitespace() {
+ do {
+ Advance();
+ } while (c0_ == '\t' || c0_ == '\r' || c0_ == '\n' || c0_ == ' ');
+ }
- // Scan a single JSON token. The JSON lexical grammar is specified in the
- // ECMAScript 5 standard, section 15.12.1.1.
- // Recognizes all of the single-character tokens directly, or calls a function
- // to scan a number, string or identifier literal.
- // The only allowed whitespace characters between tokens are tab,
- // carriage-return, newline and space.
- void ScanJson();
+ inline void SkipWhitespace() {
+ while (c0_ == '\t' || c0_ == '\r' || c0_ == '\n' || c0_ == ' ') {
+ Advance();
+ }
+ }
+
+ inline uc32 AdvanceGetChar() {
+ Advance();
+ return c0_;
+ }
+
+ // Checks that the current character is c.
+ // If so, consumes c and skips whitespace.
+ inline bool MatchSkipWhiteSpace(uc32 c) {
+ if (c0_ == c) {
+ AdvanceSkipWhitespace();
+ return true;
+ }
+ return false;
+ }
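These helpers replace the old token scanner with single-character lookahead plus eager whitespace skipping. A self-contained illustration of the same pattern; the real parser reads V8 string objects, not std::string:

    #include <string>
    #include <utility>

    struct MiniScanner {
      std::string src;
      size_t pos;
      int c0;

      explicit MiniScanner(std::string s)
          : src(std::move(s)), pos(0), c0(-1) {}

      void Advance() { c0 = pos < src.size() ? src[pos++] : -1; }
      void AdvanceSkipWhitespace() {
        do {
          Advance();
        } while (c0 == '\t' || c0 == '\r' || c0 == '\n' || c0 == ' ');
      }
      bool MatchSkipWhiteSpace(int c) {
        if (c0 != c) return false;
        AdvanceSkipWhitespace();
        return true;
      }
    };
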
// A JSON string (production JSONString) is subset of valid JavaScript string
// literals. The string must only be double-quoted (not single-quoted), and
// the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
// four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
- Token::Value ScanJsonString();
- // Slow version for unicode support, uses the first ascii_count characters,
- // as first part of a ConsString
- Token::Value SlowScanJsonString();
+ Handle<String> ParseJsonString() {
+ return ScanJsonString<false>();
+ }
+ Handle<String> ParseJsonSymbol() {
+ return ScanJsonString<true>();
+ }
+ template <bool is_symbol>
+ Handle<String> ScanJsonString();
+ // Creates a new string and copies prefix[start..end] into the beginning
+ // of it. Then scans the rest of the string, adding characters after the
+ // prefix. Called by ScanJsonString when reaching a '\' or non-ASCII char.
+ template <typename StringType, typename SinkChar>
+ Handle<String> SlowScanJsonString(Handle<String> prefix, int start, int end);
// A JSON number (production JSONNumber) is a subset of the valid JavaScript
// decimal number literals.
@@ -89,12 +119,7 @@
// digit before and after a decimal point, may not have prefixed zeros (unless
// the integer part is zero), and may include an exponent part (e.g., "e-10").
// Hexadecimal and octal numbers are not allowed.
- Token::Value ScanJsonNumber();
-
- // Used to recognizes one of the literals "true", "false", or "null". These
- // are the only valid JSON identifiers (productions JSONBooleanLiteral,
- // JSONNullLiteral).
- Token::Value ScanJsonIdentifier(const char* text, Token::Value token);
+ Handle<Object> ParseJsonNumber();
// Parse a single JSON value from input (grammar production JSONValue).
// A JSON value is either a (double-quoted) string literal, a number literal,
@@ -119,23 +144,13 @@
// Mark that a parsing error has happened at the current token, and
// return a null handle. Primarily for readability.
- Handle<Object> ReportUnexpectedToken() { return Handle<Object>::null(); }
+ inline Handle<Object> ReportUnexpectedCharacter() {
+ return Handle<Object>::null();
+ }
- // Peek at the next token.
- Token::Value Peek() { return next_.token; }
- // Scan the next token and return the token scanned on the last call.
- Token::Value Next();
+ inline Isolate* isolate() { return isolate_; }
- struct TokenInfo {
- TokenInfo() : token(Token::ILLEGAL),
- beg_pos(0),
- end_pos(0) { }
- Token::Value token;
- int beg_pos;
- int end_pos;
- };
-
- static const int kInitialSpecialStringSize = 1024;
+ static const int kInitialSpecialStringLength = 1024;
private:
@@ -143,19 +158,441 @@
int source_length_;
Handle<SeqAsciiString> seq_source_;
- bool is_sequential_ascii_;
- // Current and next token
- TokenInfo current_;
- TokenInfo next_;
Isolate* isolate_;
uc32 c0_;
int position_;
-
-
- Handle<String> string_val_;
- double number_;
};
+template <bool seq_ascii>
+Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source) {
+ isolate_ = source->map()->isolate();
+ source_ = Handle<String>(source->TryFlattenGetString());
+ source_length_ = source_->length();
+
+ // Optimized fast case where we only have ASCII characters.
+ if (seq_ascii) {
+ seq_source_ = Handle<SeqAsciiString>::cast(source_);
+ }
+
+ // Set initial position right before the string.
+ position_ = -1;
+ // Advance to the first character (possibly EOS).
+ AdvanceSkipWhitespace();
+ Handle<Object> result = ParseJsonValue();
+ if (result.is_null() || c0_ != kEndOfString) {
+ // Parse failed. Current character is the unexpected token.
+
+ const char* message;
+ Factory* factory = isolate()->factory();
+ Handle<JSArray> array;
+
+ switch (c0_) {
+ case kEndOfString:
+ message = "unexpected_eos";
+ array = factory->NewJSArray(0);
+ break;
+ case '-':
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ message = "unexpected_token_number";
+ array = factory->NewJSArray(0);
+ break;
+ case '"':
+ message = "unexpected_token_string";
+ array = factory->NewJSArray(0);
+ break;
+ default:
+ message = "unexpected_token";
+ Handle<Object> name = LookupSingleCharacterStringFromCode(c0_);
+ Handle<FixedArray> element = factory->NewFixedArray(1);
+ element->set(0, *name);
+ array = factory->NewJSArrayWithElements(element);
+ break;
+ }
+
+ MessageLocation location(factory->NewScript(source),
+ position_,
+ position_ + 1);
+ Handle<Object> result = factory->NewSyntaxError(message, array);
+ isolate()->Throw(*result, &location);
+ return Handle<Object>::null();
+ }
+ return result;
+}
+
+
+// Parse any JSON value.
+template <bool seq_ascii>
+Handle<Object> JsonParser<seq_ascii>::ParseJsonValue() {
+ switch (c0_) {
+ case '"':
+ return ParseJsonString();
+ case '-':
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ return ParseJsonNumber();
+ case 'f':
+ if (AdvanceGetChar() == 'a' && AdvanceGetChar() == 'l' &&
+ AdvanceGetChar() == 's' && AdvanceGetChar() == 'e') {
+ AdvanceSkipWhitespace();
+ return isolate()->factory()->false_value();
+ } else {
+ return ReportUnexpectedCharacter();
+ }
+ case 't':
+ if (AdvanceGetChar() == 'r' && AdvanceGetChar() == 'u' &&
+ AdvanceGetChar() == 'e') {
+ AdvanceSkipWhitespace();
+ return isolate()->factory()->true_value();
+ } else {
+ return ReportUnexpectedCharacter();
+ }
+ case 'n':
+ if (AdvanceGetChar() == 'u' && AdvanceGetChar() == 'l' &&
+ AdvanceGetChar() == 'l') {
+ AdvanceSkipWhitespace();
+ return isolate()->factory()->null_value();
+ } else {
+ return ReportUnexpectedCharacter();
+ }
+ case '{':
+ return ParseJsonObject();
+ case '[':
+ return ParseJsonArray();
+ default:
+ return ReportUnexpectedCharacter();
+ }
+}
+
+
+// Parse a JSON object. Position must be right at '{'.
+template <bool seq_ascii>
+Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
+ Handle<JSFunction> object_constructor(
+ isolate()->global_context()->object_function());
+ Handle<JSObject> json_object =
+ isolate()->factory()->NewJSObject(object_constructor);
+ ASSERT_EQ(c0_, '{');
+
+ AdvanceSkipWhitespace();
+ if (c0_ != '}') {
+ do {
+ if (c0_ != '"') return ReportUnexpectedCharacter();
+ Handle<String> key = ParseJsonSymbol();
+ if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
+ AdvanceSkipWhitespace();
+ Handle<Object> value = ParseJsonValue();
+ if (value.is_null()) return ReportUnexpectedCharacter();
+
+ uint32_t index;
+ if (key->AsArrayIndex(&index)) {
+ SetOwnElement(json_object, index, value, kNonStrictMode);
+ } else if (key->Equals(isolate()->heap()->Proto_symbol())) {
+ SetPrototype(json_object, value);
+ } else {
+ SetLocalPropertyIgnoreAttributes(json_object, key, value, NONE);
+ }
+ } while (MatchSkipWhiteSpace(','));
+ if (c0_ != '}') {
+ return ReportUnexpectedCharacter();
+ }
+ }
+ AdvanceSkipWhitespace();
+ return json_object;
+}
+
+// Parse a JSON array. Position must be right at '['.
+template <bool seq_ascii>
+Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
+ ZoneScope zone_scope(isolate(), DELETE_ON_EXIT);
+ ZoneList<Handle<Object> > elements(4);
+ ASSERT_EQ(c0_, '[');
+
+ AdvanceSkipWhitespace();
+ if (c0_ != ']') {
+ do {
+ Handle<Object> element = ParseJsonValue();
+ if (element.is_null()) return ReportUnexpectedCharacter();
+ elements.Add(element);
+ } while (MatchSkipWhiteSpace(','));
+ if (c0_ != ']') {
+ return ReportUnexpectedCharacter();
+ }
+ }
+ AdvanceSkipWhitespace();
+ // Allocate a fixed array with all the elements.
+ Handle<FixedArray> fast_elements =
+ isolate()->factory()->NewFixedArray(elements.length());
+ for (int i = 0, n = elements.length(); i < n; i++) {
+ fast_elements->set(i, *elements[i]);
+ }
+ return isolate()->factory()->NewJSArrayWithElements(fast_elements);
+}
+
+
+template <bool seq_ascii>
+Handle<Object> JsonParser<seq_ascii>::ParseJsonNumber() {
+ bool negative = false;
+ int beg_pos = position_;
+ if (c0_ == '-') {
+ Advance();
+ negative = true;
+ }
+ if (c0_ == '0') {
+ Advance();
+ // Prefix zero is only allowed if it's the only digit before
+ // a decimal point or exponent.
+ if ('0' <= c0_ && c0_ <= '9') return ReportUnexpectedCharacter();
+ } else {
+ int i = 0;
+ int digits = 0;
+ if (c0_ < '1' || c0_ > '9') return ReportUnexpectedCharacter();
+ do {
+ i = i * 10 + c0_ - '0';
+ digits++;
+ Advance();
+ } while (c0_ >= '0' && c0_ <= '9');
+ if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
+ SkipWhitespace();
+ return Handle<Smi>(Smi::FromInt((negative ? -i : i)), isolate());
+ }
+ }
+ if (c0_ == '.') {
+ Advance();
+ if (c0_ < '0' || c0_ > '9') return ReportUnexpectedCharacter();
+ do {
+ Advance();
+ } while (c0_ >= '0' && c0_ <= '9');
+ }
+ if (AsciiAlphaToLower(c0_) == 'e') {
+ Advance();
+ if (c0_ == '-' || c0_ == '+') Advance();
+ if (c0_ < '0' || c0_ > '9') return ReportUnexpectedCharacter();
+ do {
+ Advance();
+ } while (c0_ >= '0' && c0_ <= '9');
+ }
+ int length = position_ - beg_pos;
+ double number;
+ if (seq_ascii) {
+ Vector<const char> chars(seq_source_->GetChars() + beg_pos, length);
+ number = StringToDouble(isolate()->unicode_cache(),
+ chars,
+ NO_FLAGS, // Hex, octal or trailing junk.
+ OS::nan_value());
+ } else {
+ Vector<char> buffer = Vector<char>::New(length);
+ String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
+ Vector<const char> result =
+ Vector<const char>(reinterpret_cast<const char*>(buffer.start()),
+ length);
+ number = StringToDouble(isolate()->unicode_cache(),
+ result,
+ NO_FLAGS, // Hex, octal or trailing junk.
+ 0.0);
+ buffer.Dispose();
+ }
+ SkipWhitespace();
+ return isolate()->factory()->NewNumber(number);
+}
+
+
+template <typename StringType>
+inline void SeqStringSet(Handle<StringType> seq_str, int i, uc32 c);
+
+template <>
+inline void SeqStringSet(Handle<SeqTwoByteString> seq_str, int i, uc32 c) {
+ seq_str->SeqTwoByteStringSet(i, c);
+}
+
+template <>
+inline void SeqStringSet(Handle<SeqAsciiString> seq_str, int i, uc32 c) {
+ seq_str->SeqAsciiStringSet(i, c);
+}
+
+template <typename StringType>
+inline Handle<StringType> NewRawString(Factory* factory, int length);
+
+template <>
+inline Handle<SeqTwoByteString> NewRawString(Factory* factory, int length) {
+ return factory->NewRawTwoByteString(length, NOT_TENURED);
+}
+
+template <>
+inline Handle<SeqAsciiString> NewRawString(Factory* factory, int length) {
+ return factory->NewRawAsciiString(length, NOT_TENURED);
+}
+
+
+// Scans the rest of a JSON string starting from position_ and writes
+// prefix[start..end] along with the scanned characters into a
+// sequential string of type StringType.
+template <bool seq_ascii>
+template <typename StringType, typename SinkChar>
+Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
+ Handle<String> prefix, int start, int end) {
+ int count = end - start;
+ int max_length = count + source_length_ - position_;
+ int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
+ Handle<StringType> seq_str = NewRawString<StringType>(isolate()->factory(),
+ length);
+ // Copy prefix into seq_str.
+ SinkChar* dest = seq_str->GetChars();
+ String::WriteToFlat(*prefix, dest, start, end);
+
+ while (c0_ != '"') {
+ // Check for control character (0x00-0x1f) or unterminated string (<0).
+ if (c0_ < 0x20) return Handle<String>::null();
+ if (count >= length) {
+ // We need to create a longer sequential string for the result.
+ return SlowScanJsonString<StringType, SinkChar>(seq_str, 0, count);
+ }
+ if (c0_ != '\\') {
+ // If the sink can contain UC16 characters, or source_ contains only
+ // ASCII characters, there's no need to test whether we can store the
+ // character. Otherwise check whether the UC16 source character can fit
+ // in the ASCII sink.
+ if (sizeof(SinkChar) == kUC16Size ||
+ seq_ascii ||
+ c0_ <= kMaxAsciiCharCode) {
+ SeqStringSet(seq_str, count++, c0_);
+ Advance();
+ } else {
+ // StringType is SeqAsciiString and we just read a non-ASCII char.
+ return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str, 0, count);
+ }
+ } else {
+ Advance(); // Advance past the \.
+ switch (c0_) {
+ case '"':
+ case '\\':
+ case '/':
+ SeqStringSet(seq_str, count++, c0_);
+ break;
+ case 'b':
+ SeqStringSet(seq_str, count++, '\x08');
+ break;
+ case 'f':
+ SeqStringSet(seq_str, count++, '\x0c');
+ break;
+ case 'n':
+ SeqStringSet(seq_str, count++, '\x0a');
+ break;
+ case 'r':
+ SeqStringSet(seq_str, count++, '\x0d');
+ break;
+ case 't':
+ SeqStringSet(seq_str, count++, '\x09');
+ break;
+ case 'u': {
+ uc32 value = 0;
+ for (int i = 0; i < 4; i++) {
+ Advance();
+ int digit = HexValue(c0_);
+ if (digit < 0) {
+ return Handle<String>::null();
+ }
+ value = value * 16 + digit;
+ }
+ if (sizeof(SinkChar) == kUC16Size || value <= kMaxAsciiCharCode) {
+ SeqStringSet(seq_str, count++, value);
+ break;
+ } else {
+ // StringType is SeqAsciiString and we just read a non-ASCII char.
+ position_ -= 6; // Rewind position_ to \ in \uxxxx.
+ Advance();
+ return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str,
+ 0,
+ count);
+ }
+ }
+ default:
+ return Handle<String>::null();
+ }
+ Advance();
+ }
+ }
+ // Shrink seq_str length to count.
+ if (isolate()->heap()->InNewSpace(*seq_str)) {
+ isolate()->heap()->new_space()->
+ template ShrinkStringAtAllocationBoundary<StringType>(
+ *seq_str, count);
+ } else {
+ int string_size = StringType::SizeFor(count);
+ int allocated_string_size = StringType::SizeFor(length);
+ int delta = allocated_string_size - string_size;
+ Address start_filler_object = seq_str->address() + string_size;
+ seq_str->set_length(count);
+ isolate()->heap()->CreateFillerObjectAt(start_filler_object, delta);
+ }
+ ASSERT_EQ('"', c0_);
+ // Advance past the last '"'.
+ AdvanceSkipWhitespace();
+ return seq_str;
+}
+
+
+template <bool seq_ascii>
+template <bool is_symbol>
+Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
+ ASSERT_EQ('"', c0_);
+ Advance();
+ if (c0_ == '"') {
+ AdvanceSkipWhitespace();
+ return Handle<String>(isolate()->heap()->empty_string());
+ }
+ int beg_pos = position_;
+ // Fast case for ASCII only without escape characters.
+ do {
+ // Check for control character (0x00-0x1f) or unterminated string (<0).
+ if (c0_ < 0x20) return Handle<String>::null();
+ if (c0_ != '\\') {
+ if (seq_ascii || c0_ <= kMaxAsciiCharCode) {
+ Advance();
+ } else {
+ return SlowScanJsonString<SeqTwoByteString, uc16>(source_,
+ beg_pos,
+ position_);
+ }
+ } else {
+ return SlowScanJsonString<SeqAsciiString, char>(source_,
+ beg_pos,
+ position_);
+ }
+ } while (c0_ != '"');
+ int length = position_ - beg_pos;
+ Handle<String> result;
+ if (seq_ascii && is_symbol) {
+ result = isolate()->factory()->LookupAsciiSymbol(seq_source_,
+ beg_pos,
+ length);
+ } else {
+ result = isolate()->factory()->NewRawAsciiString(length);
+ char* dest = SeqAsciiString::cast(*result)->GetChars();
+ String::WriteToFlat(*source_, dest, beg_pos, position_);
+ }
+ ASSERT_EQ('"', c0_);
+ // Advance past the last '"'.
+ AdvanceSkipWhitespace();
+ return result;
+}
+
} } // namespace v8::internal
#endif // V8_JSON_PARSER_H_
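The rewritten json-parser.h above replaces the old token-based scanner with a character-driven recursive-descent parser that is specialized at compile time on whether the source is a sequential ASCII string. Below is a minimal standalone sketch of that template technique; TinyScanner and its raw-pointer sources are hypothetical stand-ins for V8's JsonParser, SeqAsciiString, and String, not the real classes.

```cpp
#include <cstdint>

// Minimal sketch of the seq_ascii specialization: the bool template
// parameter picks the character-fetch path at compile time, so the hot
// Advance() loop carries no per-character branch on the string kind.
// The raw pointers are hypothetical stand-ins for V8's string classes.
template <bool seq_ascii>
class TinyScanner {
 public:
  TinyScanner(const uint8_t* ascii, const uint16_t* two_byte, int length)
      : ascii_(ascii), two_byte_(two_byte), length_(length), pos_(-1) {
    Advance();
  }
  void Advance() {
    pos_++;
    if (pos_ >= length_) {
      c0_ = -1;  // end of string, like kEndOfString
    } else if (seq_ascii) {  // resolved at template instantiation time
      c0_ = ascii_[pos_];
    } else {
      c0_ = two_byte_[pos_];
    }
  }
  int32_t current() const { return c0_; }

 private:
  const uint8_t* ascii_;
  const uint16_t* two_byte_;
  int length_;
  int pos_;
  int32_t c0_;
};

int main() {
  const uint8_t src[] = {'4', '2'};
  TinyScanner<true> scanner(src, nullptr, 2);  // ASCII fast path
  int digits = 0;
  while (scanner.current() != -1) {
    digits++;
    scanner.Advance();
  }
  return digits == 2 ? 0 : 1;
}
```

Because the branch on seq_ascii is resolved when the template is instantiated, neither instantiation pays a per-character check on the string representation inside the scanning loops.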
diff --git a/src/json.js b/src/json.js
index 7a6189c..6c984a1 100644
--- a/src/json.js
+++ b/src/json.js
@@ -153,7 +153,7 @@
if (IS_STRING(value)) {
return %QuoteJSONString(value);
} else if (IS_NUMBER(value)) {
- return NUMBER_IS_FINITE(value) ? $String(value) : "null";
+ return JSON_NUMBER_TO_STRING(value);
} else if (IS_BOOLEAN(value)) {
return value ? "true" : "false";
} else if (IS_NULL(value)) {
@@ -164,7 +164,7 @@
return SerializeArray(value, replacer, stack, indent, gap);
} else if (IS_NUMBER_WRAPPER(value)) {
value = ToNumber(value);
- return NUMBER_IS_FINITE(value) ? ToString(value) : "null";
+ return JSON_NUMBER_TO_STRING(value);
} else if (IS_STRING_WRAPPER(value)) {
return %QuoteJSONString(ToString(value));
} else if (IS_BOOLEAN_WRAPPER(value)) {
@@ -191,31 +191,37 @@
var val = value[0];
if (IS_STRING(val)) {
// First entry is a string. Remaining entries are likely to be strings too.
- builder.push(%QuoteJSONString(val));
- for (var i = 1; i < len; i++) {
- val = value[i];
- if (IS_STRING(val)) {
- builder.push(%QuoteJSONStringComma(val));
- } else {
- builder.push(",");
- var before = builder.length;
- BasicJSONSerialize(i, value[i], stack, builder);
- if (before == builder.length) builder[before - 1] = ",null";
+ var array_string = %QuoteJSONStringArray(value);
+ if (!IS_UNDEFINED(array_string)) {
+ // array_string also includes the bracket characters, so we are done.
+ builder[builder.length - 1] = array_string;
+ stack.pop();
+ return;
+ } else {
+ builder.push(%QuoteJSONString(val));
+ for (var i = 1; i < len; i++) {
+ val = value[i];
+ if (IS_STRING(val)) {
+ builder.push(%QuoteJSONStringComma(val));
+ } else {
+ builder.push(",");
+ var before = builder.length;
+ BasicJSONSerialize(i, val, stack, builder);
+ if (before == builder.length) builder[before - 1] = ",null";
+ }
}
}
} else if (IS_NUMBER(val)) {
// First entry is a number. Remaining entries are likely to be numbers too.
- builder.push(NUMBER_IS_FINITE(val) ? %_NumberToString(val) : "null");
+ builder.push(JSON_NUMBER_TO_STRING(val));
for (var i = 1; i < len; i++) {
builder.push(",");
val = value[i];
if (IS_NUMBER(val)) {
- builder.push(NUMBER_IS_FINITE(val)
- ? %_NumberToString(val)
- : "null");
+ builder.push(JSON_NUMBER_TO_STRING(val));
} else {
var before = builder.length;
- BasicJSONSerialize(i, value[i], stack, builder);
+ BasicJSONSerialize(i, val, stack, builder);
if (before == builder.length) builder[before - 1] = ",null";
}
}
@@ -226,8 +232,7 @@
for (var i = 1; i < len; i++) {
builder.push(",");
before = builder.length;
- val = value[i];
- BasicJSONSerialize(i, val, stack, builder);
+ BasicJSONSerialize(i, value[i], stack, builder);
if (before == builder.length) builder[before - 1] = ",null";
}
}
@@ -273,9 +278,9 @@
}
}
if (IS_STRING(value)) {
- builder.push(%QuoteJSONString(value));
+ builder.push(value !== "" ? %QuoteJSONString(value) : '""');
} else if (IS_NUMBER(value)) {
- builder.push(NUMBER_IS_FINITE(value) ? %_NumberToString(value) : "null");
+ builder.push(JSON_NUMBER_TO_STRING(value));
} else if (IS_BOOLEAN(value)) {
builder.push(value ? "true" : "false");
} else if (IS_NULL(value)) {
@@ -285,7 +290,7 @@
// Unwrap value if necessary
if (IS_NUMBER_WRAPPER(value)) {
value = ToNumber(value);
- builder.push(NUMBER_IS_FINITE(value) ? %_NumberToString(value) : "null");
+ builder.push(JSON_NUMBER_TO_STRING(value));
} else if (IS_STRING_WRAPPER(value)) {
builder.push(%QuoteJSONString(ToString(value)));
} else if (IS_BOOLEAN_WRAPPER(value)) {
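The json.js hunks above funnel every number through a single JSON_NUMBER_TO_STRING macro instead of repeating the NUMBER_IS_FINITE ternary. The rule it centralizes is simple: JSON has no syntax for NaN or Infinity, so non-finite numbers serialize as null. A hedged C++ sketch of that rule, for consistency with the other examples here; the %g formatting is a simplification, not V8's actual number-to-string algorithm:

```cpp
#include <cmath>
#include <cstdio>

// Finite numbers serialize via number-to-string; NaN and +/-Infinity
// become "null", per the JSON.stringify spec (ECMA-262, 15.12.3).
void JsonNumberToString(double value, char* buf, int size) {
  if (std::isfinite(value)) {
    std::snprintf(buf, size, "%g", value);
  } else {
    std::snprintf(buf, size, "null");
  }
}

int main() {
  char buf[32];
  JsonNumberToString(1.5, buf, sizeof(buf));
  std::printf("%s\n", buf);  // prints "1.5"
  JsonNumberToString(NAN, buf, sizeof(buf));
  std::printf("%s\n", buf);  // prints "null"
  return 0;
}
```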
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index e7aa860..bc47df8 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -127,7 +127,7 @@
return re;
}
pattern = FlattenGetString(pattern);
- CompilationZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+ ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
PostponeInterruptsScope postpone(isolate);
RegExpCompileData parse_result;
FlatStringReader reader(isolate, pattern);
@@ -327,7 +327,7 @@
bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, bool is_ascii) {
// Compile the RegExp.
Isolate* isolate = re->GetIsolate();
- CompilationZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+ ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
PostponeInterruptsScope postpone(isolate);
// If we had a compilation error the last time this is saved at the
// saved code index.
@@ -848,6 +848,7 @@
}
static const int kNoRegister = -1;
+
private:
EndNode* accept_;
int next_register_;
@@ -1957,13 +1958,10 @@
ASSERT(characters_filled_in < details->characters());
int characters = details->characters();
int char_mask;
- int char_shift;
if (compiler->ascii()) {
char_mask = String::kMaxAsciiCharCode;
- char_shift = 8;
} else {
char_mask = String::kMaxUC16CharCode;
- char_shift = 16;
}
for (int k = 0; k < elms_->length(); k++) {
TextElement elm = elms_->at(k);
@@ -2806,6 +2804,7 @@
AlternativeGeneration* at(int i) {
return alt_gens_[i];
}
+
private:
static const int kAFew = 10;
ZoneList<AlternativeGeneration*> alt_gens_;
@@ -3365,6 +3364,7 @@
}
stream()->Add("}}");
}
+
private:
bool first_;
StringStream* stream() { return stream_; }
@@ -4885,7 +4885,6 @@
cp_offset += elm.data.u_atom->data().length();
} else {
cp_offset++;
- Vector<const uc16> quarks = elm.data.u_atom->data();
}
}
}
@@ -5324,8 +5323,6 @@
return CompilationResult(error_message);
}
- NodeInfo info = *node->info();
-
// Create the correct assembler for the architecture.
#ifndef V8_INTERPRETED_REGEXP
// Native regexp implementation.
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 58958d8..13f9e2e 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -29,7 +29,6 @@
#define V8_JSREGEXP_H_
#include "allocation.h"
-#include "macro-assembler.h"
#include "zone-inl.h"
namespace v8 {
diff --git a/src/lithium-allocator-inl.h b/src/lithium-allocator-inl.h
index c0beaaf..8f660ce 100644
--- a/src/lithium-allocator-inl.h
+++ b/src/lithium-allocator-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -62,27 +62,27 @@
: instr_(instr),
limit_(instr->TempCount()),
current_(0) {
- current_ = AdvanceToNext(0);
+ SkipUninteresting();
}
-bool TempIterator::HasNext() { return current_ < limit_; }
+bool TempIterator::Done() { return current_ >= limit_; }
-LOperand* TempIterator::Next() {
- ASSERT(HasNext());
+LOperand* TempIterator::Current() {
+ ASSERT(!Done());
return instr_->TempAt(current_);
}
-int TempIterator::AdvanceToNext(int start) {
- while (start < limit_ && instr_->TempAt(start) == NULL) start++;
- return start;
+void TempIterator::SkipUninteresting() {
+ while (current_ < limit_ && instr_->TempAt(current_) == NULL) ++current_;
}
void TempIterator::Advance() {
- current_ = AdvanceToNext(current_ + 1);
+ ++current_;
+ SkipUninteresting();
}
@@ -90,27 +90,29 @@
: instr_(instr),
limit_(instr->InputCount()),
current_(0) {
- current_ = AdvanceToNext(0);
+ SkipUninteresting();
}
-bool InputIterator::HasNext() { return current_ < limit_; }
+bool InputIterator::Done() { return current_ >= limit_; }
-LOperand* InputIterator::Next() {
- ASSERT(HasNext());
+LOperand* InputIterator::Current() {
+ ASSERT(!Done());
return instr_->InputAt(current_);
}
void InputIterator::Advance() {
- current_ = AdvanceToNext(current_ + 1);
+ ++current_;
+ SkipUninteresting();
}
-int InputIterator::AdvanceToNext(int start) {
- while (start < limit_ && instr_->InputAt(start)->IsConstantOperand()) start++;
- return start;
+void InputIterator::SkipUninteresting() {
+ while (current_ < limit_ && instr_->InputAt(current_)->IsConstantOperand()) {
+ ++current_;
+ }
}
@@ -118,23 +120,23 @@
: input_iterator_(instr), env_iterator_(instr->environment()) { }
-bool UseIterator::HasNext() {
- return input_iterator_.HasNext() || env_iterator_.HasNext();
+bool UseIterator::Done() {
+ return input_iterator_.Done() && env_iterator_.Done();
}
-LOperand* UseIterator::Next() {
- ASSERT(HasNext());
- return input_iterator_.HasNext()
- ? input_iterator_.Next()
- : env_iterator_.Next();
+LOperand* UseIterator::Current() {
+ ASSERT(!Done());
+ return input_iterator_.Done()
+ ? env_iterator_.Current()
+ : input_iterator_.Current();
}
void UseIterator::Advance() {
- input_iterator_.HasNext()
- ? input_iterator_.Advance()
- : env_iterator_.Advance();
+ input_iterator_.Done()
+ ? env_iterator_.Advance()
+ : input_iterator_.Advance();
}
} } // namespace v8::internal
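The lithium-allocator-inl.h changes above replace the HasNext()/Next()/AdvanceToNext() protocol with Done()/Current()/Advance() plus a SkipUninteresting() helper that filters in the constructor and after each step, so Current() is always valid while !Done(). A minimal sketch of the pattern, assuming a plain vector of pointers as a hypothetical stand-in for an instruction's temp or input list:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Done()/Current()/Advance() iterator with eager filtering: nulls in
// the vector stand in for NULL temps or constant operands that the
// real iterators skip. The container is a hypothetical simplification.
class NonNullIterator {
 public:
  explicit NonNullIterator(const std::vector<int*>& items)
      : items_(items), current_(0) {
    SkipUninteresting();  // establish the invariant up front
  }
  bool Done() const { return current_ >= items_.size(); }
  int* Current() const {
    assert(!Done());
    return items_[current_];
  }
  void Advance() {
    ++current_;
    SkipUninteresting();  // re-establish the invariant after each step
  }

 private:
  void SkipUninteresting() {
    while (current_ < items_.size() && items_[current_] == nullptr) {
      ++current_;
    }
  }
  const std::vector<int*>& items_;
  std::size_t current_;
};

int main() {
  int a = 1, b = 2;
  std::vector<int*> v = {&a, nullptr, &b, nullptr};
  int visited = 0;
  for (NonNullIterator it(v); !it.Done(); it.Advance()) {
    visited += *it.Current();  // visits &a then &b; nulls are skipped
  }
  return visited == 3 ? 0 : 1;
}
```

Separating Current() from Advance() also makes the UseIterator composition above straightforward: Done() is the conjunction of both sub-iterators being done, and Current() delegates to whichever one still has elements.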
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 50ed122..4661106 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -303,6 +303,11 @@
// we need to split use positions in a special way.
bool split_at_start = false;
+ if (current->start().Value() == position.Value()) {
+ // When splitting at start we need to locate the previous use interval.
+ current = first_interval_;
+ }
+
while (current != NULL) {
if (current->Contains(position)) {
current->SplitAt(position);
@@ -352,6 +357,11 @@
}
result->first_pos_ = use_after;
+ // Discard cached iteration state. It might be pointing
+ // to a use that no longer belongs to this live range.
+ last_processed_use_ = NULL;
+ current_interval_ = NULL;
+
// Link the new live range in the chain before any of the other
// ranges linked from the range before the split.
result->parent_ = (parent_ == NULL) ? this : parent_;
@@ -565,10 +575,10 @@
BitVector* live_out = new BitVector(next_virtual_register_);
// Process all successor blocks.
- HBasicBlock* successor = block->end()->FirstSuccessor();
- while (successor != NULL) {
+ for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
// Add values live on entry to the successor. Note the successor's
// live_in will not be computed yet for backwards edges.
+ HBasicBlock* successor = it.Current();
BitVector* live_in = live_in_sets_[successor->block_id()];
if (live_in != NULL) live_out->Union(*live_in);
@@ -582,11 +592,6 @@
live_out->Add(phi->OperandAt(index)->id());
}
}
-
- // Check if we are done with second successor.
- if (successor == block->end()->SecondSuccessor()) break;
-
- successor = block->end()->SecondSuccessor();
}
return live_out;
@@ -790,8 +795,8 @@
int gap_index) {
// Handle fixed temporaries.
if (first != NULL) {
- for (TempIterator it(first); it.HasNext(); it.Advance()) {
- LUnallocated* temp = LUnallocated::cast(it.Next());
+ for (TempIterator it(first); !it.Done(); it.Advance()) {
+ LUnallocated* temp = LUnallocated::cast(it.Current());
if (temp->HasFixedPolicy()) {
AllocateFixed(temp, gap_index - 1, false);
}
@@ -832,8 +837,8 @@
// Handle fixed input operands of second instruction.
if (second != NULL) {
- for (UseIterator it(second); it.HasNext(); it.Advance()) {
- LUnallocated* cur_input = LUnallocated::cast(it.Next());
+ for (UseIterator it(second); !it.Done(); it.Advance()) {
+ LUnallocated* cur_input = LUnallocated::cast(it.Current());
if (cur_input->HasFixedPolicy()) {
LUnallocated* input_copy = cur_input->CopyUnconstrained();
bool is_tagged = HasTaggedValue(cur_input->VirtualRegister());
@@ -968,8 +973,8 @@
}
}
- for (UseIterator it(instr); it.HasNext(); it.Advance()) {
- LOperand* input = it.Next();
+ for (UseIterator it(instr); !it.Done(); it.Advance()) {
+ LOperand* input = it.Current();
LifetimePosition use_pos;
if (input->IsUnallocated() &&
@@ -983,8 +988,8 @@
if (input->IsUnallocated()) live->Add(input->VirtualRegister());
}
- for (TempIterator it(instr); it.HasNext(); it.Advance()) {
- LOperand* temp = it.Next();
+ for (TempIterator it(instr); !it.Done(); it.Advance()) {
+ LOperand* temp = it.Current();
if (instr->IsMarkedAsCall()) {
if (temp->IsRegister()) continue;
if (temp->IsUnallocated()) {
@@ -1019,7 +1024,7 @@
operand = chunk_->DefineConstantOperand(constant);
} else {
ASSERT(!op->EmitAtUses());
- LUnallocated* unalloc = new LUnallocated(LUnallocated::NONE);
+ LUnallocated* unalloc = new LUnallocated(LUnallocated::ANY);
unalloc->set_virtual_register(op->id());
operand = unalloc;
}
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index d456558..e4e6497 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -162,12 +162,12 @@
class TempIterator BASE_EMBEDDED {
public:
inline explicit TempIterator(LInstruction* instr);
- inline bool HasNext();
- inline LOperand* Next();
+ inline bool Done();
+ inline LOperand* Current();
inline void Advance();
private:
- inline int AdvanceToNext(int start);
+ inline void SkipUninteresting();
LInstruction* instr_;
int limit_;
int current_;
@@ -178,12 +178,12 @@
class InputIterator BASE_EMBEDDED {
public:
inline explicit InputIterator(LInstruction* instr);
- inline bool HasNext();
- inline LOperand* Next();
+ inline bool Done();
+ inline LOperand* Current();
inline void Advance();
private:
- inline int AdvanceToNext(int start);
+ inline void SkipUninteresting();
LInstruction* instr_;
int limit_;
int current_;
@@ -193,8 +193,8 @@
class UseIterator BASE_EMBEDDED {
public:
inline explicit UseIterator(LInstruction* instr);
- inline bool HasNext();
- inline LOperand* Next();
+ inline bool Done();
+ inline LOperand* Current();
inline void Advance();
private:
diff --git a/src/lithium.cc b/src/lithium.cc
index 62b263b..64ef469 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -166,21 +166,26 @@
}
-int ExternalArrayTypeToShiftSize(ExternalArrayType type) {
- switch (type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- case kExternalPixelArray:
+int ElementsKindToShiftSize(JSObject::ElementsKind elements_kind) {
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
return 0;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
return 1;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
return 2;
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
return 3;
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ return kPointerSizeLog2;
}
UNREACHABLE();
return 0;
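ElementsKindToShiftSize above maps each elements kind to log2 of its element size, so callers can compute the byte offset of element i as i << shift instead of a multiply. A small sketch of that use, with a hypothetical Kind enum standing in for JSObject::ElementsKind:

```cpp
#include <cstdio>

// Hypothetical stand-in for JSObject::ElementsKind; the shift values
// mirror the patch: 1-, 2-, 4-, and 8-byte external element types.
enum Kind { BYTE = 0, SHORT = 1, INT = 2, DOUBLE = 3 };

int ShiftSizeFor(Kind kind) {
  switch (kind) {
    case BYTE:   return 0;  // 1-byte elements
    case SHORT:  return 1;  // 2-byte elements
    case INT:    return 2;  // 4-byte elements
    case DOUBLE: return 3;  // 8-byte elements
  }
  return 0;
}

int main() {
  // Byte offset of element 5 in a double array: 5 << 3 == 40.
  std::printf("%d\n", 5 << ShiftSizeFor(DOUBLE));
  return 0;
}
```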
diff --git a/src/lithium.h b/src/lithium.h
index ffc236d..6010b77 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -519,34 +519,34 @@
: env_(env),
limit_(env != NULL ? env->values()->length() : 0),
current_(0) {
- current_ = AdvanceToNext(0);
+ SkipUninteresting();
}
- inline bool HasNext() {
- return env_ != NULL && current_ < limit_;
- }
+ bool Done() { return current_ >= limit_; }
- inline LOperand* Next() {
- ASSERT(HasNext());
+ LOperand* Current() {
+ ASSERT(!Done());
return env_->values()->at(current_);
}
- inline void Advance() {
- current_ = AdvanceToNext(current_ + 1);
+ void Advance() {
+ ASSERT(!Done());
+ ++current_;
+ SkipUninteresting();
}
- inline LEnvironment* env() { return env_; }
+ LEnvironment* env() { return env_; }
private:
- inline bool ShouldSkip(LOperand* op) {
+ bool ShouldSkip(LOperand* op) {
return op == NULL || op->IsConstantOperand() || op->IsArgument();
}
- inline int AdvanceToNext(int start) {
- while (start < limit_ && ShouldSkip(env_->values()->at(start))) {
- start++;
+ // Skip until something interesting, beginning with and including current_.
+ void SkipUninteresting() {
+ while (current_ < limit_ && ShouldSkip(env_->values()->at(current_))) {
+ ++current_;
}
- return start;
}
LEnvironment* env_;
@@ -559,38 +559,34 @@
class DeepIterator BASE_EMBEDDED {
public:
explicit DeepIterator(LEnvironment* env)
- : current_iterator_(env) { }
-
- inline bool HasNext() {
- if (current_iterator_.HasNext()) return true;
- if (current_iterator_.env() == NULL) return false;
- AdvanceToOuter();
- return current_iterator_.HasNext();
+ : current_iterator_(env) {
+ SkipUninteresting();
}
- inline LOperand* Next() {
- ASSERT(current_iterator_.HasNext());
- return current_iterator_.Next();
+ bool Done() { return current_iterator_.Done(); }
+
+ LOperand* Current() {
+ ASSERT(!current_iterator_.Done());
+ return current_iterator_.Current();
}
- inline void Advance() {
- if (current_iterator_.HasNext()) {
- current_iterator_.Advance();
- } else {
- AdvanceToOuter();
- }
+ void Advance() {
+ current_iterator_.Advance();
+ SkipUninteresting();
}
private:
- inline void AdvanceToOuter() {
- current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
+ void SkipUninteresting() {
+ while (current_iterator_.env() != NULL && current_iterator_.Done()) {
+ current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
+ }
}
ShallowIterator current_iterator_;
};
-int ExternalArrayTypeToShiftSize(ExternalArrayType type);
+int ElementsKindToShiftSize(JSObject::ElementsKind elements_kind);
} } // namespace v8::internal
diff --git a/src/liveedit.cc b/src/liveedit.cc
index e89cae3..0b01e8a 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -66,7 +66,7 @@
class Differencer {
public:
explicit Differencer(Comparator::Input* input)
- : input_(input), len1_(input->getLength1()), len2_(input->getLength2()) {
+ : input_(input), len1_(input->GetLength1()), len2_(input->GetLength2()) {
buffer_ = NewArray<int>(len1_ * len2_);
}
~Differencer() {
@@ -151,7 +151,7 @@
if (cached_res == kEmptyCellValue) {
Direction dir;
int res;
- if (input_->equals(pos1, pos2)) {
+ if (input_->Equals(pos1, pos2)) {
res = CompareUpToTail(pos1 + 1, pos2 + 1);
dir = EQ;
} else {
@@ -279,6 +279,70 @@
}
+// An extension of the Input interface that lets us restrict the Input
+// range to a subrange. A more elegant way would be to wrap one Input in
+// another Input object and translate positions there, but that would cost
+// an additional virtual call per comparison.
+class SubrangableInput : public Comparator::Input {
+ public:
+ virtual void SetSubrange1(int offset, int len) = 0;
+ virtual void SetSubrange2(int offset, int len) = 0;
+};
+
+
+class SubrangableOutput : public Comparator::Output {
+ public:
+ virtual void SetSubrange1(int offset, int len) = 0;
+ virtual void SetSubrange2(int offset, int len) = 0;
+};
+
+
+static int min(int a, int b) {
+ return a < b ? a : b;
+}
+
+
+// Finds the common prefix and suffix of the input. These parts shouldn't
+// take space in the linear programming table. Enables subranging in input
+// and output.
+static void NarrowDownInput(SubrangableInput* input,
+ SubrangableOutput* output) {
+ const int len1 = input->GetLength1();
+ const int len2 = input->GetLength2();
+
+ int common_prefix_len;
+ int common_suffix_len;
+
+ {
+ common_prefix_len = 0;
+ int prefix_limit = min(len1, len2);
+ while (common_prefix_len < prefix_limit &&
+ input->Equals(common_prefix_len, common_prefix_len)) {
+ common_prefix_len++;
+ }
+
+ common_suffix_len = 0;
+ int suffix_limit = min(len1 - common_prefix_len, len2 - common_prefix_len);
+
+ while (common_suffix_len < suffix_limit &&
+ input->Equals(len1 - common_suffix_len - 1,
+ len2 - common_suffix_len - 1)) {
+ common_suffix_len++;
+ }
+ }
+
+ if (common_prefix_len > 0 || common_suffix_len > 0) {
+ int new_len1 = len1 - common_suffix_len - common_prefix_len;
+ int new_len2 = len2 - common_suffix_len - common_prefix_len;
+
+ input->SetSubrange1(common_prefix_len, new_len1);
+ input->SetSubrange2(common_prefix_len, new_len2);
+
+ output->SetSubrange1(common_prefix_len, new_len1);
+ output->SetSubrange2(common_prefix_len, new_len2);
+ }
+}
+
+
// A helper class that writes chunk numbers into JSArray.
// Each chunk is stored as 3 array elements: (pos1_begin, pos1_end, pos2_end).
class CompareOutputArrayWriter {
@@ -319,13 +383,13 @@
: s1_(s1), offset1_(offset1), len1_(len1),
s2_(s2), offset2_(offset2), len2_(len2) {
}
- virtual int getLength1() {
+ virtual int GetLength1() {
return len1_;
}
- virtual int getLength2() {
+ virtual int GetLength2() {
return len2_;
}
- bool equals(int index1, int index2) {
+ bool Equals(int index1, int index2) {
return s1_->Get(offset1_ + index1) == s2_->Get(offset2_ + index2);
}
@@ -401,20 +465,26 @@
// Represents 2 strings as 2 arrays of lines.
-class LineArrayCompareInput : public Comparator::Input {
+class LineArrayCompareInput : public SubrangableInput {
public:
LineArrayCompareInput(Handle<String> s1, Handle<String> s2,
LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
: s1_(s1), s2_(s2), line_ends1_(line_ends1),
- line_ends2_(line_ends2) {
+ line_ends2_(line_ends2),
+ subrange_offset1_(0), subrange_offset2_(0),
+ subrange_len1_(line_ends1_.length()),
+ subrange_len2_(line_ends2_.length()) {
}
- int getLength1() {
- return line_ends1_.length();
+ int GetLength1() {
+ return subrange_len1_;
}
- int getLength2() {
- return line_ends2_.length();
+ int GetLength2() {
+ return subrange_len2_;
}
- bool equals(int index1, int index2) {
+ bool Equals(int index1, int index2) {
+ index1 += subrange_offset1_;
+ index2 += subrange_offset2_;
+
int line_start1 = line_ends1_.GetLineStart(index1);
int line_start2 = line_ends2_.GetLineStart(index2);
int line_end1 = line_ends1_.GetLineEnd(index1);
@@ -427,26 +497,42 @@
return CompareSubstrings(s1_, line_start1, s2_, line_start2,
len1);
}
+ void SetSubrange1(int offset, int len) {
+ subrange_offset1_ = offset;
+ subrange_len1_ = len;
+ }
+ void SetSubrange2(int offset, int len) {
+ subrange_offset2_ = offset;
+ subrange_len2_ = len;
+ }
private:
Handle<String> s1_;
Handle<String> s2_;
LineEndsWrapper line_ends1_;
LineEndsWrapper line_ends2_;
+ int subrange_offset1_;
+ int subrange_offset2_;
+ int subrange_len1_;
+ int subrange_len2_;
};
// Stores compare result in JSArray. For each chunk tries to conduct
// a fine-grained nested diff token-wise.
-class TokenizingLineArrayCompareOutput : public Comparator::Output {
+class TokenizingLineArrayCompareOutput : public SubrangableOutput {
public:
TokenizingLineArrayCompareOutput(LineEndsWrapper line_ends1,
LineEndsWrapper line_ends2,
Handle<String> s1, Handle<String> s2)
- : line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2) {
+ : line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2),
+ subrange_offset1_(0), subrange_offset2_(0) {
}
void AddChunk(int line_pos1, int line_pos2, int line_len1, int line_len2) {
+ line_pos1 += subrange_offset1_;
+ line_pos2 += subrange_offset2_;
+
int char_pos1 = line_ends1_.GetLineStart(line_pos1);
int char_pos2 = line_ends2_.GetLineStart(line_pos2);
int char_len1 = line_ends1_.GetLineStart(line_pos1 + line_len1) - char_pos1;
@@ -466,6 +552,12 @@
array_writer_.WriteChunk(char_pos1, char_pos2, char_len1, char_len2);
}
}
+ void SetSubrange1(int offset, int len) {
+ subrange_offset1_ = offset;
+ }
+ void SetSubrange2(int offset, int len) {
+ subrange_offset2_ = offset;
+ }
Handle<JSArray> GetResult() {
return array_writer_.GetResult();
@@ -479,6 +571,8 @@
LineEndsWrapper line_ends2_;
Handle<String> s1_;
Handle<String> s2_;
+ int subrange_offset1_;
+ int subrange_offset2_;
};
@@ -493,6 +587,8 @@
LineArrayCompareInput input(s1, s2, line_ends1, line_ends2);
TokenizingLineArrayCompareOutput output(line_ends1, line_ends2, s1, s2);
+ NarrowDownInput(&input, &output);
+
Comparator::CalculateDifference(&input, &output);
return output.GetResult();
@@ -814,7 +910,7 @@
JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
Handle<String> source) {
Isolate* isolate = Isolate::Current();
- CompilationZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+ ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
FunctionInfoListener listener;
Handle<Object> original_source = Handle<Object>(script->source());
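The new NarrowDownInput pass above trims the common prefix and suffix of the two line arrays before the quadratic diff runs, so only the changed middle occupies the len1 * len2 table. A self-contained sketch of the same narrowing over plain strings, which stand in for the Comparator::Input abstraction:

```cpp
#include <algorithm>
#include <cstdio>
#include <string>

// Strip the common prefix and suffix; the diff then only needs to
// consider the middle [offset, offset + lenN) of each input.
void NarrowDown(const std::string& s1, const std::string& s2,
                int* offset, int* len1, int* len2) {
  int limit = static_cast<int>(std::min(s1.size(), s2.size()));
  int prefix = 0;
  while (prefix < limit && s1[prefix] == s2[prefix]) prefix++;
  int suffix = 0;
  while (suffix < limit - prefix &&
         s1[s1.size() - suffix - 1] == s2[s2.size() - suffix - 1]) {
    suffix++;
  }
  *offset = prefix;
  *len1 = static_cast<int>(s1.size()) - prefix - suffix;
  *len2 = static_cast<int>(s2.size()) - prefix - suffix;
}

int main() {
  int offset, len1, len2;
  NarrowDown("function f() { return 1; }",
             "function f() { return 2; }", &offset, &len1, &len2);
  std::printf("offset=%d len1=%d len2=%d\n", offset, len1, len2);
  // Prints offset=22 len1=1 len2=1: only the changed digit is diffed.
  return 0;
}
```

For a typical live edit, where one function body changes in a large script, this can shrink the table from the whole script to a handful of lines.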
diff --git a/src/liveedit.h b/src/liveedit.h
index 60e6238..4ee4466 100644
--- a/src/liveedit.h
+++ b/src/liveedit.h
@@ -143,14 +143,13 @@
// A general-purpose comparator between 2 arrays.
class Comparator {
public:
-
// Holds 2 arrays of some elements allowing to compare any pair of
// element from the first array and element from the second array.
class Input {
public:
- virtual int getLength1() = 0;
- virtual int getLength2() = 0;
- virtual bool equals(int index1, int index2) = 0;
+ virtual int GetLength1() = 0;
+ virtual int GetLength2() = 0;
+ virtual bool Equals(int index1, int index2) = 0;
protected:
virtual ~Input() {}
diff --git a/src/liveobjectlist.cc b/src/liveobjectlist.cc
index 29a9b01..e382a06 100644
--- a/src/liveobjectlist.cc
+++ b/src/liveobjectlist.cc
@@ -1649,7 +1649,6 @@
// Helper class for copying HeapObjects.
class LolVisitor: public ObjectVisitor {
public:
-
LolVisitor(HeapObject* target, Handle<HeapObject> handle_to_skip)
: target_(target), handle_to_skip_(handle_to_skip), found_(false) {}
@@ -2586,4 +2585,3 @@
} } // namespace v8::internal
#endif // LIVE_OBJECT_LIST
-
diff --git a/src/log-inl.h b/src/log-inl.h
index 02238fe..8aebbc7 100644
--- a/src/log-inl.h
+++ b/src/log-inl.h
@@ -34,8 +34,6 @@
namespace v8 {
namespace internal {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
Logger::LogEventsAndTags Logger::ToNativeByScript(Logger::LogEventsAndTags tag,
Script* script) {
if ((tag == FUNCTION_TAG || tag == LAZY_COMPILE_TAG || tag == SCRIPT_TAG)
@@ -51,8 +49,6 @@
}
}
-#endif // ENABLE_LOGGING_AND_PROFILING
-
} } // namespace v8::internal
diff --git a/src/log-utils.cc b/src/log-utils.cc
index 1bba7cd..2d1ce23 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -33,101 +33,14 @@
namespace v8 {
namespace internal {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-LogDynamicBuffer::LogDynamicBuffer(
- int block_size, int max_size, const char* seal, int seal_size)
- : block_size_(block_size),
- max_size_(max_size - (max_size % block_size_)),
- seal_(seal),
- seal_size_(seal_size),
- blocks_(max_size_ / block_size_ + 1),
- write_pos_(0), block_index_(0), block_write_pos_(0), is_sealed_(false) {
- ASSERT(BlocksCount() > 0);
- AllocateBlock(0);
- for (int i = 1; i < BlocksCount(); ++i) {
- blocks_[i] = NULL;
- }
-}
+const char* Log::kLogToTemporaryFile = "&";
-LogDynamicBuffer::~LogDynamicBuffer() {
- for (int i = 0; i < BlocksCount(); ++i) {
- DeleteArray(blocks_[i]);
- }
-}
-
-
-int LogDynamicBuffer::Read(int from_pos, char* dest_buf, int buf_size) {
- if (buf_size == 0) return 0;
- int read_pos = from_pos;
- int block_read_index = BlockIndex(from_pos);
- int block_read_pos = PosInBlock(from_pos);
- int dest_buf_pos = 0;
- // Read until dest_buf is filled, or write_pos_ encountered.
- while (read_pos < write_pos_ && dest_buf_pos < buf_size) {
- const int read_size = Min(write_pos_ - read_pos,
- Min(buf_size - dest_buf_pos, block_size_ - block_read_pos));
- memcpy(dest_buf + dest_buf_pos,
- blocks_[block_read_index] + block_read_pos, read_size);
- block_read_pos += read_size;
- dest_buf_pos += read_size;
- read_pos += read_size;
- if (block_read_pos == block_size_) {
- block_read_pos = 0;
- ++block_read_index;
- }
- }
- return dest_buf_pos;
-}
-
-
-int LogDynamicBuffer::Seal() {
- WriteInternal(seal_, seal_size_);
- is_sealed_ = true;
- return 0;
-}
-
-
-int LogDynamicBuffer::Write(const char* data, int data_size) {
- if (is_sealed_) {
- return 0;
- }
- if ((write_pos_ + data_size) <= (max_size_ - seal_size_)) {
- return WriteInternal(data, data_size);
- } else {
- return Seal();
- }
-}
-
-
-int LogDynamicBuffer::WriteInternal(const char* data, int data_size) {
- int data_pos = 0;
- while (data_pos < data_size) {
- const int write_size =
- Min(data_size - data_pos, block_size_ - block_write_pos_);
- memcpy(blocks_[block_index_] + block_write_pos_, data + data_pos,
- write_size);
- block_write_pos_ += write_size;
- data_pos += write_size;
- if (block_write_pos_ == block_size_) {
- block_write_pos_ = 0;
- AllocateBlock(++block_index_);
- }
- }
- write_pos_ += data_size;
- return data_size;
-}
-
-// Must be the same message as in Logger::PauseProfiler.
-const char* const Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
-
Log::Log(Logger* logger)
- : write_to_file_(false),
- is_stopped_(false),
+ : is_stopped_(false),
output_handle_(NULL),
ll_output_handle_(NULL),
- output_buffer_(NULL),
mutex_(NULL),
message_buffer_(NULL),
logger_(logger) {
@@ -142,7 +55,6 @@
void Log::Initialize() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
mutex_ = OS::CreateMutex();
message_buffer_ = NewArray<char>(kMessageBufferSize);
@@ -166,19 +78,19 @@
FLAG_prof_auto = false;
}
- bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
+ bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api
|| FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
|| FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof;
- bool open_log_file = start_logging || FLAG_prof_lazy;
-
// If we're logging anything, we need to open the log file.
if (open_log_file) {
if (strcmp(FLAG_logfile, "-") == 0) {
OpenStdout();
} else if (strcmp(FLAG_logfile, "*") == 0) {
- OpenMemoryBuffer();
- } else {
+ // Does nothing for now. Will be removed.
+ } else if (strcmp(FLAG_logfile, kLogToTemporaryFile) == 0) {
+ OpenTemporaryFile();
+ } else {
if (strchr(FLAG_logfile, '%') != NULL ||
!Isolate::Current()->IsDefaultIsolate()) {
// If there's a '%' in the log file name we have to expand
@@ -222,14 +134,18 @@
}
}
}
-#endif
}
void Log::OpenStdout() {
ASSERT(!IsEnabled());
output_handle_ = stdout;
- write_to_file_ = true;
+}
+
+
+void Log::OpenTemporaryFile() {
+ ASSERT(!IsEnabled());
+ output_handle_ = i::OS::OpenTemporaryFile();
}
@@ -244,7 +160,6 @@
void Log::OpenFile(const char* name) {
ASSERT(!IsEnabled());
output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
- write_to_file_ = true;
if (FLAG_ll_prof) {
// Open the low-level log file.
size_t len = strlen(name);
@@ -257,25 +172,18 @@
}
-void Log::OpenMemoryBuffer() {
- ASSERT(!IsEnabled());
- output_buffer_ = new LogDynamicBuffer(
- kDynamicBufferBlockSize, kMaxDynamicBufferSize,
- kDynamicBufferSeal, StrLength(kDynamicBufferSeal));
- write_to_file_ = false;
-}
-
-
-void Log::Close() {
- if (write_to_file_) {
- if (output_handle_ != NULL) fclose(output_handle_);
- output_handle_ = NULL;
- if (ll_output_handle_ != NULL) fclose(ll_output_handle_);
- ll_output_handle_ = NULL;
- } else {
- delete output_buffer_;
- output_buffer_ = NULL;
+FILE* Log::Close() {
+ FILE* result = NULL;
+ if (output_handle_ != NULL) {
+ if (strcmp(FLAG_logfile, kLogToTemporaryFile) != 0) {
+ fclose(output_handle_);
+ } else {
+ result = output_handle_;
+ }
}
+ output_handle_ = NULL;
+ if (ll_output_handle_ != NULL) fclose(ll_output_handle_);
+ ll_output_handle_ = NULL;
DeleteArray(message_buffer_);
message_buffer_ = NULL;
@@ -284,27 +192,7 @@
mutex_ = NULL;
is_stopped_ = false;
-}
-
-
-int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
- if (write_to_file_) return 0;
- ASSERT(output_buffer_ != NULL);
- ASSERT(from_pos >= 0);
- ASSERT(max_size >= 0);
- int actual_size = output_buffer_->Read(from_pos, dest_buf, max_size);
- ASSERT(actual_size <= max_size);
- if (actual_size == 0) return 0;
-
- // Find previous log line boundary.
- char* end_pos = dest_buf + actual_size - 1;
- while (end_pos >= dest_buf && *end_pos != '\n') --end_pos;
- actual_size = static_cast<int>(end_pos - dest_buf + 1);
- // If the assertion below is hit, it means that there was no line end
- // found --- something wrong has happened.
- ASSERT(actual_size > 0);
- ASSERT(actual_size <= max_size);
- return actual_size;
+ return result;
}
@@ -413,9 +301,7 @@
void LogMessageBuilder::WriteToLogFile() {
ASSERT(pos_ <= Log::kMessageBufferSize);
- const int written = log_->write_to_file_ ?
- log_->WriteToFile(log_->message_buffer_, pos_) :
- log_->WriteToMemory(log_->message_buffer_, pos_);
+ const int written = log_->WriteToFile(log_->message_buffer_, pos_);
if (written != pos_) {
log_->stop();
log_->logger_->LogFailure();
@@ -423,6 +309,4 @@
}
-#endif // ENABLE_LOGGING_AND_PROFILING
-
} } // namespace v8::internal
diff --git a/src/log-utils.h b/src/log-utils.h
index 81bbf77..d336d71 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -33,69 +33,11 @@
namespace v8 {
namespace internal {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
class Logger;
-// A memory buffer that increments its size as you write in it. Size
-// is incremented with 'block_size' steps, never exceeding 'max_size'.
-// During growth, memory contents are never copied. At the end of the
-// buffer an amount of memory specified in 'seal_size' is reserved.
-// When writing position reaches max_size - seal_size, buffer auto-seals
-// itself with 'seal' and allows no further writes. Data pointed by
-// 'seal' must be available during entire LogDynamicBuffer lifetime.
-//
-// An instance of this class is created dynamically by Log.
-class LogDynamicBuffer {
- public:
- LogDynamicBuffer(
- int block_size, int max_size, const char* seal, int seal_size);
-
- ~LogDynamicBuffer();
-
- // Reads contents of the buffer starting from 'from_pos'. Upon
- // return, 'dest_buf' is filled with the data. Actual amount of data
- // filled is returned, it is <= 'buf_size'.
- int Read(int from_pos, char* dest_buf, int buf_size);
-
- // Writes 'data' to the buffer, making it larger if necessary. If
- // data is too big to fit in the buffer, it doesn't get written at
- // all. In that case, buffer auto-seals itself and stops to accept
- // any incoming writes. Returns amount of data written (it is either
- // 'data_size', or 0, if 'data' is too big).
- int Write(const char* data, int data_size);
-
- private:
- void AllocateBlock(int index) {
- blocks_[index] = NewArray<char>(block_size_);
- }
-
- int BlockIndex(int pos) const { return pos / block_size_; }
-
- int BlocksCount() const { return BlockIndex(max_size_) + 1; }
-
- int PosInBlock(int pos) const { return pos % block_size_; }
-
- int Seal();
-
- int WriteInternal(const char* data, int data_size);
-
- const int block_size_;
- const int max_size_;
- const char* seal_;
- const int seal_size_;
- ScopedVector<char*> blocks_;
- int write_pos_;
- int block_index_;
- int block_write_pos_;
- bool is_sealed_;
-};
-
-
// Functions and data for performing output of log messages.
class Log {
public:
-
// Performs process-wide initialization.
void Initialize();
@@ -103,18 +45,21 @@
void stop() { is_stopped_ = true; }
// Frees all resources acquired in Initialize and Open... functions.
- void Close();
-
- // See description in include/v8.h.
- int GetLogLines(int from_pos, char* dest_buf, int max_size);
+ // When a temporary file is used for the log, returns its stream descriptor,
+ // leaving the file open.
+ FILE* Close();
// Returns whether logging is enabled.
bool IsEnabled() {
- return !is_stopped_ && (output_handle_ != NULL || output_buffer_ != NULL);
+ return !is_stopped_ && output_handle_ != NULL;
}
// Size of buffer used for formatting log messages.
- static const int kMessageBufferSize = v8::V8::kMinimumSizeForLogLinesBuffer;
+ static const int kMessageBufferSize = 2048;
+
+ // This mode is only used in tests, as temporary files are automatically
+ // deleted on close and thus can't be accessed afterwards.
+ static const char* kLogToTemporaryFile;
private:
explicit Log(Logger* logger);
@@ -125,8 +70,8 @@
// Opens file for logging.
void OpenFile(const char* name);
- // Opens memory buffer for logging.
- void OpenMemoryBuffer();
+ // Opens a temporary file for logging.
+ void OpenTemporaryFile();
// Implementation of writing to a log file.
int WriteToFile(const char* msg, int length) {
@@ -138,38 +83,16 @@
return length;
}
- // Implementation of writing to a memory buffer.
- int WriteToMemory(const char* msg, int length) {
- ASSERT(output_buffer_ != NULL);
- return output_buffer_->Write(msg, length);
- }
-
- bool write_to_file_;
-
// Whether logging is stopped (e.g. due to insufficient resources).
bool is_stopped_;
- // When logging is active, either output_handle_ or output_buffer_ is used
- // to store a pointer to log destination. If logging was opened via OpenStdout
- // or OpenFile, then output_handle_ is used. If logging was opened
- // via OpenMemoryBuffer, then output_buffer_ is used.
- // mutex_ should be acquired before using output_handle_ or output_buffer_.
+ // When logging is active output_handle_ is used to store a pointer to log
+ // destination. mutex_ should be acquired before using output_handle_.
FILE* output_handle_;
// Used when low-level profiling is active.
FILE* ll_output_handle_;
- LogDynamicBuffer* output_buffer_;
-
- // Size of dynamic buffer block (and dynamic buffer initial size).
- static const int kDynamicBufferBlockSize = 65536;
-
- // Maximum size of dynamic buffer.
- static const int kMaxDynamicBufferSize = 50 * 1024 * 1024;
-
- // Message to "seal" dynamic buffer with.
- static const char* const kDynamicBufferSeal;
-
// mutex_ is a Mutex used for enforcing exclusive
// access to the formatting buffer and the log file or log memory buffer.
Mutex* mutex_;
@@ -224,8 +147,6 @@
int pos_;
};
-#endif // ENABLE_LOGGING_AND_PROFILING
-
} } // namespace v8::internal
#endif // V8_LOG_UTILS_H_
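Log::Close() now returns the FILE* when the temporary-file mode is active, instead of always closing it. A sketch of that flow, under the stated assumption that kLogToTemporaryFile is "&"; the log line is a made-up example, not real logger output:

```cpp
#include <cstdio>
#include <cstring>

// When the logfile flag equals "&", log lines go to a tmpfile()
// stream, and the Close() analogue hands back the still-open FILE*
// so a test can rewind and read the log before it disappears.
int main() {
  const char* logfile = "&";  // mirrors Log::kLogToTemporaryFile
  FILE* out = (std::strcmp(logfile, "&") == 0)
                  ? std::tmpfile()
                  : std::fopen(logfile, "w");
  if (out == nullptr) return 1;
  std::fputs("code-creation,LazyCompile,0x1234,42,\"f\"\n", out);
  std::rewind(out);  // Close() would return `out` here instead of closing
  char line[64];
  if (std::fgets(line, sizeof(line), out)) std::fputs(line, stdout);
  std::fclose(out);  // a tmpfile() is deleted automatically on close
  return 0;
}
```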
diff --git a/src/log.cc b/src/log.cc
index 0474ce0..04fd22e 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -43,8 +43,6 @@
namespace v8 {
namespace internal {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
//
// Sliding state window. Updates counters to keep track of the last
// window of kBufferSize states. This is useful to track where we
@@ -122,6 +120,7 @@
// Returns the next index in the cyclic buffer.
int Succ(int index) { return (index + 1) % kBufferSize; }
+ Isolate* isolate_;
// Cyclic buffer for communicating profiling samples
// between the signal handler and the worker thread.
static const int kBufferSize = 128;
@@ -148,10 +147,6 @@
void StackTracer::Trace(Isolate* isolate, TickSample* sample) {
ASSERT(isolate->IsInitialized());
- sample->tos = NULL;
- sample->frames_count = 0;
- sample->has_external_callback = false;
-
// Avoid collecting traces while doing GC.
if (sample->state == GC) return;
@@ -271,7 +266,8 @@
// Profiler implementation.
//
Profiler::Profiler(Isolate* isolate)
- : Thread(isolate, "v8:Profiler"),
+ : Thread("v8:Profiler"),
+ isolate_(isolate),
head_(0),
tail_(0),
overflow_(false),
@@ -326,9 +322,8 @@
void Profiler::Run() {
TickSample sample;
bool overflow = Remove(&sample);
- i::Isolate* isolate = ISOLATE;
while (running_) {
- LOG(isolate, TickEvent(&sample, overflow));
+ LOG(isolate_, TickEvent(&sample, overflow));
overflow = Remove(&sample);
}
}
@@ -399,8 +394,10 @@
void Remove(Address code_address) {
HashMap::Entry* entry = FindEntry(code_address);
- if (entry != NULL) DeleteArray(static_cast<const char*>(entry->value));
- RemoveEntry(entry);
+ if (entry != NULL) {
+ DeleteArray(static_cast<char*>(entry->value));
+ RemoveEntry(entry);
+ }
}
void Move(Address from, Address to) {
@@ -522,7 +519,6 @@
log_events_(NULL),
logging_nesting_(0),
cpu_profiler_nesting_(0),
- heap_profiler_nesting_(0),
log_(new Log(this)),
name_buffer_(new NameBuffer),
address_to_name_map_(NULL),
@@ -556,71 +552,54 @@
msg.WriteToLogFile();
}
-#endif // ENABLE_LOGGING_AND_PROFILING
-
void Logger::StringEvent(const char* name, const char* value) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log) UncheckedStringEvent(name, value);
-#endif
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedStringEvent(const char* name, const char* value) {
if (!log_->IsEnabled()) return;
LogMessageBuilder msg(this);
msg.Append("%s,\"%s\"\n", name, value);
msg.WriteToLogFile();
}
-#endif
void Logger::IntEvent(const char* name, int value) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log) UncheckedIntEvent(name, value);
-#endif
}
void Logger::IntPtrTEvent(const char* name, intptr_t value) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log) UncheckedIntPtrTEvent(name, value);
-#endif
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedIntEvent(const char* name, int value) {
if (!log_->IsEnabled()) return;
LogMessageBuilder msg(this);
msg.Append("%s,%d\n", name, value);
msg.WriteToLogFile();
}
-#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
if (!log_->IsEnabled()) return;
LogMessageBuilder msg(this);
msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value);
msg.WriteToLogFile();
}
-#endif
void Logger::HandleEvent(const char* name, Object** location) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_handles) return;
LogMessageBuilder msg(this);
msg.Append("%s,0x%" V8PRIxPTR "\n", name, location);
msg.WriteToLogFile();
-#endif
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
// ApiEvent is private so all the calls come from the Logger class. It is the
// caller's responsibility to ensure that log is enabled and that
// FLAG_log_api is true.
@@ -633,11 +612,9 @@
va_end(ap);
msg.WriteToLogFile();
}
-#endif
void Logger::ApiNamedSecurityCheck(Object* key) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_api) return;
if (key->IsString()) {
SmartPointer<char> str =
@@ -648,14 +625,12 @@
} else {
ApiEvent("api,check-security,['no-name']\n");
}
-#endif
}
void Logger::SharedLibraryEvent(const char* library_path,
uintptr_t start,
uintptr_t end) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_prof) return;
LogMessageBuilder msg(this);
msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
@@ -663,14 +638,12 @@
start,
end);
msg.WriteToLogFile();
-#endif
}
void Logger::SharedLibraryEvent(const wchar_t* library_path,
uintptr_t start,
uintptr_t end) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_prof) return;
LogMessageBuilder msg(this);
msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
@@ -678,11 +651,9 @@
start,
end);
msg.WriteToLogFile();
-#endif
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
// Prints "/" + re.source + "/" +
// (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
@@ -723,23 +694,19 @@
msg.WriteToLogFile();
}
-#endif // ENABLE_LOGGING_AND_PROFILING
void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_regexp) return;
LogMessageBuilder msg(this);
msg.Append("regexp-compile,");
LogRegExpSource(regexp);
msg.Append(in_cache ? ",hit\n" : ",miss\n");
msg.WriteToLogFile();
-#endif
}
void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_runtime) return;
HandleScope scope;
LogMessageBuilder msg(this);
@@ -780,22 +747,18 @@
}
msg.Append('\n');
msg.WriteToLogFile();
-#endif
}
void Logger::ApiIndexedSecurityCheck(uint32_t index) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_api) return;
ApiEvent("api,check-security,%u\n", index);
-#endif
}
void Logger::ApiNamedPropertyAccess(const char* tag,
JSObject* holder,
Object* name) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(name->IsString());
if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = holder->class_name();
@@ -804,58 +767,47 @@
SmartPointer<char> property_name =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
-#endif
}
void Logger::ApiIndexedPropertyAccess(const char* tag,
JSObject* holder,
uint32_t index) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = holder->class_name();
SmartPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
-#endif
}
void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = object->class_name();
SmartPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\"\n", tag, *class_name);
-#endif
}
void Logger::ApiEntryCall(const char* name) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_api) return;
ApiEvent("api,%s\n", name);
-#endif
}
void Logger::NewEvent(const char* name, void* object, size_t size) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log) return;
LogMessageBuilder msg(this);
msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object,
static_cast<unsigned int>(size));
msg.WriteToLogFile();
-#endif
}
void Logger::DeleteEvent(const char* name, void* object) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log) return;
LogMessageBuilder msg(this);
msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object);
msg.WriteToLogFile();
-#endif
}
@@ -868,7 +820,6 @@
LOGGER->DeleteEvent(name, object);
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::CallbackEventInternal(const char* prefix, const char* name,
Address entry_point) {
if (!log_->IsEnabled() || !FLAG_log_code) return;
@@ -881,43 +832,35 @@
msg.Append('\n');
msg.WriteToLogFile();
}
-#endif
void Logger::CallbackEvent(String* name, Address entry_point) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_code) return;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
CallbackEventInternal("", *str, entry_point);
-#endif
}
void Logger::GetterCallbackEvent(String* name, Address entry_point) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_code) return;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
CallbackEventInternal("get ", *str, entry_point);
-#endif
}
void Logger::SetterCallbackEvent(String* name, Address entry_point) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_code) return;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
CallbackEventInternal("set ", *str, entry_point);
-#endif
}
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
const char* comment) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof || Serializer::enabled()) {
name_buffer_->Reset();
@@ -947,14 +890,12 @@
msg.Append('"');
msg.Append('\n');
msg.WriteToLogFile();
-#endif
}
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
String* name) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof || Serializer::enabled()) {
name_buffer_->Reset();
@@ -979,11 +920,9 @@
msg.Append('"');
msg.Append('\n');
msg.WriteToLogFile();
-#endif
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
// ComputeMarker must only be used when SharedFunctionInfo is known.
static const char* ComputeMarker(Code* code) {
switch (code->kind()) {
@@ -992,14 +931,12 @@
default: return "";
}
}
-#endif
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* name) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof || Serializer::enabled()) {
name_buffer_->Reset();
@@ -1031,7 +968,6 @@
msg.Append(",%s", ComputeMarker(code));
msg.Append('\n');
msg.WriteToLogFile();
-#endif
}
@@ -1042,7 +978,6 @@
Code* code,
SharedFunctionInfo* shared,
String* source, int line) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof || Serializer::enabled()) {
name_buffer_->Reset();
@@ -1080,12 +1015,10 @@
msg.Append(",%s", ComputeMarker(code));
msg.Append('\n');
msg.WriteToLogFile();
-#endif
}
void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof || Serializer::enabled()) {
name_buffer_->Reset();
@@ -1108,21 +1041,17 @@
msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
msg.Append('\n');
msg.WriteToLogFile();
-#endif
}
void Logger::CodeMovingGCEvent() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_ll_prof) return;
LowLevelLogWriteBytes(&kCodeMovingGCTag, sizeof(kCodeMovingGCTag));
OS::SignalCodeMovingGC();
-#endif
}
void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof || Serializer::enabled()) {
name_buffer_->Reset();
@@ -1147,36 +1076,30 @@
msg.Append('\"');
msg.Append('\n');
msg.WriteToLogFile();
-#endif
}
void Logger::CodeMoveEvent(Address from, Address to) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) LowLevelCodeMoveEvent(from, to);
if (Serializer::enabled() && address_to_name_map_ != NULL) {
address_to_name_map_->Move(from, to);
}
MoveEventInternal(CODE_MOVE_EVENT, from, to);
-#endif
}
void Logger::CodeDeleteEvent(Address from) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) LowLevelCodeDeleteEvent(from);
if (Serializer::enabled() && address_to_name_map_ != NULL) {
address_to_name_map_->Remove(from);
}
DeleteEventInternal(CODE_DELETE_EVENT, from);
-#endif
}
void Logger::SnapshotPositionEvent(Address addr, int pos) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) LowLevelSnapshotPositionEvent(addr, pos);
if (Serializer::enabled() && address_to_name_map_ != NULL) {
@@ -1198,18 +1121,14 @@
msg.Append(",%d", pos);
msg.Append('\n');
msg.WriteToLogFile();
-#endif
}
void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to);
-#endif
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::MoveEventInternal(LogEventsAndTags event,
Address from,
Address to) {
@@ -1222,10 +1141,8 @@
msg.Append('\n');
msg.WriteToLogFile();
}
-#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
if (!log_->IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg(this);
@@ -1234,11 +1151,9 @@
msg.Append('\n');
msg.WriteToLogFile();
}
-#endif
void Logger::ResourceEvent(const char* name, const char* tag) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log) return;
LogMessageBuilder msg(this);
msg.Append("%s,%s,", name, tag);
@@ -1251,12 +1166,10 @@
msg.Append('\n');
msg.WriteToLogFile();
-#endif
}
void Logger::SuspectReadEvent(String* name, Object* obj) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_suspect) return;
LogMessageBuilder msg(this);
String* class_name = obj->IsJSObject()
@@ -1270,12 +1183,10 @@
msg.Append('"');
msg.Append('\n');
msg.WriteToLogFile();
-#endif
}
void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_gc) return;
LogMessageBuilder msg(this);
// Using non-relative system time in order to be able to synchronize with
@@ -1283,121 +1194,34 @@
msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n",
space, kind, OS::TimeCurrentMillis());
msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::HeapSampleStats(const char* space, const char* kind,
- intptr_t capacity, intptr_t used) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
- msg.Append("heap-sample-stats,\"%s\",\"%s\","
- "%" V8_PTR_PREFIX "d,%" V8_PTR_PREFIX "d\n",
- space, kind, capacity, used);
- msg.WriteToLogFile();
-#endif
}
void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_gc) return;
LogMessageBuilder msg(this);
msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind);
msg.WriteToLogFile();
-#endif
}
void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_gc) return;
LogMessageBuilder msg(this);
msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes);
msg.WriteToLogFile();
-#endif
-}
-
-
-void Logger::HeapSampleJSConstructorEvent(const char* constructor,
- int number, int bytes) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
- msg.Append("heap-js-cons-item,%s,%d,%d\n", constructor, number, bytes);
- msg.WriteToLogFile();
-#endif
-}
-
-// Event starts with comma, so we don't have it in the format string.
-static const char kEventText[] = "heap-js-ret-item,%s";
-// We take placeholder strings into account, but it's OK to be conservative.
-static const int kEventTextLen = sizeof(kEventText)/sizeof(kEventText[0]);
-
-void Logger::HeapSampleJSRetainersEvent(
- const char* constructor, const char* event) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- const int cons_len = StrLength(constructor);
- const int event_len = StrLength(event);
- int pos = 0;
- // Retainer lists can be long. We may need to split them into multiple events.
- do {
- LogMessageBuilder msg(this);
- msg.Append(kEventText, constructor);
- int to_write = event_len - pos;
- if (to_write > Log::kMessageBufferSize - (cons_len + kEventTextLen)) {
- int cut_pos = pos + Log::kMessageBufferSize - (cons_len + kEventTextLen);
- ASSERT(cut_pos < event_len);
- while (cut_pos > pos && event[cut_pos] != ',') --cut_pos;
- if (event[cut_pos] != ',') {
- // Crash in debug mode, skip in release mode.
- ASSERT(false);
- return;
- }
- // Append a piece of event that fits, without trailing comma.
- msg.AppendStringPart(event + pos, cut_pos - pos);
- // Start next piece with comma.
- pos = cut_pos;
- } else {
- msg.Append("%s", event + pos);
- pos += event_len;
- }
- msg.Append('\n');
- msg.WriteToLogFile();
- } while (pos < event_len);
-#endif
-}
-
-
-void Logger::HeapSampleJSProducerEvent(const char* constructor,
- Address* stack) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!log_->IsEnabled() || !FLAG_log_gc) return;
- LogMessageBuilder msg(this);
- msg.Append("heap-js-prod-item,%s", constructor);
- while (*stack != NULL) {
- msg.Append(",0x%" V8PRIxPTR, *stack++);
- }
- msg.Append("\n");
- msg.WriteToLogFile();
-#endif
}
void Logger::DebugTag(const char* call_site_tag) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log) return;
LogMessageBuilder msg(this);
msg.Append("debug-tag,%s\n", call_site_tag);
msg.WriteToLogFile();
-#endif
}
void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log) return;
StringBuilder s(parameter.length() + 1);
for (int i = 0; i < parameter.length(); ++i) {
@@ -1411,11 +1235,9 @@
parameter_string);
DeleteArray(parameter_string);
msg.WriteToLogFile();
-#endif
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::TickEvent(TickSample* sample, bool overflow) {
if (!log_->IsEnabled() || !FLAG_prof) return;
LogMessageBuilder msg(this);
@@ -1443,21 +1265,14 @@
}
-int Logger::GetActiveProfilerModules() {
- int result = PROFILER_MODULE_NONE;
- if (profiler_ != NULL && !profiler_->paused()) {
- result |= PROFILER_MODULE_CPU;
- }
- if (FLAG_log_gc) {
- result |= PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS;
- }
- return result;
+bool Logger::IsProfilerPaused() {
+ return profiler_ == NULL || profiler_->paused();
}
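The profiler control surface shrinks here from module flags plus tags to a plain pause/resume pair with a paused query. A hypothetical caller after this change (a sketch, not code from this patch):

    // Old: logger->PauseProfiler(PROFILER_MODULE_CPU, tag);
    if (!logger->IsProfilerPaused()) logger->PauseProfiler();
    // ... CPU tick events are discarded while paused ...
    logger->ResumeProfiler();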
-void Logger::PauseProfiler(int flags, int tag) {
+void Logger::PauseProfiler() {
if (!log_->IsEnabled()) return;
- if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
+ if (profiler_ != NULL) {
// It is OK to have negative nesting.
if (--cpu_profiler_nesting_ == 0) {
profiler_->pause();
@@ -1466,31 +1281,17 @@
ticker_->Stop();
}
FLAG_log_code = false;
- // Must be the same message as Log::kDynamicBufferSeal.
LOG(ISOLATE, UncheckedStringEvent("profiler", "pause"));
}
--logging_nesting_;
}
}
- if (flags &
- (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
- if (--heap_profiler_nesting_ == 0) {
- FLAG_log_gc = false;
- --logging_nesting_;
- }
- }
- if (tag != 0) {
- UncheckedIntEvent("close-tag", tag);
- }
}
-void Logger::ResumeProfiler(int flags, int tag) {
+void Logger::ResumeProfiler() {
if (!log_->IsEnabled()) return;
- if (tag != 0) {
- UncheckedIntEvent("open-tag", tag);
- }
- if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
+ if (profiler_ != NULL) {
if (cpu_profiler_nesting_++ == 0) {
++logging_nesting_;
if (FLAG_prof_lazy) {
@@ -1506,20 +1307,13 @@
profiler_->resume();
}
}
- if (flags &
- (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
- if (heap_profiler_nesting_++ == 0) {
- ++logging_nesting_;
- FLAG_log_gc = true;
- }
- }
}
// This function can be called when Log's mutex is acquired,
// either from main or Profiler's thread.
void Logger::LogFailure() {
- PauseProfiler(PROFILER_MODULE_CPU, 0);
+ PauseProfiler();
}
@@ -1528,11 +1322,6 @@
}
-int Logger::GetLogLines(int from_pos, char* dest_buf, int max_size) {
- return log_->GetLogLines(from_pos, dest_buf, max_size);
-}
-
-
class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
public:
EnumerateOptimizedFunctionsVisitor(Handle<SharedFunctionInfo>* sfis,
@@ -1544,8 +1333,12 @@
virtual void LeaveContext(Context* context) {}
virtual void VisitFunction(JSFunction* function) {
+ SharedFunctionInfo* sfi = SharedFunctionInfo::cast(function->shared());
+ Object* maybe_script = sfi->script();
+ if (maybe_script->IsScript()
+ && !Script::cast(maybe_script)->HasValidSource()) return;
if (sfis_ != NULL) {
- sfis_[*count_] = Handle<SharedFunctionInfo>(function->shared());
+ sfis_[*count_] = Handle<SharedFunctionInfo>(sfi);
}
if (code_objects_ != NULL) {
ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
@@ -1649,7 +1442,6 @@
void Logger::LogCodeInfo() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_ll_prof) return;
#if V8_TARGET_ARCH_IA32
const char arch[] = "ia32";
@@ -1661,7 +1453,6 @@
const char arch[] = "unknown";
#endif
LowLevelLogWriteBytes(arch, sizeof(arch));
-#endif // ENABLE_LOGGING_AND_PROFILING
}
@@ -1814,11 +1605,8 @@
}
}
-#endif
-
bool Logger::Setup() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
// Tests and EnsureInitialize() can call this twice in a row. It's harmless.
if (is_initialized_) return true;
is_initialized_ = true;
@@ -1842,9 +1630,9 @@
if (FLAG_ll_prof) LogCodeInfo();
- ticker_ = new Ticker(Isolate::Current(), kSamplingIntervalMs);
-
Isolate* isolate = Isolate::Current();
+ ticker_ = new Ticker(isolate, kSamplingIntervalMs);
+
if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
sliding_state_window_ = new SlidingStateWindow(isolate);
}
@@ -1870,10 +1658,6 @@
}
return true;
-
-#else
- return false;
-#endif
}
@@ -1883,23 +1667,18 @@
void Logger::EnsureTickerStarted() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(ticker_ != NULL);
if (!ticker_->IsActive()) ticker_->Start();
-#endif
}
void Logger::EnsureTickerStopped() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (ticker_ != NULL && ticker_->IsActive()) ticker_->Stop();
-#endif
}
-void Logger::TearDown() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (!is_initialized_) return;
+FILE* Logger::TearDown() {
+ if (!is_initialized_) return NULL;
is_initialized_ = false;
// Stop the profiler before closing the file.
@@ -1915,13 +1694,11 @@
delete ticker_;
ticker_ = NULL;
- log_->Close();
-#endif
+ return log_->Close();
}
void Logger::EnableSlidingStateWindow() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
// If the ticker is NULL, Logger::Setup has not been called yet. In
// that case, we set the sliding_state_window flag so that the
// sliding window computation will be started when Logger::Setup is
@@ -1935,7 +1712,6 @@
if (sliding_state_window_ == NULL) {
sliding_state_window_ = new SlidingStateWindow(Isolate::Current());
}
-#endif
}
diff --git a/src/log.h b/src/log.h
index 93f3fe7..31d8404 100644
--- a/src/log.h
+++ b/src/log.h
@@ -78,7 +78,6 @@
class Ticker;
#undef LOG
-#ifdef ENABLE_LOGGING_AND_PROFILING
#define LOG(isolate, Call) \
do { \
v8::internal::Logger* logger = \
@@ -86,9 +85,6 @@
if (logger->is_logging()) \
logger->Call; \
} while (false)
-#else
-#define LOG(isolate, Call) ((void) 0)
-#endif
#define LOG_EVENTS_AND_TAGS_LIST(V) \
V(CODE_CREATION_EVENT, "code-creation") \
@@ -161,7 +157,9 @@
Sampler* sampler();
// Frees resources acquired in Setup.
- void TearDown();
+ // When a temporary file is used for the log, returns its stream descriptor,
+ // leaving the file open.
+ FILE* TearDown();
// Enable the computation of a sliding window of states.
void EnableSlidingStateWindow();
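A minimal sketch of the embedder-side cleanup the new signature enables, assuming the log went to a temporary file (the read-back step is illustrative):

    FILE* log_stream = LOGGER->TearDown();
    if (log_stream != NULL) {
      fseek(log_stream, 0, SEEK_SET);
      // ... read the buffered profiling data back ...
      fclose(log_stream);  // The caller now owns and closes the stream.
    }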
@@ -272,7 +270,6 @@
// Log an event reported from generated code
void LogRuntime(Vector<const char> format, JSArray* args);
-#ifdef ENABLE_LOGGING_AND_PROFILING
bool is_logging() {
return logging_nesting_ > 0;
}
@@ -280,13 +277,9 @@
// Pause/Resume collection of profiling data.
// When data collection is paused, CPU Tick events are discarded until
// data collection is Resumed.
- void PauseProfiler(int flags, int tag);
- void ResumeProfiler(int flags, int tag);
- int GetActiveProfilerModules();
-
- // If logging is performed into a memory buffer, allows to
- // retrieve previously written messages. See v8.h.
- int GetLogLines(int from_pos, char* dest_buf, int max_size);
+ void PauseProfiler();
+ void ResumeProfiler();
+ bool IsProfilerPaused();
// Logs all compiled functions found in the heap.
void LogCompiledFunctions();
@@ -407,7 +400,6 @@
int logging_nesting_;
int cpu_profiler_nesting_;
- int heap_profiler_nesting_;
Log* log_;
@@ -431,9 +423,6 @@
Address prev_code_;
friend class CpuProfiler;
-#else
- bool is_logging() { return false; }
-#endif
};
diff --git a/src/macros.py b/src/macros.py
index 28d501f..fc08cb1 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -38,12 +38,13 @@
const SETTER = 1;
# These definitions must match the index of the properties in objects.h.
-const kApiTagOffset = 0;
-const kApiPropertyListOffset = 1;
-const kApiSerialNumberOffset = 2;
-const kApiConstructorOffset = 2;
-const kApiPrototypeTemplateOffset = 5;
-const kApiParentTemplateOffset = 6;
+const kApiTagOffset = 0;
+const kApiPropertyListOffset = 1;
+const kApiSerialNumberOffset = 2;
+const kApiConstructorOffset = 2;
+const kApiPrototypeTemplateOffset = 5;
+const kApiParentTemplateOffset = 6;
+const kApiPrototypeAttributesOffset = 15;
const NO_HINT = 0;
const NUMBER_HINT = 1;
@@ -128,6 +129,7 @@
macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString(arg));
macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber(arg));
macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg));
+macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
# Macros implemented in Python.
python macro CHAR_CODE(str) = ord(str[1]);
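The JSON_NUMBER_TO_STRING guard leans on IEEE-754 arithmetic: x - x is 0 for every finite x but NaN for NaN and the infinities, which JSON must render as null (the %_IsSmi test just short-circuits for small integers). The same check in a self-contained C++ sketch, with JsonNumberGuard as a hypothetical helper:

    #include <cmath>
    // Finite doubles may be stringified; NaN/Infinity must become "null".
    const char* JsonNumberGuard(double x) {
      return (x - x == 0.0) ? "stringify" : "null";  // NaN != 0; Inf - Inf is NaN
    }
    // JsonNumberGuard(1.5) -> "stringify"; JsonNumberGuard(INFINITY) -> "null".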
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 9439905..0bf8286 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -396,6 +396,8 @@
FixedArray::BodyDescriptor,
void>::Visit);
+ table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
+
table_.Register(kVisitGlobalContext,
&FixedBodyVisitor<StaticMarkingVisitor,
Context::MarkCompactBodyDescriptor,
@@ -675,8 +677,9 @@
Map* map = SafeMap(ctx);
Heap* heap = map->heap();
- if (!(map == heap->raw_unchecked_context_map() ||
+ if (!(map == heap->raw_unchecked_function_context_map() ||
map == heap->raw_unchecked_catch_context_map() ||
+ map == heap->raw_unchecked_with_context_map() ||
map == heap->raw_unchecked_global_context_map())) {
return false;
}
@@ -942,18 +945,6 @@
StaticMarkingVisitor::VisitPointers(heap_, start, end);
}
- void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
- StaticMarkingVisitor::VisitCodeTarget(heap, rinfo);
- }
-
- void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
- StaticMarkingVisitor::VisitGlobalPropertyCell(heap, rinfo);
- }
-
- void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
- StaticMarkingVisitor::VisitDebugTarget(heap, rinfo);
- }
-
private:
Heap* heap_;
};
@@ -1106,6 +1097,7 @@
int PointersRemoved() {
return pointers_removed_;
}
+
private:
Heap* heap_;
int pointers_removed_;
@@ -1136,9 +1128,12 @@
map->ClearCodeCache(heap());
}
SetMark(map);
- if (FLAG_collect_maps &&
- map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
- map->instance_type() <= JS_FUNCTION_TYPE) {
+
+ // When map collection is enabled, we have to mark through a map's
+ // transitions in a special way to make transition links weak.

+ // Only maps for subclasses of JSReceiver can have transitions.
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
MarkMapContents(map);
} else {
marking_stack_.Push(map);
@@ -1220,8 +1215,8 @@
next_object != NULL; next_object = iterator.next()) {
if (next_object->IsMap()) { // Could also be ByteArray on free list.
Map* map = Map::cast(next_object);
- if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
- map->instance_type() <= JS_FUNCTION_TYPE) {
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
map->CreateBackPointers();
} else {
ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
@@ -1505,6 +1500,12 @@
// reachable from the weak roots.
ProcessExternalMarking();
+ // Object literal map caches reference symbols (cache keys) and maps
+ // (cache values). At this point, still-useful maps have already been
+ // marked. Mark the keys for the live values before we process the
+ // symbol table.
+ ProcessMapCaches();
+
// Prune the symbol table removing all symbols only pointed to by the
// symbol table. Cannot use symbol_table() here because the symbol
// table is marked.
@@ -1533,6 +1534,57 @@
}
+void MarkCompactCollector::ProcessMapCaches() {
+ Object* raw_context = heap()->global_contexts_list_;
+ while (raw_context != heap()->undefined_value()) {
+ Context* context = reinterpret_cast<Context*>(raw_context);
+ if (context->IsMarked()) {
+ HeapObject* raw_map_cache =
+ HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
+ // A map cache may be reachable from the stack. In this case
+ // it's already transitively marked and it's too late to clean
+ // up its parts.
+ if (!raw_map_cache->IsMarked() &&
+ raw_map_cache != heap()->undefined_value()) {
+ MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
+ int existing_elements = map_cache->NumberOfElements();
+ int used_elements = 0;
+ for (int i = MapCache::kElementsStartIndex;
+ i < map_cache->length();
+ i += MapCache::kEntrySize) {
+ Object* raw_key = map_cache->get(i);
+ if (raw_key == heap()->undefined_value() ||
+ raw_key == heap()->null_value()) continue;
+ STATIC_ASSERT(MapCache::kEntrySize == 2);
+ Object* raw_map = map_cache->get(i + 1);
+ if (raw_map->IsHeapObject() &&
+ HeapObject::cast(raw_map)->IsMarked()) {
+ ++used_elements;
+ } else {
+ // Delete useless entries with unmarked maps.
+ ASSERT(raw_map->IsMap());
+ map_cache->set_null_unchecked(heap(), i);
+ map_cache->set_null_unchecked(heap(), i + 1);
+ }
+ }
+ if (used_elements == 0) {
+ context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
+ } else {
+ // Note: we don't actually shrink the cache here to avoid
+ // extra complexity during GC. We rely on subsequent cache
+ // usages (EnsureCapacity) to do this.
+ map_cache->ElementsRemoved(existing_elements - used_elements);
+ MarkObject(map_cache);
+ }
+ }
+ }
+ // Move to next element in the list.
+ raw_context = context->get(Context::NEXT_CONTEXT_LINK);
+ }
+ ProcessMarkingStack();
+}
+
+
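The scan above treats the cache as flat key/value pairs; a comment sketch of the layout it assumes (constants as named in the code, kEntrySize == 2):

    // [ ...header..., key0, map0, key1, map1, ... ]
    // Keys start at kElementsStartIndex and step by kEntrySize; a dead
    // entry has both slots nulled in place, and a cache with no live
    // entries left is replaced by undefined on the context.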
#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
live_bytes_ += obj->Size();
@@ -1597,8 +1649,8 @@
ASSERT(SafeIsMap(map));
// Only JSObject and subtypes have map transitions and back pointers.
- if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
- if (map->instance_type() > JS_FUNCTION_TYPE) continue;
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
if (map->IsMarked() && map->attached_to_shared_function_info()) {
// This map is used for inobject slack tracking and has been detached
@@ -1608,38 +1660,48 @@
}
// Clear dead prototype transitions.
- FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
- if (prototype_transitions->length() > 0) {
- int finger = Smi::cast(prototype_transitions->get(0))->value();
- int new_finger = 1;
- for (int i = 1; i < finger; i += 2) {
- Object* prototype = prototype_transitions->get(i);
- Object* cached_map = prototype_transitions->get(i + 1);
+ int number_of_transitions = map->NumberOfProtoTransitions();
+ if (number_of_transitions > 0) {
+ FixedArray* prototype_transitions =
+ map->unchecked_prototype_transitions();
+ int new_number_of_transitions = 0;
+ const int header = Map::kProtoTransitionHeaderSize;
+ const int proto_offset =
+ header + Map::kProtoTransitionPrototypeOffset;
+ const int map_offset = header + Map::kProtoTransitionMapOffset;
+ const int step = Map::kProtoTransitionElementsPerEntry;
+ for (int i = 0; i < number_of_transitions; i++) {
+ Object* prototype = prototype_transitions->get(proto_offset + i * step);
+ Object* cached_map = prototype_transitions->get(map_offset + i * step);
if (HeapObject::cast(prototype)->IsMarked() &&
HeapObject::cast(cached_map)->IsMarked()) {
- if (new_finger != i) {
- prototype_transitions->set_unchecked(heap_,
- new_finger,
- prototype,
- UPDATE_WRITE_BARRIER);
- prototype_transitions->set_unchecked(heap_,
- new_finger + 1,
- cached_map,
- SKIP_WRITE_BARRIER);
+ if (new_number_of_transitions != i) {
+ prototype_transitions->set_unchecked(
+ heap_,
+ proto_offset + new_number_of_transitions * step,
+ prototype,
+ UPDATE_WRITE_BARRIER);
+ prototype_transitions->set_unchecked(
+ heap_,
+ map_offset + new_number_of_transitions * step,
+ cached_map,
+ SKIP_WRITE_BARRIER);
}
- new_finger += 2;
+ new_number_of_transitions++;
}
}
// Fill slots that became free with undefined value.
Object* undefined = heap()->raw_unchecked_undefined_value();
- for (int i = new_finger; i < finger; i++) {
+ for (int i = new_number_of_transitions * step;
+ i < number_of_transitions * step;
+ i++) {
prototype_transitions->set_unchecked(heap_,
- i,
+ header + i,
undefined,
SKIP_WRITE_BARRIER);
}
- prototype_transitions->set_unchecked(0, Smi::FromInt(new_finger));
+ map->SetNumberOfProtoTransitions(new_number_of_transitions);
}
// Follow the chain of back pointers to find the prototype.
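For orientation, the compaction loop above assumes the new prototype-transition layout (offsets as named by the Map constants in the code):

    // [ header (kProtoTransitionHeaderSize slots) | entry 0 | entry 1 | ... ]
    // entry i: prototype at header + kProtoTransitionPrototypeOffset + i * step,
    //          cached map at header + kProtoTransitionMapOffset + i * step,
    //          where step == kProtoTransitionElementsPerEntry.
    // Live entries are slid to the front, the freed tail is overwritten
    // with undefined, and only the entry count on the map is updated.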
@@ -2001,6 +2063,7 @@
VisitPointer(&target);
rinfo->set_call_address(Code::cast(target)->instruction_start());
}
+
private:
Heap* heap_;
};
@@ -3195,11 +3258,9 @@
GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
}
#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {
PROFILE(isolate, CodeDeleteEvent(obj->address()));
}
-#endif
}
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 04d0ff6..179edba 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -306,6 +306,10 @@
// flag on the marking stack.
void RefillMarkingStack();
+ // After reachable maps have been marked, process the per-context object
+ // literal map caches, removing unmarked entries.
+ void ProcessMapCaches();
+
// Callback function for telling whether the object *p is an unmarked
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
diff --git a/src/messages.js b/src/messages.js
index 75ea99d..0b314a4 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -151,6 +151,7 @@
unexpected_token_number: ["Unexpected number"],
unexpected_token_string: ["Unexpected string"],
unexpected_token_identifier: ["Unexpected identifier"],
+ unexpected_reserved: ["Unexpected reserved word"],
unexpected_strict_reserved: ["Unexpected strict mode reserved word"],
unexpected_eos: ["Unexpected end of input"],
malformed_regexp: ["Invalid regular expression: /", "%0", "/: ", "%1"],
@@ -193,9 +194,15 @@
proto_object_or_null: ["Object prototype may only be an Object or null"],
property_desc_object: ["Property description must be an object: ", "%0"],
redefine_disallowed: ["Cannot redefine property: ", "%0"],
- define_disallowed: ["Cannot define property, object is not extensible: ", "%0"],
+ define_disallowed: ["Cannot define property:", "%0", ", object is not extensible."],
non_extensible_proto: ["%0", " is not extensible"],
+ handler_non_object: ["Proxy.", "%0", " called with non-object as handler"],
handler_trap_missing: ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
+ handler_returned_false: ["Proxy handler ", "%0", " returned false for '", "%1", "' trap"],
+ handler_returned_undefined: ["Proxy handler ", "%0", " returned undefined for '", "%1", "' trap"],
+ proxy_prop_not_configurable: ["Trap ", "%1", " of proxy handler ", "%0", " returned non-configurable descriptor for property ", "%2"],
+ proxy_non_object_prop_names: ["Trap ", "%1", " returned non-object ", "%0"],
+ proxy_repeated_prop_name: ["Trap ", "%1", " returned repeated property name ", "%2"],
// RangeError
invalid_array_length: ["Invalid array length"],
stack_overflow: ["Maximum call stack size exceeded"],
@@ -242,7 +249,7 @@
strict_poison_pill: ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
strict_caller: ["Illegal access to a strict mode caller function."],
cant_prevent_ext_external_array_elements: ["Cannot prevent extension of an object with external array elements"],
- redef_external_array_element: ["Cannot redefine a property of an object with external array elements"],
+ redef_external_array_element: ["Cannot redefine a property of an object"]
};
}
var message_type = %MessageGetType(message);
@@ -687,18 +694,24 @@
// can't rely on 'this' being the same as 'obj'.
var hasBeenSet = false;
var value;
- obj.__defineGetter__(name, function () {
+ function getter() {
if (hasBeenSet) {
return value;
}
hasBeenSet = true;
value = fun(obj);
return value;
- });
- obj.__defineSetter__(name, function (v) {
+ }
+ function setter(v) {
hasBeenSet = true;
value = v;
- });
+ }
+ var desc = { get: getter,
+ set: setter,
+ enumerable: false,
+ configurable: true };
+ desc = ToPropertyDescriptor(desc);
+ DefineOwnProperty(obj, name, desc, true);
}
function CallSite(receiver, fun, pos) {
@@ -1014,15 +1027,15 @@
// overwriting allows leaks of error objects between script blocks
// in the same context in a browser setting. Therefore we fix the
// name.
- %SetProperty(f.prototype, "name", name, READ_ONLY | DONT_DELETE);
+ %SetProperty(f.prototype, "name", name, DONT_ENUM | DONT_DELETE | READ_ONLY);
%SetCode(f, function(m) {
if (%_IsConstructCall()) {
// Define all the expected properties directly on the error
// object. This avoids going through getters and setters defined
// on prototype objects.
- %IgnoreAttributesAndSetProperty(this, 'stack', void 0);
- %IgnoreAttributesAndSetProperty(this, 'arguments', void 0);
- %IgnoreAttributesAndSetProperty(this, 'type', void 0);
+ %IgnoreAttributesAndSetProperty(this, 'stack', void 0, DONT_ENUM);
+ %IgnoreAttributesAndSetProperty(this, 'arguments', void 0, DONT_ENUM);
+ %IgnoreAttributesAndSetProperty(this, 'type', void 0, DONT_ENUM);
if (m === kAddMessageAccessorsMarker) {
// DefineOneShotAccessor always inserts a message property and
// ignores setters.
@@ -1030,7 +1043,10 @@
return FormatMessage(%NewMessageObject(obj.type, obj.arguments));
});
} else if (!IS_UNDEFINED(m)) {
- %IgnoreAttributesAndSetProperty(this, 'message', ToString(m));
+ %IgnoreAttributesAndSetProperty(this,
+ 'message',
+ ToString(m),
+ DONT_ENUM);
}
captureStackTrace(this, f);
} else {
@@ -1055,18 +1071,32 @@
$Math.__proto__ = global.Object.prototype;
-DefineError(function Error() { });
-DefineError(function TypeError() { });
-DefineError(function RangeError() { });
-DefineError(function SyntaxError() { });
-DefineError(function ReferenceError() { });
-DefineError(function EvalError() { });
-DefineError(function URIError() { });
+// DefineError is a native function. Use explicit receiver. Otherwise
+// the receiver will be 'undefined'.
+this.DefineError(function Error() { });
+this.DefineError(function TypeError() { });
+this.DefineError(function RangeError() { });
+this.DefineError(function SyntaxError() { });
+this.DefineError(function ReferenceError() { });
+this.DefineError(function EvalError() { });
+this.DefineError(function URIError() { });
$Error.captureStackTrace = captureStackTrace;
// Setup extra properties of the Error.prototype object.
-$Error.prototype.message = '';
+function setErrorMessage() {
+ var desc = {value: '',
+ enumerable: false,
+ configurable: true,
+ writable: true };
+ DefineOwnProperty($Error.prototype,
+ 'message',
+ ToPropertyDescriptor(desc),
+ true);
+
+}
+
+setErrorMessage();
// Global list of error objects visited during errorToString. This is
// used to detect cycles in error toString formatting.
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index e787fed..b5ffe73 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -83,8 +83,12 @@
// RelocInfo.
void RelocInfo::apply(intptr_t delta) {
- // On MIPS we do not use pc relative addressing, so we don't need to patch the
- // code here.
+ if (IsInternalReference(rmode_)) {
+ // Absolute code pointer inside code object moves with the code object.
+ byte* p = reinterpret_cast<byte*>(pc_);
+ int count = Assembler::RelocateInternalReference(p, delta);
+ CPU::FlushICache(p, count * sizeof(uint32_t));
+ }
}
@@ -300,7 +304,9 @@
void Assembler::emit(Instr x) {
- CheckBuffer();
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
CheckTrampolinePoolQuick();
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index 2e10904..51642e0 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -140,7 +140,7 @@
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.
-const int RelocInfo::kApplyMask = 0;
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
bool RelocInfo::IsCodedSpecially() {
@@ -275,11 +275,17 @@
last_trampoline_pool_end_ = 0;
no_trampoline_pool_before_ = 0;
trampoline_pool_blocked_nesting_ = 0;
- next_buffer_check_ = kMaxBranchOffset - kTrampolineSize;
+ // We leave space (16 * kTrampolineSlotsSize) for the
+ // BlockTrampolinePoolScope buffer.
+ next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
internal_trampoline_exception_ = false;
last_bound_pos_ = 0;
- ast_id_for_reloc_info_ = kNoASTId;
+ trampoline_emitted_ = false;
+ unbound_labels_count_ = 0;
+ block_buffer_growth_ = false;
+
+ ClearRecordedAstId();
}
@@ -386,6 +392,16 @@
}
+uint32_t Assembler::GetFunction(Instr instr) {
+ return (instr & kFunctionFieldMask) >> kFunctionShift;
+}
+
+
+uint32_t Assembler::GetFunctionField(Instr instr) {
+ return instr & kFunctionFieldMask;
+}
+
+
uint32_t Assembler::GetImmediate16(Instr instr) {
return instr & kImm16Mask;
}
@@ -444,6 +460,8 @@
// code is converted to an 18-bit value addressing bytes, hence the -4 value.
const int kEndOfChain = -4;
+// Determines the end of the Jump chain (a subset of the label link chain).
+const int kEndOfJumpChain = 0;
bool Assembler::IsBranch(Instr instr) {
@@ -477,6 +495,39 @@
}
+bool Assembler::IsJump(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rt_field = GetRtField(instr);
+ uint32_t rd_field = GetRdField(instr);
+ uint32_t function_field = GetFunctionField(instr);
+ // Checks if the instruction is a jump.
+ return opcode == J || opcode == JAL ||
+ (opcode == SPECIAL && rt_field == 0 &&
+ ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
+}
+
+
+bool Assembler::IsJ(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is a plain J-format jump.
+ return opcode == J;
+}
+
+
+bool Assembler::IsLui(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is a load upper immediate.
+ return opcode == LUI;
+}
+
+
+bool Assembler::IsOri(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is an or-immediate (ORI).
+ return opcode == ORI;
+}
+
+
bool Assembler::IsNop(Instr instr, unsigned int type) {
// See Assembler::nop(type).
ASSERT(type < 32);
@@ -564,17 +615,47 @@
return (imm18 + pos);
}
}
- // Check we have a branch instruction.
- ASSERT(IsBranch(instr));
+ // Check we have a branch or jump instruction.
+ ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
// Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
// the compiler uses arithmetic shifts for signed integers.
- int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ if (IsBranch(instr)) {
+ int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
- if (imm18 == kEndOfChain) {
- // EndOfChain sentinel is returned directly, not relative to pc or pos.
- return kEndOfChain;
+ if (imm18 == kEndOfChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ return pos + kBranchPCOffset + imm18;
+ }
+ } else if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+ ASSERT(IsOri(instr_ori));
+ int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+
+ if (imm == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
+ int32_t delta = instr_address - imm;
+ ASSERT(pos > delta);
+ return pos - delta;
+ }
} else {
- return pos + kBranchPCOffset + imm18;
+ int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+ if (imm28 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
+ instr_address &= kImm28Mask;
+ int32_t delta = instr_address - imm28;
+ ASSERT(pos > delta);
+ return pos - delta;
+ }
}
}
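The IsLui branch reassembles a full 32-bit address from the two 16-bit immediates of a lui/ori pair; the round trip, as a runnable sketch (mask and shift values mirror the assembler's constants):

    #include <stdint.h>
    enum { kImm16Mask = 0xffff, kLuiShift = 16 };
    uint32_t Rebuild(uint32_t addr) {
      uint32_t hi = (addr >> kLuiShift) & kImm16Mask;  // lui payload
      uint32_t lo = addr & kImm16Mask;                 // ori payload
      return (hi << kLuiShift) | lo;                   // == addr
    }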
@@ -589,15 +670,41 @@
return;
}
- ASSERT(IsBranch(instr));
- int32_t imm18 = target_pos - (pos + kBranchPCOffset);
- ASSERT((imm18 & 3) == 0);
+ ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
+ if (IsBranch(instr)) {
+ int32_t imm18 = target_pos - (pos + kBranchPCOffset);
+ ASSERT((imm18 & 3) == 0);
- instr &= ~kImm16Mask;
- int32_t imm16 = imm18 >> 2;
- ASSERT(is_int16(imm16));
+ instr &= ~kImm16Mask;
+ int32_t imm16 = imm18 >> 2;
+ ASSERT(is_int16(imm16));
- instr_at_put(pos, instr | (imm16 & kImm16Mask));
+ instr_at_put(pos, instr | (imm16 & kImm16Mask));
+ } else if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+ ASSERT(IsOri(instr_ori));
+ uint32_t imm = (uint32_t)buffer_ + target_pos;
+ ASSERT((imm & 3) == 0);
+
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
+
+ instr_at_put(pos + 0 * Assembler::kInstrSize,
+ instr_lui | ((imm & kHiMask) >> kLuiShift));
+ instr_at_put(pos + 1 * Assembler::kInstrSize,
+ instr_ori | (imm & kImm16Mask));
+ } else {
+ uint32_t imm28 = (uint32_t)buffer_ + target_pos;
+ imm28 &= kImm28Mask;
+ ASSERT((imm28 & 3) == 0);
+
+ instr &= ~kImm26Mask;
+ uint32_t imm26 = imm28 >> 2;
+ ASSERT(is_uint26(imm26));
+
+ instr_at_put(pos, instr | (imm26 & kImm26Mask));
+ }
}
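The final branch handles J-format targets, which are region-relative; a comment sketch of the packing (256 MB region, 4-byte-aligned targets):

    // imm28 = target_address & kImm28Mask   // stay inside the 256 MB region
    // imm26 = imm28 >> 2                    // word-aligned, so low bits drop
    // instr = (instr & ~kImm26Mask) | imm26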
@@ -627,36 +734,33 @@
void Assembler::bind_to(Label* L, int pos) {
ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
+ int32_t trampoline_pos = kInvalidSlotPos;
+ if (L->is_linked() && !trampoline_emitted_) {
+ unbound_labels_count_--;
+ next_buffer_check_ += kTrampolineSlotsSize;
+ }
+
while (L->is_linked()) {
int32_t fixup_pos = L->pos();
int32_t dist = pos - fixup_pos;
next(L); // Call next before overwriting link with target at fixup_pos.
- if (dist > kMaxBranchOffset) {
- do {
- int32_t trampoline_pos = get_trampoline_entry(fixup_pos);
- if (kInvalidSlotPos == trampoline_pos) {
- // Internal error.
- return;
+ Instr instr = instr_at(fixup_pos);
+ if (IsBranch(instr)) {
+ if (dist > kMaxBranchOffset) {
+ if (trampoline_pos == kInvalidSlotPos) {
+ trampoline_pos = get_trampoline_entry(fixup_pos);
+ CHECK(trampoline_pos != kInvalidSlotPos);
}
ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
target_at_put(fixup_pos, trampoline_pos);
fixup_pos = trampoline_pos;
dist = pos - fixup_pos;
- } while (dist > kMaxBranchOffset);
- } else if (dist < -kMaxBranchOffset) {
- do {
- int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false);
- if (kInvalidSlotPos == trampoline_pos) {
- // Internal error.
- return;
- }
- ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset);
- target_at_put(fixup_pos, trampoline_pos);
- fixup_pos = trampoline_pos;
- dist = pos - fixup_pos;
- } while (dist < -kMaxBranchOffset);
- };
- target_at_put(fixup_pos, pos);
+ }
+ target_at_put(fixup_pos, pos);
+ } else {
+ ASSERT(IsJ(instr) || IsLui(instr));
+ target_at_put(fixup_pos, pos);
+ }
}
L->bind_to(pos);
@@ -667,27 +771,6 @@
}
-void Assembler::link_to(Label* L, Label* appendix) {
- if (appendix->is_linked()) {
- if (L->is_linked()) {
- // Append appendix to L's list.
- int fixup_pos;
- int link = L->pos();
- do {
- fixup_pos = link;
- link = target_at(fixup_pos);
- } while (link > 0);
- ASSERT(link == kEndOfChain);
- target_at_put(fixup_pos, appendix->pos());
- } else {
- // L is empty, simply use appendix.
- *L = *appendix;
- }
- }
- appendix->Unuse(); // Appendix should not be used anymore.
-}
-
-
void Assembler::bind(Label* L) {
ASSERT(!L->is_bound()); // Label can only be bound once.
bind_to(L, pc_offset());
@@ -705,6 +788,12 @@
}
}
+bool Assembler::is_near(Label* L) {
+ if (L->is_bound()) {
+ return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
+ }
+ return false;
+}
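The bound used by is_near follows from the branch format's 16-bit signed, word-scaled offset. Assuming kMaxBranchOffset is (1 << 17) - 1 = 131071 bytes (an assumption; the constant is defined elsewhere in the assembler), the check amounts to:

    // pc_offset() - L->pos() < 131071 - 4 * kInstrSize
    // i.e. roughly 128 KB of range, minus four instructions of headroom.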
// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
@@ -817,7 +906,6 @@
}
-// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrJump(Opcode opcode,
uint32_t address) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -828,53 +916,15 @@
}
-// Returns the next free label entry from the next trampoline pool.
-int32_t Assembler::get_label_entry(int32_t pos, bool next_pool) {
- int trampoline_count = trampolines_.length();
- int32_t label_entry = 0;
- ASSERT(trampoline_count > 0);
-
- if (next_pool) {
- for (int i = 0; i < trampoline_count; i++) {
- if (trampolines_[i].start() > pos) {
- label_entry = trampolines_[i].take_label();
- break;
- }
- }
- } else { // Caller needs a label entry from the previous pool.
- for (int i = trampoline_count-1; i >= 0; i--) {
- if (trampolines_[i].end() < pos) {
- label_entry = trampolines_[i].take_label();
- break;
- }
- }
- }
- return label_entry;
-}
-
-
-// Returns the next free trampoline entry from the next trampoline pool.
-int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) {
- int trampoline_count = trampolines_.length();
+// Returns the next free trampoline entry.
+int32_t Assembler::get_trampoline_entry(int32_t pos) {
int32_t trampoline_entry = kInvalidSlotPos;
- ASSERT(trampoline_count > 0);
if (!internal_trampoline_exception_) {
- if (next_pool) {
- for (int i = 0; i < trampoline_count; i++) {
- if (trampolines_[i].start() > pos) {
- trampoline_entry = trampolines_[i].take_slot();
- break;
- }
- }
- } else { // Caller needs a trampoline entry from the previous pool.
- for (int i = trampoline_count-1; i >= 0; i--) {
- if (trampolines_[i].end() < pos) {
- trampoline_entry = trampolines_[i].take_slot();
- break;
- }
- }
+ if (trampoline_.start() > pos) {
+ trampoline_entry = trampoline_.take_slot();
}
+
if (kInvalidSlotPos == trampoline_entry) {
internal_trampoline_exception_ = true;
}
@@ -883,64 +933,43 @@
}
-int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+uint32_t Assembler::jump_address(Label* L) {
int32_t target_pos;
- int32_t pc_offset_v = pc_offset();
if (L->is_bound()) {
target_pos = L->pos();
- int32_t dist = pc_offset_v - target_pos;
- if (dist > kMaxBranchOffset) {
- do {
- int32_t trampoline_pos = get_trampoline_entry(target_pos);
- if (kInvalidSlotPos == trampoline_pos) {
- // Internal error.
- return 0;
- }
- ASSERT((trampoline_pos - target_pos) > 0);
- ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset);
- target_at_put(trampoline_pos, target_pos);
- target_pos = trampoline_pos;
- dist = pc_offset_v - target_pos;
- } while (dist > kMaxBranchOffset);
- } else if (dist < -kMaxBranchOffset) {
- do {
- int32_t trampoline_pos = get_trampoline_entry(target_pos, false);
- if (kInvalidSlotPos == trampoline_pos) {
- // Internal error.
- return 0;
- }
- ASSERT((target_pos - trampoline_pos) > 0);
- ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset);
- target_at_put(trampoline_pos, target_pos);
- target_pos = trampoline_pos;
- dist = pc_offset_v - target_pos;
- } while (dist < -kMaxBranchOffset);
- }
} else {
if (L->is_linked()) {
target_pos = L->pos(); // L's link.
- int32_t dist = pc_offset_v - target_pos;
- if (dist > kMaxBranchOffset) {
- do {
- int32_t label_pos = get_label_entry(target_pos);
- ASSERT((label_pos - target_pos) < kMaxBranchOffset);
- label_at_put(L, label_pos);
- target_pos = label_pos;
- dist = pc_offset_v - target_pos;
- } while (dist > kMaxBranchOffset);
- } else if (dist < -kMaxBranchOffset) {
- do {
- int32_t label_pos = get_label_entry(target_pos, false);
- ASSERT((label_pos - target_pos) > -kMaxBranchOffset);
- label_at_put(L, label_pos);
- target_pos = label_pos;
- dist = pc_offset_v - target_pos;
- } while (dist < -kMaxBranchOffset);
- }
L->link_to(pc_offset());
} else {
L->link_to(pc_offset());
+ return kEndOfJumpChain;
+ }
+ }
+
+ uint32_t imm = (uint32_t)buffer_ + target_pos;
+ ASSERT((imm & 3) == 0);
+
+ return imm;
+}
+
+
+int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
return kEndOfChain;
}
}
@@ -969,6 +998,10 @@
} else {
target_pos = kEndOfChain;
instr_at_put(at_offset, 0);
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
}
L->link_to(at_offset);
}
@@ -1336,13 +1369,37 @@
//-------------Misc-instructions--------------
// Break / Trap instructions.
-void Assembler::break_(uint32_t code) {
+void Assembler::break_(uint32_t code, bool break_as_stop) {
ASSERT((code & ~0xfffff) == 0);
+ // We need to invalidate breaks that could be stops as well because the
+ // simulator expects a char pointer after the stop instruction.
+ // See constants-mips.h for explanation.
+ ASSERT((break_as_stop &&
+ code <= kMaxStopCode &&
+ code > kMaxWatchpointCode) ||
+ (!break_as_stop &&
+ (code > kMaxStopCode ||
+ code <= kMaxWatchpointCode)));
Instr break_instr = SPECIAL | BREAK | (code << 6);
emit(break_instr);
}
+void Assembler::stop(const char* msg, uint32_t code) {
+ ASSERT(code > kMaxWatchpointCode);
+ ASSERT(code <= kMaxStopCode);
+#if defined(V8_HOST_ARCH_MIPS)
+ break_(0x54321);
+#else // V8_HOST_ARCH_MIPS
+ BlockTrampolinePoolFor(2);
+ // The Simulator will handle the stop instruction and get the message address.
+ // On MIPS stop() is just a special kind of break_().
+ break_(code, true);
+ emit(reinterpret_cast<Instr>(msg));
+#endif
+}
+
+
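On the simulator path, stop() therefore emits two words back to back, which is why trampoline insertion is blocked for exactly two slots; the message pointer is data, not an instruction:

    // pc + 0: break <code>              SPECIAL | BREAK | (code << 6)
    // pc + 4: <address of msg string>   read by the simulator, never executed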
void Assembler::tge(Register rs, Register rt, uint16_t code) {
ASSERT(is_uint10(code));
Instr instr = SPECIAL | TGE | rs.code() << kRsShift
@@ -1767,6 +1824,48 @@
}
+int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
+ Instr instr = instr_at(pc);
+ ASSERT(IsJ(instr) || IsLui(instr));
+ if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
+ ASSERT(IsOri(instr_ori));
+ int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ if (imm == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ imm += pc_delta;
+ ASSERT((imm & 3) == 0);
+
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
+
+ instr_at_put(pc + 0 * Assembler::kInstrSize,
+ instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+ instr_at_put(pc + 1 * Assembler::kInstrSize,
+ instr_ori | (imm & kImm16Mask));
+ return 2; // Number of instructions patched.
+ } else {
+ uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+ if ((int32_t)imm28 == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ imm28 += pc_delta;
+ imm28 &= kImm28Mask;
+ ASSERT((imm28 & 3) == 0);
+
+ instr &= ~kImm26Mask;
+ uint32_t imm26 = imm28 >> 2;
+ ASSERT(is_uint26(imm26));
+
+ instr_at_put(pc, instr | (imm26 & kImm26Mask));
+ return 1; // Number of instructions patched.
+ }
+}
+
+
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
@@ -1802,9 +1901,14 @@
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
- // On ia32 and ARM pc relative addressing is used, and we thus need to apply a
- // shift by pc_delta. But on MIPS the target address it directly loaded, so
- // we do not need to relocate here.
+ // Relocate internal references.
+ for (RelocIterator it(desc); !it.done(); it.next()) {
+ RelocInfo::Mode rmode = it.rinfo()->rmode();
+ if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
+ RelocateInternalReference(p, pc_delta);
+ }
+ }
ASSERT(!overflow());
}
@@ -1843,9 +1947,8 @@
}
ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- ASSERT(ast_id_for_reloc_info_ != kNoASTId);
- RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_);
- ast_id_for_reloc_info_ = kNoASTId;
+ RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
+ ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
reloc_info_writer.Write(&rinfo);
@@ -1859,16 +1962,7 @@
}
-void Assembler::CheckTrampolinePool(bool force_emit) {
- // Calculate the offset of the next check.
- next_buffer_check_ = pc_offset() + kCheckConstInterval;
-
- int dist = pc_offset() - last_trampoline_pool_end_;
-
- if (dist <= kMaxDistBetweenPools && !force_emit) {
- return;
- }
-
+void Assembler::CheckTrampolinePool() {
// Some small sequences of instructions must not be broken up by the
// insertion of a trampoline pool; such sequences are protected by setting
// either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
@@ -1886,29 +1980,43 @@
return;
}
- // First we emit jump (2 instructions), then we emit trampoline pool.
- { BlockTrampolinePoolScope block_trampoline_pool(this);
- Label after_pool;
- b(&after_pool);
- nop();
-
- int pool_start = pc_offset();
- for (int i = 0; i < kSlotsPerTrampoline; i++) {
+ ASSERT(!trampoline_emitted_);
+ ASSERT(unbound_labels_count_ >= 0);
+ if (unbound_labels_count_ > 0) {
+ // First we emit jump (2 instructions), then we emit trampoline pool.
+ { BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label after_pool;
b(&after_pool);
nop();
- }
- for (int i = 0; i < kLabelsPerTrampoline; i++) {
- emit(0);
- }
- last_trampoline_pool_end_ = pc_offset() - kInstrSize;
- bind(&after_pool);
- trampolines_.Add(Trampoline(pool_start,
- kSlotsPerTrampoline,
- kLabelsPerTrampoline));
- // Since a trampoline pool was just emitted,
- // move the check offset forward by the standard interval.
- next_buffer_check_ = last_trampoline_pool_end_ + kMaxDistBetweenPools;
+ int pool_start = pc_offset();
+ for (int i = 0; i < unbound_labels_count_; i++) {
+ uint32_t imm32;
+ imm32 = jump_address(&after_pool);
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal
+ // references until associated instructions are emitted and available
+ // to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jr(at);
+ nop();
+ }
+ bind(&after_pool);
+ trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+
+ trampoline_emitted_ = true;
+ // As we are only going to emit the trampoline pool once, we need to
+ // prevent any further emission.
+ next_buffer_check_ = kMaxInt;
+ }
+ } else {
+ // Number of branches to unbound label at this point is zero, so we can
+ // move next buffer check to maximum.
+ next_buffer_check_ = pc_offset() +
+ kMaxBranchOffset - kTrampolineSlotsSize * 16;
}
return;
}
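The accounting that replaces the old pool list keeps one invariant: each branch to an unbound label reserves one future trampoline slot by pulling next_buffer_check_ forward. As a sketch (slot size assumed to be 4 instructions, matching the lui/ori/jr/nop sequence emitted above):

    // branch to unbound label: unbound_labels_count_++; next_buffer_check_ -= kTrampolineSlotsSize;
    // label bound:             unbound_labels_count_--; next_buffer_check_ += kTrampolineSlotsSize;
    // pool emitted:            one slot per remaining unbound label; next_buffer_check_ = kMaxInt;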
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index a167393..a16cd80 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -328,8 +328,8 @@
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
public:
-
explicit MemOperand(Register rn, int32_t offset = 0);
+ int32_t offset() const { return offset_; }
private:
int32_t offset_;
@@ -372,6 +372,7 @@
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
+
public:
explicit Scope(CpuFeature f) {
unsigned mask = 1u << f;
@@ -391,11 +392,13 @@
isolate_->set_enabled_cpu_features(old_enabled_);
}
}
- private:
+
+ private:
Isolate* isolate_;
unsigned old_enabled_;
#else
- public:
+
+ public:
explicit Scope(CpuFeature f) {}
#endif
};
@@ -478,6 +481,9 @@
// Note: The same Label can be used for forward and backward branches
// but it may be bound only once.
void bind(Label* L); // Binds an unbound label L to current code position.
+ // Determines if the Label is bound and near enough so that a branch
+ // instruction can be used to reach it, instead of a jump instruction.
+ bool is_near(Label* L);
// Returns the branch offset to the given label from the current code
// position. Links the label to the current position if it is still unbound.
@@ -488,6 +494,7 @@
ASSERT((o & 3) == 0); // Assert the offset is aligned.
return o >> 2;
}
+ uint32_t jump_address(Label* L);
// Puts a label's target address at the given position.
// The high 8 bits are set to zero.
@@ -675,7 +682,8 @@
//-------------Misc-instructions--------------
// Break / Trap instructions.
- void break_(uint32_t code);
+ void break_(uint32_t code, bool break_as_stop = false);
+ void stop(const char* msg, uint32_t code = kMaxStopCode);
void tge(Register rs, Register rt, uint16_t code);
void tgeu(Register rs, Register rt, uint16_t code);
void tlt(Register rs, Register rt, uint16_t code);
@@ -771,8 +779,13 @@
void fcmp(FPURegister src1, const double src2, FPUCondition cond);
// Check the code size generated from label to here.
- int InstructionsGeneratedSince(Label* l) {
- return (pc_offset() - l->pos()) / kInstrSize;
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
+
+ // Check the number of instructions generated from label to here.
+ int InstructionsGeneratedSince(Label* label) {
+ return SizeOfCodeGeneratedSince(label) / kInstrSize;
}
// Class for scoping postponing the trampoline pool generation.
@@ -791,6 +804,25 @@
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
};
+ // Class for postponing the assembly buffer growth. Typically used for
+ // sequences of instructions that must be emitted as a unit, before
+ // buffer growth (and relocation) can occur.
+ // This blocking scope is not nestable.
+ class BlockGrowBufferScope {
+ public:
+ explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockGrowBuffer();
+ }
+ ~BlockGrowBufferScope() {
+ assem_->EndBlockGrowBuffer();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
+ };
+
// Debugging.
// Mark address of the ExitJSFrame code.
@@ -801,12 +833,24 @@
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
- void RecordAstId(unsigned ast_id) { ast_id_for_reloc_info_ = ast_id; }
+ void SetRecordedAstId(unsigned ast_id) {
+ ASSERT(recorded_ast_id_ == kNoASTId);
+ recorded_ast_id_ = ast_id;
+ }
+
+ unsigned RecordedAstId() {
+ ASSERT(recorded_ast_id_ != kNoASTId);
+ return recorded_ast_id_;
+ }
+
+ void ClearRecordedAstId() { recorded_ast_id_ = kNoASTId; }
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
+ static int RelocateInternalReference(byte* pc, intptr_t pc_delta);
+
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
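
The renamed recorded_ast_id_ plumbing enforces a strict record/consume
protocol: an id may be set only when none is pending, read only while one is
pending, and cleared afterwards. A minimal usage sketch (the call sites are
hypothetical; only the three accessors above are from this patch):

    // Sketch of the intended protocol around a type-recording call IC.
    assem->SetRecordedAstId(ast_id);       // asserts nothing is pending
    // ... emit the call; the reloc writer then consumes the id ...
    unsigned id = assem->RecordedAstId();  // asserts an id is pending
    assem->ClearRecordedAstId();           // back to kNoASTId
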
@@ -843,6 +887,11 @@
static bool IsBeq(Instr instr);
static bool IsBne(Instr instr);
+ static bool IsJump(Instr instr);
+ static bool IsJ(Instr instr);
+ static bool IsLui(Instr instr);
+ static bool IsOri(Instr instr);
+
static bool IsNop(Instr instr, unsigned int type);
static bool IsPop(Instr instr);
static bool IsPush(Instr instr);
@@ -864,6 +913,8 @@
static uint32_t GetSa(Instr instr);
static uint32_t GetSaField(Instr instr);
static uint32_t GetOpcodeField(Instr instr);
+ static uint32_t GetFunction(Instr instr);
+ static uint32_t GetFunctionField(Instr instr);
static uint32_t GetImmediate16(Instr instr);
static uint32_t GetLabelConst(Instr instr);
@@ -879,13 +930,13 @@
static bool IsAndImmediate(Instr instr);
- void CheckTrampolinePool(bool force_emit = false);
+ void CheckTrampolinePool();
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
- unsigned ast_id_for_reloc_info_;
+ unsigned recorded_ast_id_;
bool emit_debug_code() const { return emit_debug_code_; }
@@ -912,6 +963,7 @@
void StartBlockTrampolinePool() {
trampoline_pool_blocked_nesting_++;
}
+
void EndBlockTrampolinePool() {
trampoline_pool_blocked_nesting_--;
}
@@ -924,6 +976,25 @@
return internal_trampoline_exception_;
}
+ bool is_trampoline_emitted() const {
+ return trampoline_emitted_;
+ }
+
+ // Temporarily block automatic assembly buffer growth.
+ void StartBlockGrowBuffer() {
+ ASSERT(!block_buffer_growth_);
+ block_buffer_growth_ = true;
+ }
+
+ void EndBlockGrowBuffer() {
+ ASSERT(block_buffer_growth_);
+ block_buffer_growth_ = false;
+ }
+
+ bool is_buffer_growth_blocked() const {
+ return block_buffer_growth_;
+ }
+
private:
// Code buffer:
// The buffer into which code and relocation info are generated.
@@ -960,6 +1031,9 @@
// Keep track of the last emitted pool to guarantee a maximal distance.
int last_trampoline_pool_end_; // pc offset of the end of the last pool.
+ // Automatic growth of the assembly buffer may be blocked for some sequences.
+ bool block_buffer_growth_; // Block growth when true.
+
// Relocation information generation.
// Each relocation is encoded as a variable size value.
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
@@ -1040,7 +1114,6 @@
// Labels.
void print(Label* L);
void bind_to(Label* L, int pos);
- void link_to(Label* L, Label* appendix);
void next(Label* L);
// One trampoline consists of:
@@ -1053,13 +1126,17 @@
// label_count * kInstrSize.
class Trampoline {
public:
- Trampoline(int start, int slot_count, int label_count) {
+ Trampoline() {
+ start_ = 0;
+ next_slot_ = 0;
+ free_slot_count_ = 0;
+ end_ = 0;
+ }
+ Trampoline(int start, int slot_count) {
start_ = start;
next_slot_ = start;
free_slot_count_ = slot_count;
- next_label_ = start + slot_count * 2 * kInstrSize;
- free_label_count_ = label_count;
- end_ = next_label_ + (label_count - 1) * kInstrSize;
+ end_ = start + slot_count * kTrampolineSlotsSize;
}
int start() {
return start_;
@@ -1078,40 +1155,30 @@
} else {
trampoline_slot = next_slot_;
free_slot_count_--;
- next_slot_ += 2*kInstrSize;
+ next_slot_ += kTrampolineSlotsSize;
}
return trampoline_slot;
}
- int take_label() {
- int label_pos = next_label_;
- ASSERT(free_label_count_ > 0);
- free_label_count_--;
- next_label_ += kInstrSize;
- return label_pos;
- }
private:
int start_;
int end_;
int next_slot_;
int free_slot_count_;
- int next_label_;
- int free_label_count_;
};
- int32_t get_label_entry(int32_t pos, bool next_pool = true);
- int32_t get_trampoline_entry(int32_t pos, bool next_pool = true);
-
- static const int kSlotsPerTrampoline = 2304;
- static const int kLabelsPerTrampoline = 8;
- static const int kTrampolineInst =
- 2 * kSlotsPerTrampoline + kLabelsPerTrampoline;
- static const int kTrampolineSize = kTrampolineInst * kInstrSize;
+ int32_t get_trampoline_entry(int32_t pos);
+ int unbound_labels_count_;
+  // If the trampoline is emitted, the generated code is becoming large. As
+  // this is already a slow case which can possibly break our code generation
+  // in the extreme case, we use this information to switch to a different
+  // mode of branch instruction generation, where we use jump instructions
+  // rather than regular branch instructions.
+ bool trampoline_emitted_;
+ static const int kTrampolineSlotsSize = 4 * kInstrSize;
static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
- static const int kMaxDistBetweenPools =
- kMaxBranchOffset - 2 * kTrampolineSize;
static const int kInvalidSlotPos = -1;
- List<Trampoline> trampolines_;
+ Trampoline trampoline_;
bool internal_trampoline_exception_;
friend class RegExpMacroAssemblerMIPS;
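
With the separate label area gone, the new Trampoline bookkeeping is plain
pointer arithmetic over a fixed-size pool. A worked example with hypothetical
numbers, assuming kInstrSize == 4 so kTrampolineSlotsSize == 16 bytes:

    // Trampoline(/*start=*/100, /*slot_count=*/3) gives:
    //   start_ = next_slot_ = 100, free_slot_count_ = 3,
    //   end_   = 100 + 3 * 16 = 148.
    // Successive calls to the slot allocator above return 100, 116, 132;
    // a fourth call finds free_slot_count_ == 0 (the kInvalidSlotPos case).
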
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index e22259d..1555653 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -344,7 +344,7 @@
// Handle construction of an empty array of a certain size. Bail out if size
// is too large to actually allocate an elements array.
ASSERT(kSmiTag == 0);
- __ Branch(call_generic_code, ge, a2,
+ __ Branch(call_generic_code, Ugreater_equal, a2,
Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
// a0: argc
@@ -634,7 +634,7 @@
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
__ Addu(t9, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(Operand(t9));
+ __ Jump(t9);
// a0: number of arguments
// a1: called object
@@ -942,10 +942,11 @@
masm->isolate()->builtins()->HandleApiCallConstruct();
ParameterCount expected(0);
__ InvokeCode(code, expected, expected,
- RelocInfo::CODE_TARGET, CALL_FUNCTION);
+ RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
} else {
ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
// Pop the function from the stack.
@@ -973,9 +974,9 @@
__ Branch(&use_receiver, eq, t0, Operand(zero_reg));
// If the type of the result (stored in its map) is less than
- // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
__ GetObjectType(v0, a3, a3);
- __ Branch(&exit, greater_equal, a3, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
@@ -1074,11 +1075,11 @@
// Invoke the code and pass argc as a0.
__ mov(a0, a3);
if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall(),
- RelocInfo::CODE_TARGET);
+ __ Call(masm->isolate()->builtins()->JSConstructCall());
} else {
ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
__ LeaveInternalFrame();
@@ -1214,8 +1215,7 @@
__ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
// Do not transform the receiver for native (Compilerhints already in a3).
- __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kES5Native +
- kSmiTagSize)));
+ __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
// Compute the receiver in non-strict mode.
@@ -1233,10 +1233,9 @@
__ LoadRoot(a3, Heap::kNullValueRootIndex);
__ Branch(&use_global_receiver, eq, a2, Operand(a3));
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ GetObjectType(a2, a3, a3);
- __ Branch(&shift_arguments, ge, a3, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
__ bind(&convert_to_object);
__ EnterInternalFrame(); // In order to preserve argument count.
@@ -1339,7 +1338,8 @@
RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
ParameterCount expected(0);
- __ InvokeCode(a3, expected, expected, JUMP_FUNCTION);
+ __ InvokeCode(a3, expected, expected, JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
@@ -1401,8 +1401,7 @@
__ Branch(&push_receiver, ne, t0, Operand(zero_reg));
// Do not transform the receiver for native (Compilerhints already in a2).
- __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kES5Native +
- kSmiTagSize)));
+ __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ Branch(&push_receiver, ne, t0, Operand(zero_reg));
// Compute the receiver in non-strict mode.
@@ -1415,10 +1414,9 @@
// Check if the receiver is already a JavaScript object.
// a0: receiver
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ GetObjectType(a0, a1, a1);
- __ Branch(&push_receiver, ge, a1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
// Convert the receiver to a regular object.
// a0: receiver
@@ -1473,7 +1471,8 @@
ParameterCount actual(a0);
__ sra(a0, a0, kSmiTagSize);
__ lw(a1, MemOperand(fp, kFunctionOffset));
- __ InvokeFunction(a1, actual, CALL_FUNCTION);
+ __ InvokeFunction(a1, actual, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
// Tear down the internal frame and remove function, receiver and args.
__ LeaveInternalFrame();
@@ -1573,20 +1572,20 @@
// Adjust for return address and receiver.
__ Addu(a0, a0, Operand(2 * kPointerSize));
// Compute copy end address. Also adjust for return address.
- __ Addu(t1, fp, kPointerSize);
+ __ Addu(t3, fp, kPointerSize);
// Copy the arguments (including the receiver) to the new stack frame.
// a0: copy start address
// a1: function
// a2: expected number of arguments
// a3: code entry to call
- // t1: copy end address
+ // t3: copy end address
Label copy;
__ bind(&copy);
__ lw(t0, MemOperand(a0)); // Adjusted above for return addr and receiver.
__ push(t0);
__ Subu(a0, a0, kPointerSize);
- __ Branch(&copy, ne, a0, Operand(t1));
+ __ Branch(&copy, ne, a0, Operand(t3));
// Fill the remaining expected arguments with undefined.
// a1: function
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index c999994..d89d3e5 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -158,7 +158,7 @@
__ lw(a3, MemOperand(sp, 0));
// Setup the object header.
- __ LoadRoot(a2, Heap::kContextMapRootIndex);
+ __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
__ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
__ li(a2, Operand(Smi::FromInt(length)));
__ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
@@ -166,11 +166,10 @@
// Setup the fixed slots.
__ li(a1, Operand(Smi::FromInt(0)));
__ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ sw(v0, MemOperand(v0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- // Copy the global object from the surrounding context.
+ // Copy the global object from the previous context.
__ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -187,7 +186,7 @@
// Need to collect. Call into runtime system.
__ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+ __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}
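
The stub now builds a function context whose PREVIOUS slot links back to the
current context in cp, instead of the old FCONTEXT self-reference. In
summary, the fixed slots written above (a sketch of just the visible stores):

    // Fixed slots of the new function context (v0):
    //   CLOSURE_INDEX   <- a3   (the function, taken from the stack)
    //   PREVIOUS_INDEX  <- cp   (link to the previous context)
    //   EXTENSION_INDEX <- Smi 0
    //   GLOBAL_INDEX    <- copied from the previous context
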
@@ -306,12 +305,6 @@
}
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "ConvertToDoubleStub"; }
-
-#ifdef DEBUG
- void Print() { PrintF("ConvertToDoubleStub\n"); }
-#endif
};
@@ -397,11 +390,11 @@
__ mov(scratch1, a0);
ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
__ push(ra);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub1.GetCode());
// Write Smi from a1 to a1 and a0 in double format.
__ mov(scratch1, a1);
ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub2.GetCode());
__ pop(ra);
}
}
@@ -483,7 +476,7 @@
__ mov(scratch1, object);
ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
__ push(ra);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub.GetCode());
__ pop(ra);
}
@@ -672,9 +665,8 @@
// Restore FCSR.
__ ctc1(scratch1, FCSR);
- // Check for inexact conversion.
- __ srl(scratch2, scratch2, kFCSRFlagShift);
- __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit));
+ // Check for inexact conversion or exception.
+ __ And(scratch2, scratch2, kFCSRFlagMask);
// Jump to not_int32 if the operation did not succeed.
__ Branch(not_int32, ne, scratch2, Operand(zero_reg));
@@ -757,9 +749,8 @@
// Restore FCSR.
__ ctc1(scratch1, FCSR);
- // Check for inexact conversion.
- __ srl(scratch2, scratch2, kFCSRFlagShift);
- __ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit));
+ // Check for inexact conversion or exception.
+ __ And(scratch2, scratch2, kFCSRFlagMask);
// Jump to not_int32 if the operation did not succeed.
__ Branch(not_int32, ne, scratch2, Operand(zero_reg));
@@ -985,13 +976,13 @@
// Smis. If it's not a heap number, then return equal.
if (cc == less || cc == greater) {
__ GetObjectType(a0, t4, t4);
- __ Branch(slow, greater, t4, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
} else {
__ GetObjectType(a0, t4, t4);
__ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
if (cc != eq) {
- __ Branch(slow, greater, t4, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -1110,7 +1101,7 @@
__ mov(t6, rhs);
ConvertToDoubleStub stub1(a1, a0, t6, t5);
__ push(ra);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub1.GetCode());
__ pop(ra);
}
@@ -1145,7 +1136,7 @@
__ mov(t6, lhs);
ConvertToDoubleStub stub2(a3, a2, t6, t5);
__ push(ra);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub2.GetCode());
__ pop(ra);
// Load rhs to a double in a1, a0.
if (rhs.is(a0)) {
@@ -1309,15 +1300,15 @@
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs) {
- // If either operand is a JSObject or an oddball value, then they are
+ // If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into a2 and compare it with
- // FIRST_JS_OBJECT_TYPE.
+ // FIRST_SPEC_OBJECT_TYPE.
__ GetObjectType(lhs, a2, a2);
- __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
// Return non-zero.
Label return_not_equal;
@@ -1330,7 +1321,7 @@
__ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
__ GetObjectType(rhs, a3, a3);
- __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
// Check for oddballs: true, false, null, undefined.
__ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
@@ -1406,9 +1397,9 @@
__ Ret();
__ bind(&object_test);
- __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
__ GetObjectType(rhs, a2, a3);
- __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
@@ -1720,7 +1711,6 @@
}
-// This stub does not handle the inlined cases (Smis, Booleans, undefined).
// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub uses FPU instructions.
@@ -1784,7 +1774,7 @@
// "tos_" is a register and contains a non-zero value.
// Hence we implicitly return true if the greater-than-or-equal
// condition is satisfied.
- __ Ret(gt, scratch0, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
// Check for string.
__ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
@@ -1792,7 +1782,7 @@
// "tos_" is a register and contains a non-zero value.
// Hence we implicitly return true if the greater-than-or-equal
// condition is satisfied.
- __ Ret(gt, scratch0, Operand(FIRST_NONSTRING_TYPE));
+ __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
// String value => false iff empty, i.e., length is zero.
__ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
@@ -1807,31 +1797,17 @@
}
-Handle<Code> GetUnaryOpStub(int key, UnaryOpIC::TypeInfo type_info) {
- UnaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-const char* UnaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
+void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
switch (mode_) {
case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
}
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
- return name_;
+ stream->Add("UnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ UnaryOpIC::GetName(operand_type_));
}
@@ -1856,19 +1832,13 @@
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
// Argument is in a0 and v0 at this point, so we can overwrite a0.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ li(a2, Operand(Smi::FromInt(MinorKey())));
- __ li(a1, Operand(Smi::FromInt(op_)));
+ __ li(a2, Operand(Smi::FromInt(op_)));
+ __ li(a1, Operand(Smi::FromInt(mode_)));
__ li(a0, Operand(Smi::FromInt(operand_type_)));
-
__ Push(v0, a2, a1, a0);
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
- masm->isolate()),
- 4,
- 1);
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}
@@ -1966,6 +1936,7 @@
GenerateTypeTransition(masm);
}
+
void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
Label* slow) {
EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
@@ -2002,6 +1973,8 @@
void UnaryOpStub::GenerateHeapNumberCodeBitNot(
MacroAssembler* masm,
Label* slow) {
+ Label impossible;
+
EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
// Convert the heap number in a0 to an untagged integer in a1.
__ ConvertToInt32(a0, a1, a2, a3, f0, slow);
@@ -2020,17 +1993,28 @@
__ bind(&try_float);
if (mode_ == UNARY_NO_OVERWRITE) {
Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(v0, a2, a3, t2, &slow_allocate_heapnumber);
+ // Allocate a new heap number without zapping v0, which we need if it fails.
+ __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
__ EnterInternalFrame();
- __ push(a1);
+ __ push(v0); // Push the heap number, not the untagged int32.
__ CallRuntime(Runtime::kNumberAlloc, 0);
- __ pop(a1);
+ __ mov(a2, v0); // Move the new heap number into a2.
+ // Get the heap number into v0, now that the new heap number is in a2.
+ __ pop(v0);
__ LeaveInternalFrame();
+ // Convert the heap number in v0 to an untagged integer in a1.
+  // This can't take the slow case because it's the same number we already
+  // converted once before.
+ __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
+ // Negate the result.
+ __ Xor(a1, a1, -1);
+
__ bind(&heapnumber_allocated);
+ __ mov(v0, a2); // Move newly allocated heap number to v0.
}
if (CpuFeatures::IsSupported(FPU)) {
@@ -2046,6 +2030,11 @@
WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
+
+ __ bind(&impossible);
+ if (FLAG_debug_code) {
+ __ stop("Incorrect assumption in bit-not stub");
+ }
}
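
The rewritten slow path keeps the original heap number in v0 across the
Runtime::kNumberAlloc call and then redoes the int32 conversion, rather than
trying to preserve the untagged value in a1 across the runtime call. The
re-conversion cannot fail (the same number was already converted once), which
is why the &impossible label is only guarded by a debug stop. In effect the
stub computes, for a heap-number input x:

    // Sketch of the bit-not computation (ConvertToInt32 is the helper
    // used above; the xor with -1 is the Xor(a1, a1, -1) instruction):
    //   int32_t n = ConvertToInt32(x);
    //   result = ~n;   // boxed in a new heap number if it is not a smi
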
@@ -2101,14 +2090,6 @@
}
-Handle<Code> GetBinaryOpStub(int key,
- BinaryOpIC::TypeInfo type_info,
- BinaryOpIC::TypeInfo result_type_info) {
- BinaryOpStub stub(key, type_info, result_type_info);
- return stub.GetCode();
-}
-
-
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
@@ -2165,12 +2146,7 @@
}
-const char* BinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
+void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
@@ -2179,13 +2155,10 @@
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
- return name_;
+ stream->Add("BinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(operands_type_));
}
@@ -2687,37 +2660,36 @@
case Token::MUL:
case Token::DIV:
case Token::MOD: {
- // Load both operands and check that they are 32-bit integer.
- // Jump to type transition if they are not. The registers a0 and a1 (right
- // and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(FPU) &&
- op_ != Token::MOD ?
- FloatingPointHelper::kFPURegisters :
- FloatingPointHelper::kCoreRegisters;
+ // Load both operands and check that they are 32-bit integer.
+ // Jump to type transition if they are not. The registers a0 and a1 (right
+ // and left) are preserved for the runtime call.
+ FloatingPointHelper::Destination destination =
+ (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
+ ? FloatingPointHelper::kFPURegisters
+ : FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- right,
- destination,
- f14,
- a2,
- a3,
- heap_number_map,
- scratch1,
- scratch2,
- f2,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- left,
- destination,
- f12,
- t0,
- t1,
- heap_number_map,
- scratch1,
- scratch2,
- f2,
- &transition);
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ right,
+ destination,
+ f14,
+ a2,
+ a3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ f2,
+ &transition);
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ left,
+ destination,
+ f12,
+ t0,
+ t1,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ f2,
+ &transition);
if (destination == FloatingPointHelper::kFPURegisters) {
CpuFeatures::Scope scope(FPU);
@@ -2759,8 +2731,7 @@
// Restore FCSR.
__ ctc1(scratch1, FCSR);
- // Check for inexact conversion.
- __ srl(scratch2, scratch2, kFCSRFlagShift);
+ // Check for inexact conversion or exception.
__ And(scratch2, scratch2, kFCSRFlagMask);
if (result_type_ <= BinaryOpIC::INT32) {
@@ -2788,9 +2759,11 @@
// DIV just falls through to allocating a heap number.
}
- if (result_type_ >= (op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
- : BinaryOpIC::INT32) {
- __ bind(&return_heap_number);
+ __ bind(&return_heap_number);
+ // Return a heap number, or fall through to type transition or runtime
+ // call if we can't.
+ if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
+ : BinaryOpIC::INT32)) {
// We are using FPU registers so s0 is available.
heap_number_result = s0;
GenerateHeapResultAllocation(masm,
@@ -2969,7 +2942,11 @@
UNREACHABLE();
}
- if (transition.is_linked()) {
+ // We never expect DIV to yield an integer result, so we always generate
+ // type transition code for DIV operations expecting an integer result: the
+ // code will fall through to this type transition.
+ if (transition.is_linked() ||
+ ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
__ bind(&transition);
GenerateTypeTransition(masm);
}
@@ -3541,15 +3518,10 @@
__ li(a2, Operand(ExternalReference::isolate_address()));
- // From arm version of this function:
- // TODO(1242173): To let the GC traverse the return address of the exit
- // frames, we need to know where the return address is. Right now,
- // we push it on the stack to be able to find it again, but we never
- // restore from it in case of changes, which makes it impossible to
- // support moving the C entry code stub. This should be fixed, but currently
- // this is OK because the CEntryStub gets generated so early in the V8 boot
- // sequence that it is not moving ever.
-
+ // To let the GC traverse the return address of the exit frames, we need to
+ // know where the return address is. The CEntryStub is unmovable, so
+ // we can store the address on the stack to be able to find it again and
+ // we never have to restore it, because it will not change.
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
// This branch-and-link sequence is needed to find the current PC on mips,
// saved to the ra register.
@@ -3755,24 +3727,22 @@
// 4 args slots
// args
- #ifdef ENABLE_LOGGING_AND_PROFILING
- // If this is the outermost JS call, set js_entry_sp value.
- Label non_outermost_js;
- ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
- masm->isolate());
- __ li(t1, Operand(ExternalReference(js_entry_sp)));
- __ lw(t2, MemOperand(t1));
- __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
- __ sw(fp, MemOperand(t1));
- __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- Label cont;
- __ b(&cont);
- __ nop(); // Branch delay slot nop.
- __ bind(&non_outermost_js);
- __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
- __ bind(&cont);
- __ push(t0);
- #endif
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
+ masm->isolate());
+ __ li(t1, Operand(ExternalReference(js_entry_sp)));
+ __ lw(t2, MemOperand(t1));
+ __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
+ __ sw(fp, MemOperand(t1));
+ __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ Label cont;
+ __ b(&cont);
+ __ nop(); // Branch delay slot nop.
+ __ bind(&non_outermost_js);
+ __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+ __ bind(&cont);
+ __ push(t0);
// Call a faked try-block that does the invoke.
__ bal(&invoke); // bal exposes branch delay slot.
@@ -3841,16 +3811,14 @@
__ PopTryHandler();
__ bind(&exit); // v0 holds result
- #ifdef ENABLE_LOGGING_AND_PROFILING
- // Check if the current stack frame is marked as the outermost JS frame.
- Label non_outermost_js_2;
- __ pop(t1);
- __ Branch(&non_outermost_js_2, ne, t1,
- Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
- __ li(t1, Operand(ExternalReference(js_entry_sp)));
- __ sw(zero_reg, MemOperand(t1));
- __ bind(&non_outermost_js_2);
- #endif
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ pop(t1);
+ __ Branch(&non_outermost_js_2, ne, t1,
+ Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ li(t1, Operand(ExternalReference(js_entry_sp)));
+ __ sw(zero_reg, MemOperand(t1));
+ __ bind(&non_outermost_js_2);
// Restore the top frame descriptors from the stack.
__ pop(t1);
@@ -4074,11 +4042,252 @@
}
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ Branch(&runtime, ne,
+ a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ // Patch the arguments.length and the parameters pointer in the current frame.
+ __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sw(a2, MemOperand(sp, 0 * kPointerSize));
+ __ sll(t3, a2, 1);
+ __ Addu(a3, a3, Operand(t3));
+ __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
+ __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ // Stack layout:
+ // sp[0] : number of parameters (tagged)
+ // sp[4] : address of receiver argument
+ // sp[8] : function
+ // Registers used over whole function:
+ // t2 : allocated object (tagged)
+ // t5 : mapped parameter count (tagged)
+
+ __ lw(a1, MemOperand(sp, 0 * kPointerSize));
+ // a1 = parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame, eq, a2,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // No adaptor, parameter count = argument count.
+ __ mov(a2, a1);
+ __ b(&try_allocate);
+ __ nop(); // Branch delay slot nop.
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sll(t6, a2, 1);
+ __ Addu(a3, a3, Operand(t6));
+ __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+
+ // a1 = parameter count (tagged)
+ // a2 = argument count (tagged)
+ // Compute the mapped parameter count = min(a1, a2) in a1.
+ Label skip_min;
+ __ Branch(&skip_min, lt, a1, Operand(a2));
+ __ mov(a1, a2);
+ __ bind(&skip_min);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ Label param_map_size;
+ ASSERT_EQ(0, Smi::FromInt(0));
+  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
+ __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
+ __ sll(t5, a1, 1);
+ __ addiu(t5, t5, kParameterMapHeaderSize);
+  __ bind(&param_map_size);
+
+ // 2. Backing store.
+ __ sll(t6, a2, 1);
+ __ Addu(t5, t5, Operand(t6));
+ __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
+
+ // v0 = address of new object(s) (tagged)
+ // a2 = argument count (tagged)
+ // Get the arguments boilerplate from the current (global) context into t0.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
+
+ __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
+ Label skip2_ne, skip2_eq;
+ __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
+ __ lw(t0, MemOperand(t0, kNormalOffset));
+ __ bind(&skip2_ne);
+
+ __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
+ __ lw(t0, MemOperand(t0, kAliasedOffset));
+ __ bind(&skip2_eq);
+
+ // v0 = address of new object (tagged)
+ // a1 = mapped parameter count (tagged)
+ // a2 = argument count (tagged)
+ // t0 = address of boilerplate object (tagged)
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ lw(a3, FieldMemOperand(t0, i));
+ __ sw(a3, FieldMemOperand(v0, i));
+ }
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ lw(a3, MemOperand(sp, 2 * kPointerSize));
+ const int kCalleeOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ sw(a2, FieldMemOperand(v0, kLengthOffset));
+
+ // Setup the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, t0 will point there, otherwise
+ // it will point to the backing store.
+ __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ // v0 = address of new object (tagged)
+ // a1 = mapped parameter count (tagged)
+ // a2 = argument count (tagged)
+ // t0 = address of parameter map or backing store (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ Label skip3;
+ __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
+ // Move backing store address to a3, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mov(a3, t0);
+ __ bind(&skip3);
+
+ __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
+
+ __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
+ __ Addu(t2, a1, Operand(Smi::FromInt(2)));
+ __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ sll(t6, a1, 1);
+ __ Addu(t2, t0, Operand(t6));
+ __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
+ __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ mov(t2, a1);
+ __ lw(t5, MemOperand(sp, 0 * kPointerSize));
+ __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ Subu(t5, t5, Operand(a1));
+ __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
+ __ sll(t6, t2, 1);
+ __ Addu(a3, t0, Operand(t6));
+ __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
+
+ // t2 = loop variable (tagged)
+ // a1 = mapping index (tagged)
+ // a3 = address of backing store (tagged)
+ // t0 = address of parameter map (tagged)
+ // t1 = temporary scratch (a.o., for address calculation)
+ // t3 = the hole value
+  __ jmp(&parameters_test);
+
+  __ bind(&parameters_loop);
+ __ Subu(t2, t2, Operand(Smi::FromInt(1)));
+ __ sll(t1, t2, 1);
+ __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ Addu(t6, t0, t1);
+ __ sw(t5, MemOperand(t6));
+ __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ Addu(t6, a3, t1);
+ __ sw(t3, MemOperand(t6));
+ __ Addu(t5, t5, Operand(Smi::FromInt(1)));
+  __ bind(&parameters_test);
+  __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
+
+ __ bind(&skip_parameter_map);
+ // a2 = argument count (tagged)
+ // a3 = address of backing store (tagged)
+ // t1 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+ __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
+ __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+
+ Label arguments_loop, arguments_test;
+ __ mov(t5, a1);
+ __ lw(t0, MemOperand(sp, 1 * kPointerSize));
+ __ sll(t6, t5, 1);
+ __ Subu(t0, t0, Operand(t6));
+ __ jmp(&arguments_test);
+
+ __ bind(&arguments_loop);
+ __ Subu(t0, t0, Operand(kPointerSize));
+ __ lw(t2, MemOperand(t0, 0));
+ __ sll(t6, t5, 1);
+ __ Addu(t1, a3, Operand(t6));
+ __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ Addu(t5, t5, Operand(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ Branch(&arguments_loop, lt, t5, Operand(a2));
+
+ // Return and remove the on-stack parameters.
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+  // a2 = argument count (tagged)
+ __ bind(&runtime);
+ __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
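
GenerateNewNonStrictFast allocates the arguments object, the (optional)
parameter map, and the backing store in a single AllocateInNewSpace call; the
byte count built up in t5 above is their combined size. A sketch of the same
computation in C++ (untagged counts; constants as used in the stub; not part
of the patch):

    // Sketch: size of the one-shot arguments allocation.
    int ArgumentsAllocationSize(int mapped_count, int arg_count) {
      const int kParameterMapHeaderSize =
          FixedArray::kHeaderSize + 2 * kPointerSize;  // context + store
      int size = (mapped_count == 0)  // no map needed when nothing is mapped
          ? 0
          : mapped_count * kPointerSize + kParameterMapHeaderSize;
      size += arg_count * kPointerSize + FixedArray::kHeaderSize;
      return size + Heap::kArgumentsObjectSize;
    }
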
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[4] : receiver displacement
+ // sp[8] : function
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -4111,40 +4320,31 @@
__ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ Addu(a1, a1, Operand(GetArgumentsObjectSize() / kPointerSize));
+ __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
// Do the allocation of both objects in one go.
- __ AllocateInNewSpace(
- a1,
- v0,
- a2,
- a3,
- &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+ __ AllocateInNewSpace(a1,
+ v0,
+ a2,
+ a3,
+ &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT |
+ SIZE_IN_WORDS));
// Get the arguments boilerplate from the current (global) context.
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
- __ lw(t0, MemOperand(t0,
- Context::SlotOffset(GetArgumentsBoilerplateIndex())));
+ __ lw(t0, MemOperand(t0, Context::SlotOffset(
+ Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
__ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
- if (type_ == NEW_NON_STRICT) {
- // Setup the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ lw(a3, MemOperand(sp, 2 * kPointerSize));
- const int kCalleeOffset = JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize;
- __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
- }
-
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ lw(a1, MemOperand(sp, 0 * kPointerSize));
__ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ Heap::kArgumentsLengthIndex * kPointerSize));
Label done;
__ Branch(&done, eq, a1, Operand(zero_reg));
@@ -4154,12 +4354,13 @@
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ Addu(t0, v0, Operand(GetArgumentsObjectSize()));
+ __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
__ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
__ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
__ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
- __ srl(a1, a1, kSmiTagSize); // Untag the length for the loop.
+ // Untag the length for the loop.
+ __ srl(a1, a1, kSmiTagSize);
// Copy the fixed array slots.
Label loop;
@@ -4183,7 +4384,7 @@
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
@@ -4365,10 +4566,9 @@
__ movz(t9, t0, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
// Check that the irregexp code has been generated for the actual string
- // encoding. If it has, the field contains a code object otherwise it
- // contains the hole.
- __ GetObjectType(t9, a0, a0);
- __ Branch(&runtime, ne, a0, Operand(CODE_TYPE));
+  // encoding. If it has, the field contains a code object; otherwise it
+  // contains a smi (code flushing support).
+ __ JumpIfSmi(t9, &runtime);
// a3: encoding of subject string (1 if ASCII, 0 if two_byte);
// t9: code
@@ -4692,7 +4892,11 @@
Label call_as_function;
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(&call_as_function, eq, t0, Operand(at));
- __ InvokeFunction(a1, actual, JUMP_FUNCTION);
+ __ InvokeFunction(a1,
+ actual,
+ JUMP_FUNCTION,
+ NullCallWrapper(),
+ CALL_AS_METHOD);
__ bind(&call_as_function);
}
__ InvokeFunction(a1,
@@ -4709,6 +4913,7 @@
__ li(a0, Operand(argc_)); // Setup the number of arguments.
__ mov(a2, zero_reg);
__ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+ __ SetCallKind(t1, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
@@ -4716,16 +4921,9 @@
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
+void CompareStub::PrintName(StringStream* stream) {
ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
(lhs_.is(a1) && rhs_.is(a0)));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
const char* cc_name;
switch (cc_) {
case lt: cc_name = "LT"; break;
@@ -4736,40 +4934,14 @@
case ne: cc_name = "NE"; break;
default: cc_name = "UnknownCondition"; break;
}
-
- const char* lhs_name = lhs_.is(a0) ? "_a0" : "_a1";
- const char* rhs_name = rhs_.is(a0) ? "_a0" : "_a1";
-
- const char* strict_name = "";
- if (strict_ && (cc_ == eq || cc_ == ne)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- const char* include_smi_compare_name = "";
- if (!include_smi_compare_) {
- include_smi_compare_name = "_NO_SMI";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s%s%s",
- cc_name,
- lhs_name,
- rhs_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name,
- include_smi_compare_name);
- return name_;
+ bool is_equality = cc_ == eq || cc_ == ne;
+ stream->Add("CompareStub_%s", cc_name);
+ stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
+ stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
+ if (strict_ && is_equality) stream->Add("_STRICT");
+ if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
+ if (!include_number_compare_) stream->Add("_NO_NUMBER");
+ if (!include_smi_compare_) stream->Add("_NO_SMI");
}
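
PrintName now streams the stub name piecewise instead of assembling it into a
heap-allocated buffer. For example, a hypothetical strict equality stub with
lhs in a1, rhs in a0 and smi comparison disabled would print as:

    // cc_ == eq, lhs_ == a1, rhs_ == a0, strict_, !include_smi_compare_
    //   => "CompareStub_EQ_a1_a0_STRICT_NO_SMI"
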
@@ -6352,6 +6524,7 @@
__ Jump(a2);
}
+
void DirectCEntryStub::Generate(MacroAssembler* masm) {
// No need to pop or drop anything, LeaveExitFrame will restore the old
// stack, thus dropping the allocated space for the return value.
@@ -6376,6 +6549,7 @@
this->GenerateCall(masm, t9);
}
+
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
__ Move(t9, target);
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index 356aa97..aa224bc 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -59,35 +59,14 @@
};
-class ToBooleanStub: public CodeStub {
- public:
- explicit ToBooleanStub(Register tos) : tos_(tos) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register tos_;
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return tos_.code(); }
-};
-
-
class UnaryOpStub: public CodeStub {
public:
- UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
+ UnaryOpStub(Token::Value op,
+ UnaryOverwriteMode mode,
+ UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
- operand_type_(UnaryOpIC::UNINITIALIZED),
- name_(NULL) {
- }
-
- UnaryOpStub(
- int key,
- UnaryOpIC::TypeInfo operand_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- operand_type_(operand_type),
- name_(NULL) {
+ operand_type_(operand_type) {
}
private:
@@ -97,20 +76,7 @@
// Operand type information determined at runtime.
UnaryOpIC::TypeInfo operand_type_;
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("UnaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- UnaryOpIC::GetName(operand_type_));
- }
-#endif
+ virtual void PrintName(StringStream* stream);
class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
class OpBits: public BitField<Token::Value, 1, 7> {};
@@ -164,8 +130,7 @@
: op_(op),
mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED),
- name_(NULL) {
+ result_type_(BinaryOpIC::UNINITIALIZED) {
use_fpu_ = CpuFeatures::IsSupported(FPU);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -178,8 +143,7 @@
mode_(ModeBits::decode(key)),
use_fpu_(FPUBits::decode(key)),
operands_type_(operands_type),
- result_type_(result_type),
- name_(NULL) { }
+ result_type_(result_type) { }
private:
enum SmiCodeGenerateHeapNumberResults {
@@ -195,20 +159,7 @@
BinaryOpIC::TypeInfo operands_type_;
BinaryOpIC::TypeInfo result_type_;
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("BinaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- BinaryOpIC::GetName(operands_type_));
- }
-#endif
+ virtual void PrintName(StringStream* stream);
// Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
@@ -395,12 +346,6 @@
}
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
-
-#ifdef DEBUG
- void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
-#endif
};
@@ -427,14 +372,6 @@
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("NumberToStringStub\n");
- }
-#endif
};
@@ -452,8 +389,6 @@
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
-
- const char* GetName() { return "RegExpCEntryStub"; }
};
// Trampoline stub to call into native code. To call safely into native code
@@ -474,13 +409,10 @@
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
-
- const char* GetName() { return "DirectCEntryStub"; }
};
class FloatingPointHelper : public AllStatic {
public:
-
enum Destination {
kFPURegisters,
kCoreRegisters
@@ -658,13 +590,6 @@
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
-
-#ifdef DEBUG
- void Print() {
- PrintF("StringDictionaryLookupStub\n");
- }
-#endif
-
Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index fecd321..a8de9c8 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -60,9 +60,7 @@
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
-#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
-#endif
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index 2567330..6bf2570 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -93,13 +93,27 @@
static const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
// FCSR constants.
-static const uint32_t kFCSRFlagMask = (1 << 6) - 1;
-static const uint32_t kFCSRFlagShift = 2;
-static const uint32_t kFCSRInexactFlagBit = 1 << 0;
-static const uint32_t kFCSRUnderflowFlagBit = 1 << 1;
-static const uint32_t kFCSROverflowFlagBit = 1 << 2;
-static const uint32_t kFCSRDivideByZeroFlagBit = 1 << 3;
-static const uint32_t kFCSRInvalidOpFlagBit = 1 << 4;
+static const uint32_t kFCSRInexactFlagBit = 2;
+static const uint32_t kFCSRUnderflowFlagBit = 3;
+static const uint32_t kFCSROverflowFlagBit = 4;
+static const uint32_t kFCSRDivideByZeroFlagBit = 5;
+static const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+static const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+static const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+static const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+static const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+static const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+static const uint32_t kFCSRFlagMask =
+ kFCSRInexactFlagMask |
+ kFCSRUnderflowFlagMask |
+ kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask |
+ kFCSRInvalidOpFlagMask;
+
+static const uint32_t kFCSRExceptionFlagMask =
+ kFCSRFlagMask ^ kFCSRInexactFlagMask;
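
The rework replaces the old shifted-mask scheme with absolute FCSR bit
positions and masks derived from them, which is why the srl-by-kFCSRFlagShift
step disappears from the conversion checks in code-stubs-mips.cc above. A
sketch of the tests these constants support:

    // Sketch: fcsr is the value read via cfc1. Any set bit in
    // kFCSRFlagMask means the conversion was inexact or raised an
    // exception; kFCSRExceptionFlagMask excludes the inexact bit.
    bool ConversionSucceeded(uint32_t fcsr) {
      return (fcsr & kFCSRFlagMask) == 0;
    }
    bool RaisedException(uint32_t fcsr) {
      return (fcsr & kFCSRExceptionFlagMask) != 0;
    }
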
// Helper functions for converting between register numbers and names.
class Registers {
@@ -119,7 +133,6 @@
static const int32_t kMinValue = 0x80000000;
private:
-
static const char* names_[kNumSimuRegisters];
static const RegisterAlias aliases_[];
};
@@ -139,7 +152,6 @@
};
private:
-
static const char* names_[kNumFPURegisters];
static const RegisterAlias aliases_[];
};
@@ -158,6 +170,18 @@
call_rt_redirected = 0xfffff
};
+// On MIPS Simulator breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+static const uint32_t kMaxWatchpointCode = 31;
+static const uint32_t kMaxStopCode = 127;
+STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
+
+
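
These two constants partition the break-code space so that break_(), stop()
and simulator watchpoints can share one underlying break instruction. A
sketch of the classification the comment above describes (the handler name is
hypothetical):

    // Simulator-side dispatch on a break code (sketch):
    void HandleBreak(uint32_t code) {
      if (code <= kMaxWatchpointCode) {
        // Watchpoint: print registers and continue execution.
      } else if (code <= kMaxStopCode) {
        // stop(): a message was attached by Assembler::stop().
      } else {
        // Plain break: drop into the debugger.
      }
    }
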
// ----- Fields offset and length.
static const int kOpcodeShift = 26;
static const int kOpcodeBits = 6;
@@ -177,6 +201,8 @@
static const int kImm16Bits = 16;
static const int kImm26Shift = 0;
static const int kImm26Bits = 26;
+static const int kImm28Shift = 0;
+static const int kImm28Bits = 28;
static const int kFsShift = 11;
static const int kFsBits = 5;
@@ -196,6 +222,7 @@
static const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
static const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
static const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
+static const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
static const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
static const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
static const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
@@ -736,4 +763,3 @@
} } // namespace v8::internal
#endif // #ifndef V8_MIPS_CONSTANTS_H_
-
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 4b69859..9a19aba 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -78,6 +78,11 @@
}
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ UNIMPLEMENTED();
+}
+
+
void Deoptimizer::EntryGenerator::Generate() {
UNIMPLEMENTED();
}
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 9c93c63..7834273 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -101,16 +101,18 @@
}
void EmitPatchInfo() {
- int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
- Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
- __ andi(at, reg, delta_to_patch_site % kImm16Mask);
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+ Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
+ __ andi(at, reg, delta_to_patch_site % kImm16Mask);
#ifdef DEBUG
- info_emitted_ = true;
+ info_emitted_ = true;
#endif
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
}
- bool is_bound() const { return patch_site_.is_bound(); }
-
private:
MacroAssembler* masm_;
Label patch_site_;
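
EmitPatchInfo packs the distance to the patch site into a single andi
instruction: the register code encodes delta / kImm16Mask and the 16-bit
immediate encodes delta % kImm16Mask, with a plain nop signalling that no
inlined code was emitted. A worked example with a hypothetical delta:

    // kImm16Mask == 0xffff (65535); delta_to_patch_site == 70000:
    //   register code = 70000 / 0xffff = 1
    //   immediate     = 70000 % 0xffff = 4465
    // The patcher reconstructs delta = 1 * 0xffff + 4465 = 70000.
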
@@ -137,6 +139,7 @@
void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
+ scope_ = info->scope();
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -147,20 +150,20 @@
}
#endif
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). t1 is zero for method calls and non-zero for function
- // calls.
- if (info->is_strict_mode()) {
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). t1 is zero for method calls and non-zero for
+ // function calls.
+ if (info->is_strict_mode() || info->is_native()) {
Label ok;
__ Branch(&ok, eq, t1, Operand(zero_reg));
- int receiver_offset = scope()->num_parameters() * kPointerSize;
+ int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ sw(a2, MemOperand(sp, receiver_offset));
__ bind(&ok);
}
- int locals_count = scope()->num_stack_slots();
+ int locals_count = info->scope()->num_stack_slots();
__ Push(ra, fp, cp, a1);
if (locals_count > 0) {
@@ -180,7 +183,7 @@
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in a1.
@@ -189,14 +192,14 @@
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in both v0 and cp. It replaces the context
// passed to us. It's saved in the stack and kept live in cp.
__ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
+ int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
@@ -228,27 +231,28 @@
__ mov(a3, a1);
}
// Receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
__ Addu(a2, fp,
Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ li(a1, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ li(a1, Operand(Smi::FromInt(num_parameters)));
__ Push(a3, a2, a1);
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
    // The stub will rewrite the receiver and parameter count if the previous
    // stack frame was an arguments adaptor frame.
- ArgumentsAccessStub stub(
- is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT);
+ ArgumentsAccessStub::Type type;
+ if (is_strict_mode()) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ }
+ ArgumentsAccessStub stub(type);
__ CallStub(&stub);
- Variable* arguments_shadow = scope()->arguments_shadow();
- if (arguments_shadow != NULL) {
- // Duplicate the value; move-to-slot operation might clobber registers.
- __ mov(a3, v0);
- Move(arguments_shadow->AsSlot(), a3, a1, a2);
- }
Move(arguments->AsSlot(), v0, a1, a2);
}
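
The stub selection above reduces to a small decision table; a minimal sketch, with simplified names standing in for the V8 types: strict functions never alias their arguments, duplicate parameter names force the slow aliased path, and everything else can take the fast path.

enum class ArgumentsStubType { NEW_STRICT, NEW_NON_STRICT_SLOW, NEW_NON_STRICT_FAST };

ArgumentsStubType ChooseArgumentsStub(bool is_strict_mode,
                                      bool has_duplicate_parameters) {
  if (is_strict_mode) return ArgumentsStubType::NEW_STRICT;
  if (has_duplicate_parameters) return ArgumentsStubType::NEW_NON_STRICT_SLOW;
  return ArgumentsStubType::NEW_NON_STRICT_FAST;
}
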
@@ -348,7 +352,7 @@
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    // Here we use masm_-> instead of the __ macro to prevent the code coverage
    // tool from instrumenting, as we rely on the code size here.
- int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
+ int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
masm_->mov(sp, fp);
@@ -386,7 +390,7 @@
// For simplicity we always test the accumulator register.
codegen()->Move(result_register(), slot);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -420,7 +424,7 @@
if (true_label_ != fall_through_) __ Branch(true_label_);
} else {
__ LoadRoot(result_register(), index);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -467,7 +471,7 @@
} else {
// For simplicity we always test the accumulator register.
__ li(result_register(), Operand(lit));
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -503,7 +507,7 @@
__ Drop(count);
__ Move(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -581,7 +585,8 @@
}
-void FullCodeGenerator::DoTest(Label* if_true,
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
Label* if_false,
Label* fall_through) {
if (CpuFeatures::IsSupported(FPU)) {
@@ -715,10 +720,14 @@
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check that we're not inside a 'with'.
- __ lw(a1, ContextOperand(cp, Context::FCONTEXT_INDEX));
- __ Check(eq, "Unexpected declaration in current context.",
- a1, Operand(cp));
+ // Check that we're not inside a with or catch context.
+ __ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ LoadRoot(t0, Heap::kWithContextMapRootIndex);
+ __ Check(ne, "Declaration in with context.",
+ a1, Operand(t0));
+ __ LoadRoot(t0, Heap::kCatchContextMapRootIndex);
+ __ Check(ne, "Declaration in catch context.",
+ a1, Operand(t0));
}
if (mode == Variable::CONST) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -774,7 +783,7 @@
// IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ EmitVariableLoad(prop->obj()->AsVariableProxy());
}
__ push(result_register());
@@ -789,7 +798,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
// Value in v0 is ignored (declarations are statements).
}
}
@@ -864,7 +873,8 @@
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site, clause->CompareId());
+ __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ patch_site.EmitPatchInfo();
__ Branch(&next_test, ne, v0, Operand(zero_reg));
__ Drop(1); // Switch value is no longer needed.
@@ -918,7 +928,7 @@
Label convert, done_convert;
__ JumpIfSmi(a0, &convert);
__ GetObjectType(a0, a1, a1);
- __ Branch(&done_convert, hs, a1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
__ bind(&convert);
__ push(a0);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
@@ -1107,7 +1117,7 @@
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var());
+ EmitVariableLoad(expr);
}
@@ -1128,8 +1138,7 @@
__ Branch(slow, ne, temp, Operand(zero_reg));
}
// Load next context in chain.
- __ lw(next, ContextOperand(current, Context::CLOSURE_INDEX));
- __ lw(next, FieldMemOperand(next, JSFunction::kContextOffset));
+ __ lw(next, ContextOperand(current, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
current = next;
}
@@ -1153,8 +1162,7 @@
__ lw(temp, ContextOperand(next, Context::EXTENSION_INDEX));
__ Branch(slow, ne, temp, Operand(zero_reg));
// Load next context in chain.
- __ lw(next, ContextOperand(next, Context::CLOSURE_INDEX));
- __ lw(next, FieldMemOperand(next, JSFunction::kContextOffset));
+ __ lw(next, ContextOperand(next, Context::PREVIOUS_INDEX));
__ Branch(&loop);
__ bind(&fast);
}
@@ -1165,7 +1173,7 @@
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, mode, AstNode::kNoNumber);
+ __ Call(ic, mode);
}
@@ -1184,8 +1192,7 @@
__ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ Branch(slow, ne, temp, Operand(zero_reg));
}
- __ lw(next, ContextOperand(context, Context::CLOSURE_INDEX));
- __ lw(next, FieldMemOperand(next, JSFunction::kContextOffset));
+ __ lw(next, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
@@ -1246,7 +1253,7 @@
__ li(a0, Operand(key_literal->handle()));
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ Branch(done);
}
}
@@ -1255,24 +1262,27 @@
}
-void FullCodeGenerator::EmitVariableLoad(Variable* var) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
- Slot* slot = var->AsSlot();
- Property* property = var->AsProperty();
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+ // Record position before possible IC call.
+ SetSourcePosition(proxy->position());
+ Variable* var = proxy->var();
- if (var->is_global() && !var->is_this()) {
+ // Three cases: non-this global variables, lookup slots, and all other
+ // types of slots.
+ Slot* slot = var->AsSlot();
+ ASSERT((var->is_global() && !var->is_this()) == (slot == NULL));
+
+ if (slot == NULL) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in a2 and the global
// object (receiver) in a0.
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(v0);
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ } else if (slot->type() == Slot::LOOKUP) {
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -1288,7 +1298,7 @@
context()->Plug(v0);
- } else if (slot != NULL) {
+ } else {
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
? "Context slot"
: "Stack slot");
@@ -1305,31 +1315,6 @@
} else {
context()->Plug(slot);
}
- } else {
- Comment cmnt(masm_, "Rewritten parameter");
- ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->AsSlot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- Move(a1, object_slot);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ li(a0, Operand(key_literal->handle()));
-
- // Call keyed load IC. It has arguments key and receiver in a0 and a1.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
- context()->Plug(v0);
}
}
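
For reference, the three-way dispatch in the rewritten EmitVariableLoad can be modeled as below (a sketch with simplified types, not the real AST classes): no slot means a non-this global handled by the load IC, a LOOKUP slot needs the dynamic path, and anything else is a direct slot load.

#include <cstdio>

enum class SlotType { STACK, CONTEXT, LOOKUP };

struct Variable {
  bool has_slot;
  SlotType slot_type;  // Only meaningful when has_slot is true.
};

const char* LoadStrategy(const Variable& var) {
  if (!var.has_slot) return "global: load IC, name in a2, receiver in a0";
  if (var.slot_type == SlotType::LOOKUP) return "lookup: dynamic chain walk with slow path";
  return "local: direct stack or context slot load";
}

int main() {
  std::printf("%s\n", LoadStrategy({false, SlotType::STACK}));
  std::printf("%s\n", LoadStrategy({true, SlotType::LOOKUP}));
  std::printf("%s\n", LoadStrategy({true, SlotType::CONTEXT}));
  return 0;
}
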
@@ -1440,7 +1425,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1573,7 +1558,7 @@
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
@@ -1600,27 +1585,13 @@
case KEYED_PROPERTY:
// We need the key and receiver on both the stack and in v0 and a1.
if (expr->is_compound()) {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- __ lw(v0, EmitSlotSearch(obj_proxy->var()->AsSlot(), v0));
- __ push(v0);
- __ li(v0, Operand(property->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
__ lw(a1, MemOperand(sp, 0));
__ push(v0);
} else {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- __ lw(a1, EmitSlotSearch(obj_proxy->var()->AsSlot(), v0));
- __ li(v0, Operand(property->key()->AsLiteral()->handle()));
- __ Push(a1, v0);
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
}
break;
}
@@ -1631,7 +1602,7 @@
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ EmitVariableLoad(expr->target()->AsVariableProxy());
PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
@@ -1698,7 +1669,7 @@
__ li(a2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name a0 and a2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1707,7 +1678,7 @@
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1735,7 +1706,8 @@
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
__ jmp(&done);
__ bind(&smi_case);
@@ -1816,7 +1788,9 @@
__ mov(a0, result_register());
__ pop(a1);
BinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
context()->Plug(v0);
}
@@ -1830,7 +1804,7 @@
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
@@ -1856,30 +1830,20 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
break;
}
case KEYED_PROPERTY: {
__ push(result_register()); // Preserve value.
- if (prop->is_synthetic()) {
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
- { AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
- }
- __ mov(a2, result_register());
- __ li(a1, Operand(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ mov(a1, result_register());
- __ pop(a2);
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ mov(a1, result_register());
+ __ pop(a2);
__ pop(a0); // Restore value.
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
break;
}
}
@@ -1890,8 +1854,6 @@
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
- // Left-hand sides that rewrite to explicit property accesses do not reach
- // here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->AsSlot() != NULL);
@@ -1906,7 +1868,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1927,17 +1889,7 @@
__ Branch(&skip, ne, a1, Operand(t0));
__ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
break;
- case Slot::CONTEXT: {
- __ lw(a1, ContextOperand(cp, Context::FCONTEXT_INDEX));
- __ lw(a2, ContextOperand(a1, slot->index()));
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Branch(&skip, ne, a2, Operand(t0));
- __ sw(result_register(), ContextOperand(a1, slot->index()));
- int offset = Context::SlotOffset(slot->index());
- __ mov(a3, result_register()); // Preserve the stored value in v0.
- __ RecordWrite(a1, Operand(offset), a3, a2);
- break;
- }
+ case Slot::CONTEXT:
case Slot::LOOKUP:
__ push(result_register());
__ li(a0, Operand(slot->var()->name()));
@@ -2014,7 +1966,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2066,7 +2018,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2119,7 +2071,7 @@
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
- EmitCallIC(ic, mode, expr->id());
+ __ Call(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2153,7 +2105,7 @@
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2193,7 +2145,8 @@
__ push(a1);
// Push the receiver of the enclosing function and do runtime call.
- __ lw(a1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
+ int receiver_offset = 2 + info_->scope()->num_parameters();
+ __ lw(a1, MemOperand(fp, receiver_offset * kPointerSize));
__ push(a1);
// Push the strict mode flag.
__ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
@@ -2310,9 +2263,9 @@
__ bind(&done);
// Push function.
__ push(v0);
- // Push global receiver.
- __ lw(a1, GlobalObjectOperand());
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing the hole to the call function stub.
+ __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
__ push(a1);
__ bind(&call);
}
@@ -2334,7 +2287,7 @@
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed EmitCallIC.
+ // for a regular property use EmitKeyedCallWithIC.
if (prop->is_synthetic()) {
// Do not visit the object and key subexpressions (they are shared
// by all occurrences of the same rewritten parameter).
@@ -2352,7 +2305,7 @@
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ lw(a1, GlobalObjectOperand());
__ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
__ Push(v0, a1); // Function, receiver.
@@ -2477,9 +2430,10 @@
__ And(at, a1, Operand(1 << Map::kIsUndetectable));
__ Branch(if_false, ne, at, Operand(zero_reg));
__ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(if_false, lt, a1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(le, a1, Operand(LAST_JS_OBJECT_TYPE), if_true, if_false, fall_through);
+ Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE),
+ if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -2500,7 +2454,7 @@
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- Split(ge, a1, Operand(FIRST_JS_OBJECT_TYPE),
+ Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -2738,7 +2692,7 @@
// parameter count in a0.
VisitForAccumulatorValue(args->at(0));
__ mov(a1, v0);
- __ li(a0, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(v0);
@@ -2750,7 +2704,7 @@
Label exit;
// Get the number of formal parameters.
- __ li(v0, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2779,14 +2733,15 @@
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
__ GetObjectType(v0, v0, a1); // Map is now in v0.
- __ Branch(&null, lt, a1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ Branch(&function, eq, a1, Operand(JS_FUNCTION_TYPE));
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ Branch(&function, ge, a1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
// Check if the constructor in the map is a function.
__ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset));
@@ -2829,13 +2784,12 @@
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
}
-#endif
+
// Finally, we're expected to leave a value on the top of the stack.
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
context()->Plug(v0);
@@ -3184,7 +3138,8 @@
// InvokeFunction requires the function in a1. Move it in there.
__ mov(a1, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(a1, count, CALL_FUNCTION);
+ __ InvokeFunction(a1, count, CALL_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->Plug(v0);
}
@@ -3443,9 +3398,7 @@
__ Branch(&bailout, ne, scratch2, Operand(JS_ARRAY_TYPE));
// Check that the array has fast elements.
- __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ And(scratch3, scratch2, Operand(1 << Map::kHasFastElements));
- __ Branch(&bailout, eq, scratch3, Operand(zero_reg));
+ __ CheckFastElements(scratch1, scratch2, &bailout);
// If the array has length zero, return the empty string.
__ lw(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
@@ -3649,6 +3602,39 @@
}
+void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // Load the function into v0.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Prepare for the test.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Test for strict mode function.
+ __ lw(a1, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a1, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ And(at, a1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ Branch(if_true, ne, at, Operand(zero_reg));
+
+ // Test for native function.
+ __ And(at, a1, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ Branch(if_true, ne, at, Operand(zero_reg));
+
+ // Not native or strict-mode function.
+ __ Branch(if_false);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ context()->Plug(if_true, if_false);
+}
+
+
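
What EmitIsNativeOrStrictMode tests can be written as one mask check; a hedged sketch follows (the bit positions are placeholders, only the kSmiTagSize shift is taken from the code above; the real values live in SharedFunctionInfo).

#include <cstdint>
#include <cstdio>

const int kSmiTagSize = 1;          // From the shifts in the code above.
const int kStrictModeFunction = 0;  // Hypothetical bit position.
const int kNative = 1;              // Hypothetical bit position.

bool IsNativeOrStrict(uint32_t compiler_hints) {
  uint32_t mask = (1u << (kStrictModeFunction + kSmiTagSize)) |
                  (1u << (kNative + kSmiTagSize));
  return (compiler_hints & mask) != 0;
}

int main() {
  std::printf("%d\n", IsNativeOrStrict(1u << (kNative + kSmiTagSize)));  // 1
  std::printf("%d\n", IsNativeOrStrict(0));                              // 0
  return 0;
}
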
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -3681,7 +3667,7 @@
isolate()->stub_cache()->ComputeCallInitialize(arg_count,
NOT_IN_LOOP,
mode);
- EmitCallIC(ic, mode, expr->id());
+ __ Call(ic, mode, expr->id());
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -3824,7 +3810,7 @@
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
__ mov(a0, result_register());
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(v0);
}
@@ -3841,7 +3827,7 @@
}
// Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->expression()->AsProperty();
@@ -3856,7 +3842,7 @@
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
@@ -3869,15 +3855,8 @@
__ push(v0);
EmitNamedPropertyLoad(prop);
} else {
- if (prop->is_arguments_access()) {
- VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
- __ lw(v0, EmitSlotSearch(obj_proxy->var()->AsSlot(), v0));
- __ push(v0);
- __ li(v0, Operand(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ lw(a1, MemOperand(sp, 0));
__ push(v0);
EmitKeyedPropertyLoad(prop);
@@ -3942,7 +3921,8 @@
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ patch_site.EmitPatchInfo();
__ bind(&done);
// Store the value returned in v0.
@@ -3974,7 +3954,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3992,7 +3972,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4016,7 +3996,7 @@
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(v0);
} else if (proxy != NULL &&
@@ -4039,30 +4019,17 @@
context()->Plug(v0);
} else {
// This expression cannot throw a reference error at the top level.
- context()->HandleExpression(expr);
+ VisitInCurrentContext(expr);
}
}
-
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
- // Check for the pattern: typeof <expression> == <string literal>.
- Literal* right_literal = right->AsLiteral();
- if (right_literal == NULL) return false;
- Handle<Object> right_literal_value = right_literal->handle();
- if (!right_literal_value->IsString()) return false;
- UnaryOperation* left_unary = left->AsUnaryOperation();
- if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
- Handle<String> check = Handle<String>::cast(right_literal_value);
-
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(left_unary->expression());
+ VisitForTypeofValue(expr);
}
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
@@ -4097,7 +4064,7 @@
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, v0); // Leave map in a1.
- Split(ge, v0, Operand(FIRST_FUNCTION_CLASS_TYPE),
+ Split(ge, v0, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE),
if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
@@ -4106,9 +4073,9 @@
__ Branch(if_true, eq, v0, Operand(at));
// Check for JS objects => true.
__ GetObjectType(v0, v0, a1);
- __ Branch(if_false, lo, a1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ lbu(a1, FieldMemOperand(v0, Map::kInstanceTypeOffset));
- __ Branch(if_false, hs, a1, Operand(FIRST_FUNCTION_CLASS_TYPE));
+ __ Branch(if_false, gt, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
// Check for undetectable objects => false.
__ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1, Operand(1 << Map::kIsUndetectable));
@@ -4116,8 +4083,18 @@
} else {
if (if_false != fall_through) __ jmp(if_false);
}
+}
- return true;
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ Split(eq, v0, Operand(at), if_true, if_false, fall_through);
}
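
These two helpers carve out the patterns that the shared TryLiteralCompare now recognizes; a simplified stand-alone matcher is sketched below (the Expr type is an illustration, not the real AST; the removed code checked the right operand only, so operand ordering here is an assumption).

#include <cstdio>
#include <string>

struct Expr {
  enum Kind { LITERAL_STRING, LITERAL_UNDEFINED, TYPEOF, OTHER } kind;
  std::string string_value;  // Only for LITERAL_STRING.
};

// Matches `typeof <expr> == "<string>"`; reports the string to check.
bool IsTypeofCompare(const Expr& left, const Expr& right, std::string* check) {
  if (left.kind == Expr::TYPEOF && right.kind == Expr::LITERAL_STRING) {
    *check = right.string_value;
    return true;
  }
  return false;
}

// Matches `<expr> == undefined`, the second fast path.
bool IsUndefinedCompare(const Expr& right) {
  return right.kind == Expr::LITERAL_UNDEFINED;
}

int main() {
  Expr typeof_x = { Expr::TYPEOF, "" };
  Expr str = { Expr::LITERAL_STRING, "number" };
  std::string check;
  if (IsTypeofCompare(typeof_x, str, &check)) {
    std::printf("typeof fast path: \"%s\"\n", check.c_str());
  }
  return 0;
}
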
@@ -4137,14 +4114,12 @@
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
- if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
context()->Plug(if_true, if_false);
return;
}
+ Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
case Token::IN:
@@ -4168,11 +4143,8 @@
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = eq;
- bool strict = false;
switch (op) {
case Token::EQ_STRICT:
- strict = true;
- // Fall through.
case Token::EQ:
cc = eq;
__ mov(a0, result_register());
@@ -4218,7 +4190,8 @@
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
}
@@ -4277,70 +4250,6 @@
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- RelocInfo::Mode mode,
- unsigned ast_id) {
- ASSERT(mode == RelocInfo::CODE_TARGET ||
- mode == RelocInfo::CODE_TARGET_CONTEXT);
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1, a1, a2);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1, a1, a2);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1, a1, a2);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1, a1, a2);
- default:
- break;
- }
- if (ast_id == kNoASTId || mode == RelocInfo::CODE_TARGET_CONTEXT) {
- __ Call(ic, mode);
- } else {
- ASSERT(mode == RelocInfo::CODE_TARGET);
- mode = RelocInfo::CODE_TARGET_WITH_ID;
- __ CallWithAstId(ic, mode, ast_id);
- }
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- JumpPatchSite* patch_site,
- unsigned ast_id) {
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1, a1, a2);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1, a1, a2);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1, a1, a2);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1, a1, a2);
- default:
- break;
- }
-
- if (ast_id == kNoASTId) {
- __ Call(ic, RelocInfo::CODE_TARGET);
- } else {
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET_WITH_ID, ast_id);
- }
- if (patch_site != NULL && patch_site->is_bound()) {
- patch_site->EmitPatchInfo();
- } else {
- __ nop(); // Signals no inlined code.
- }
-}
-
-
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ sw(value, MemOperand(fp, frame_offset));
@@ -4352,6 +4261,27 @@
}
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope()) {
+ // Contexts nested in the global context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ li(at, Operand(Smi::FromInt(0)));
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ lw(at, ContextOperand(cp, Context::CLOSURE_INDEX));
+ } else {
+ ASSERT(declaration_scope->is_function_scope());
+ __ lw(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ __ push(at);
+}
+
+
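
The closure-selection rule above has three arms; a standalone model with simplified types (the scope kinds are stand-ins for the real Scope predicates):

#include <cstdio>

enum class ScopeKind { GLOBAL, EVAL, FUNCTION };

// Which closure value gets pushed for the NewContext runtime call.
const char* ClosureArgument(ScopeKind declaration_scope) {
  switch (declaration_scope) {
    case ScopeKind::GLOBAL:   return "smi 0 sentinel (runtime finds the empty function)";
    case ScopeKind::EVAL:     return "closure loaded from the current context";
    case ScopeKind::FUNCTION: return "function loaded from the frame";
  }
  return "";  // Unreachable.
}

int main() {
  std::printf("%s\n", ClosureArgument(ScopeKind::EVAL));
  return 0;
}
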
// ----------------------------------------------------------------------------
// Non-local control flow support.
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index 12c81c2..da39962 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -80,10 +80,10 @@
// Check that the receiver is a valid JS object.
__ GetObjectType(receiver, scratch0, scratch1);
- __ Branch(miss, lt, scratch1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(miss, lt, scratch1, Operand(FIRST_SPEC_OBJECT_TYPE));
  // If this assert fails, we have to check the upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);
@@ -214,115 +214,6 @@
}
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register key,
- Register result,
- Register reg0,
- Register reg1,
- Register reg2) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
- //
- // Scratch registers:
- //
- // reg0 - holds the untagged key on entry and holds the hash once computed.
- //
- // reg1 - Used to hold the capacity mask of the dictionary.
- //
- // reg2 - Used for the index into the dictionary.
- // at - Temporary (avoid MacroAssembler instructions also using 'at').
- Label done;
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- __ nor(reg1, reg0, zero_reg);
- __ sll(at, reg0, 15);
- __ addu(reg0, reg1, at);
-
- // hash = hash ^ (hash >> 12);
- __ srl(at, reg0, 12);
- __ xor_(reg0, reg0, at);
-
- // hash = hash + (hash << 2);
- __ sll(at, reg0, 2);
- __ addu(reg0, reg0, at);
-
- // hash = hash ^ (hash >> 4);
- __ srl(at, reg0, 4);
- __ xor_(reg0, reg0, at);
-
- // hash = hash * 2057;
- __ li(reg1, Operand(2057));
- __ mul(reg0, reg0, reg1);
-
- // hash = hash ^ (hash >> 16);
- __ srl(at, reg0, 16);
- __ xor_(reg0, reg0, at);
-
- // Compute the capacity mask.
- __ lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
- __ sra(reg1, reg1, kSmiTagSize);
- __ Subu(reg1, reg1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before giving up.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use reg2 for index calculations and keep the hash intact in reg0.
- __ mov(reg2, reg0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- __ Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
- }
- __ and_(reg2, reg2, reg1);
-
- // Scale the index by multiplying by the element size.
- ASSERT(NumberDictionary::kEntrySize == 3);
- __ sll(at, reg2, 1); // 2x.
- __ addu(reg2, reg2, at); // reg2 = reg2 * 3.
-
- // Check if the key is identical to the name.
- __ sll(at, reg2, kPointerSizeLog2);
- __ addu(reg2, elements, at);
-
- __ lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
- if (i != kProbes - 1) {
- __ Branch(&done, eq, key, Operand(at));
- } else {
- __ Branch(miss, ne, key, Operand(at));
- }
- }
-
- __ bind(&done);
- // Check that the value is a normal property.
- // reg2: elements + (index * kPointerSize).
- const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- __ lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
- __ And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
- __ Branch(miss, ne, at, Operand(zero_reg));
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
- __ lw(result, FieldMemOperand(reg2, kValueOffset));
-}
-
-
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a2 : name
@@ -567,7 +458,8 @@
// Invoke the function.
ParameterCount actual(argc);
- __ InvokeFunction(a1, actual, JUMP_FUNCTION);
+ __ InvokeFunction(a1, actual, JUMP_FUNCTION,
+ NullCallWrapper(), CALL_AS_METHOD);
}
@@ -750,7 +642,7 @@
__ Branch(&slow_load, ne, a3, Operand(at));
__ sra(a0, a2, kSmiTagSize);
// a0: untagged index
- GenerateNumberDictionaryLoad(masm, &slow_load, t0, a2, a1, a0, a3, t1);
+ __ LoadFromNumberDictionary(&slow_load, t0, a2, a1, a0, a3, t1);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
__ jmp(&do_call);
@@ -892,6 +784,175 @@
}
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the map check
+ // later, we do not need to check for interceptors or whether it
+ // requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ GetObjectType(object, scratch1, scratch2);
+ __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
+
+ // Check that the key is a positive smi.
+ __ And(scratch1, key, Operand(0x80000001));
+ __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+ __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
+ __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
+
+ // Load element index and check whether it is the hole.
+ const int kOffset =
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+ __ li(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, key, scratch3);
+ __ Addu(scratch3, scratch3, Operand(kOffset));
+
+ __ Addu(scratch2, scratch1, scratch3);
+ __ lw(scratch2, MemOperand(scratch2));
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ li(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, scratch2, scratch3);
+ __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+ __ Addu(scratch2, scratch1, scratch3);
+ return MemOperand(scratch2);
+}
+
+
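
The mapped/unmapped split above is the aliased-arguments layout in miniature; a toy model follows (the data layout is a simplification, not the real heap format where the first two parameter-map elements hold the context and backing store): mapped entries hold a context slot index or a hole marker, and holes fall through to the unmapped backing store, mirroring the two exits of the lookup.

#include <cstdio>
#include <vector>

const int kHole = -1;  // Stand-in for the-hole marker.

struct ArgumentsObject {
  std::vector<int> parameter_map;   // Mapped entries: context slot or kHole.
  std::vector<int> context;         // Aliased parameter storage.
  std::vector<int> backing_store;   // Unmapped element storage.
};

bool Load(const ArgumentsObject& args, size_t key, int* out) {
  if (key < args.parameter_map.size() && args.parameter_map[key] != kHole) {
    *out = args.context[args.parameter_map[key]];  // Mapped case.
    return true;
  }
  if (key < args.backing_store.size()) {
    *out = args.backing_store[key];                // Unmapped case.
    return true;
  }
  return false;                                    // Slow path / miss.
}

int main() {
  ArgumentsObject args = { {0, kHole}, {42}, {7, 8} };
  int v;
  if (Load(args, 0, &v)) std::printf("mapped -> %d\n", v);    // 42
  if (Load(args, 1, &v)) std::printf("unmapped -> %d\n", v);  // 8
  return 0;
}
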
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map. The parameter_map register
+ // must be loaded with the parameter map of the arguments object and is
+ // overwritten.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+ DONT_DO_SMI_CHECK);
+ __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
+ __ li(scratch, Operand(kPointerSize >> 1));
+ __ mul(scratch, key, scratch);
+ __ Addu(scratch,
+ scratch,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Addu(scratch, backing_store, scratch);
+ return MemOperand(scratch);
+}
+
+
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label slow, notin;
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, ¬in, &slow);
+ __ lw(v0, mapped_location);
+ __ Ret();
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in a2.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, a0, a2, a3, &slow);
+ __ lw(a2, unmapped_location);
+ __ Branch(&slow, eq, a2, Operand(a3));
+ __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+ __ mov(v0, a2);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+ Label slow, notin;
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, ¬in, &slow);
+ __ sw(a0, mapped_location);
+ // Verify that mapped_location is a register-based MemOperand with no offset.
+ ASSERT_EQ(mapped_location.offset(), 0);
+ __ RecordWrite(a3, mapped_location.rm(), t5);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // (In delay slot) return the value stored in v0.
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in a3.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
+ __ sw(a0, unmapped_location);
+ ASSERT_EQ(unmapped_location.offset(), 0);
+ __ RecordWrite(a3, unmapped_location.rm(), t5);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // (In delay slot) return the value stored in v0.
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label slow, notin;
+ // Load receiver.
+ __ lw(a1, MemOperand(sp, argc * kPointerSize));
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, a1, a2, a3, t0, t1, ¬in, &slow);
+ __ lw(a1, mapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow, a3);
+ __ bind(¬in);
+ // The unmapped lookup expects that the parameter map is in a3.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, a2, a3, t0, &slow);
+ __ lw(a1, unmapped_location);
+ __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+ __ Branch(&slow, eq, a1, Operand(a3));
+ GenerateFunctionTailCall(masm, argc, &slow, a3);
+ __ bind(&slow);
+ GenerateMiss(masm, argc);
+}
+
+
+Object* KeyedLoadIC_Miss(Arguments args);
+
+
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// ---------- S t a t e --------------
// -- ra : return address
@@ -949,11 +1010,8 @@
GenerateKeyedLoadReceiverCheck(
masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
- // Check the "has fast elements" bit in the receiver's map which is
- // now in a2.
- __ lbu(a3, FieldMemOperand(a2, Map::kBitField2Offset));
- __ And(at, a3, Operand(1 << Map::kHasFastElements));
- __ Branch(&check_number_dictionary, eq, at, Operand(zero_reg));
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(a2, a3, &check_number_dictionary);
GenerateFastArrayLoad(
masm, receiver, key, t0, a3, a2, v0, NULL, &slow);
@@ -972,7 +1030,7 @@
__ LoadRoot(at, Heap::kHashTableMapRootIndex);
__ Branch(&slow, ne, a3, Operand(at));
__ sra(a2, a0, kSmiTagSize);
- GenerateNumberDictionaryLoad(masm, &slow, t0, a0, v0, a2, a3, t1);
+ __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
__ Ret();
// Slow case, key and receiver still in a0 and a1.
@@ -1173,8 +1231,10 @@
__ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset));
__ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE));
- // Check that the object is some kind of JS object.
- __ Branch(&slow, lt, t3, Operand(FIRST_JS_OBJECT_TYPE));
+ // Check that the object is some kind of JSObject.
+ __ Branch(&slow, lt, t3, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ Branch(&slow, eq, t3, Operand(JS_PROXY_TYPE));
+ __ Branch(&slow, eq, t3, Operand(JS_FUNCTION_PROXY_TYPE));
// Object case: Check key against length in the elements array.
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 8b342a2..5e8d676 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -50,87 +50,6 @@
}
-// Arguments macros.
-#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
-#define COND_ARGS cond, r1, r2
-
-#define REGISTER_TARGET_BODY(Name) \
-void MacroAssembler::Name(Register target, \
- BranchDelaySlot bd) { \
- Name(Operand(target), bd); \
-} \
-void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
- BranchDelaySlot bd) { \
- Name(Operand(target), COND_ARGS, bd); \
-}
-
-
-#define INT_PTR_TARGET_BODY(Name) \
-void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
- BranchDelaySlot bd) { \
- Name(Operand(target, rmode), bd); \
-} \
-void MacroAssembler::Name(intptr_t target, \
- RelocInfo::Mode rmode, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd) { \
- Name(Operand(target, rmode), COND_ARGS, bd); \
-}
-
-
-#define BYTE_PTR_TARGET_BODY(Name) \
-void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
- BranchDelaySlot bd) { \
- Name(reinterpret_cast<intptr_t>(target), rmode, bd); \
-} \
-void MacroAssembler::Name(byte* target, \
- RelocInfo::Mode rmode, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd) { \
- Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \
-}
-
-
-#define CODE_TARGET_BODY(Name) \
-void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
- BranchDelaySlot bd) { \
- Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \
-} \
-void MacroAssembler::Name(Handle<Code> target, \
- RelocInfo::Mode rmode, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd) { \
- Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \
-}
-
-
-REGISTER_TARGET_BODY(Jump)
-REGISTER_TARGET_BODY(Call)
-INT_PTR_TARGET_BODY(Jump)
-INT_PTR_TARGET_BODY(Call)
-BYTE_PTR_TARGET_BODY(Jump)
-BYTE_PTR_TARGET_BODY(Call)
-CODE_TARGET_BODY(Jump)
-CODE_TARGET_BODY(Call)
-
-#undef COND_TYPED_ARGS
-#undef COND_ARGS
-#undef REGISTER_TARGET_BODY
-#undef BYTE_PTR_TARGET_BODY
-#undef CODE_TARGET_BODY
-
-
-void MacroAssembler::Ret(BranchDelaySlot bd) {
- Jump(Operand(ra), bd);
-}
-
-
-void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
- BranchDelaySlot bd) {
- Jump(Operand(ra), cond, r1, r2, bd);
-}
-
-
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
@@ -193,6 +112,7 @@
sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
+
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
@@ -203,12 +123,14 @@
MultiPush(kSafepointSavedRegisters);
}
+
void MacroAssembler::PopSafepointRegisters() {
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
MultiPop(kSafepointSavedRegisters);
Addu(sp, sp, Operand(num_unsaved * kPointerSize));
}
+
void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
@@ -218,6 +140,7 @@
}
}
+
void MacroAssembler::PopSafepointRegistersAndDoubles() {
for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
FPURegister reg = FPURegister::FromAllocationIndex(i);
@@ -227,6 +150,7 @@
PopSafepointRegisters();
}
+
void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
Register dst) {
sw(src, SafepointRegistersAndDoublesSlot(dst));
@@ -419,6 +343,114 @@
}
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register reg0,
+ Register reg1,
+ Register reg2) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'key' or 'result'.
+ // Unchanged on bailout so 'key' or 'result' can be used
+ // in further computation.
+ //
+ // Scratch registers:
+ //
+ // reg0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // reg1 - Used to hold the capacity mask of the dictionary.
+ //
+ // reg2 - Used for the index into the dictionary.
+ // at - Temporary (avoid MacroAssembler instructions also using 'at').
+ Label done;
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ nor(reg1, reg0, zero_reg);
+ sll(at, reg0, 15);
+ addu(reg0, reg1, at);
+
+ // hash = hash ^ (hash >> 12);
+ srl(at, reg0, 12);
+ xor_(reg0, reg0, at);
+
+ // hash = hash + (hash << 2);
+ sll(at, reg0, 2);
+ addu(reg0, reg0, at);
+
+ // hash = hash ^ (hash >> 4);
+ srl(at, reg0, 4);
+ xor_(reg0, reg0, at);
+
+ // hash = hash * 2057;
+ li(reg1, Operand(2057));
+ mul(reg0, reg0, reg1);
+
+ // hash = hash ^ (hash >> 16);
+ srl(at, reg0, 16);
+ xor_(reg0, reg0, at);
+
+ // Compute the capacity mask.
+ lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
+ sra(reg1, reg1, kSmiTagSize);
+ Subu(reg1, reg1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ static const int kProbes = 4;
+ for (int i = 0; i < kProbes; i++) {
+ // Use reg2 for index calculations and keep the hash intact in reg0.
+ mov(reg2, reg0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
+ }
+ and_(reg2, reg2, reg1);
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(NumberDictionary::kEntrySize == 3);
+ sll(at, reg2, 1); // 2x.
+ addu(reg2, reg2, at); // reg2 = reg2 * 3.
+
+ // Check if the key is identical to the name.
+ sll(at, reg2, kPointerSizeLog2);
+ addu(reg2, elements, at);
+
+ lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
+ if (i != kProbes - 1) {
+ Branch(&done, eq, key, Operand(at));
+ } else {
+ Branch(miss, ne, key, Operand(at));
+ }
+ }
+
+ bind(&done);
+ // Check that the value is a normal property.
+ // reg2: elements + (index * kPointerSize).
+ const int kDetailsOffset =
+ NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
+ And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
+ Branch(miss, ne, at, Operand(zero_reg));
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ NumberDictionary::kElementsStartOffset + kPointerSize;
+ lw(result, FieldMemOperand(reg2, kValueOffset));
+}
+
+
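
The hash sequence above must stay in sync with ComputeIntegerHash in utils.h; here it is as plain C++, transcribed directly from the step comments, as a reference sketch (the probe formula below repeats the code's own comment):

#include <cstdint>
#include <cstdio>

uint32_t ComputeIntegerHash(uint32_t hash) {
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}

int main() {
  // Probe sequence used by the unrolled loop: (hash + i + i * i) & mask.
  uint32_t hash = ComputeIntegerHash(42);
  uint32_t mask = 0xf;  // Example capacity mask (capacity 16).
  for (uint32_t i = 0; i < 4; i++) {
    std::printf("probe %u -> entry %u\n", i, (hash + i + i * i) & mask);
  }
  return 0;
}
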
// ---------------------------------------------------------------------------
// Instruction macros.
@@ -670,14 +702,6 @@
}
-// Exception-generating instructions and debugging support.
-void MacroAssembler::stop(const char* msg) {
- // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
- // We use the 0x54321 value to be able to find it easily when reading memory.
- break_(0x54321);
-}
-
-
void MacroAssembler::MultiPush(RegList regs) {
int16_t NumSaved = 0;
int16_t NumToPush = NumberOfBitsSet(regs);
@@ -840,11 +864,11 @@
void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) {
ASSERT(!fd.is(f22));
- ASSERT(!rs.is(t6));
+ ASSERT(!rs.is(t8));
// Load 2^31 into f22.
- Or(t6, zero_reg, 0x80000000);
- Cvt_d_uw(f22, t6);
+ Or(t8, zero_reg, 0x80000000);
+ Cvt_d_uw(f22, t8);
// Test if f22 > fd.
c(OLT, D, fd, f22);
@@ -859,7 +883,7 @@
sub_d(f22, fd, f22);
trunc_w_d(f22, f22);
mfc1(rs, f22);
- or_(rs, rs, t6);
+ or_(rs, rs, t8);
Label done;
Branch(&done);
@@ -1052,6 +1076,51 @@
}
+void MacroAssembler::EmitECMATruncate(Register result,
+ FPURegister double_input,
+ FPURegister single_scratch,
+ Register scratch,
+ Register input_high,
+ Register input_low) {
+ CpuFeatures::Scope scope(FPU);
+ ASSERT(!input_high.is(result));
+ ASSERT(!input_low.is(result));
+ ASSERT(!input_low.is(input_high));
+ ASSERT(!scratch.is(result) &&
+ !scratch.is(input_high) &&
+ !scratch.is(input_low));
+ ASSERT(!single_scratch.is(double_input));
+
+ Label done;
+ Label manual;
+
+ // Clear cumulative exception flags and save the FCSR.
+ Register scratch2 = input_high;
+ cfc1(scratch2, FCSR);
+ ctc1(zero_reg, FCSR);
+ // Try a conversion to a signed integer.
+ trunc_w_d(single_scratch, double_input);
+ mfc1(result, single_scratch);
+ // Retrieve and restore the FCSR.
+ cfc1(scratch, FCSR);
+ ctc1(scratch2, FCSR);
+ // Check for overflow and NaNs.
+ And(scratch,
+ scratch,
+ kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
+ // If we had no exceptions we are done.
+ Branch(&done, eq, scratch, Operand(zero_reg));
+
+ // Load the double value and perform a manual truncation.
+ Move(input_low, input_high, double_input);
+ EmitOutOfInt32RangeTruncate(result,
+ input_high,
+ input_low,
+ scratch);
+ bind(&done);
+}
+
+
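
EmitECMATruncate first tries the FPU's trunc_w_d and falls back to a manual path on overflow or NaN; the end-to-end contract it implements is ECMA-262 ToInt32, sketched portably below (an illustration of the semantics, not of the code path):

#include <cmath>
#include <cstdint>
#include <cstdio>

int32_t EcmaToInt32(double d) {
  if (std::isnan(d) || std::isinf(d) || d == 0.0) return 0;
  const double two32 = 4294967296.0;           // 2^32
  double m = std::fmod(std::trunc(d), two32);  // Truncate, then wrap mod 2^32.
  if (m < 0) m += two32;
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}

int main() {
  std::printf("%d\n", EcmaToInt32(3.7));                  // 3
  std::printf("%d\n", EcmaToInt32(-3.7));                 // -3
  std::printf("%d\n", EcmaToInt32(4294967296.0 + 5.0));   // 5
  std::printf("%d\n", EcmaToInt32(2147483648.0));         // -2147483648 (wraps)
  return 0;
}
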
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
@@ -1074,7 +1143,54 @@
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
+bool MacroAssembler::UseAbsoluteCodePointers() {
+ return is_trampoline_emitted();
+}
+
+
void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
+ BranchShort(offset, bdslot);
+}
+
+
+void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BranchShort(offset, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Jr(L, bdslot);
+ } else {
+ BranchShort(L, bdslot);
+ }
+}
+
+
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ BranchShort(L, cond, rs, rt, bdslot);
+ }
+}
+
+
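
The new Branch overloads implement a standard relaxation: when absolute code pointers are needed and the label is out of short-branch range, the condition is negated into a short branch that skips an unconditional jump. A toy emitter showing the shape (hypothetical API that prints instead of encoding; the real BranchShort also takes registers and a delay-slot flag):

#include <cstdio>
#include <string>

struct Emitter {
  void BranchShort(const std::string& label, const std::string& cond) {
    std::printf("  b%s %s\n", cond.c_str(), label.c_str());
  }
  void JumpAbsolute(const std::string& label) {
    std::printf("  j %s   # absolute, any distance\n", label.c_str());
  }
  void Bind(const std::string& label) { std::printf("%s:\n", label.c_str()); }
};

void EmitLongConditionalBranch(Emitter& e, const std::string& neg_cond,
                               const std::string& target) {
  e.BranchShort("skip", neg_cond);  // Taken when the original cond fails.
  e.JumpAbsolute(target);           // Unconditional, reaches any address.
  e.Bind("skip");
}

int main() {
  Emitter e;
  // Long form of "branch to far_target if eq": negate eq to ne.
  EmitLongConditionalBranch(e, "ne", "far_target");
  return 0;
}
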
+void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
b(offset);
// Emit a nop in the branch delay slot if required.
@@ -1083,9 +1199,9 @@
}
-void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
ASSERT(!rs.is(zero_reg));
Register r2 = no_reg;
@@ -1157,7 +1273,8 @@
break;
case Uless:
if (r2.is(zero_reg)) {
- b(offset);
+ // No code needs to be emitted.
+ return;
} else {
sltu(scratch, rs, r2);
bne(scratch, zero_reg, offset);
@@ -1216,7 +1333,7 @@
} else {
r2 = scratch;
li(r2, rt);
- sltu(scratch, rs, r2);
+ slt(scratch, rs, r2);
beq(scratch, zero_reg, offset);
}
break;
@@ -1269,7 +1386,8 @@
break;
case Uless:
if (rt.imm32_ == 0) {
- b(offset);
+ // No code needs to be emitted.
+ return;
} else if (is_int16(rt.imm32_)) {
sltiu(scratch, rs, rt.imm32_);
bne(scratch, zero_reg, offset);
@@ -1300,7 +1418,7 @@
}
-void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
// We use branch_offset as an argument for the branch instructions to be sure
// it is called just before generating the branch instruction, as needed.
@@ -1312,9 +1430,9 @@
}
-void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
int32_t offset;
@@ -1402,8 +1520,8 @@
break;
case Uless:
if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- b(offset);
+ // No code needs to be emitted.
+ return;
} else {
sltu(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
@@ -1468,7 +1586,7 @@
} else {
r2 = scratch;
li(r2, rt);
- sltu(scratch, rs, r2);
+ slt(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
}
@@ -1532,8 +1650,8 @@
break;
case Uless:
if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- b(offset);
+ // No code needs to be emitted.
+ return;
} else if (is_int16(rt.imm32_)) {
sltiu(scratch, rs, rt.imm32_);
offset = shifted_branch_offset(L, false);
@@ -1570,11 +1688,49 @@
}
+void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
+ BranchAndLinkShort(offset, bdslot);
+}
+
+
+void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BranchAndLinkShort(offset, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Jalr(L, bdslot);
+ } else {
+ BranchAndLinkShort(L, bdslot);
+ }
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jalr(L, bdslot);
+ bind(&skip);
+ } else {
+ BranchAndLinkShort(L, cond, rs, rt, bdslot);
+ }
+}
+
+
// We need to use a bgezal or bltzal, but they can't be used directly with the
// slt instructions. We could use sub or add instead but we would miss overflow
// cases, so we keep slt and add an intermediate third instruction.
-void MacroAssembler::BranchAndLink(int16_t offset,
- BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShort(int16_t offset,
+ BranchDelaySlot bdslot) {
bal(offset);
// Emit a nop in the branch delay slot if required.
@@ -1583,9 +1739,9 @@
}
-void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
Register r2 = no_reg;
Register scratch = at;
@@ -1665,7 +1821,7 @@
}
-void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
bal(shifted_branch_offset(L, false));
// Emit a nop in the branch delay slot if required.
@@ -1674,9 +1830,9 @@
}
-void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
int32_t offset;
@@ -1772,164 +1928,230 @@
}
-void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (target.is_reg()) {
- jr(target.rm());
- } else {
- if (!MustUseReg(target.rmode_)) {
- j(target.imm32_);
- } else {
- li(t9, target);
- jr(t9);
- }
- }
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-void MacroAssembler::Jump(const Operand& target,
- Condition cond, Register rs, const Operand& rt,
- BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- BRANCH_ARGS_CHECK(cond, rs, rt);
- if (target.is_reg()) {
- if (cond == cc_always) {
- jr(target.rm());
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- jr(target.rm());
- }
- } else { // Not register target.
- if (!MustUseReg(target.rmode_)) {
- if (cond == cc_always) {
- j(target.imm32_);
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- j(target.imm32_); // Will generate only one instruction.
- }
- } else { // MustUseReg(target).
- li(t9, target);
- if (cond == cc_always) {
- jr(t9);
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- jr(t9); // Will generate only one instruction.
- }
- }
- }
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
- return 4 * kInstrSize;
-}
-
-
-int MacroAssembler::CallSize(Register reg) {
- return 2 * kInstrSize;
-}
-
-
-// Note: To call gcc-compiled C code on mips, you must call thru t9.
-void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (target.is_reg()) {
- jalr(target.rm());
- } else { // !target.is_reg().
- if (!MustUseReg(target.rmode_)) {
- jal(target.imm32_);
- } else { // MustUseReg(target).
- // Must record previous source positions before the
- // li() generates a new code target.
- positions_recorder()->WriteRecordedPositions();
- li(t9, target);
- jalr(t9);
- }
- }
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-// Note: To call gcc-compiled C code on mips, you must call thru t9.
-void MacroAssembler::Call(const Operand& target,
- Condition cond, Register rs, const Operand& rt,
- BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- BRANCH_ARGS_CHECK(cond, rs, rt);
- if (target.is_reg()) {
- if (cond == cc_always) {
- jalr(target.rm());
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- jalr(target.rm());
- }
- } else { // !target.is_reg().
- if (!MustUseReg(target.rmode_)) {
- if (cond == cc_always) {
- jal(target.imm32_);
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- jal(target.imm32_); // Will generate only one instruction.
- }
- } else { // MustUseReg(target)
- li(t9, target);
- if (cond == cc_always) {
- jalr(t9);
- } else {
- Branch(2, NegateCondition(cond), rs, rt);
- jalr(t9); // Will generate only one instruction.
- }
- }
- }
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
-
-
-void MacroAssembler::CallWithAstId(Handle<Code> code,
- RelocInfo::Mode rmode,
- unsigned ast_id,
- Condition cond,
- Register r1,
- const Operand& r2) {
- ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
- ASSERT(ast_id != kNoASTId);
- ASSERT(ast_id_for_reloc_info_ == kNoASTId);
- ast_id_for_reloc_info_ = ast_id;
- Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
-}
-
-
-void MacroAssembler::Drop(int count,
+void MacroAssembler::Jump(Register target,
Condition cond,
- Register reg,
- const Operand& op) {
- if (count <= 0) {
- return;
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond == cc_always) {
+ jr(target);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jr(target);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::Jump(intptr_t target,
+ RelocInfo::Mode rmode,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ li(t9, Operand(target, rmode));
+ Jump(t9, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::Jump(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
+}
+
+
+int MacroAssembler::CallSize(Register target,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ int size = 0;
+
+ if (cond == cc_always) {
+ size += 1;
+ } else {
+ size += 3;
}
- Label skip;
+ if (bd == PROTECT)
+ size += 1;
- if (cond != al) {
- Branch(&skip, NegateCondition(cond), reg, op);
- }
+ return size * kInstrSize;
+}
- if (count > 0) {
- addiu(sp, sp, count * kPointerSize);
- }
- if (cond != al) {
- bind(&skip);
+// Note: To call gcc-compiled C code on mips, you must call thru t9.
+void MacroAssembler::Call(Register target,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label start;
+ bind(&start);
+ if (cond == cc_always) {
+ jalr(target);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jalr(target);
}
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT)
+ nop();
+
+ ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
+}
+
+
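The size bookkeeping above can be sanity-checked by counting emitted instructions (a sketch; kInstrSize is 4 bytes on MIPS32):

    // Unconditional call:  jalr                       -> 1 instruction,
    //                      plus a delay-slot nop when bd == PROTECT.
    // Conditional call:    branch on the negated condition + its delay-slot
    //                      nop + jalr                 -> 3 instructions,
    //                      plus a delay-slot nop when bd == PROTECT.
    // The Address overload below adds 2 * kInstrSize because li(t9, ...) is
    // forced into its two-instruction lui/ori form.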
+int MacroAssembler::CallSize(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ int size = CallSize(t9, cond, rs, rt, bd);
+ return size + 2 * kInstrSize;
+}
+
+
+void MacroAssembler::Call(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label start;
+ bind(&start);
+ int32_t target_int = reinterpret_cast<int32_t>(target);
+ // Must record previous source positions before the
+ // li() generates a new code target.
+ positions_recorder()->WriteRecordedPositions();
+ li(t9, Operand(target_int, rmode), true);
+ Call(t9, cond, rs, rt, bd);
+ ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
+}
+
+
+int MacroAssembler::CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ return CallSize(reinterpret_cast<Address>(code.location()),
+ rmode, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::Call(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id,
+ Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label start;
+ bind(&start);
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+ SetRecordedAstId(ast_id);
+ rmode = RelocInfo::CODE_TARGET_WITH_ID;
+ }
+ Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
+ ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt),
+ SizeOfCodeGeneratedSince(&start));
+}
+
+
+void MacroAssembler::Ret(Condition cond,
+ Register rs,
+ const Operand& rt,
+ BranchDelaySlot bd) {
+ Jump(ra, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint32_t imm28;
+ imm28 = jump_address(L);
+ imm28 &= kImm28Mask;
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ j(imm28);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint32_t imm32;
+ imm32 = jump_address(L);
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jr(at);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint32_t imm32;
+ imm32 = jump_address(L);
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jalr(at);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
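The lui/ori pair in Jr and Jalr materializes the full 32-bit label address in at; a host-side sketch of the decomposition (SplitForLuiOri is an illustrative name, with kHiMask >> kLuiShift and kImm16Mask written out as literals):

    #include <cstdint>
    #include <utility>

    std::pair<uint32_t, uint32_t> SplitForLuiOri(uint32_t imm32) {
      uint32_t hi = (imm32 & 0xffff0000u) >> 16;  // lui at, hi    => at = hi << 16
      uint32_t lo = imm32 & 0x0000ffffu;          // ori at, at, lo => at |= lo
      return std::make_pair(hi, lo);              // (hi << 16) | lo == imm32
    }

Blocking buffer growth keeps the two instructions adjacent, so the relocator can patch them as a single internal reference.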
@@ -1954,6 +2176,29 @@
}
+void MacroAssembler::Drop(int count,
+ Condition cond,
+ Register reg,
+ const Operand& op) {
+ if (count <= 0) {
+ return;
+ }
+
+ Label skip;
+
+ if (cond != al) {
+ Branch(&skip, NegateCondition(cond), reg, op);
+ }
+
+ addiu(sp, sp, count * kPointerSize);
+
+ if (cond != al) {
+ bind(&skip);
+ }
+}
+
+
+
void MacroAssembler::Swap(Register reg1,
Register reg2,
Register scratch) {
@@ -1974,6 +2219,12 @@
}
+void MacroAssembler::Push(Handle<Object> handle) {
+ li(at, Operand(handle));
+ push(at);
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
@@ -2515,8 +2766,8 @@
Register scratch1,
Register scratch2,
Label* gc_required) {
- LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- AllocateHeapNumber(result, scratch1, scratch2, t6, gc_required);
+ LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+ AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
}
@@ -2605,6 +2856,15 @@
}
+void MacroAssembler::CheckFastElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
+ lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
+}
+
+
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
@@ -2775,9 +3035,9 @@
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+ call_wrapper.BeforeCall(CallSize(adaptor));
SetCallKind(t1, call_kind);
- Call(adaptor, RelocInfo::CODE_TARGET);
+ Call(adaptor);
call_wrapper.AfterCall();
jmp(done);
} else {
@@ -2861,7 +3121,8 @@
void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ CallKind call_kind) {
ASSERT(function->is_compiled());
// Get the function and setup the context.
@@ -2874,7 +3135,7 @@
if (V8::UseCrankshaft()) {
UNIMPLEMENTED_MIPS();
} else {
- InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+ InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
}
}
@@ -2892,8 +3153,8 @@
Register scratch,
Label* fail) {
lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- Branch(fail, lt, scratch, Operand(FIRST_JS_OBJECT_TYPE));
- Branch(fail, gt, scratch, Operand(LAST_JS_OBJECT_TYPE));
+ Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
}
@@ -2973,7 +3234,7 @@
void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
Register r1, const Operand& r2) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
}
@@ -2984,17 +3245,18 @@
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
+ Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET,
+ kNoASTId, cond, r1, r2);
return result;
}
-
void MacroAssembler::TailCallStub(CodeStub* stub) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
+
MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
Condition cond,
Register r1,
@@ -3190,23 +3452,18 @@
ASSERT(!overflow_dst.is(right));
ASSERT(!left.is(right));
- // TODO(kalmard) There must be a way to optimize dst == left and dst == right
- // cases.
-
if (dst.is(left)) {
- addu(overflow_dst, left, right);
- xor_(dst, overflow_dst, left);
- xor_(scratch, overflow_dst, right);
- and_(scratch, scratch, dst);
- mov(dst, overflow_dst);
- mov(overflow_dst, scratch);
+ mov(scratch, left); // Preserve left.
+ addu(dst, left, right); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, right);
+ and_(overflow_dst, overflow_dst, scratch);
} else if (dst.is(right)) {
- addu(overflow_dst, left, right);
- xor_(dst, overflow_dst, right);
- xor_(scratch, overflow_dst, left);
- and_(scratch, scratch, dst);
- mov(dst, overflow_dst);
- mov(overflow_dst, scratch);
+ mov(scratch, right); // Preserve right.
+ addu(dst, left, right); // Right is overwritten.
+ xor_(scratch, dst, scratch); // Original right.
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, overflow_dst, scratch);
} else {
addu(dst, left, right);
xor_(overflow_dst, dst, left);
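The rewritten sequences use the standard xor trick for signed overflow: it occurred iff both operands agree in sign and the result disagrees. A host-side sketch of the addition case (AdduOverflows is an illustrative name):

    #include <cstdint>

    bool AdduOverflows(int32_t left, int32_t right, int32_t* dst) {
      // Wraparound addition, as addu performs (unsigned arithmetic avoids UB).
      *dst = static_cast<int32_t>(
          static_cast<uint32_t>(left) + static_cast<uint32_t>(right));
      // The sign bit of (dst ^ left) & (dst ^ right) is set exactly on overflow.
      return ((*dst ^ left) & (*dst ^ right)) < 0;
    }

The dst == left and dst == right paths above only add one mov to preserve the overwritten operand before applying the same formula.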
@@ -3230,23 +3487,18 @@
ASSERT(!scratch.is(left));
ASSERT(!scratch.is(right));
- // TODO(kalmard) There must be a way to optimize dst == left and dst == right
- // cases.
-
if (dst.is(left)) {
- subu(overflow_dst, left, right);
- xor_(scratch, overflow_dst, left);
- xor_(dst, left, right);
- and_(scratch, scratch, dst);
- mov(dst, overflow_dst);
- mov(overflow_dst, scratch);
+ mov(scratch, left); // Preserve left.
+ subu(dst, left, right); // Left is overwritten.
+ xor_(overflow_dst, dst, scratch); // scratch is original left.
+ xor_(scratch, scratch, right); // scratch is original left.
+ and_(overflow_dst, scratch, overflow_dst);
} else if (dst.is(right)) {
- subu(overflow_dst, left, right);
- xor_(dst, left, right);
- xor_(scratch, overflow_dst, left);
- and_(scratch, scratch, dst);
- mov(dst, overflow_dst);
- mov(overflow_dst, scratch);
+ mov(scratch, right); // Preserve right.
+ subu(dst, left, right); // Right is overwritten.
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, scratch); // Original right.
+ and_(overflow_dst, scratch, overflow_dst);
} else {
subu(dst, left, right);
xor_(overflow_dst, dst, left);
@@ -3315,6 +3567,7 @@
JumpToExternalReference(ext);
}
+
MaybeObject* MacroAssembler::TryTailCallExternalReference(
const ExternalReference& ext, int num_arguments, int result_size) {
// TODO(1236192): Most runtime routines don't need the number of
@@ -3356,10 +3609,12 @@
GetBuiltinEntry(t9, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(t9));
+ SetCallKind(t1, CALL_AS_METHOD);
Call(t9);
call_wrapper.AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(t1, CALL_AS_METHOD);
Jump(t9);
}
}
@@ -3445,6 +3700,8 @@
lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
LoadRoot(at, Heap::kFixedArrayMapRootIndex);
Branch(&ok, eq, elements, Operand(at));
+ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
+ Branch(&ok, eq, elements, Operand(at));
LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
Branch(&ok, eq, elements, Operand(at));
Abort("JSObject with fast elements map has slow elements");
@@ -3509,12 +3766,9 @@
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- lw(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
- lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
} else {
// Slot is in the current function context. Move it into the
@@ -3522,17 +3776,6 @@
// cannot be allowed to destroy the context in esi).
Move(dst, cp);
}
-
- // We should not have found a 'with' context by walking the context chain
- // (i.e., the static scope chain and runtime context chain do not agree).
- // A variable occurring in such a scope should have slot type LOOKUP and
- // not CONTEXT.
- if (emit_debug_code()) {
- lw(t9, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- Check(eq, "Yo dawg, I heard you liked function contexts "
- "so I put function contexts in all your contexts",
- dst, Operand(t9));
- }
}
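With contexts now chained through a PREVIOUS slot, the walk above is one load per hop instead of the old two-load closure-then-function detour; schematically:

    // ctx = cp;
    // repeat context_chain_length times:
    //   ctx = ctx[Context::SlotOffset(Context::PREVIOUS_INDEX)];  // one lw per hop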
@@ -3718,6 +3961,7 @@
#endif // defined(V8_HOST_ARCH_MIPS)
}
+
void MacroAssembler::AssertStackIsAligned() {
if (emit_debug_code()) {
const int frame_alignment = ActivationFrameAlignment();
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index bcb459e..4994516 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -99,44 +99,11 @@
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
-// Arguments macros.
+ // Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2
-// Prototypes.
-
-// Prototypes for functions with no target (eg Ret()).
-#define DECLARE_NOTARGET_PROTOTYPE(Name) \
- void Name(BranchDelaySlot bd = PROTECT); \
- void Name(COND_TYPED_ARGS, BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, COND_TYPED_ARGS) { \
- Name(COND_ARGS, bd); \
- }
-
-// Prototypes for functions with a target.
-
-// Cases when relocation may be needed.
-#define DECLARE_RELOC_PROTOTYPE(Name, target_type) \
- void Name(target_type target, \
- RelocInfo::Mode rmode, \
- BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, \
- target_type target, \
- RelocInfo::Mode rmode) { \
- Name(target, rmode, bd); \
- } \
- void Name(target_type target, \
- RelocInfo::Mode rmode, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, \
- target_type target, \
- RelocInfo::Mode rmode, \
- COND_TYPED_ARGS) { \
- Name(target, rmode, COND_ARGS, bd); \
- }
-
-// Cases when relocation is not needed.
+ // Cases when relocation is not needed.
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
void Name(target_type target, BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, target_type target) { \
@@ -151,44 +118,44 @@
Name(target, COND_ARGS, bd); \
}
-// Target prototypes.
-
-#define DECLARE_JUMP_CALL_PROTOTYPES(Name) \
- DECLARE_NORELOC_PROTOTYPE(Name, Register) \
- DECLARE_NORELOC_PROTOTYPE(Name, const Operand&) \
- DECLARE_RELOC_PROTOTYPE(Name, byte*) \
- DECLARE_RELOC_PROTOTYPE(Name, Handle<Code>)
-
#define DECLARE_BRANCH_PROTOTYPES(Name) \
DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
+ DECLARE_BRANCH_PROTOTYPES(Branch)
+ DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
-DECLARE_JUMP_CALL_PROTOTYPES(Jump)
-DECLARE_JUMP_CALL_PROTOTYPES(Call)
-
-DECLARE_BRANCH_PROTOTYPES(Branch)
-DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
-
-DECLARE_NOTARGET_PROTOTYPE(Ret)
-
+#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
#undef COND_ARGS
-#undef DECLARE_NOTARGET_PROTOTYPE
-#undef DECLARE_NORELOC_PROTOTYPE
-#undef DECLARE_RELOC_PROTOTYPE
-#undef DECLARE_JUMP_CALL_PROTOTYPES
-#undef DECLARE_BRANCH_PROTOTYPES
- void CallWithAstId(Handle<Code> code,
- RelocInfo::Mode rmode,
- unsigned ast_id,
- Condition cond = al,
- Register r1 = zero_reg,
- const Operand& r2 = Operand(zero_reg));
- int CallSize(Register reg);
- int CallSize(Handle<Code> code, RelocInfo::Mode rmode);
+ // Jump, Call, and Ret pseudo instructions implementing inter-working.
+#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
+ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
+
+ void Jump(Register target, COND_ARGS);
+ void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
+ void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
+ int CallSize(Register target, COND_ARGS);
+ void Call(Register target, COND_ARGS);
+ int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ unsigned ast_id = kNoASTId,
+ COND_ARGS);
+ void Call(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ unsigned ast_id = kNoASTId,
+ COND_ARGS);
+ void Ret(COND_ARGS);
+ inline void Ret(BranchDelaySlot bd) {
+ Ret(al, zero_reg, Operand(zero_reg), bd);
+ }
+
+#undef COND_ARGS
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@@ -299,6 +266,16 @@
Register scratch,
Label* miss);
+
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register reg0,
+ Register reg1,
+ Register reg2);
+
+
inline void MarkCode(NopMarkerTypes type) {
nop(type);
}
@@ -459,9 +436,6 @@
li(dst, Operand(value), gen2instr);
}
- // Exception-generating instructions and debugging support.
- void stop(const char* msg);
-
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
// saved in higher memory addresses.
@@ -474,6 +448,9 @@
sw(src, MemOperand(sp, 0));
}
+ // Push a handle.
+ void Push(Handle<Object> handle);
+
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
Subu(sp, sp, Operand(2 * kPointerSize));
@@ -576,6 +553,16 @@
Register input_low,
Register scratch);
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer and all other registers clobbered.
+ void EmitECMATruncate(Register result,
+ FPURegister double_input,
+ FPURegister single_scratch,
+ Register scratch,
+ Register scratch2,
+ Register scratch3);
+
// -------------------------------------------------------------------------
// Activation frames.
@@ -624,27 +611,28 @@
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper(),
- CallKind call_kind = CALL_AS_METHOD);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag,
- CallKind call_kind = CALL_AS_METHOD);
+ CallKind call_kind);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper(),
- CallKind call_kind = CALL_AS_METHOD);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
void InvokeFunction(JSFunction* function,
const ParameterCount& actual,
- InvokeFlag flag);
+ InvokeFlag flag,
+ CallKind call_kind);
void IsObjectJSObjectType(Register heap_object,
@@ -715,6 +703,12 @@
Register map,
Register type_reg);
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Register scratch,
+ Label* fail);
+
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
@@ -1088,16 +1082,25 @@
Register scratch,
int num_arguments);
- void Jump(intptr_t target, RelocInfo::Mode rmode,
- BranchDelaySlot bd = PROTECT);
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
- BranchDelaySlot bd = PROTECT);
- void Call(intptr_t target, RelocInfo::Mode rmode,
- BranchDelaySlot bd = PROTECT);
- void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
- BranchDelaySlot bd = PROTECT);
+ void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
+ void BranchShort(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+ void BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void J(Label* L, BranchDelaySlot bdslot);
+ void Jr(Label* L, BranchDelaySlot bdslot);
+ void Jalr(Label* L, BranchDelaySlot bdslot);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
@@ -1106,8 +1109,8 @@
Register code_reg,
Label* done,
InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper(),
- CallKind call_kind = CALL_AS_METHOD);
+ const CallWrapper& call_wrapper,
+ CallKind call_kind);
// Get the code for the given builtin. Returns if able to resolve
// the function in the 'resolved' flag.
@@ -1128,6 +1131,8 @@
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+ bool UseAbsoluteCodePointers();
+
bool generating_stub_;
bool allow_stub_calls_;
// This handle will be patched with the code object on installation.
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index cfc8f65..9935ef9 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -179,7 +179,7 @@
// Pop Code* offset from backtrack stack, add Code* and jump to location.
Pop(a0);
__ Addu(a0, a0, code_pointer());
- __ Jump(Operand(a0));
+ __ Jump(a0);
}
@@ -1238,7 +1238,7 @@
__ Call(t9);
__ lw(ra, MemOperand(sp, 0));
__ Addu(sp, sp, Operand(stack_alignment));
- __ Jump(Operand(ra));
+ __ Jump(ra);
}
diff --git a/src/mips/regexp-macro-assembler-mips.h b/src/mips/regexp-macro-assembler-mips.h
index ad7ada5..7fe0c88 100644
--- a/src/mips/regexp-macro-assembler-mips.h
+++ b/src/mips/regexp-macro-assembler-mips.h
@@ -29,6 +29,12 @@
#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#include "mips/assembler-mips.h"
+#include "mips/assembler-mips-inl.h"
+#include "macro-assembler.h"
+#include "code.h"
+#include "mips/macro-assembler-mips.h"
+
namespace v8 {
namespace internal {
@@ -249,4 +255,3 @@
}} // namespace v8::internal
#endif // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index 68fb7ce..30e12e7 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -126,16 +126,29 @@
void MipsDebugger::Stop(Instruction* instr) {
- UNIMPLEMENTED_MIPS();
- char* str = reinterpret_cast<char*>(instr->InstructionBits());
- if (strlen(str) > 0) {
+ // Get the stop code.
+ uint32_t code = instr->Bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char** msg_address =
+    reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
+ char* msg = *msg_address;
+ ASSERT(msg != NULL);
+
+ // Update this stop description.
+ if (!watched_stops[code].desc) {
+ watched_stops[code].desc = msg;
+ }
+
+ if (strlen(msg) > 0) {
if (coverage_log != NULL) {
      fprintf(coverage_log, "%s\n", msg);
fflush(coverage_log);
}
- instr->SetInstructionBits(0x0); // Overwrite with nop.
+ // Overwrite the instruction and address with nops.
+ instr->SetInstructionBits(kNopInstr);
+    reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
}
- sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
+  sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
}
@@ -147,9 +160,17 @@
void MipsDebugger::Stop(Instruction* instr) {
- const char* str = reinterpret_cast<char*>(instr->InstructionBits());
- PrintF("Simulator hit %s\n", str);
- sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
+ // Get the stop code.
+ uint32_t code = instr->Bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg = *reinterpret_cast<char**>(sim_->get_pc() +
+ Instruction::kInstrSize);
+ // Update this stop description.
+ if (!sim_->watched_stops[code].desc) {
+ sim_->watched_stops[code].desc = msg;
+ }
+ PrintF("Simulator hit %s (%u)\n", msg, code);
+ sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
Debug();
}
#endif // GENERATED_CODE_COVERAGE
@@ -585,8 +606,67 @@
}
} else if (strcmp(cmd, "flags") == 0) {
PrintF("No flags on MIPS !\n");
- } else if (strcmp(cmd, "unstop") == 0) {
- PrintF("Unstop command not implemented on MIPS.");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int32_t value;
+ intptr_t stop_pc = sim_->get_pc() -
+ 2 * Instruction::kInstrSize;
+ Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+ Instruction* msg_address =
+ reinterpret_cast<Instruction*>(stop_pc +
+ Instruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->IsStopInstruction(stop_instr)) {
+ stop_instr->SetInstructionBits(kNopInstr);
+ msg_address->SetInstructionBits(kNopInstr);
+ } else {
+ PrintF("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ PrintF("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->PrintStopInfo(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->PrintStopInfo(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->EnableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->EnableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->DisableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->DisableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ PrintF("Wrong usage. Use help command for more information.\n");
+ }
} else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
// Print registers and disassemble.
PrintAllRegs();
@@ -652,9 +732,26 @@
PrintF(" set a break point on the address\n");
PrintF("del\n");
PrintF(" delete the breakpoint\n");
- PrintF("unstop\n");
- PrintF(" ignore the stop instruction at the current location");
- PrintF(" from now on\n");
+ PrintF("stop feature:\n");
+ PrintF(" Description:\n");
+ PrintF(" Stops are debug instructions inserted by\n");
+ PrintF(" the Assembler::stop() function.\n");
+ PrintF(" When hitting a stop, the Simulator will\n");
+ PrintF(" stop and and give control to the Debugger.\n");
+ PrintF(" All stop codes are watched:\n");
+ PrintF(" - They can be enabled / disabled: the Simulator\n");
+ PrintF(" will / won't stop when hitting them.\n");
+ PrintF(" - The Simulator keeps track of how many times they \n");
+ PrintF(" are met. (See the info command.) Going over a\n");
+ PrintF(" disabled stop still increases its counter. \n");
+ PrintF(" Commands:\n");
+ PrintF(" stop info all/<code> : print infos about number <code>\n");
+ PrintF(" or all stop(s).\n");
+ PrintF(" stop enable/disable all/<code> : enables / disables\n");
+ PrintF(" all or number <code> stop(s)\n");
+ PrintF(" stop unstop\n");
+ PrintF(" ignore the stop instruction at the current location\n");
+ PrintF(" from now on\n");
} else {
PrintF("Unknown command: %s\n", cmd);
}
@@ -1062,15 +1159,30 @@
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(double original, double rounded) {
- if (!isfinite(original) ||
- rounded > LONG_MAX ||
- rounded < LONG_MIN) {
- set_fcsr_bit(6, true); // Invalid operation.
- return true;
- } else if (original != static_cast<double>(rounded)) {
- set_fcsr_bit(2, true); // Inexact.
+ bool ret = false;
+
+ if (!isfinite(original) || !isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
}
- return false;
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactFlagBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded > INT_MAX || rounded < INT_MIN) {
+ set_fcsr_bit(kFCSROverflowFlagBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
}
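A few illustrative inputs for the rewritten flag logic (assuming IEEE-754 doubles; these are worked examples, not cases from the patch):

    // original = 1.5,           rounded = 1.0   -> inexact only; returns false.
    // original = -0.5,          rounded = -0.0  -> inexact only; returns false.
    // original = NaN,           rounded = NaN   -> invalid op;   returns true.
    // original = 2147483648.0,  rounded = 2147483648.0 (> INT_MAX)
    //                                           -> overflow + invalid; returns true.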
@@ -1288,7 +1400,7 @@
// the break_ instruction, or several variants of traps. All
// Are "SPECIAL" class opcode, and are distinuished by function.
int32_t func = instr->FunctionFieldRaw();
- int32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
+ uint32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
// We first check if we met a call_rt_redirected.
if (instr->InstructionBits() == rtCallRedirInstr) {
@@ -1372,7 +1484,7 @@
case ExternalReference::BUILTIN_FP_CALL:
GetFpArgs(&dval0);
PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(target), dval1);
+ FUNCTION_ADDR(target), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
GetFpArgs(&dval0, &ival);
@@ -1440,14 +1552,13 @@
set_register(ra, saved_ra);
set_pc(get_register(ra));
- } else if (func == BREAK && code >= 0 && code < 32) {
- // First 32 break_ codes interpreted as debug-markers/watchpoints.
- MipsDebugger dbg(this);
- ++break_count_;
- PrintF("\n---- break %d marker: %3d (instr count: %8d) ----------"
- "----------------------------------",
- code, break_count_, icount_);
- dbg.PrintAllRegs(); // Print registers and continue running.
+ } else if (func == BREAK && code <= kMaxStopCode) {
+ if (IsWatchpoint(code)) {
+ PrintWatchpoint(code);
+ } else {
+ IncreaseStopCounter(code);
+ HandleStop(code, instr);
+ }
} else {
// All remaining break_ codes, and all traps are handled here.
MipsDebugger dbg(this);
@@ -1456,6 +1567,99 @@
}
+// Stop helper functions.
+bool Simulator::IsWatchpoint(uint32_t code) {
+ return (code <= kMaxWatchpointCode);
+}
+
+
+void Simulator::PrintWatchpoint(uint32_t code) {
+ MipsDebugger dbg(this);
+ ++break_count_;
+ PrintF("\n---- break %d marker: %3d (instr count: %8d) ----------"
+ "----------------------------------",
+ code, break_count_, icount_);
+ dbg.PrintAllRegs(); // Print registers and continue running.
+}
+
+
+void Simulator::HandleStop(uint32_t code, Instruction* instr) {
+  // Stop if it is enabled; otherwise skip over the stop instruction
+ // and the message address.
+ if (IsEnabledStop(code)) {
+ MipsDebugger dbg(this);
+ dbg.Stop(instr);
+ } else {
+ set_pc(get_pc() + 2 * Instruction::kInstrSize);
+ }
+}
+
+
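The 2 * kInstrSize skip reflects how a stop is laid out in the instruction stream, a break followed immediately by a word holding the message pointer (schematic, not actual assembler syntax):

    // pc:              break <code>        ; the stop itself
    // pc + kInstrSize: .word msg_address   ; pointer to the C-string description

MipsDebugger::Stop() reads the pointer at pc + kInstrSize to recover the message, which is why both words are skipped (or nopped out) together.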
+bool Simulator::IsStopInstruction(Instruction* instr) {
+ int32_t func = instr->FunctionFieldRaw();
+ uint32_t code = static_cast<uint32_t>(instr->Bits(25, 6));
+ return (func == BREAK) && code > kMaxWatchpointCode && code <= kMaxStopCode;
+}
+
+
+bool Simulator::IsEnabledStop(uint32_t code) {
+ ASSERT(code <= kMaxStopCode);
+ ASSERT(code > kMaxWatchpointCode);
+ return !(watched_stops[code].count & kStopDisabledBit);
+}
+
+
+void Simulator::EnableStop(uint32_t code) {
+ if (!IsEnabledStop(code)) {
+ watched_stops[code].count &= ~kStopDisabledBit;
+ }
+}
+
+
+void Simulator::DisableStop(uint32_t code) {
+ if (IsEnabledStop(code)) {
+ watched_stops[code].count |= kStopDisabledBit;
+ }
+}
+
+
+void Simulator::IncreaseStopCounter(uint32_t code) {
+ ASSERT(code <= kMaxStopCode);
+  if ((watched_stops[code].count & ~kStopDisabledBit) == 0x7fffffff) {
+ PrintF("Stop counter for code %i has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n", code);
+ watched_stops[code].count = 0;
+ EnableStop(code);
+ } else {
+ watched_stops[code].count++;
+ }
+}
+
+
+// Print a stop status.
+void Simulator::PrintStopInfo(uint32_t code) {
+ if (code <= kMaxWatchpointCode) {
+ PrintF("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watched_stops[code].count & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watched_stops[code].desc) {
+ PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
+ code, code, state, count, watched_stops[code].desc);
+ } else {
+ PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n",
+ code, code, state, count);
+ }
+ }
+}
+
+
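The per-code bookkeeping packs the enabled flag and the hit count into a single word; a sketch of the decoding (the field layout matches the StopCountAndDesc struct declared in simulator-mips.h below):

    #include <cstdint>

    const uint32_t kStopDisabledBit = 1u << 31;

    bool IsEnabled(uint32_t count)    { return (count & kStopDisabledBit) == 0; }
    uint32_t HitCount(uint32_t count) { return count & ~kStopDisabledBit; }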
void Simulator::SignalExceptions() {
for (int i = 1; i < kNumExceptions; i++) {
if (exceptions[i] != 0) {
@@ -1865,9 +2069,10 @@
break;
case TRUNC_W_D: // Truncate double to word (round towards 0).
{
- int32_t result = static_cast<int32_t>(fs);
+ double rounded = trunc(fs);
+ int32_t result = static_cast<int32_t>(rounded);
set_fpu_register(fd_reg, result);
- if (set_fcsr_round_error(fs, static_cast<double>(result))) {
+ if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register(fd_reg, kFPUInvalidResult);
}
}
@@ -1895,16 +2100,20 @@
case CVT_S_D: // Convert double to float (single).
set_fpu_register_float(fd_reg, static_cast<float>(fs));
break;
- case CVT_L_D: // Mips32r2: Truncate double to 64-bit long-word.
- i64 = static_cast<int64_t>(fs);
+ case CVT_L_D: { // Mips32r2: Truncate double to 64-bit long-word.
+ double rounded = trunc(fs);
+ i64 = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg, i64 & 0xffffffff);
set_fpu_register(fd_reg + 1, i64 >> 32);
break;
- case TRUNC_L_D: // Mips32r2 instruction.
- i64 = static_cast<int64_t>(fs);
+ }
+ case TRUNC_L_D: { // Mips32r2 instruction.
+ double rounded = trunc(fs);
+ i64 = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg, i64 & 0xffffffff);
set_fpu_register(fd_reg + 1, i64 >> 32);
break;
+ }
case ROUND_L_D: { // Mips32r2 instruction.
double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
i64 = static_cast<int64_t>(rounded);
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index 21476dc..69dddfa 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -289,6 +289,18 @@
// Used for breakpoints and traps.
void SoftwareInterrupt(Instruction* instr);
+ // Stop helper functions.
+ bool IsWatchpoint(uint32_t code);
+ void PrintWatchpoint(uint32_t code);
+ void HandleStop(uint32_t code, Instruction* instr);
+ bool IsStopInstruction(Instruction* instr);
+ bool IsEnabledStop(uint32_t code);
+ void EnableStop(uint32_t code);
+ void DisableStop(uint32_t code);
+ void IncreaseStopCounter(uint32_t code);
+ void PrintStopInfo(uint32_t code);
+
+
// Executes one instruction.
void InstructionDecode(Instruction* instr);
// Execute one instruction placed in a branch delay slot.
@@ -354,6 +366,19 @@
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1 << 31;
+
+  // A stop is enabled (i.e. the simulator will stop when reaching the
+  // instruction) if bit 31 of watched_stops[code].count is unset.
+  // The value watched_stops[code].count & ~(1 << 31) indicates how many times
+  // the stop has been hit or passed over.
+ struct StopCountAndDesc {
+ uint32_t count;
+ char* desc;
+ };
+ StopCountAndDesc watched_stops[kMaxStopCode + 1];
};
@@ -398,4 +423,3 @@
#endif // !defined(USE_SIMULATOR)
#endif // V8_MIPS_SIMULATOR_MIPS_H_
-
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 47428a8..f1ffe9b 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -126,7 +126,7 @@
// Check that receiver is a JSObject.
__ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(miss_label, lt, scratch0, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
// Load properties array.
Register properties = scratch0;
@@ -472,7 +472,8 @@
static void GenerateCallFunction(MacroAssembler* masm,
Object* object,
const ParameterCount& arguments,
- Label* miss) {
+ Label* miss,
+ Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- a0: receiver
// -- a1: function to call
@@ -490,7 +491,10 @@
}
// Invoke the function.
- __ InvokeFunction(a1, arguments, JUMP_FUNCTION);
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(a1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
}
@@ -629,10 +633,12 @@
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
const ParameterCount& arguments,
- Register name)
+ Register name,
+ Code::ExtraICState extra_ic_state)
: stub_compiler_(stub_compiler),
arguments_(arguments),
- name_(name) {}
+ name_(name),
+ extra_ic_state_(extra_ic_state) {}
MaybeObject* Compile(MacroAssembler* masm,
JSObject* object,
@@ -760,8 +766,11 @@
arguments_.immediate());
if (result->IsFailure()) return result;
} else {
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
__ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION);
+ JUMP_FUNCTION, call_kind);
}
// Deferred code for fast API call case---clean preallocated space.
@@ -844,6 +853,7 @@
StubCompiler* stub_compiler_;
const ParameterCount& arguments_;
Register name_;
+ Code::ExtraICState extra_ic_state_;
};
@@ -1503,7 +1513,7 @@
Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
- GenerateCallFunction(masm(), object, arguments(), &miss);
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
// Handle call cache miss.
__ bind(&miss);
@@ -2001,7 +2011,7 @@
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
__ bind(&miss);
// a2: function name.
@@ -2086,9 +2096,7 @@
// Retrieve FCSR and check for fpu errors.
__ cfc1(t5, FCSR);
- __ srl(t5, t5, kFCSRFlagShift);
- // Flag 1 marks an inaccurate but still good result so we ignore it.
- __ And(t5, t5, Operand(kFCSRFlagMask ^ 1));
+ __ And(t5, t5, Operand(kFCSRExceptionFlagMask));
__ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
// Check for NaN, Infinity, and -Infinity.
@@ -2137,7 +2145,7 @@
__ bind(&slow);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
__ bind(&miss);
// a2: function name.
@@ -2239,7 +2247,7 @@
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
__ bind(&miss);
// a2: function name.
@@ -2425,7 +2433,10 @@
UNREACHABLE();
}
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
+ ? CALL_AS_FUNCTION
+ : CALL_AS_METHOD;
+ __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
// Handle call cache miss.
__ bind(&miss);
@@ -2459,7 +2470,7 @@
// Get the receiver from the stack.
__ lw(a1, MemOperand(sp, argc * kPointerSize));
- CallInterceptorCompiler compiler(this, arguments(), a2);
+ CallInterceptorCompiler compiler(this, arguments(), a2, extra_ic_state_);
MaybeObject* result = compiler.Compile(masm(),
object,
holder,
@@ -2479,7 +2490,7 @@
// Restore receiver.
__ lw(a0, MemOperand(sp, argc * kPointerSize));
- GenerateCallFunction(masm(), object, arguments(), &miss);
+ GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
// Handle call cache miss.
__ bind(&miss);
@@ -2491,13 +2502,11 @@
}
-MaybeObject* CallStubCompiler::CompileCallGlobal(
- JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name,
- Code::ExtraICState extra_ic_state) {
+MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
@@ -2538,7 +2547,7 @@
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
- CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
+ CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
if (V8::UseCrankshaft()) {
@@ -3083,14 +3092,15 @@
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFastElement(Map* receiver_map) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
- MaybeObject* maybe_stub = KeyedLoadFastElementStub().TryGetCode();
Code* stub;
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(a1,
a2,
@@ -3172,8 +3182,7 @@
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreFastElement(
- Map* receiver_map) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -3181,10 +3190,11 @@
// -- ra : return address
// -- a3 : scratch
// -----------------------------------
+ Code* stub;
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
MaybeObject* maybe_stub =
- KeyedStoreFastElementStub(is_js_array).TryGetCode();
- Code* stub;
+ KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(a2,
a3,
@@ -3380,82 +3390,87 @@
}
-MaybeObject* ExternalArrayLoadStubCompiler::CompileLoad(
- JSObject*receiver, ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- MaybeObject* maybe_stub =
- KeyedLoadExternalArrayStub(array_type).TryGetCode();
- Code* stub;
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(a1,
- a2,
- Handle<Map>(receiver->map()),
- Handle<Code>(stub),
- DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
-MaybeObject* ExternalArrayStoreStubCompiler::CompileStore(
- JSObject* receiver, ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : name
- // -- a2 : receiver
- // -- ra : return address
- // -----------------------------------
- MaybeObject* maybe_stub =
- KeyedStoreExternalArrayStub(array_type).TryGetCode();
- Code* stub;
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(a2,
- a3,
- Handle<Map>(receiver->map()),
- Handle<Code>(stub),
- DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- return GetCode();
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
-static bool IsElementTypeSigned(ExternalArrayType array_type) {
- switch (array_type) {
- case kExternalByteArray:
- case kExternalShortArray:
- case kExternalIntArray:
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label slow, miss_force_generic;
+
+ Register key = a0;
+ Register receiver = a1;
+
+ __ JumpIfNotSmi(key, &miss_force_generic);
+ __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ sra(a2, a0, kSmiTagSize);
+ __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
+ __ Ret();
+
+ // Slow case, key and receiver still in a0 and a1.
+ __ bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(),
+ 1, a2, a3);
+ // Entry registers are intact.
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+ // Miss case, call the runtime.
+ __ bind(&miss_force_generic);
+
+ // ---------- S t a t e --------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
+static bool IsElementTypeSigned(JSObject::ElementsKind elements_kind) {
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
return true;
- case kExternalUnsignedByteArray:
- case kExternalUnsignedShortArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
return false;
- default:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
return false;
}
+ return false;
}
void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm,
- ExternalArrayType array_type) {
+ JSObject::ElementsKind elements_kind) {
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
@@ -3489,33 +3504,33 @@
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
Register value = a2;
- switch (array_type) {
- case kExternalByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ srl(t2, key, 1);
__ addu(t3, a3, t2);
__ lb(value, MemOperand(t3, 0));
break;
- case kExternalPixelArray:
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ srl(t2, key, 1);
__ addu(t3, a3, t2);
__ lbu(value, MemOperand(t3, 0));
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
__ addu(t3, a3, key);
__ lh(value, MemOperand(t3, 0));
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ addu(t3, a3, key);
__ lhu(value, MemOperand(t3, 0));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ sll(t2, key, 1);
__ addu(t3, a3, t2);
__ lw(value, MemOperand(t3, 0));
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
__ sll(t3, t2, 2);
__ addu(t3, a3, t3);
if (CpuFeatures::IsSupported(FPU)) {
@@ -3525,7 +3540,7 @@
__ lw(value, MemOperand(t3, 0));
}
break;
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
__ sll(t2, key, 2);
__ addu(t3, a3, t2);
if (CpuFeatures::IsSupported(FPU)) {
@@ -3537,7 +3552,10 @@
__ lw(a3, MemOperand(t3, Register::kSizeInBytes));
}
break;
- default:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3551,7 +3569,7 @@
// f0: value (if FPU is supported)
// a2/a3: value (if FPU is not supported)
- if (array_type == kExternalIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
@@ -3593,7 +3611,7 @@
__ sw(dst2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
__ Ret();
}
- } else if (array_type == kExternalUnsignedIntArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) {
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
@@ -3664,7 +3682,7 @@
__ mov(v0, t2);
__ Ret();
}
- } else if (array_type == kExternalFloatArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
if (CpuFeatures::IsSupported(FPU)) {
@@ -3731,7 +3749,7 @@
__ Ret();
}
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
// Allocate a HeapNumber for the result. Don't use a0 and a1 as
@@ -3785,7 +3803,7 @@
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
- ExternalArrayType array_type) {
+ JSObject::ElementsKind elements_kind) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -3820,7 +3838,7 @@
// a3: external array.
// t0: key (integer).
- if (array_type == kExternalPixelArray) {
+ if (elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) {
// Double to pixel conversion is only implemented in the runtime for now.
__ JumpIfNotSmi(value, &slow);
} else {
@@ -3833,8 +3851,8 @@
// t0: key (integer).
// t1: value (integer).
- switch (array_type) {
- case kExternalPixelArray: {
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS: {
// Clamp the value to [0..255].
// v0 is used as a scratch register here.
Label done;
@@ -3851,28 +3869,28 @@
__ sb(t1, MemOperand(t8, 0));
}
break;
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ addu(t8, a3, t0);
__ sb(t1, MemOperand(t8, 0));
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ sll(t8, t0, 1);
__ addu(t8, a3, t8);
__ sh(t1, MemOperand(t8, 0));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ sll(t8, t0, 2);
__ addu(t8, a3, t8);
__ sw(t1, MemOperand(t8, 0));
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
// Perform int-to-float conversion and store to memory.
StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
break;
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
__ sll(t8, t0, 3);
__ addu(a3, a3, t8);
// a3: effective address of the double element
@@ -3894,7 +3912,10 @@
__ sw(t3, MemOperand(a3, Register::kSizeInBytes));
}
break;
- default:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3903,7 +3924,7 @@
__ mov(v0, value);
__ Ret();
- if (array_type != kExternalPixelArray) {
+ if (elements_kind != JSObject::EXTERNAL_PIXEL_ELEMENTS) {
// a3: external array.
// t0: index (integer).
__ bind(&check_heap_number);
@@ -3924,56 +3945,43 @@
__ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
- if (array_type == kExternalFloatArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
__ cvt_s_d(f0, f0);
__ sll(t8, t0, 2);
__ addu(t8, a3, t8);
__ swc1(f0, MemOperand(t8, 0));
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
__ sll(t8, t0, 3);
__ addu(t8, a3, t8);
__ sdc1(f0, MemOperand(t8, 0));
} else {
- Label done;
+ __ EmitECMATruncate(t3, f0, f2, t2, t1, t5);
- // Need to perform float-to-int conversion.
- // Test whether exponent equal to 0x7FF (infinity or NaN).
-
- __ mfc1(t3, f1); // Move exponent word of double to t3 (as raw bits).
- __ li(t1, Operand(0x7FF00000));
- __ And(t3, t3, Operand(t1));
- __ Branch(USE_DELAY_SLOT, &done, eq, t3, Operand(t1));
- __ mov(t3, zero_reg); // In delay slot.
-
- // Not infinity or NaN simply convert to int.
- if (IsElementTypeSigned(array_type)) {
- __ trunc_w_d(f0, f0);
- __ mfc1(t3, f0);
- } else {
- __ Trunc_uw_d(f0, t3);
- }
-
- // t3: HeapNumber converted to integer
- __ bind(&done);
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ addu(t8, a3, t0);
__ sb(t3, MemOperand(t8, 0));
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ sll(t8, t0, 1);
__ addu(t8, a3, t8);
__ sh(t3, MemOperand(t8, 0));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ sll(t8, t0, 2);
__ addu(t8, a3, t8);
__ sw(t3, MemOperand(t8, 0));
break;
- default:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3989,7 +3997,7 @@
__ lw(t3, FieldMemOperand(value, HeapNumber::kExponentOffset));
__ lw(t4, FieldMemOperand(value, HeapNumber::kMantissaOffset));
- if (array_type == kExternalFloatArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
Label done, nan_or_infinity_or_zero;
static const int kMantissaInHiWordShift =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
@@ -4054,7 +4062,7 @@
__ srl(t4, t4, kMantissaInLoWordShift);
__ or_(t3, t6, t4);
__ Branch(&done);
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
__ sll(t8, t0, 3);
__ addu(t8, a3, t8);
// t8: effective address of destination element.
@@ -4062,7 +4070,7 @@
__ sw(t3, MemOperand(t8, Register::kSizeInBytes));
__ Ret();
} else {
- bool is_signed_type = IsElementTypeSigned(array_type);
+ bool is_signed_type = IsElementTypeSigned(elements_kind);
int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
@@ -4119,25 +4127,31 @@
// Result is in t3.
// This switch block should be exactly the same as above (FPU mode).
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ addu(t8, a3, t0);
__ sb(t3, MemOperand(t8, 0));
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ sll(t8, t0, 1);
__ addu(t8, a3, t8);
__ sh(t3, MemOperand(t8, 0));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ sll(t8, t0, 2);
__ addu(t8, a3, t8);
__ sw(t3, MemOperand(t8, 0));
break;
- default:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4215,6 +4229,75 @@
}
+void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ Label miss_force_generic, slow_allocate_heapnumber;
+
+ Register key_reg = a0;
+ Register receiver_reg = a1;
+ Register elements_reg = a2;
+ Register heap_number_reg = a2;
+ Register indexed_double_offset = a3;
+ Register scratch = t0;
+ Register scratch2 = t1;
+ Register scratch3 = t2;
+ Register heap_number_map = t3;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+ // Get the elements array.
+ __ lw(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+ // Check that the key is within bounds.
+ __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+
+  // Load the upper word of the double in the fixed array and test for
+  // the hole NaN.
+ __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+ __ Addu(indexed_double_offset, elements_reg, Operand(scratch2));
+ uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
+ __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32));
+
+  // Not the hole: allocate a heap number and copy the double value into it.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
+ heap_number_map, &slow_allocate_heapnumber);
+
+  // No need to reload the upper 32 bits of the double; they are already
+  // in scratch.
+ __ sw(scratch, FieldMemOperand(heap_number_reg,
+ HeapNumber::kExponentOffset));
+ __ lw(scratch, FieldMemOperand(indexed_double_offset,
+ FixedArray::kHeaderSize));
+ __ sw(scratch, FieldMemOperand(heap_number_reg,
+ HeapNumber::kMantissaOffset));
+
+ __ mov(v0, heap_number_reg);
+ __ Ret();
+
+ __ bind(&slow_allocate_heapnumber);
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&miss_force_generic);
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
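
GenerateLoadFastDoubleElement above distinguishes a stored double from the
hole by comparing only the upper 32 bits of the slot against kHoleNanUpper32.
A standalone sketch of that check; the hole bit pattern below is a
placeholder, the real kHoleNan* constants live in objects.h:

    // sketch: the hole test performed by the fast-double load stub above.
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    // Placeholder standing in for kHoleNanUpper32/kHoleNanLower32; the only
    // requirement is a NaN pattern no canonicalized user double can have.
    static const uint32_t kHoleUpper32Model = 0x7FFFFFFFu;
    static const uint32_t kHoleLower32Model = 0xFFFFFFFFu;

    static bool IsTheHoleModel(uint64_t slot_bits) {
      // Like the stub, only the upper word is inspected: stores rewrite every
      // user NaN to a canonical pattern with a different upper word (see
      // GenerateStoreFastDoubleElement), so no false positives are possible.
      return static_cast<uint32_t>(slot_bits >> 32) == kHoleUpper32Model;
    }

    int main() {
      uint64_t hole = (static_cast<uint64_t>(kHoleUpper32Model) << 32) |
                      kHoleLower32Model;
      double value = 1.5;
      uint64_t value_bits;
      memcpy(&value_bits, &value, sizeof(value_bits));
      printf("hole -> %d, 1.5 -> %d\n",
             IsTheHoleModel(hole), IsTheHoleModel(value_bits));
      return 0;
    }
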
void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
@@ -4239,7 +4322,7 @@
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
- __ JumpIfNotSmi(a0, &miss_force_generic);
+ __ JumpIfNotSmi(key_reg, &miss_force_generic);
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ lw(elements_reg,
@@ -4278,6 +4361,126 @@
}
+void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
+ MacroAssembler* masm,
+ bool is_js_array) {
+ // ----------- S t a t e -------------
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -- ra : return address
+ // -- a3 : scratch
+ // -- t0 : scratch (elements_reg)
+ // -- t1 : scratch (mantissa_reg)
+ // -- t2 : scratch (exponent_reg)
+ // -- t3 : scratch4
+ // -----------------------------------
+ Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+
+ Register value_reg = a0;
+ Register key_reg = a1;
+ Register receiver_reg = a2;
+ Register scratch = a3;
+ Register elements_reg = t0;
+ Register mantissa_reg = t1;
+ Register exponent_reg = t2;
+ Register scratch4 = t3;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller not to be a smi.
+ __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+ __ lw(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+ // Check that the key is within bounds.
+ if (is_js_array) {
+ __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ } else {
+ __ lw(scratch,
+ FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ }
+  // Compare smis; an unsigned compare catches both negative and
+  // out-of-bound indices.
+ __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+
+ // Handle smi values specially.
+ __ JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+ __ CheckMap(value_reg,
+ scratch,
+ masm->isolate()->factory()->heap_number_map(),
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+  // Check for NaN: all NaN values have a value greater (signed) than
+  // 0x7FF00000 in the exponent word.
+ __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+ __ lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+ __ Branch(&maybe_nan, ge, exponent_reg, Operand(scratch));
+
+ __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+ __ bind(&have_double_value);
+ __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+ __ Addu(scratch, elements_reg, Operand(scratch4));
+ __ sw(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ sw(exponent_reg, FieldMemOperand(scratch, offset));
+ __ Ret();
+
+ __ bind(&maybe_nan);
+  // Could be NaN or Infinity. If the fraction is non-zero, it's NaN;
+  // otherwise it's Infinity, and the non-NaN code path applies.
+ __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+ __ Branch(&is_nan, gt, exponent_reg, Operand(scratch));
+ __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+ __ Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
+
+ __ bind(&is_nan);
+ // Load canonical NaN for storing into the double array.
+ uint64_t nan_int64 = BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+ __ li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+ __ jmp(&have_double_value);
+
+ __ bind(&smi_value);
+ __ Addu(scratch, elements_reg,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+ __ Addu(scratch, scratch, scratch4);
+  // scratch now holds the effective address of the double element.
+
+ FloatingPointHelper::Destination destination;
+ if (CpuFeatures::IsSupported(FPU)) {
+ destination = FloatingPointHelper::kFPURegisters;
+ } else {
+ destination = FloatingPointHelper::kCoreRegisters;
+ }
+ __ SmiUntag(value_reg, value_reg);
+ FloatingPointHelper::ConvertIntToDouble(
+ masm, value_reg, destination,
+ f0, mantissa_reg, exponent_reg, // These are: double_dst, dst1, dst2.
+ scratch4, f2); // These are: scratch2, single_scratch.
+ if (destination == FloatingPointHelper::kFPURegisters) {
+ CpuFeatures::Scope scope(FPU);
+ __ sdc1(f0, MemOperand(scratch, 0));
+ } else {
+ __ sw(mantissa_reg, MemOperand(scratch, 0));
+ __ sw(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
+ }
+ __ Ret();
+
+  // Handle a store cache miss: replace the IC with the generic stub.
+ __ bind(&miss_force_generic);
+ Handle<Code> ic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
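
The maybe_nan/is_nan paths in GenerateStoreFastDoubleElement above keep the
hole encoding unambiguous: any user-supplied NaN is rewritten to the
canonical non-hole NaN before it reaches the array. A sketch of that
canonicalization; both bit patterns here are illustrative stand-ins:

    // sketch: NaN canonicalization as done by the store stub above.
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    // Stand-ins for the values from objects.h and
    // FixedDoubleArray::canonical_not_the_hole_nan_as_double().
    static const uint64_t kHoleBitsModel = 0x7FFFFFFFFFFFFFFFull;
    static const uint64_t kCanonicalNanModel = 0x7FF8000000000000ull;

    static uint64_t CanonicalizeForStore(double value) {
      if (value != value) return kCanonicalNanModel;  // any NaN -> canonical
      uint64_t bits;
      memcpy(&bits, &value, sizeof(bits));
      return bits;  // non-NaN doubles (including Infinity) stored verbatim
    }

    int main() {
      double payload_nan;
      uint64_t raw = 0x7FFFFFFF12345678ull;  // a NaN with an odd payload
      memcpy(&payload_nan, &raw, sizeof(payload_nan));
      printf("stored == hole? %d\n",
             CanonicalizeForStore(payload_nan) == kHoleBitsModel);  // 0
      return 0;
    }
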
#undef __
} } // namespace v8::internal
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index 3a03535..bad0800 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -1243,13 +1243,17 @@
const kFrameDetailsSourcePositionIndex = 5;
const kFrameDetailsConstructCallIndex = 6;
const kFrameDetailsAtReturnIndex = 7;
-const kFrameDetailsDebuggerFrameIndex = 8;
+const kFrameDetailsFlagsIndex = 8;
const kFrameDetailsFirstDynamicIndex = 9;
const kFrameDetailsNameIndex = 0;
const kFrameDetailsValueIndex = 1;
const kFrameDetailsNameValueSize = 2;
+const kFrameDetailsFlagDebuggerFrameMask = 1 << 0;
+const kFrameDetailsFlagOptimizedFrameMask = 1 << 1;
+const kFrameDetailsFlagInlinedFrameIndexMask = 7 << 2;
+
/**
 * Wrapper for the frame details information retrieved from the VM. The frame
 * details from the VM are an array with the following content. See runtime.cc
@@ -1262,7 +1266,7 @@
* 5: Source position
* 6: Construct call
* 7: Is at return
- * 8: Debugger frame
+ * 8: Flags (debugger frame, optimized frame, inlined frame index)
* Arguments name, value
* Locals name, value
* Return value if any
@@ -1308,7 +1312,27 @@
FrameDetails.prototype.isDebuggerFrame = function() {
%CheckExecutionState(this.break_id_);
- return this.details_[kFrameDetailsDebuggerFrameIndex];
+ var f = kFrameDetailsFlagDebuggerFrameMask;
+ return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
+}
+
+
+FrameDetails.prototype.isOptimizedFrame = function() {
+ %CheckExecutionState(this.break_id_);
+ var f = kFrameDetailsFlagOptimizedFrameMask;
+ return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
+}
+
+
+FrameDetails.prototype.isInlinedFrame = function() {
+ return this.inlinedFrameIndex() > 0;
+}
+
+
+FrameDetails.prototype.inlinedFrameIndex = function() {
+ %CheckExecutionState(this.break_id_);
+ var f = kFrameDetailsFlagInlinedFrameIndexMask;
+  return (this.details_[kFrameDetailsFlagsIndex] & f) >> 2;
}
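
The new flags word packs three facts into one value: bit 0 marks a debugger
frame, bit 1 an optimized frame, and bits 2..4 hold the inlined frame index.
A quick C++ sketch of the decoding the accessors above perform:

    // sketch: decoding the kFrameDetailsFlagsIndex word defined above.
    #include <stdio.h>

    static const int kDebuggerFrameMask = 1 << 0;
    static const int kOptimizedFrameMask = 1 << 1;
    static const int kInlinedFrameIndexMask = 7 << 2;

    int main() {
      int flags = kOptimizedFrameMask | (3 << 2);  // optimized, inline index 3
      printf("debugger=%d optimized=%d inlined_index=%d\n",
             (flags & kDebuggerFrameMask) == kDebuggerFrameMask,
             (flags & kOptimizedFrameMask) == kOptimizedFrameMask,
             (flags & kInlinedFrameIndexMask) >> 2);
      return 0;
    }
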
@@ -1447,6 +1471,21 @@
};
+FrameMirror.prototype.isOptimizedFrame = function() {
+ return this.details_.isOptimizedFrame();
+};
+
+
+FrameMirror.prototype.isInlinedFrame = function() {
+ return this.details_.isInlinedFrame();
+};
+
+
+FrameMirror.prototype.inlinedFrameIndex = function() {
+ return this.details_.inlinedFrameIndex();
+};
+
+
FrameMirror.prototype.argumentCount = function() {
return this.details_.argumentCount();
};
@@ -1536,8 +1575,12 @@
FrameMirror.prototype.evaluate = function(source, disable_break, opt_context_object) {
- var result = %DebugEvaluate(this.break_id_, this.details_.frameId(),
- source, Boolean(disable_break), opt_context_object);
+ var result = %DebugEvaluate(this.break_id_,
+ this.details_.frameId(),
+ this.details_.inlinedFrameIndex(),
+ source,
+ Boolean(disable_break),
+ opt_context_object);
return MakeMirror(result);
};
@@ -1562,8 +1605,10 @@
// Try to find the function as a property in the receiver. Include the
// prototype chain in the lookup.
var property = GetUndefinedMirror();
- if (!receiver.isUndefined()) {
- for (var r = receiver; !r.isNull() && property.isUndefined(); r = r.protoObject()) {
+ if (receiver.isObject()) {
+ for (var r = receiver;
+ !r.isNull() && property.isUndefined();
+ r = r.protoObject()) {
property = r.lookupProperty(func);
}
}
@@ -1690,6 +1735,7 @@
this.break_id_ = frame.break_id_;
this.details_ = %GetScopeDetails(frame.break_id_,
frame.details_.frameId(),
+ frame.details_.inlinedFrameIndex(),
index);
}
diff --git a/src/misc-intrinsics.h b/src/misc-intrinsics.h
new file mode 100644
index 0000000..5393de2
--- /dev/null
+++ b/src/misc-intrinsics.h
@@ -0,0 +1,89 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MISC_INTRINSICS_H_
+#define V8_MISC_INTRINSICS_H_
+
+#include "../include/v8.h"
+#include "globals.h"
+
+namespace v8 {
+namespace internal {
+
+// Returns the index of the leading 1 bit, counting the least significant bit at
+// index 0. (1 << IntegerLog2(x)) is a mask for the most significant bit of x.
+// Result is undefined if input is zero.
+int IntegerLog2(uint32_t value);
+
+#if defined(__GNUC__)
+
+inline int IntegerLog2(uint32_t value) {
+ return 31 - __builtin_clz(value);
+}
+
+#elif defined(_MSC_VER)
+
+#pragma intrinsic(_BitScanReverse)
+
+inline int IntegerLog2(uint32_t value) {
+ unsigned long result; // NOLINT: MSVC intrinsic demands this type.
+ _BitScanReverse(&result, value);
+ return result;
+}
+
+#else
+
+// Default version using regular operations. Code taken from:
+// http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog
+inline int IntegerLog2(uint32_t value) {
+ int result, shift;
+
+ shift = (value > 0xFFFF) << 4;
+ value >>= shift;
+ result = shift;
+
+ shift = (value > 0xFF) << 3;
+ value >>= shift;
+ result |= shift;
+
+ shift = (value > 0xF) << 2;
+ value >>= shift;
+ result |= shift;
+
+ shift = (value > 0x3) << 1;
+ value >>= shift;
+ result |= shift;
+
+ result |= (value >> 1);
+
+ return result;
+}
+#endif
+
+} } // namespace v8::internal
+
+#endif // V8_MISC_INTRINSICS_H_
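
The branchless fallback above computes floor(log2(value)) by folding the
result in one bit at a time. A throwaway cross-check against a naive shift
loop (the fallback is duplicated so the sketch compiles standalone):

    // sketch: verifies the portable IntegerLog2 fallback above.
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static int IntegerLog2Model(uint32_t value) {  // copy of the fallback
      int result, shift;
      shift = (value > 0xFFFF) << 4;  value >>= shift;  result = shift;
      shift = (value > 0xFF) << 3;    value >>= shift;  result |= shift;
      shift = (value > 0xF) << 2;     value >>= shift;  result |= shift;
      shift = (value > 0x3) << 1;     value >>= shift;  result |= shift;
      result |= (value >> 1);
      return result;
    }

    static int NaiveLog2(uint32_t value) {
      int result = -1;
      while (value != 0) { value >>= 1; ++result; }
      return result;
    }

    int main() {
      for (uint32_t v = 1; v < (1u << 20); ++v) {
        assert(IntegerLog2Model(v) == NaiveLog2(v));
      }
      printf("fallback agrees with the naive loop on [1, 2^20)\n");
      return 0;
    }
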
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index dd49116..c5ce12f 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -136,6 +136,7 @@
return true;
}
int raw_size() { return raw_size_; }
+
private:
i::List<char> data_;
int raw_size_;
@@ -265,14 +266,39 @@
private:
i::ScopedVector<char>* output_;
};
+
+
+class BZip2Decompressor : public StartupDataDecompressor {
+ public:
+ virtual ~BZip2Decompressor() { }
+
+ protected:
+ virtual int DecompressData(char* raw_data,
+ int* raw_data_size,
+ const char* compressed_data,
+ int compressed_data_size) {
+ ASSERT_EQ(StartupData::kBZip2,
+ V8::GetCompressedStartupDataAlgorithm());
+ unsigned int decompressed_size = *raw_data_size;
+ int result =
+ BZ2_bzBuffToBuffDecompress(raw_data,
+ &decompressed_size,
+ const_cast<char*>(compressed_data),
+ compressed_data_size,
+ 0, 1);
+ if (result == BZ_OK) {
+ *raw_data_size = decompressed_size;
+ }
+ return result;
+ }
+};
#endif
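
BZip2Decompressor above is a thin wrapper over libbz2's one-shot buffer API.
A hedged usage sketch of the same pair of calls outside V8 (link with -lbz2;
the input string and buffer sizes are arbitrary):

    // sketch: round trip through the one-shot bzip2 buffer API used above.
    #include <bzlib.h>
    #include <stdio.h>
    #include <string.h>

    int main() {
      char raw[] = "function add(a, b) { return a + b; }";
      char compressed[1024];  // bzlib asks for >= srcLen * 1.01 + 600 bytes
      unsigned int compressed_size = sizeof(compressed);
      if (BZ2_bzBuffToBuffCompress(compressed, &compressed_size,
                                   raw, sizeof(raw),
                                   /* blockSize100k */ 1, /* verbosity */ 0,
                                   /* workFactor */ 0) != BZ_OK) {
        return 1;
      }
      char decompressed[sizeof(raw)];
      unsigned int decompressed_size = sizeof(decompressed);
      if (BZ2_bzBuffToBuffDecompress(decompressed, &decompressed_size,
                                     compressed, compressed_size,
                                     /* small */ 0, /* verbosity */ 0) != BZ_OK) {
        return 1;
      }
      printf("raw=%u compressed=%u restored=%u equal=%d\n",
             (unsigned) sizeof(raw), compressed_size, decompressed_size,
             memcmp(raw, decompressed, sizeof(raw)) == 0);
      return 0;
    }
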
int main(int argc, char** argv) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
-#endif
+
// Print the usage if an error occurs when parsing the command line
// flags or if the help flag is set.
int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
@@ -281,6 +307,14 @@
i::FlagList::PrintHelp();
return !i::FLAG_help;
}
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+ BZip2Decompressor natives_decompressor;
+ int bz2_result = natives_decompressor.Decompress();
+ if (bz2_result != BZ_OK) {
+ fprintf(stderr, "bzip error code: %d\n", bz2_result);
+ exit(1);
+ }
+#endif
i::Serializer::Enable();
Persistent<Context> context = v8::Context::New();
ASSERT(!context.IsEmpty());
diff --git a/src/natives.h b/src/natives.h
index 92f0d90..5f34420 100644
--- a/src/natives.h
+++ b/src/natives.h
@@ -36,7 +36,7 @@
int index);
enum NativeType {
- CORE, EXPERIMENTAL, D8, I18N
+ CORE, EXPERIMENTAL, D8, TEST
};
template <NativeType type>
@@ -52,8 +52,11 @@
// non-debugger scripts have an index in the interval [GetDebuggerCount(),
// GetNativesCount()).
static int GetIndex(const char* name);
- static Vector<const char> GetScriptSource(int index);
+ static int GetRawScriptsSize();
+ static Vector<const char> GetRawScriptSource(int index);
static Vector<const char> GetScriptName(int index);
+ static Vector<const byte> GetScriptsSource();
+ static void SetRawScriptsSource(Vector<const char> raw_source);
};
typedef NativesCollection<CORE> Natives;
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 76c520e..2963231 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -88,6 +88,9 @@
case FIXED_ARRAY_TYPE:
FixedArray::cast(this)->FixedArrayVerify();
break;
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ FixedDoubleArray::cast(this)->FixedDoubleArrayVerify();
+ break;
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayVerify();
break;
@@ -289,6 +292,12 @@
}
+void PolymorphicCodeCache::PolymorphicCodeCacheVerify() {
+ VerifyHeapPointer(cache());
+ ASSERT(cache()->IsUndefined() || cache()->IsPolymorphicCodeCacheHashTable());
+}
+
+
void FixedArray::FixedArrayVerify() {
for (int i = 0; i < length(); i++) {
Object* e = get(i);
@@ -301,6 +310,18 @@
}
+void FixedDoubleArray::FixedDoubleArrayVerify() {
+ for (int i = 0; i < length(); i++) {
+ if (!is_the_hole(i)) {
+ double value = get(i);
+ ASSERT(!isnan(value) ||
+ (BitCast<uint64_t>(value) ==
+ BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())));
+ }
+ }
+}
+
+
void JSValue::JSValueVerify() {
Object* v = value();
if (v->IsHeapObject()) {
@@ -426,7 +447,9 @@
void JSArray::JSArrayVerify() {
JSObjectVerify();
ASSERT(length()->IsNumber() || length()->IsUndefined());
- ASSERT(elements()->IsUndefined() || elements()->IsFixedArray());
+ ASSERT(elements()->IsUndefined() ||
+ elements()->IsFixedArray() ||
+ elements()->IsFixedDoubleArray());
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index aa5fc86..70f6267 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -217,6 +217,10 @@
String::cast(this)->IsTwoByteRepresentation();
}
+bool Object::HasValidElements() {
+ // Dictionary is covered under FixedArray.
+ return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray();
+}
StringShape::StringShape(String* str)
: type_(str->map()->instance_type()) {
@@ -446,9 +450,27 @@
}
+bool Object::IsJSReceiver() {
+ return IsHeapObject() &&
+ HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
+}
+
+
bool Object::IsJSObject() {
- return IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
+ return IsJSReceiver() && !IsJSProxy();
+}
+
+
+bool Object::IsJSProxy() {
+ return Object::IsHeapObject() &&
+ (HeapObject::cast(this)->map()->instance_type() == JS_PROXY_TYPE ||
+ HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE);
+}
+
+
+bool Object::IsJSFunctionProxy() {
+ return Object::IsHeapObject() &&
+ HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE;
}
@@ -471,6 +493,13 @@
}
+bool Object::IsFixedDoubleArray() {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() ==
+ FIXED_DOUBLE_ARRAY_TYPE;
+}
+
+
bool Object::IsDescriptorArray() {
return IsFixedArray();
}
@@ -505,22 +534,17 @@
bool Object::IsContext() {
if (Object::IsHeapObject()) {
- Heap* heap = HeapObject::cast(this)->GetHeap();
- return (HeapObject::cast(this)->map() == heap->context_map() ||
- HeapObject::cast(this)->map() == heap->catch_context_map() ||
- HeapObject::cast(this)->map() == heap->global_context_map());
+ Map* map = HeapObject::cast(this)->map();
+ Heap* heap = map->GetHeap();
+ return (map == heap->function_context_map() ||
+ map == heap->catch_context_map() ||
+ map == heap->with_context_map() ||
+ map == heap->global_context_map());
}
return false;
}
-bool Object::IsCatchContext() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->catch_context_map();
-}
-
-
bool Object::IsGlobalContext() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map() ==
@@ -584,12 +608,6 @@
}
-bool Object::IsJSProxy() {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() == JS_PROXY_TYPE;
-}
-
-
bool Object::IsForeign() {
return Object::IsHeapObject()
&& HeapObject::cast(this)->map()->instance_type() == FOREIGN_TYPE;
@@ -627,8 +645,8 @@
bool Object::IsDictionary() {
- return IsHashTable() && this !=
- HeapObject::cast(this)->GetHeap()->symbol_table();
+ return IsHashTable() &&
+ this != HeapObject::cast(this)->GetHeap()->symbol_table();
}
@@ -676,6 +694,11 @@
}
+bool Object::IsPolymorphicCodeCacheHashTable() {
+ return IsHashTable();
+}
+
+
bool Object::IsMapCache() {
return IsHashTable();
}
@@ -867,7 +890,7 @@
#else // V8_TARGET_ARCH_MIPS
// Prevent gcc from using load-double (mips ldc1) on (possibly)
// non-64-bit aligned HeapNumber::value.
- static inline double read_double_field(HeapNumber* p, int offset) {
+ static inline double read_double_field(void* p, int offset) {
union conversion {
double d;
uint32_t u[2];
@@ -886,7 +909,7 @@
#else // V8_TARGET_ARCH_MIPS
// Prevent gcc from using store-double (mips sdc1) on (possibly)
// non-64-bit aligned HeapNumber::value.
- static inline void write_double_field(HeapNumber* p, int offset,
+ static inline void write_double_field(void* p, int offset,
double value) {
union conversion {
double d;
@@ -1301,8 +1324,7 @@
HeapObject* JSObject::elements() {
Object* array = READ_FIELD(this, kElementsOffset);
- // In the assert below Dictionary is covered under FixedArray.
- ASSERT(array->IsFixedArray() || array->IsExternalArray());
+ ASSERT(array->HasValidElements());
return reinterpret_cast<HeapObject*>(array);
}
@@ -1311,8 +1333,7 @@
ASSERT(map()->has_fast_elements() ==
(value->map() == GetHeap()->fixed_array_map() ||
value->map() == GetHeap()->fixed_cow_array_map()));
- // In the assert below Dictionary is covered under FixedArray.
- ASSERT(value->IsFixedArray() || value->IsExternalArray());
+ ASSERT(value->HasValidElements());
WRITE_FIELD(this, kElementsOffset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, mode);
}
@@ -1560,6 +1581,12 @@
}
+FixedArrayBase* FixedArrayBase::cast(Object* object) {
+ ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray());
+ return reinterpret_cast<FixedArrayBase*>(object);
+}
+
+
Object* FixedArray::get(int index) {
ASSERT(index >= 0 && index < this->length());
return READ_FIELD(this, kHeaderSize + index * kPointerSize);
@@ -1583,6 +1610,105 @@
}
+inline bool FixedDoubleArray::is_the_hole_nan(double value) {
+ return BitCast<uint64_t, double>(value) == kHoleNanInt64;
+}
+
+
+inline double FixedDoubleArray::hole_nan_as_double() {
+ return BitCast<double, uint64_t>(kHoleNanInt64);
+}
+
+
+inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() {
+ ASSERT(BitCast<uint64_t>(OS::nan_value()) != kHoleNanInt64);
+ ASSERT((BitCast<uint64_t>(OS::nan_value()) >> 32) != kHoleNanUpper32);
+ return OS::nan_value();
+}
+
+
+double FixedDoubleArray::get(int index) {
+ ASSERT(map() != HEAP->fixed_cow_array_map() &&
+ map() != HEAP->fixed_array_map());
+ ASSERT(index >= 0 && index < this->length());
+ double result = READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
+ ASSERT(!is_the_hole_nan(result));
+ return result;
+}
+
+
+void FixedDoubleArray::set(int index, double value) {
+ ASSERT(map() != HEAP->fixed_cow_array_map() &&
+ map() != HEAP->fixed_array_map());
+ int offset = kHeaderSize + index * kDoubleSize;
+ if (isnan(value)) value = canonical_not_the_hole_nan_as_double();
+ WRITE_DOUBLE_FIELD(this, offset, value);
+}
+
+
+void FixedDoubleArray::set_the_hole(int index) {
+ ASSERT(map() != HEAP->fixed_cow_array_map() &&
+ map() != HEAP->fixed_array_map());
+ int offset = kHeaderSize + index * kDoubleSize;
+ WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+}
+
+
+bool FixedDoubleArray::is_the_hole(int index) {
+ int offset = kHeaderSize + index * kDoubleSize;
+ return is_the_hole_nan(READ_DOUBLE_FIELD(this, offset));
+}
+
+
+void FixedDoubleArray::Initialize(FixedDoubleArray* from) {
+ int old_length = from->length();
+ ASSERT(old_length < length());
+ OS::MemCopy(FIELD_ADDR(this, kHeaderSize),
+ FIELD_ADDR(from, kHeaderSize),
+ old_length * kDoubleSize);
+ int offset = kHeaderSize + old_length * kDoubleSize;
+ for (int current = from->length(); current < length(); ++current) {
+ WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+ offset += kDoubleSize;
+ }
+}
+
+
+void FixedDoubleArray::Initialize(FixedArray* from) {
+ int old_length = from->length();
+ ASSERT(old_length < length());
+ for (int i = 0; i < old_length; i++) {
+ Object* hole_or_object = from->get(i);
+ if (hole_or_object->IsTheHole()) {
+ set_the_hole(i);
+ } else {
+ set(i, hole_or_object->Number());
+ }
+ }
+ int offset = kHeaderSize + old_length * kDoubleSize;
+ for (int current = from->length(); current < length(); ++current) {
+ WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+ offset += kDoubleSize;
+ }
+}
+
+
+void FixedDoubleArray::Initialize(NumberDictionary* from) {
+ int offset = kHeaderSize;
+ for (int current = 0; current < length(); ++current) {
+ WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+ offset += kDoubleSize;
+ }
+ for (int i = 0; i < from->Capacity(); i++) {
+ Object* key = from->KeyAt(i);
+ if (key->IsNumber()) {
+ uint32_t entry = static_cast<uint32_t>(key->Number());
+ set(entry, from->ValueAt(i)->Number());
+ }
+ }
+}
+
+
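
All three Initialize overloads above follow one recipe: fill every slot with
the hole, then copy over whatever the source representation actually defines.
A compact model of the dictionary case (std::map stands in for
NumberDictionary, and -1.0 for hole_nan_as_double()):

    // sketch: models FixedDoubleArray::Initialize(NumberDictionary*) above.
    #include <stdint.h>
    #include <cstdio>
    #include <map>
    #include <vector>

    int main() {
      const double kHoleModel = -1.0;  // stand-in for the hole NaN
      std::map<uint32_t, double> dictionary;  // stand-in for NumberDictionary
      dictionary[1] = 1.5;
      dictionary[4] = 2.5;

      std::vector<double> elements(6, kHoleModel);  // step 1: all holes
      for (std::map<uint32_t, double>::const_iterator it = dictionary.begin();
           it != dictionary.end(); ++it) {
        elements[it->first] = it->second;           // step 2: copy entries
      }
      for (size_t i = 0; i < elements.size(); ++i) {
        if (elements[i] == kHoleModel) {
          std::printf("%u: <hole>\n", (unsigned) i);
        } else {
          std::printf("%u: %g\n", (unsigned) i, elements[i]);
        }
      }
      return 0;
    }
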
WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
if (GetHeap()->InNewSpace(this)) return SKIP_WRITE_BARRIER;
return UPDATE_WRITE_BARRIER;
@@ -1883,6 +2009,7 @@
CAST_ACCESSOR(FixedArray)
+CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
@@ -1891,6 +2018,7 @@
CAST_ACCESSOR(NormalizedMapCache)
CAST_ACCESSOR(CompilationCacheTable)
CAST_ACCESSOR(CodeCacheHashTable)
+CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
CAST_ACCESSOR(MapCache)
CAST_ACCESSOR(String)
CAST_ACCESSOR(SeqString)
@@ -1900,6 +2028,7 @@
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalAsciiString)
CAST_ACCESSOR(ExternalTwoByteString)
+CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(Smi)
CAST_ACCESSOR(HeapObject)
@@ -1917,6 +2046,7 @@
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSRegExp)
CAST_ACCESSOR(JSProxy)
+CAST_ACCESSOR(JSFunctionProxy)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(ExternalArray)
@@ -1944,9 +2074,11 @@
}
-SMI_ACCESSORS(FixedArray, length, kLengthOffset)
+SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
SMI_ACCESSORS(ByteArray, length, kLengthOffset)
+// TODO(1493): Investigate if it's possible to s/INT/SMI/ here (and
+// subsequently unify H{Fixed,External}ArrayLength).
INT_ACCESSORS(ExternalArray, length, kLengthOffset)
@@ -2403,6 +2535,10 @@
return SeqTwoByteString::SizeFor(
reinterpret_cast<SeqTwoByteString*>(this)->length());
}
+ if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
+ return FixedDoubleArray::SizeFor(
+ reinterpret_cast<FixedDoubleArray*>(this)->length());
+ }
ASSERT(instance_type == CODE_TYPE);
return reinterpret_cast<Code*>(this)->CodeSize();
}
@@ -2724,19 +2860,6 @@
}
-ExternalArrayType Code::external_array_type() {
- ASSERT(is_keyed_load_stub() || is_keyed_store_stub());
- byte type = READ_BYTE_FIELD(this, kExternalArrayTypeOffset);
- return static_cast<ExternalArrayType>(type);
-}
-
-
-void Code::set_external_array_type(ExternalArrayType value) {
- ASSERT(is_keyed_load_stub() || is_keyed_store_stub());
- WRITE_BYTE_FIELD(this, kExternalArrayTypeOffset, value);
-}
-
-
byte Code::unary_op_type() {
ASSERT(is_unary_op_stub());
return READ_BYTE_FIELD(this, kUnaryOpTypeOffset);
@@ -2947,7 +3070,7 @@
void Map::set_prototype(Object* value, WriteBarrierMode mode) {
- ASSERT(value->IsNull() || value->IsJSObject());
+ ASSERT(value->IsNull() || value->IsJSReceiver());
WRITE_FIELD(this, kPrototypeOffset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, mode);
}
@@ -2960,21 +3083,34 @@
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
Map* new_map = Map::cast(obj);
- new_map->set_has_fast_elements(true);
- isolate()->counters()->map_slow_to_fast_elements()->Increment();
+ new_map->set_elements_kind(JSObject::FAST_ELEMENTS);
+ isolate()->counters()->map_to_fast_elements()->Increment();
return new_map;
}
-MaybeObject* Map::GetSlowElementsMap() {
- if (!has_fast_elements()) return this;
+MaybeObject* Map::GetFastDoubleElementsMap() {
+ if (has_fast_double_elements()) return this;
Object* obj;
{ MaybeObject* maybe_obj = CopyDropTransitions();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
Map* new_map = Map::cast(obj);
- new_map->set_has_fast_elements(false);
- isolate()->counters()->map_fast_to_slow_elements()->Increment();
+ new_map->set_elements_kind(JSObject::FAST_DOUBLE_ELEMENTS);
+ isolate()->counters()->map_to_fast_double_elements()->Increment();
+ return new_map;
+}
+
+
+MaybeObject* Map::GetSlowElementsMap() {
+ if (!has_fast_elements() && !has_fast_double_elements()) return this;
+ Object* obj;
+ { MaybeObject* maybe_obj = CopyDropTransitions();
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ Map* new_map = Map::cast(obj);
+ new_map->set_elements_kind(JSObject::DICTIONARY_ELEMENTS);
+ isolate()->counters()->map_to_slow_elements()->Increment();
return new_map;
}
@@ -3113,6 +3249,8 @@
ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
kAccessCheckInfoOffset)
ACCESSORS(FunctionTemplateInfo, flag, Smi, kFlagOffset)
+ACCESSORS(FunctionTemplateInfo, prototype_attributes, Smi,
+ kPrototypeAttributesOffset)
ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
@@ -3171,13 +3309,22 @@
kIsExpressionBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
kIsTopLevelBit)
-BOOL_GETTER(SharedFunctionInfo, compiler_hints,
+BOOL_GETTER(SharedFunctionInfo,
+ compiler_hints,
has_only_simple_this_property_assignments,
kHasOnlySimpleThisPropertyAssignments)
BOOL_ACCESSORS(SharedFunctionInfo,
compiler_hints,
allows_lazy_compilation,
kAllowLazyCompilation)
+BOOL_ACCESSORS(SharedFunctionInfo,
+ compiler_hints,
+ uses_arguments,
+ kUsesArguments)
+BOOL_ACCESSORS(SharedFunctionInfo,
+ compiler_hints,
+ has_duplicate_parameters,
+ kHasDuplicateParameters)
#if V8_HOST_ARCH_32_BIT
@@ -3261,18 +3408,10 @@
}
-bool SharedFunctionInfo::live_objects_may_exist() {
- return (compiler_hints() & (1 << kLiveObjectsMayExist)) != 0;
-}
-
-
-void SharedFunctionInfo::set_live_objects_may_exist(bool value) {
- if (value) {
- set_compiler_hints(compiler_hints() | (1 << kLiveObjectsMayExist));
- } else {
- set_compiler_hints(compiler_hints() & ~(1 << kLiveObjectsMayExist));
- }
-}
+BOOL_ACCESSORS(SharedFunctionInfo,
+ compiler_hints,
+ live_objects_may_exist,
+ kLiveObjectsMayExist)
bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
@@ -3280,9 +3419,10 @@
}
-bool SharedFunctionInfo::optimization_disabled() {
- return BooleanBit::get(compiler_hints(), kOptimizationDisabled);
-}
+BOOL_GETTER(SharedFunctionInfo,
+ compiler_hints,
+ optimization_disabled,
+ kOptimizationDisabled)
void SharedFunctionInfo::set_optimization_disabled(bool disable) {
@@ -3297,33 +3437,20 @@
}
-bool SharedFunctionInfo::strict_mode() {
- return BooleanBit::get(compiler_hints(), kStrictModeFunction);
-}
-
-
-void SharedFunctionInfo::set_strict_mode(bool value) {
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kStrictModeFunction,
- value));
-}
-
-
-bool SharedFunctionInfo::es5_native() {
- return BooleanBit::get(compiler_hints(), kES5Native);
-}
-
-
-void SharedFunctionInfo::set_es5_native(bool value) {
- set_compiler_hints(BooleanBit::set(compiler_hints(),
- kES5Native,
- value));
-}
-
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, strict_mode,
+ kStrictModeFunction)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
+ name_should_print_as_anonymous,
+ kNameShouldPrintAsAnonymous)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, bound, kBoundFunction)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous)
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
+ACCESSORS(PolymorphicCodeCache, cache, Object, kCacheOffset)
+
bool Script::HasValidSource() {
Object* src = this->source();
if (!src->IsString()) return true;
@@ -3613,6 +3740,7 @@
ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
+ACCESSORS(JSProxy, padding, Object, kPaddingOffset)
Address Foreign::address() {
@@ -3786,45 +3914,17 @@
JSObject::ElementsKind JSObject::GetElementsKind() {
- if (map()->has_fast_elements()) {
- ASSERT(elements()->map() == GetHeap()->fixed_array_map() ||
- elements()->map() == GetHeap()->fixed_cow_array_map());
- return FAST_ELEMENTS;
- }
- HeapObject* array = elements();
- if (array->IsFixedArray()) {
- // FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a
- // FixedArray, but FAST_ELEMENTS is already handled above.
- ASSERT(array->IsDictionary());
- return DICTIONARY_ELEMENTS;
- }
- ASSERT(!map()->has_fast_elements());
- if (array->IsExternalArray()) {
- switch (array->map()->instance_type()) {
- case EXTERNAL_BYTE_ARRAY_TYPE:
- return EXTERNAL_BYTE_ELEMENTS;
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
- case EXTERNAL_SHORT_ARRAY_TYPE:
- return EXTERNAL_SHORT_ELEMENTS;
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
- case EXTERNAL_INT_ARRAY_TYPE:
- return EXTERNAL_INT_ELEMENTS;
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return EXTERNAL_UNSIGNED_INT_ELEMENTS;
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- return EXTERNAL_FLOAT_ELEMENTS;
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- return EXTERNAL_DOUBLE_ELEMENTS;
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- return EXTERNAL_PIXEL_ELEMENTS;
- default:
- break;
- }
- }
- UNREACHABLE();
- return DICTIONARY_ELEMENTS;
+ ElementsKind kind = map()->elements_kind();
+ ASSERT((kind == FAST_ELEMENTS &&
+ (elements()->map() == GetHeap()->fixed_array_map() ||
+ elements()->map() == GetHeap()->fixed_cow_array_map())) ||
+ (kind == FAST_DOUBLE_ELEMENTS &&
+ elements()->IsFixedDoubleArray()) ||
+ (kind == DICTIONARY_ELEMENTS &&
+ elements()->IsFixedArray() &&
+ elements()->IsDictionary()) ||
+ (kind > DICTIONARY_ELEMENTS));
+ return kind;
}
@@ -3833,6 +3933,11 @@
}
+bool JSObject::HasFastDoubleElements() {
+ return GetElementsKind() == FAST_DOUBLE_ELEMENTS;
+}
+
+
bool JSObject::HasDictionaryElements() {
return GetElementsKind() == DICTIONARY_ELEMENTS;
}
@@ -3881,7 +3986,8 @@
bool JSObject::AllowsSetElementsLength() {
- bool result = elements()->IsFixedArray();
+ bool result = elements()->IsFixedArray() ||
+ elements()->IsFixedDoubleArray();
ASSERT(result == !HasExternalArrayElements());
return result;
}
@@ -4026,12 +4132,28 @@
}
-Object* JSObject::GetPrototype() {
- return JSObject::cast(this)->map()->prototype();
+Object* JSReceiver::GetPrototype() {
+ return HeapObject::cast(this)->map()->prototype();
}
-PropertyAttributes JSObject::GetPropertyAttribute(String* key) {
+bool JSReceiver::HasProperty(String* name) {
+ if (IsJSProxy()) {
+ return JSProxy::cast(this)->HasPropertyWithHandler(name);
+ }
+ return GetPropertyAttribute(name) != ABSENT;
+}
+
+
+bool JSReceiver::HasLocalProperty(String* name) {
+ if (IsJSProxy()) {
+ return JSProxy::cast(this)->HasPropertyWithHandler(name);
+ }
+ return GetLocalPropertyAttribute(name) != ABSENT;
+}
+
+
+PropertyAttributes JSReceiver::GetPropertyAttribute(String* key) {
return GetPropertyAttributeWithReceiver(this, key);
}
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 60028c0..158789e 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -351,6 +351,15 @@
case DICTIONARY_ELEMENTS:
elements()->Print(out);
break;
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ FixedArray* p = FixedArray::cast(elements());
+ for (int i = 2; i < p->length(); i++) {
+ PrintF(out, " %d: ", i);
+ p->get(i)->ShortPrint(out);
+ PrintF(out, "\n");
+ }
+ break;
+ }
default:
UNREACHABLE();
break;
@@ -472,6 +481,13 @@
}
+void PolymorphicCodeCache::PolymorphicCodeCachePrint(FILE* out) {
+ HeapObject::PrintHeader(out, "PolymorphicCodeCache");
+ PrintF(out, "\n - cache: ");
+ cache()->ShortPrint(out);
+}
+
+
void FixedArray::FixedArrayPrint(FILE* out) {
HeapObject::PrintHeader(out, "FixedArray");
PrintF(out, " - length: %d", length());
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
index 685e8ad..4cd795e 100644
--- a/src/objects-visiting.cc
+++ b/src/objects-visiting.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -73,6 +73,9 @@
case FIXED_ARRAY_TYPE:
return kVisitFixedArray;
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ return kVisitFixedDoubleArray;
+
case ODDBALL_TYPE:
return kVisitOddball;
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index dcbf2f8..cc64763 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,6 +30,22 @@
#include "allocation.h"
+#if V8_TARGET_ARCH_IA32
+#include "ia32/assembler-ia32.h"
+#include "ia32/assembler-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/assembler-x64.h"
+#include "x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/assembler-arm.h"
+#include "arm/assembler-arm-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/assembler-mips.h"
+#include "mips/assembler-mips-inl.h"
+#else
+#error Unsupported target architecture.
+#endif
+
// This file provides base classes and auxiliary methods for defining
// static object visitors used during GC.
// Visiting HeapObject body with a normal ObjectVisitor requires performing
@@ -52,6 +68,7 @@
kVisitShortcutCandidate,
kVisitByteArray,
kVisitFixedArray,
+ kVisitFixedDoubleArray,
kVisitGlobalContext,
// For data objects, JS objects and structs along with generic visitor which
@@ -286,6 +303,8 @@
FixedArray::BodyDescriptor,
int>::Visit);
+ table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
+
table_.Register(kVisitGlobalContext,
&FixedBodyVisitor<StaticVisitor,
Context::ScavengeBodyDescriptor,
@@ -332,6 +351,11 @@
return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
}
+ static inline int VisitFixedDoubleArray(Map* map, HeapObject* object) {
+ int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
+ return FixedDoubleArray::SizeFor(length);
+ }
+
static inline int VisitSeqAsciiString(Map* map, HeapObject* object) {
return SeqAsciiString::cast(object)->
SeqAsciiStringSize(map->instance_type());
diff --git a/src/objects.cc b/src/objects.cc
index b407c01..1ab5dd2 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -58,7 +58,6 @@
const int kGetterIndex = 0;
const int kSetterIndex = 1;
-
MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
Object* value) {
Object* result;
@@ -140,6 +139,8 @@
HeapObject* heap_object = HeapObject::cast(this);
if (heap_object->IsJSObject()) {
return JSObject::cast(this)->Lookup(name, result);
+ } else if (heap_object->IsJSProxy()) {
+ return result->HandlerResult();
}
Context* global_context = Isolate::Current()->context()->global_context();
if (heap_object->IsString()) {
@@ -148,8 +149,6 @@
holder = global_context->number_function()->instance_prototype();
} else if (heap_object->IsBoolean()) {
holder = global_context->boolean_function()->instance_prototype();
- } else if (heap_object->IsJSProxy()) {
- return result->HandlerResult();
}
}
ASSERT(holder != NULL); // Cannot handle null or undefined.
@@ -190,7 +189,7 @@
AccessorInfo* data = AccessorInfo::cast(structure);
Object* fun_obj = data->getter();
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
- HandleScope scope;
+ HandleScope scope(isolate);
JSObject* self = JSObject::cast(receiver);
JSObject* holder_handle = JSObject::cast(holder);
Handle<String> key(name);
@@ -230,15 +229,15 @@
String* name_raw,
Object* handler_raw) {
Isolate* isolate = name_raw->GetIsolate();
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<Object> receiver(receiver_raw);
Handle<Object> name(name_raw);
Handle<Object> handler(handler_raw);
// Extract trap function.
- LookupResult lookup;
- Handle<Object> trap(v8::internal::GetProperty(handler, "get", &lookup));
- if (!lookup.IsFound()) {
+ Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("get");
+ Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ if (trap->IsUndefined()) {
// Get the derived `get' property.
trap = isolate->derived_get_trap();
}
@@ -489,7 +488,16 @@
cell->set_value(cell->heap()->the_hole_value());
dictionary->DetailsAtPut(entry, details.AsDeleted());
} else {
- return dictionary->DeleteProperty(entry, mode);
+ Object* deleted = dictionary->DeleteProperty(entry, mode);
+ if (deleted == GetHeap()->true_value()) {
+ FixedArray* new_properties = NULL;
+ MaybeObject* maybe_properties = dictionary->Shrink(name);
+ if (!maybe_properties->To(&new_properties)) {
+ return maybe_properties;
+ }
+ set_properties(new_properties);
+ }
+ return deleted;
}
}
return GetHeap()->true_value();
@@ -638,7 +646,7 @@
// The object is either a number, a string, a boolean,
// a real JS object, or a Harmony proxy.
- if (heap_object->IsJSObject() || heap_object->IsJSProxy()) {
+ if (heap_object->IsJSReceiver()) {
return heap_object->map()->prototype();
}
Heap* heap = heap_object->GetHeap();
@@ -1001,7 +1009,6 @@
global_object ? "Global Object: " : "",
vowel ? "n" : "");
accumulator->Put(str);
- accumulator->Put('>');
printed = true;
}
}
@@ -1178,6 +1185,8 @@
case FIXED_ARRAY_TYPE:
FixedArray::BodyDescriptor::IterateBody(this, object_size, v);
break;
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ break;
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_VALUE_TYPE:
@@ -1281,8 +1290,8 @@
}
-String* JSObject::class_name() {
- if (IsJSFunction()) {
+String* JSReceiver::class_name() {
+  if (IsJSFunction() || IsJSFunctionProxy()) {
return GetHeap()->function_class_symbol();
}
if (map()->constructor()->IsJSFunction()) {
@@ -1294,7 +1303,7 @@
}
-String* JSObject::constructor_name() {
+String* JSReceiver::constructor_name() {
if (map()->constructor()->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(map()->constructor());
String* name = String::cast(constructor->shared()->name());
@@ -1304,6 +1313,7 @@
Object* proto = GetPrototype();
if (proto->IsJSObject()) return JSObject::cast(proto)->constructor_name();
}
+ // TODO(rossberg): what about proxies?
// If the constructor is not present, return "Object".
return GetHeap()->Object_symbol();
}
@@ -1758,10 +1768,10 @@
}
-MaybeObject* JSObject::SetProperty(String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+MaybeObject* JSReceiver::SetProperty(String* name,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
LookupResult result;
LocalLookup(name, &result);
return SetProperty(&result, name, value, attributes, strict_mode);
@@ -1771,7 +1781,8 @@
MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
String* name,
Object* value,
- JSObject* holder) {
+ JSObject* holder,
+ StrictModeFlag strict_mode) {
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
@@ -1819,6 +1830,9 @@
if (setter->IsJSFunction()) {
return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
} else {
+ if (strict_mode == kNonStrictMode) {
+ return value;
+ }
Handle<String> key(name);
Handle<Object> holder_handle(holder, isolate);
Handle<Object> args[2] = { key, holder_handle };
@@ -1872,15 +1886,17 @@
}
-MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
- Object* value,
- bool* found) {
+MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
+ uint32_t index,
+ Object* value,
+ bool* found,
+ StrictModeFlag strict_mode) {
Heap* heap = GetHeap();
for (Object* pt = GetPrototype();
pt != heap->null_value();
pt = pt->GetPrototype()) {
if (!JSObject::cast(pt)->HasDictionaryElements()) {
- continue;
+ continue;
}
NumberDictionary* dictionary = JSObject::cast(pt)->element_dictionary();
int entry = dictionary->FindEntry(index);
@@ -1888,8 +1904,11 @@
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
*found = true;
- return SetElementWithCallback(
- dictionary->ValueAt(entry), index, value, JSObject::cast(pt));
+ return SetElementWithCallback(dictionary->ValueAt(entry),
+ index,
+ value,
+ JSObject::cast(pt),
+ strict_mode);
}
}
}
@@ -1927,6 +1946,42 @@
}
+static JSObject::ElementsKind GetElementsKindFromExternalArrayType(
+ ExternalArrayType array_type) {
+ switch (array_type) {
+ case kExternalByteArray:
+ return JSObject::EXTERNAL_BYTE_ELEMENTS;
+ break;
+ case kExternalUnsignedByteArray:
+ return JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
+ break;
+ case kExternalShortArray:
+ return JSObject::EXTERNAL_SHORT_ELEMENTS;
+ break;
+ case kExternalUnsignedShortArray:
+ return JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
+ break;
+ case kExternalIntArray:
+ return JSObject::EXTERNAL_INT_ELEMENTS;
+ break;
+ case kExternalUnsignedIntArray:
+ return JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS;
+ break;
+ case kExternalFloatArray:
+ return JSObject::EXTERNAL_FLOAT_ELEMENTS;
+ break;
+ case kExternalDoubleArray:
+ return JSObject::EXTERNAL_DOUBLE_ELEMENTS;
+ break;
+ case kExternalPixelArray:
+ return JSObject::EXTERNAL_PIXEL_ELEMENTS;
+ break;
+ }
+ UNREACHABLE();
+ return JSObject::DICTIONARY_ELEMENTS;
+}
+
+
MaybeObject* Map::GetExternalArrayElementsMap(ExternalArrayType array_type,
bool safe_to_add_transition) {
Heap* current_heap = heap();
@@ -1969,8 +2024,7 @@
}
Map* new_map = Map::cast(obj);
- new_map->set_has_fast_elements(false);
- new_map->set_has_external_array_elements(true);
+ new_map->set_elements_kind(GetElementsKindFromExternalArrayType(array_type));
GetIsolate()->counters()->map_to_external_array_elements()->Increment();
// Only remember the map transition if the object's map is NOT equal to the
@@ -2070,10 +2124,12 @@
// We only need to deal with CALLBACKS and INTERCEPTORS
-MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(LookupResult* result,
- String* name,
- Object* value,
- bool check_prototype) {
+MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
+ LookupResult* result,
+ String* name,
+ Object* value,
+ bool check_prototype,
+ StrictModeFlag strict_mode) {
if (check_prototype && !result->IsProperty()) {
LookupCallbackSetterInPrototypes(name, result);
}
@@ -2089,7 +2145,8 @@
return SetPropertyWithCallback(result->GetCallbackObject(),
name,
value,
- result->holder());
+ result->holder(),
+ strict_mode);
}
}
break;
@@ -2100,8 +2157,11 @@
LookupResult r;
LookupRealNamedProperty(name, &r);
if (r.IsProperty()) {
- return SetPropertyWithFailedAccessCheck(&r, name, value,
- check_prototype);
+ return SetPropertyWithFailedAccessCheck(&r,
+ name,
+ value,
+ check_prototype,
+ strict_mode);
}
break;
}
@@ -2112,19 +2172,175 @@
}
}
- HandleScope scope;
- Handle<Object> value_handle(value);
Heap* heap = GetHeap();
+ HandleScope scope(heap->isolate());
+ Handle<Object> value_handle(value);
heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
return *value_handle;
}
-MaybeObject* JSObject::SetProperty(LookupResult* result,
- String* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
+MaybeObject* JSReceiver::SetProperty(LookupResult* result,
+ String* key,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
+ if (result->IsFound() && result->type() == HANDLER) {
+ return JSProxy::cast(this)->SetPropertyWithHandler(
+ key, value, attributes, strict_mode);
+ } else {
+ return JSObject::cast(this)->SetPropertyForResult(
+ result, key, value, attributes, strict_mode);
+ }
+}
+
+
+bool JSProxy::HasPropertyWithHandler(String* name_raw) {
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Object> receiver(this);
+ Handle<Object> name(name_raw);
+ Handle<Object> handler(this->handler());
+
+ // Extract trap function.
+ Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("has");
+ Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ if (trap->IsUndefined()) {
+ trap = isolate->derived_has_trap();
+ }
+
+ // Call trap function.
+ Object** args[] = { name.location() };
+ bool has_exception;
+ Handle<Object> result =
+ Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+ if (has_exception) return Failure::Exception();
+
+ return result->ToBoolean()->IsTrue();
+}
+
+
+MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler(
+ String* name_raw,
+ Object* value_raw,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Object> receiver(this);
+ Handle<Object> name(name_raw);
+ Handle<Object> value(value_raw);
+ Handle<Object> handler(this->handler());
+
+ // Extract trap function.
+ Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("set");
+ Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ if (trap->IsUndefined()) {
+ trap = isolate->derived_set_trap();
+ }
+
+ // Call trap function.
+ Object** args[] = {
+ receiver.location(), name.location(), value.location()
+ };
+ bool has_exception;
+ Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+ if (has_exception) return Failure::Exception();
+
+ return *value;
+}
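The trap dispatch above reduces to: read the handler's "set" property, substitute a derived default when it is undefined, call it, and let any exception propagate. A compact sketch under those assumptions (the handler table and Trap signature are illustrative, not V8's API):

#include <functional>
#include <string>
#include <unordered_map>

using Trap = std::function<bool(const std::string& name, int value)>;

// Stand-in for isolate->derived_set_trap(): the fallback used when the
// handler does not define its own "set" trap.
bool DerivedSetTrap(const std::string&, int) { return true; }

bool ProxySet(const std::unordered_map<std::string, Trap>& handler,
              const std::string& name, int value) {
  auto it = handler.find("set");
  Trap trap = (it != handler.end()) ? it->second : Trap(DerivedSetTrap);
  return trap(name, value);  // any exception thrown by the trap propagates
}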
+
+
+MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler(
+ String* name_raw, DeleteMode mode) {
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Object> receiver(this);
+ Handle<Object> name(name_raw);
+ Handle<Object> handler(this->handler());
+
+ // Extract trap function.
+ Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
+ Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ if (trap->IsUndefined()) {
+ Handle<Object> args[] = { handler, trap_name };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Failure::Exception();
+ }
+
+ // Call trap function.
+ Object** args[] = { name.location() };
+ bool has_exception;
+ Handle<Object> result =
+ Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+ if (has_exception) return Failure::Exception();
+
+ Object* bool_result = result->ToBoolean();
+ if (mode == STRICT_DELETION && bool_result == GetHeap()->false_value()) {
+ Handle<Object> args[] = { handler, trap_name };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "handler_failed", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Failure::Exception();
+ }
+ return bool_result;
+}
+
+
+MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
+ JSReceiver* receiver_raw,
+ String* name_raw,
+ bool* has_exception) {
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<JSReceiver> receiver(receiver_raw);
+ Handle<Object> name(name_raw);
+ Handle<Object> handler(this->handler());
+
+ // Extract trap function.
+ Handle<String> trap_name =
+ isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+ Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ if (trap->IsUndefined()) {
+ Handle<Object> args[] = { handler, trap_name };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ *has_exception = true;
+ return NONE;
+ }
+
+ // Call trap function.
+ Object** args[] = { name.location() };
+ Handle<Object> result =
+ Execution::Call(trap, handler, ARRAY_SIZE(args), args, has_exception);
+ if (*has_exception) return NONE;
+
+ // TODO(rossberg): convert result to PropertyAttributes
+ USE(result);
+ return NONE;
+}
+
+
+void JSProxy::Fix() {
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<JSProxy> self(this);
+
+ isolate->factory()->BecomeJSObject(self);
+ ASSERT(IsJSObject());
+ // TODO(rossberg): recognize function proxies.
+}
+
+
+MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
+ String* name,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
Heap* heap = GetHeap();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -2145,7 +2361,11 @@
// Check access rights if needed.
if (IsAccessCheckNeeded()
&& !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(result, name, value, true);
+ return SetPropertyWithFailedAccessCheck(result,
+ name,
+ value,
+ true,
+ strict_mode);
}
if (IsJSGlobalProxy()) {
@@ -2165,7 +2385,8 @@
return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
name,
value,
- accessor_result.holder());
+ accessor_result.holder(),
+ strict_mode);
}
}
if (!result->IsFound()) {
@@ -2174,7 +2395,7 @@
}
if (result->IsReadOnly() && result->IsProperty()) {
if (strict_mode == kStrictMode) {
- HandleScope scope;
+ HandleScope scope(heap->isolate());
Handle<String> key(name);
Handle<Object> holder(this);
Handle<Object> args[2] = { key, holder };
@@ -2209,7 +2430,8 @@
return SetPropertyWithCallback(result->GetCallbackObject(),
name,
value,
- result->holder());
+ result->holder(),
+ strict_mode);
case INTERCEPTOR:
return SetPropertyWithInterceptor(name, value, attributes, strict_mode);
case CONSTANT_TRANSITION: {
@@ -2262,7 +2484,11 @@
if (IsAccessCheckNeeded()) {
Heap* heap = GetHeap();
if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(&result, name, value, false);
+ return SetPropertyWithFailedAccessCheck(&result,
+ name,
+ value,
+ false,
+ kNonStrictMode);
}
}
@@ -2395,12 +2621,13 @@
}
-PropertyAttributes JSObject::GetPropertyAttributeWithReceiver(
- JSObject* receiver,
+PropertyAttributes JSReceiver::GetPropertyAttributeWithReceiver(
+ JSReceiver* receiver,
String* key) {
uint32_t index = 0;
- if (key->AsArrayIndex(&index)) {
- if (HasElementWithReceiver(receiver, index)) return NONE;
+ if (IsJSObject() && key->AsArrayIndex(&index)) {
+ if (JSObject::cast(this)->HasElementWithReceiver(receiver, index))
+ return NONE;
return ABSENT;
}
// Named property.
@@ -2410,18 +2637,17 @@
}
-PropertyAttributes JSObject::GetPropertyAttribute(JSObject* receiver,
- LookupResult* result,
- String* name,
- bool continue_search) {
+PropertyAttributes JSReceiver::GetPropertyAttribute(JSReceiver* receiver,
+ LookupResult* result,
+ String* name,
+ bool continue_search) {
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
+ JSObject* this_obj = JSObject::cast(this);
Heap* heap = GetHeap();
- if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
- return GetPropertyAttributeWithFailedAccessCheck(receiver,
- result,
- name,
- continue_search);
+ if (!heap->isolate()->MayNamedAccess(this_obj, name, v8::ACCESS_HAS)) {
+ return this_obj->GetPropertyAttributeWithFailedAccessCheck(
+ receiver, result, name, continue_search);
}
}
if (result->IsProperty()) {
@@ -2431,9 +2657,15 @@
case CONSTANT_FUNCTION:
case CALLBACKS:
return result->GetAttributes();
+ case HANDLER: {
+ // TODO(rossberg): propagate exceptions properly.
+ bool has_exception = false;
+ return JSProxy::cast(this)->GetPropertyAttributeWithHandler(
+ receiver, name, &has_exception);
+ }
case INTERCEPTOR:
- return result->holder()->
- GetPropertyAttributeWithInterceptor(receiver, name, continue_search);
+ return result->holder()->GetPropertyAttributeWithInterceptor(
+ JSObject::cast(receiver), name, continue_search);
default:
UNREACHABLE();
}
@@ -2442,11 +2674,11 @@
}
-PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) {
+PropertyAttributes JSReceiver::GetLocalPropertyAttribute(String* name) {
// Check whether the name is an array index.
uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- if (HasLocalElement(index)) return NONE;
+ if (IsJSObject() && name->AsArrayIndex(&index)) {
+ if (JSObject::cast(this)->HasLocalElement(index)) return NONE;
return ABSENT;
}
// Named property.
@@ -2460,10 +2692,12 @@
PropertyNormalizationMode mode) {
Isolate* isolate = obj->GetIsolate();
Map* fast = obj->map();
- int index = Hash(fast) % kEntries;
+ int index = fast->Hash() % kEntries;
Object* result = get(index);
- if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
+ if (result->IsMap() &&
+ Map::cast(result)->EquivalentToForNormalization(fast, mode)) {
#ifdef DEBUG
+ Map::cast(result)->SharedMapVerify();
if (FLAG_enable_slow_asserts) {
// The cached map should match newly created normalized map bit-by-bit.
Object* fresh;
@@ -2499,43 +2733,6 @@
}
-int NormalizedMapCache::Hash(Map* fast) {
- // For performance reasons we only hash the 3 most variable fields of a map:
- // constructor, prototype and bit_field2.
-
- // Shift away the tag.
- int hash = (static_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(fast->constructor())) >> 2);
-
- // XOR-ing the prototype and constructor directly yields too many zero bits
- // when the two pointers are close (which is fairly common).
- // To avoid this we shift the prototype 4 bits relatively to the constructor.
- hash ^= (static_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(fast->prototype())) << 2);
-
- return hash ^ (hash >> 16) ^ fast->bit_field2();
-}
-
-
-bool NormalizedMapCache::CheckHit(Map* slow,
- Map* fast,
- PropertyNormalizationMode mode) {
-#ifdef DEBUG
- slow->SharedMapVerify();
-#endif
- return
- slow->constructor() == fast->constructor() &&
- slow->prototype() == fast->prototype() &&
- slow->inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ?
- 0 :
- fast->inobject_properties()) &&
- slow->instance_type() == fast->instance_type() &&
- slow->bit_field() == fast->bit_field() &&
- slow->bit_field2() == fast->bit_field2() &&
- (slow->bit_field3() & ~(1<<Map::kIsShared)) == fast->bit_field3();
-}
-
-
MaybeObject* JSObject::UpdateMapCodeCache(String* name, Code* code) {
if (map()->is_shared()) {
// Fast case maps are never marked as shared.
@@ -2680,48 +2877,76 @@
MaybeObject* JSObject::NormalizeElements() {
ASSERT(!HasExternalArrayElements());
- if (HasDictionaryElements()) return this;
- Map* old_map = map();
- ASSERT(old_map->has_fast_elements());
- Object* obj;
- { MaybeObject* maybe_obj = old_map->GetSlowElementsMap();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ // Find the backing store.
+ FixedArrayBase* array = FixedArrayBase::cast(elements());
+ Map* old_map = array->map();
+ bool is_arguments =
+ (old_map == old_map->heap()->non_strict_arguments_elements_map());
+ if (is_arguments) {
+ array = FixedArrayBase::cast(FixedArray::cast(array)->get(1));
}
- Map* new_map = Map::cast(obj);
+ if (array->IsDictionary()) return array;
- // Get number of entries.
- FixedArray* array = FixedArray::cast(elements());
-
- // Compute the effective length.
- int length = IsJSArray() ?
- Smi::cast(JSArray::cast(this)->length())->value() :
- array->length();
- { MaybeObject* maybe_obj = NumberDictionary::Allocate(length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ ASSERT(HasFastElements() ||
+ HasFastDoubleElements() ||
+ HasFastArgumentsElements());
+ // Compute the effective length and allocate a new backing store.
+ int length = IsJSArray()
+ ? Smi::cast(JSArray::cast(this)->length())->value()
+ : array->length();
+ NumberDictionary* dictionary = NULL;
+ { Object* object;
+ MaybeObject* maybe = NumberDictionary::Allocate(length);
+ if (!maybe->ToObject(&object)) return maybe;
+ dictionary = NumberDictionary::cast(object);
}
- NumberDictionary* dictionary = NumberDictionary::cast(obj);
- // Copy entries.
+
+ // Copy the elements to the new backing store.
+ bool has_double_elements = array->IsFixedDoubleArray();
for (int i = 0; i < length; i++) {
- Object* value = array->get(i);
- if (!value->IsTheHole()) {
- PropertyDetails details = PropertyDetails(NONE, NORMAL);
- Object* result;
- { MaybeObject* maybe_result =
- dictionary->AddNumberEntry(i, array->get(i), details);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ Object* value = NULL;
+ if (has_double_elements) {
+ FixedDoubleArray* double_array = FixedDoubleArray::cast(array);
+ if (double_array->is_the_hole(i)) {
+ value = GetIsolate()->heap()->the_hole_value();
+ } else {
+ // Objects must be allocated in the old object space, since the
+ // overall number of HeapNumbers needed for the conversion might
+ // exceed the capacity of new space, and we would fail repeatedly
+ // trying to convert the FixedDoubleArray.
+ MaybeObject* maybe_value_object =
+ GetHeap()->AllocateHeapNumber(double_array->get(i), TENURED);
+ if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
}
+ } else {
+ ASSERT(old_map->has_fast_elements());
+ value = FixedArray::cast(array)->get(i);
+ }
+ PropertyDetails details = PropertyDetails(NONE, NORMAL);
+ if (!value->IsTheHole()) {
+ Object* result;
+ MaybeObject* maybe_result =
+ dictionary->AddNumberEntry(i, value, details);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
dictionary = NumberDictionary::cast(result);
}
}
- // Switch to using the dictionary as the backing storage for
- // elements. Set the new map first to satify the elements type
- // assert in set_elements().
- set_map(new_map);
- set_elements(dictionary);
- new_map->heap()->isolate()->counters()->elements_to_dictionary()->
- Increment();
+ // Switch to using the dictionary as the backing storage for elements.
+ if (is_arguments) {
+ FixedArray::cast(elements())->set(1, dictionary);
+ } else {
+ // Set the new map first to satisfy the elements type assert in
+ // set_elements().
+ Object* new_map;
+ MaybeObject* maybe = map()->GetSlowElementsMap();
+ if (!maybe->ToObject(&new_map)) return maybe;
+ set_map(Map::cast(new_map));
+ set_elements(dictionary);
+ }
+
+ old_map->isolate()->counters()->elements_to_dictionary()->Increment();
#ifdef DEBUG
if (FLAG_trace_normalization) {
@@ -2730,7 +2955,8 @@
}
#endif
- return this;
+ ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+ return dictionary;
}
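Stripped of the V8-specific concerns (tenured HeapNumber allocation, the arguments parameter map), normalization is just: walk the dense store and copy every non-hole value into a dictionary keyed by index. A sketch over std:: types, with std::nullopt playing the hole:

#include <cstddef>
#include <map>
#include <optional>
#include <vector>

std::map<size_t, double> NormalizeElements(
    const std::vector<std::optional<double>>& fast) {
  std::map<size_t, double> dictionary;
  for (size_t i = 0; i < fast.size(); ++i) {
    if (fast[i].has_value()) dictionary[i] = *fast[i];  // holes are dropped
  }
  return dictionary;
}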
@@ -2804,7 +3030,16 @@
NumberDictionary* dictionary = element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != NumberDictionary::kNotFound) {
- return dictionary->DeleteProperty(entry, mode);
+ Object* deleted = dictionary->DeleteProperty(entry, mode);
+ if (deleted == GetHeap()->true_value()) {
+ MaybeObject* maybe_elements = dictionary->Shrink(index);
+ FixedArray* new_elements = NULL;
+ if (!maybe_elements->To(&new_elements)) {
+ return maybe_elements;
+ }
+ set_elements(new_elements);
+ }
+ return deleted;
}
break;
}
@@ -2850,6 +3085,93 @@
}
+MaybeObject* JSObject::DeleteFastElement(uint32_t index) {
+ ASSERT(HasFastElements() || HasFastArgumentsElements());
+ Heap* heap = GetHeap();
+ FixedArray* backing_store = FixedArray::cast(elements());
+ if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
+ backing_store = FixedArray::cast(backing_store->get(1));
+ } else {
+ Object* writable;
+ MaybeObject* maybe = EnsureWritableFastElements();
+ if (!maybe->ToObject(&writable)) return maybe;
+ backing_store = FixedArray::cast(writable);
+ }
+ uint32_t length = static_cast<uint32_t>(
+ IsJSArray()
+ ? Smi::cast(JSArray::cast(this)->length())->value()
+ : backing_store->length());
+ if (index < length) {
+ backing_store->set_the_hole(index);
+ // If an old space backing store is larger than a certain size and
+ // has too few used values, normalize it.
+ // To avoid doing the check on every delete, we require at least
+ // one hole adjacent to the value being deleted.
+ Object* hole = heap->the_hole_value();
+ const int kMinLengthForSparsenessCheck = 64;
+ if (backing_store->length() >= kMinLengthForSparsenessCheck &&
+ !heap->InNewSpace(backing_store) &&
+ ((index > 0 && backing_store->get(index - 1) == hole) ||
+ (index + 1 < length && backing_store->get(index + 1) == hole))) {
+ int num_used = 0;
+ for (int i = 0; i < backing_store->length(); ++i) {
+ if (backing_store->get(i) != hole) ++num_used;
+ // Bail out early if more than 1/4 is used.
+ if (4 * num_used > backing_store->length()) break;
+ }
+ if (4 * num_used <= backing_store->length()) {
+ MaybeObject* result = NormalizeElements();
+ if (result->IsFailure()) return result;
+ }
+ }
+ }
+ return heap->true_value();
+}
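The same heuristic restated over a plain vector, which makes the gates easy to see: a minimum length, an adjacent hole, and an occupancy count that bails out as soon as more than a quarter of the slots are in use (the old-space check is V8-specific and omitted here):

#include <cstddef>
#include <optional>
#include <vector>

bool ShouldNormalizeAfterDelete(const std::vector<std::optional<int>>& store,
                                size_t index) {
  const size_t kMinLengthForSparsenessCheck = 64;
  if (store.size() < kMinLengthForSparsenessCheck) return false;
  // Only pay for the scan when the deleted slot borders another hole.
  bool adjacent_hole =
      (index > 0 && !store[index - 1].has_value()) ||
      (index + 1 < store.size() && !store[index + 1].has_value());
  if (!adjacent_hole) return false;
  size_t num_used = 0;
  for (const auto& slot : store) {
    if (slot.has_value()) ++num_used;
    if (4 * num_used > store.size()) return false;  // early bail-out
  }
  return true;  // at most 1/4 of the slots used: worth normalizing
}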
+
+
+MaybeObject* JSObject::DeleteDictionaryElement(uint32_t index,
+ DeleteMode mode) {
+ Isolate* isolate = GetIsolate();
+ Heap* heap = isolate->heap();
+ FixedArray* backing_store = FixedArray::cast(elements());
+ bool is_arguments =
+ (GetElementsKind() == JSObject::NON_STRICT_ARGUMENTS_ELEMENTS);
+ if (is_arguments) {
+ backing_store = FixedArray::cast(backing_store->get(1));
+ }
+ NumberDictionary* dictionary = NumberDictionary::cast(backing_store);
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
+ Object* result = dictionary->DeleteProperty(entry, mode);
+ if (result == heap->true_value()) {
+ MaybeObject* maybe_elements = dictionary->Shrink(index);
+ FixedArray* new_elements = NULL;
+ if (!maybe_elements->To(&new_elements)) {
+ return maybe_elements;
+ }
+ if (is_arguments) {
+ FixedArray::cast(elements())->set(1, new_elements);
+ } else {
+ set_elements(new_elements);
+ }
+ }
+ if (mode == STRICT_DELETION && result == heap->false_value()) {
+ // In strict mode, attempting to delete a non-configurable property
+ // throws an exception.
+ HandleScope scope(isolate);
+ Handle<Object> holder(this);
+ Handle<Object> name = isolate->factory()->NewNumberFromUint(index);
+ Handle<Object> args[2] = { name, holder };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("strict_delete_property",
+ HandleVector(args, 2));
+ return isolate->Throw(*error);
+ }
+ }
+ return heap->true_value();
+}
+
+
MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
Isolate* isolate = GetIsolate();
// Check access rights if needed.
@@ -2868,23 +3190,24 @@
if (HasIndexedInterceptor()) {
// Skip interceptor if forcing deletion.
- if (mode == FORCE_DELETION) {
- return DeleteElementPostInterceptor(index, mode);
- }
- return DeleteElementWithInterceptor(index);
+ return (mode == FORCE_DELETION)
+ ? DeleteElementPostInterceptor(index, FORCE_DELETION)
+ : DeleteElementWithInterceptor(index);
}
switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- Object* obj;
- { MaybeObject* maybe_obj = EnsureWritableFastElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if (index < length) {
- FixedArray::cast(elements())->set_the_hole(index);
+ case FAST_ELEMENTS:
+ return DeleteFastElement(index);
+
+ case DICTIONARY_ELEMENTS:
+ return DeleteDictionaryElement(index, mode);
+
+ case FAST_DOUBLE_ELEMENTS: {
+ int length = IsJSArray()
+ ? Smi::cast(JSArray::cast(this)->length())->value()
+ : FixedDoubleArray::cast(elements())->length();
+ if (index < static_cast<uint32_t>(length)) {
+ FixedDoubleArray::cast(elements())->set_the_hole(index);
}
break;
}
@@ -2900,34 +3223,41 @@
// Pixel and external array elements cannot be deleted. Just
// silently ignore here.
break;
- case DICTIONARY_ELEMENTS: {
- NumberDictionary* dictionary = element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* result = dictionary->DeleteProperty(entry, mode);
- if (mode == STRICT_DELETION && result ==
- isolate->heap()->false_value()) {
- // In strict mode, deleting a non-configurable property throws
- // exception. dictionary->DeleteProperty will return false_value()
- // if a non-configurable property is being deleted.
- HandleScope scope;
- Handle<Object> self(this);
- Handle<Object> i = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[2] = { i, self };
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_delete_property", HandleVector(args, 2)));
+
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ uint32_t length = parameter_map->length();
+ Object* probe =
+ index < (length - 2) ? parameter_map->get(index + 2) : NULL;
+ if (probe != NULL && !probe->IsTheHole()) {
+ // TODO(kmillikin): We could check if this was the last aliased
+ // parameter, and revert to normal elements in that case. That
+ // would enable GC of the context.
+ parameter_map->set_the_hole(index + 2);
+ } else {
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ if (arguments->IsDictionary()) {
+ return DeleteDictionaryElement(index, mode);
+ } else {
+ return DeleteFastElement(index);
}
}
break;
}
- default:
- UNREACHABLE();
- break;
}
return isolate->heap()->true_value();
}
+MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
+ if (IsJSProxy()) {
+ return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode);
+ } else {
+ return JSObject::cast(this)->DeleteProperty(name, mode);
+ }
+}
+
+
MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
Isolate* isolate = GetIsolate();
// ECMA-262, 3rd, 8.6.2.5
@@ -2985,6 +3315,26 @@
}
+bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
+ ElementsKind kind,
+ Object* object) {
+ ASSERT(kind == FAST_ELEMENTS || kind == DICTIONARY_ELEMENTS);
+ if (kind == FAST_ELEMENTS) {
+ int length = IsJSArray()
+ ? Smi::cast(JSArray::cast(this)->length())->value()
+ : elements->length();
+ for (int i = 0; i < length; ++i) {
+ Object* element = elements->get(i);
+ if (!element->IsTheHole() && element == object) return true;
+ }
+ } else {
+ Object* key = NumberDictionary::cast(elements)->SlowReverseLookup(object);
+ if (!key->IsUndefined()) return true;
+ }
+ return false;
+}
+
+
// Check whether this object references another object.
bool JSObject::ReferencesObject(Object* obj) {
Map* map_of_this = map();
@@ -3008,7 +3358,8 @@
}
// Check if the object is among the indexed properties.
- switch (GetElementsKind()) {
+ ElementsKind kind = GetElementsKind();
+ switch (kind) {
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
@@ -3018,31 +3369,30 @@
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
// Raw pixels and external arrays do not reference other
// objects.
break;
- case FAST_ELEMENTS: {
- int length = IsJSArray() ?
- Smi::cast(JSArray::cast(this)->length())->value() :
- FixedArray::cast(elements())->length();
- for (int i = 0; i < length; i++) {
- Object* element = FixedArray::cast(elements())->get(i);
- if (!element->IsTheHole() && element == obj) {
- return true;
- }
- }
- break;
- }
+ case FAST_ELEMENTS:
case DICTIONARY_ELEMENTS: {
- key = element_dictionary()->SlowReverseLookup(obj);
- if (!key->IsUndefined()) {
- return true;
- }
+ FixedArray* elements = FixedArray::cast(this->elements());
+ if (ReferencesObjectFromElements(elements, kind, obj)) return true;
break;
}
- default:
- UNREACHABLE();
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ // Check the mapped parameters.
+ int length = parameter_map->length();
+ for (int i = 2; i < length; ++i) {
+ Object* value = parameter_map->get(i);
+ if (!value->IsTheHole() && value == obj) return true;
+ }
+ // Check the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS : FAST_ELEMENTS;
+ if (ReferencesObjectFromElements(arguments, kind, obj)) return true;
break;
+ }
}
// For functions check the context.
@@ -3077,9 +3427,9 @@
}
}
- // Check the context extension if any.
- if (context->has_extension()) {
- return context->extension()->ReferencesObject(obj);
+ // Check the context extension (if any) if it can have references.
+ if (context->has_extension() && !context->IsCatchContext()) {
+ return JSObject::cast(context->extension())->ReferencesObject(obj);
}
}
@@ -3117,23 +3467,22 @@
}
// If there are fast elements we normalize.
- if (HasFastElements()) {
- Object* ok;
- { MaybeObject* maybe_ok = NormalizeElements();
- if (!maybe_ok->ToObject(&ok)) return maybe_ok;
- }
+ NumberDictionary* dictionary = NULL;
+ { MaybeObject* maybe = NormalizeElements();
+ if (!maybe->To<NumberDictionary>(&dictionary)) return maybe;
}
+ ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
// Make sure that we never go back to fast case.
- element_dictionary()->set_requires_slow_elements();
+ dictionary->set_requires_slow_elements();
// Do a map transition, other objects with this map may still
// be extensible.
- Object* new_map;
- { MaybeObject* maybe_new_map = map()->CopyDropTransitions();
- if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
+ Map* new_map;
+ { MaybeObject* maybe = map()->CopyDropTransitions();
+ if (!maybe->To<Map>(&new_map)) return maybe;
}
- Map::cast(new_map)->set_is_extensible(false);
- set_map(Map::cast(new_map));
+ new_map->set_is_extensible(false);
+ set_map(new_map);
ASSERT(!map()->is_extensible());
return new_map;
}
@@ -3210,6 +3559,15 @@
}
+void JSReceiver::LocalLookup(String* name, LookupResult* result) {
+ if (IsJSProxy()) {
+ result->HandlerResult();
+ } else {
+ JSObject::cast(this)->LocalLookup(name, result);
+ }
+}
+
+
void JSObject::LocalLookup(String* name, LookupResult* result) {
ASSERT(name->IsString());
@@ -3229,8 +3587,7 @@
}
// Check __proto__ before interceptor.
- if (name->Equals(heap->Proto_symbol()) &&
- !IsJSContextExtensionObject()) {
+ if (name->Equals(heap->Proto_symbol()) && !IsJSContextExtensionObject()) {
result->ConstantResult(this);
return;
}
@@ -3245,7 +3602,7 @@
}
-void JSObject::Lookup(String* name, LookupResult* result) {
+void JSReceiver::Lookup(String* name, LookupResult* result) {
// Ecma-262 3rd 8.6.2.4
Heap* heap = GetHeap();
for (Object* current = this;
@@ -3271,6 +3628,24 @@
}
+// Search for a getter or setter in an elements dictionary. Returns either
+// undefined if the element is read-only, or the getter/setter pair (fixed
+// array) if there is an existing one, or the hole value if the element does
+// not exist or is a normal non-getter/setter data element.
+static Object* FindGetterSetterInDictionary(NumberDictionary* dictionary,
+ uint32_t index,
+ Heap* heap) {
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
+ Object* result = dictionary->ValueAt(entry);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ if (details.IsReadOnly()) return heap->undefined_value();
+ if (details.type() == CALLBACKS && result->IsFixedArray()) return result;
+ }
+ return heap->the_hole_value();
+}
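The helper's three-way result is easiest to see as an enum; this sketch swaps V8's sentinel objects (undefined, the hole) for named values, with ElementDetails as an illustrative stand-in for PropertyDetails:

#include <cstdint>
#include <map>

enum class GetterSetterLookup { kReadOnly, kExistingPair, kNotFound };

struct ElementDetails {
  bool read_only;
  bool is_accessor_pair;  // stands in for CALLBACKS + FixedArray
};

GetterSetterLookup Probe(const std::map<uint32_t, ElementDetails>& dict,
                         uint32_t index) {
  auto it = dict.find(index);
  if (it == dict.end()) return GetterSetterLookup::kNotFound;
  if (it->second.read_only) return GetterSetterLookup::kReadOnly;
  if (it->second.is_accessor_pair) return GetterSetterLookup::kExistingPair;
  return GetterSetterLookup::kNotFound;  // plain data element: overridable
}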
+
+
MaybeObject* JSObject::DefineGetterSetter(String* name,
PropertyAttributes attributes) {
Heap* heap = GetHeap();
@@ -3301,29 +3676,35 @@
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
// Ignore getters and setters on pixel and external array
// elements.
return heap->undefined_value();
case DICTIONARY_ELEMENTS: {
- // Lookup the index.
- NumberDictionary* dictionary = element_dictionary();
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* result = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.IsReadOnly()) return heap->undefined_value();
- if (details.type() == CALLBACKS) {
- if (result->IsFixedArray()) {
- return result;
- }
- // Otherwise allow to override it.
+ Object* probe =
+ FindGetterSetterInDictionary(element_dictionary(), index, heap);
+ if (!probe->IsTheHole()) return probe;
+ // Otherwise allow it to be overridden.
+ break;
+ }
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ // Ascertain whether we have read-only properties or an existing
+ // getter/setter pair in an arguments elements dictionary backing
+ // store.
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ uint32_t length = parameter_map->length();
+ Object* probe =
+ index < (length - 2) ? parameter_map->get(index + 2) : NULL;
+ if (probe == NULL || probe->IsTheHole()) {
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ if (arguments->IsDictionary()) {
+ NumberDictionary* dictionary = NumberDictionary::cast(arguments);
+ probe = FindGetterSetterInDictionary(dictionary, index, heap);
+ if (!probe->IsTheHole()) return probe;
}
}
break;
}
- default:
- UNREACHABLE();
- break;
}
} else {
// Lookup the name.
@@ -3386,22 +3767,38 @@
PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
// Normalize elements to make this operation simple.
- Object* ok;
- { MaybeObject* maybe_ok = NormalizeElements();
- if (!maybe_ok->ToObject(&ok)) return maybe_ok;
+ NumberDictionary* dictionary = NULL;
+ { Object* result;
+ MaybeObject* maybe = NormalizeElements();
+ if (!maybe->ToObject(&result)) return maybe;
+ dictionary = NumberDictionary::cast(result);
}
+ ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
// Update the dictionary with the new CALLBACKS property.
- Object* dict;
- { MaybeObject* maybe_dict =
- element_dictionary()->Set(index, structure, details);
- if (!maybe_dict->ToObject(&dict)) return maybe_dict;
+ { Object* result;
+ MaybeObject* maybe = dictionary->Set(index, structure, details);
+ if (!maybe->ToObject(&result)) return maybe;
+ dictionary = NumberDictionary::cast(result);
}
- NumberDictionary* elements = NumberDictionary::cast(dict);
- elements->set_requires_slow_elements();
- // Set the potential new dictionary on the object.
- set_elements(elements);
+ dictionary->set_requires_slow_elements();
+ // Update the dictionary backing store on the object.
+ if (elements()->map() == GetHeap()->non_strict_arguments_elements_map()) {
+ // Also delete any parameter alias.
+ //
+ // TODO(kmillikin): when deleting the last parameter alias we could
+ // switch to a direct backing store without the parameter map. This
+ // would allow GC of the context.
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ uint32_t length = parameter_map->length();
+ if (index < length - 2) {
+ parameter_map->set(index + 2, GetHeap()->the_hole_value());
+ }
+ parameter_map->set(1, dictionary);
+ } else {
+ set_elements(dictionary);
+ }
return structure;
}
@@ -3528,13 +3925,14 @@
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
// Ignore getters and setters on pixel and external array
// elements.
return isolate->heap()->undefined_value();
case DICTIONARY_ELEMENTS:
break;
- default:
- UNREACHABLE();
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
break;
}
@@ -3819,7 +4217,7 @@
Object** proto_map_or_index_field =
RawField(prototype_transitions, HeapObject::kMapOffset);
Object* map_or_index = *proto_map_or_index_field;
- const int start = 2;
+ const int start = kProtoTransitionHeaderSize + kProtoTransitionMapOffset;
int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : start;
if (i < prototype_transitions->length()) {
// Found a map in the prototype transition array. Record progress in
@@ -3829,7 +4227,7 @@
Map* next = Map::cast(perhaps_map);
next->set_map(current);
*proto_map_or_index_field =
- Smi::FromInt(i + 2);
+ Smi::FromInt(i + kProtoTransitionElementsPerEntry);
current = next;
continue;
}
@@ -4071,6 +4469,7 @@
private:
String* name_;
Code::Flags flags_;
+ // TODO(jkummerow): We should be able to get by without this.
Code* code_;
};
@@ -4090,7 +4489,7 @@
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- // Don't use this, as the table might have grown.
+ // Don't use |this|, as the table might have grown.
CodeCacheHashTable* cache = reinterpret_cast<CodeCacheHashTable*>(obj);
int entry = cache->FindInsertionEntry(key.Hash());
@@ -4136,6 +4535,164 @@
}
+MaybeObject* PolymorphicCodeCache::Update(MapList* maps,
+ Code::Flags flags,
+ Code* code) {
+ // Initialize cache if necessary.
+ if (cache()->IsUndefined()) {
+ Object* result;
+ { MaybeObject* maybe_result =
+ PolymorphicCodeCacheHashTable::Allocate(
+ PolymorphicCodeCacheHashTable::kInitialSize);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ set_cache(result);
+ } else {
+ // This entry shouldn't be contained in the cache yet.
+ ASSERT(PolymorphicCodeCacheHashTable::cast(cache())
+ ->Lookup(maps, flags)->IsUndefined());
+ }
+ PolymorphicCodeCacheHashTable* hash_table =
+ PolymorphicCodeCacheHashTable::cast(cache());
+ Object* new_cache;
+ { MaybeObject* maybe_new_cache = hash_table->Put(maps, flags, code);
+ if (!maybe_new_cache->ToObject(&new_cache)) return maybe_new_cache;
+ }
+ set_cache(new_cache);
+ return this;
+}
+
+
+Object* PolymorphicCodeCache::Lookup(MapList* maps, Code::Flags flags) {
+ if (!cache()->IsUndefined()) {
+ PolymorphicCodeCacheHashTable* hash_table =
+ PolymorphicCodeCacheHashTable::cast(cache());
+ return hash_table->Lookup(maps, flags);
+ } else {
+ return GetHeap()->undefined_value();
+ }
+}
+
+
+// Despite their name, objects of this class are not stored in the actual
+// hash table; instead they're temporarily used for lookups. It is therefore
+// safe to have a weak (non-owning) pointer to a MapList as a member field.
+class PolymorphicCodeCacheHashTableKey : public HashTableKey {
+ public:
+ // Callers must ensure that |maps| outlives the newly constructed object.
+ PolymorphicCodeCacheHashTableKey(MapList* maps, int code_flags)
+ : maps_(maps),
+ code_flags_(code_flags) {}
+
+ bool IsMatch(Object* other) {
+ MapList other_maps(kDefaultListAllocationSize);
+ int other_flags;
+ FromObject(other, &other_flags, &other_maps);
+ if (code_flags_ != other_flags) return false;
+ if (maps_->length() != other_maps.length()) return false;
+ // Compare just the hashes first because it's faster.
+ int this_hash = MapsHashHelper(maps_, code_flags_);
+ int other_hash = MapsHashHelper(&other_maps, other_flags);
+ if (this_hash != other_hash) return false;
+
+ // Full comparison: for each map in maps_, look for an equivalent map in
+ // other_maps. This implementation is slow, but probably good enough for
+ // now because the lists are short (<= 4 elements currently).
+ for (int i = 0; i < maps_->length(); ++i) {
+ bool match_found = false;
+ for (int j = 0; j < other_maps.length(); ++j) {
+ if (maps_->at(i)->EquivalentTo(other_maps.at(j))) {
+ match_found = true;
+ break;
+ }
+ }
+ if (!match_found) return false;
+ }
+ return true;
+ }
+
+ static uint32_t MapsHashHelper(MapList* maps, int code_flags) {
+ uint32_t hash = code_flags;
+ for (int i = 0; i < maps->length(); ++i) {
+ hash ^= maps->at(i)->Hash();
+ }
+ return hash;
+ }
+
+ uint32_t Hash() {
+ return MapsHashHelper(maps_, code_flags_);
+ }
+
+ uint32_t HashForObject(Object* obj) {
+ MapList other_maps(kDefaultListAllocationSize);
+ int other_flags;
+ FromObject(obj, &other_flags, &other_maps);
+ return MapsHashHelper(&other_maps, other_flags);
+ }
+
+ MUST_USE_RESULT MaybeObject* AsObject() {
+ Object* obj;
+ // The maps in |maps_| must be copied to a newly allocated FixedArray,
+ // both because the referenced MapList is short-lived, and because C++
+ // objects can't be stored in the heap anyway.
+ { MaybeObject* maybe_obj =
+ HEAP->AllocateUninitializedFixedArray(maps_->length() + 1);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ FixedArray* list = FixedArray::cast(obj);
+ list->set(0, Smi::FromInt(code_flags_));
+ for (int i = 0; i < maps_->length(); ++i) {
+ list->set(i + 1, maps_->at(i));
+ }
+ return list;
+ }
+
+ private:
+ static MapList* FromObject(Object* obj, int* code_flags, MapList* maps) {
+ FixedArray* list = FixedArray::cast(obj);
+ maps->Rewind(0);
+ *code_flags = Smi::cast(list->get(0))->value();
+ for (int i = 1; i < list->length(); ++i) {
+ maps->Add(Map::cast(list->get(i)));
+ }
+ return maps;
+ }
+
+ MapList* maps_; // weak.
+ int code_flags_;
+ static const int kDefaultListAllocationSize = kMaxKeyedPolymorphism + 1;
+};
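The key's matching strategy in isolation: XOR-combining per-map hashes is commutative, so the prefilter is insensitive to list order, and the quadratic fallback is acceptable for lists of at most four maps. A sketch using raw hashes as stand-ins for map identity (V8 compares maps with EquivalentTo, not by hash):

#include <algorithm>
#include <cstdint>
#include <vector>

uint32_t MapsHash(const std::vector<uint32_t>& map_hashes, int code_flags) {
  uint32_t hash = static_cast<uint32_t>(code_flags);
  for (uint32_t h : map_hashes) hash ^= h;  // order-independent combine
  return hash;
}

// Assumes duplicate-free lists, as the polymorphic stub caches produce.
bool KeysMatch(const std::vector<uint32_t>& a, int a_flags,
               const std::vector<uint32_t>& b, int b_flags) {
  if (a_flags != b_flags || a.size() != b.size()) return false;
  if (MapsHash(a, a_flags) != MapsHash(b, b_flags)) return false;  // prefilter
  for (uint32_t h : a) {  // quadratic, fine for <= 4 entries
    if (std::find(b.begin(), b.end(), h) == b.end()) return false;
  }
  return true;
}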
+
+
+Object* PolymorphicCodeCacheHashTable::Lookup(MapList* maps, int code_flags) {
+ PolymorphicCodeCacheHashTableKey key(maps, code_flags);
+ int entry = FindEntry(&key);
+ if (entry == kNotFound) return GetHeap()->undefined_value();
+ return get(EntryToIndex(entry) + 1);
+}
+
+
+MaybeObject* PolymorphicCodeCacheHashTable::Put(MapList* maps,
+ int code_flags,
+ Code* code) {
+ PolymorphicCodeCacheHashTableKey key(maps, code_flags);
+ Object* obj;
+ { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ PolymorphicCodeCacheHashTable* cache =
+ reinterpret_cast<PolymorphicCodeCacheHashTable*>(obj);
+ int entry = cache->FindInsertionEntry(key.Hash());
+ { MaybeObject* maybe_obj = key.AsObject();
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ cache->set(EntryToIndex(entry), obj);
+ cache->set(EntryToIndex(entry) + 1, code);
+ cache->ElementAdded();
+ return cache;
+}
+
+
MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
ASSERT(!array->HasExternalArrayElements());
switch (array->GetElementsKind()) {
@@ -4163,8 +4720,20 @@
// Compute the union of this and the temporary fixed array.
return UnionOfKeys(key_array);
}
- default:
- UNREACHABLE();
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
+ break;
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ break;
}
UNREACHABLE();
return GetHeap()->null_value(); // Failure case needs to "return" a value.
@@ -5547,16 +6116,11 @@
bool String::IsAsciiEqualTo(Vector<const char> str) {
int slen = length();
if (str.length() != slen) return false;
- if (this->IsSeqAsciiString()) {
- SeqAsciiString* seq = SeqAsciiString::cast(this);
- char* ch = seq->GetChars();
- for (int i = 0; i < slen; i++, ch++) {
- if (*ch != str[i]) return false;
- }
- } else {
- for (int i = 0; i < slen; i++) {
- if (Get(i) != static_cast<uint16_t>(str[i])) return false;
- }
+ if (IsFlat() && IsAsciiRepresentation()) {
+ return CompareChars(ToAsciiVector().start(), str.start(), slen) == 0;
+ }
+ for (int i = 0; i < slen; i++) {
+ if (Get(i) != static_cast<uint16_t>(str[i])) return false;
}
return true;
}
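Both equality fast paths above follow the same shape: when the string is flat (one contiguous buffer) in the right representation, compare the whole run at once; otherwise fall back to indexed access. In miniature, with memcmp as the flat path:

#include <cstddef>
#include <cstring>
#include <functional>

bool CharsEqual(const char* flat_or_null,  // non-null when flat ASCII
                const std::function<char(size_t)>& get_char,  // slow accessor
                const char* str, size_t len) {
  if (flat_or_null != nullptr) {
    return std::memcmp(flat_or_null, str, len) == 0;  // one linear compare
  }
  for (size_t i = 0; i < len; ++i) {  // e.g. cons strings: fetch per index
    if (get_char(i) != str[i]) return false;
  }
  return true;
}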
@@ -5565,6 +6129,9 @@
bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
int slen = length();
if (str.length() != slen) return false;
+ if (IsFlat() && IsTwoByteRepresentation()) {
+ return CompareChars(ToUC16Vector().start(), str.start(), slen) == 0;
+ }
for (int i = 0; i < slen; i++) {
if (Get(i) != str[i]) return false;
}
@@ -5783,6 +6350,40 @@
}
+int Map::Hash() {
+ // For performance reasons we only hash the 3 most variable fields of a map:
+ // constructor, prototype and bit_field2.
+
+ // Shift away the tag.
+ int hash = (static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(constructor())) >> 2);
+
+ // XOR-ing the prototype and constructor directly yields too many zero bits
+ // when the two pointers are close (which is fairly common).
+ // To avoid this, we shift the prototype 4 bits relative to the constructor.
+ hash ^= (static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(prototype())) << 2);
+
+ return hash ^ (hash >> 16) ^ bit_field2();
+}
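The pointer-mixing trick on its own: drop the two tag bits of one pointer, shift the other so the two end up four bits apart and nearby heap addresses do not cancel under XOR, then fold the high half back in along with bit_field2:

#include <cstdint>

uint32_t MixMapFields(const void* constructor, const void* prototype,
                      uint32_t bit_field2) {
  uint32_t hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(constructor)) >> 2;
  hash ^=
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(prototype)) << 2;
  return hash ^ (hash >> 16) ^ bit_field2;
}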
+
+
+bool Map::EquivalentToForNormalization(Map* other,
+ PropertyNormalizationMode mode) {
+ return
+ constructor() == other->constructor() &&
+ prototype() == other->prototype() &&
+ inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ?
+ 0 :
+ other->inobject_properties()) &&
+ instance_type() == other->instance_type() &&
+ bit_field() == other->bit_field() &&
+ bit_field2() == other->bit_field2() &&
+ (bit_field3() & ~(1<<Map::kIsShared)) ==
+ (other->bit_field3() & ~(1<<Map::kIsShared));
+}
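The only subtlety in the equivalence check is masking kIsShared out of both bit_field3 values, so the comparison ignores whether either map is marked shared. The masking idiom, with an illustrative bit position:

#include <cstdint>

constexpr uint32_t kIsSharedBit = 1u << 5;  // illustrative position only

bool BitField3Equivalent(uint32_t a, uint32_t b) {
  return (a & ~kIsSharedBit) == (b & ~kIsSharedBit);  // ignore the shared bit
}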
+
+
void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
// Iterate over all fields in the body but take care in dealing with
// the code entry.
@@ -5801,19 +6402,6 @@
}
-uint32_t JSFunction::SourceHash() {
- uint32_t hash = 0;
- Object* script = shared()->script();
- if (!script->IsUndefined()) {
- Object* source = Script::cast(script)->source();
- if (source->IsUndefined()) hash = String::cast(source)->Hash();
- }
- hash ^= ComputeIntegerHash(shared()->start_position_and_type());
- hash += ComputeIntegerHash(shared()->end_position());
- return hash;
-}
-
-
bool JSFunction::IsInlineable() {
if (IsBuiltin()) return false;
SharedFunctionInfo* shared_info = shared();
@@ -6281,6 +6869,7 @@
if (slack != 0) {
// Resize the initial map and all maps in its transition tree.
map->TraverseTransitionTree(&ShrinkInstanceSize, &slack);
+
// Give the correct expected_nof_properties to initial maps created later.
ASSERT(expected_nof_properties() >= slack);
set_expected_nof_properties(expected_nof_properties() - slack);
@@ -6465,8 +7054,6 @@
#ifdef ENABLE_DISASSEMBLER
-#ifdef OBJECT_PRINT
-
void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
disasm::NameConverter converter;
int deopt_count = DeoptCount();
@@ -6613,8 +7200,6 @@
}
}
-#endif
-
// Identify kind of code.
const char* Code::Kind2String(Kind kind) {
@@ -6691,7 +7276,7 @@
if (name != NULL) {
PrintF(out, "extra_ic_state = %s\n", name);
} else {
- PrintF(out, "etra_ic_state = %d\n", extra);
+ PrintF(out, "extra_ic_state = %d\n", extra);
}
}
@@ -6705,6 +7290,9 @@
if (ic_state() == MONOMORPHIC) {
PrintF(out, "type = %s\n", PropertyType2String(type()));
}
+ if (is_call_stub() || is_keyed_call_stub()) {
+ PrintF(out, "argc = %d\n", arguments_count());
+ }
}
if ((name != NULL) && (name[0] != '\0')) {
PrintF(out, "name = %s\n", name);
@@ -6717,7 +7305,6 @@
Disassembler::Decode(out, this);
PrintF(out, "\n");
-#ifdef DEBUG
if (kind() == FUNCTION) {
DeoptimizationOutputData* data =
DeoptimizationOutputData::cast(this->deoptimization_data());
@@ -6728,7 +7315,6 @@
data->DeoptimizationInputDataPrint(out);
}
PrintF("\n");
-#endif
if (kind() == OPTIMIZED_FUNCTION) {
SafepointTable table(this);
@@ -6775,44 +7361,166 @@
#endif // ENABLE_DISASSEMBLER
+static void CopyFastElementsToFast(FixedArray* source,
+ FixedArray* destination,
+ WriteBarrierMode mode) {
+ uint32_t count = static_cast<uint32_t>(source->length());
+ for (uint32_t i = 0; i < count; ++i) {
+ destination->set(i, source->get(i), mode);
+ }
+}
+
+
+static void CopySlowElementsToFast(NumberDictionary* source,
+ FixedArray* destination,
+ WriteBarrierMode mode) {
+ for (int i = 0; i < source->Capacity(); ++i) {
+ Object* key = source->KeyAt(i);
+ if (key->IsNumber()) {
+ uint32_t entry = static_cast<uint32_t>(key->Number());
+ destination->set(entry, source->ValueAt(i), mode);
+ }
+ }
+}
+
+
MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
int length) {
Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
ASSERT(!HasExternalArrayElements());
+ // Allocate a new fast elements backing store.
+ FixedArray* new_elements = NULL;
+ { Object* object;
+ MaybeObject* maybe = heap->AllocateFixedArrayWithHoles(capacity);
+ if (!maybe->ToObject(&object)) return maybe;
+ new_elements = FixedArray::cast(object);
+ }
+
+ // Find the new map to use for this object if there is a map change.
+ Map* new_map = NULL;
+ if (elements()->map() != heap->non_strict_arguments_elements_map()) {
+ Object* object;
+ MaybeObject* maybe = map()->GetFastElementsMap();
+ if (!maybe->ToObject(&object)) return maybe;
+ new_map = Map::cast(object);
+ }
+
+ switch (GetElementsKind()) {
+ case FAST_ELEMENTS: {
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
+ CopyFastElementsToFast(FixedArray::cast(elements()), new_elements, mode);
+ set_map(new_map);
+ set_elements(new_elements);
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
+ CopySlowElementsToFast(NumberDictionary::cast(elements()),
+ new_elements,
+ mode);
+ set_map(new_map);
+ set_elements(new_elements);
+ break;
+ }
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
+ // The object's map and the parameter map are unchanged, the unaliased
+ // arguments are copied to the new backing store.
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ if (arguments->IsDictionary()) {
+ CopySlowElementsToFast(NumberDictionary::cast(arguments),
+ new_elements,
+ mode);
+ } else {
+ CopyFastElementsToFast(arguments, new_elements, mode);
+ }
+ parameter_map->set(1, new_elements);
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS: {
+ FixedDoubleArray* old_elements = FixedDoubleArray::cast(elements());
+ uint32_t old_length = static_cast<uint32_t>(old_elements->length());
+ // Copy the old contents into the new array, leaving holes in place.
+ for (uint32_t i = 0; i < old_length; i++) {
+ if (!old_elements->is_the_hole(i)) {
+ Object* obj;
+ // Objects must be allocated in the old object space, since the
+ // overall number of HeapNumbers needed for the conversion might
+ // exceed the capacity of new space, and we would fail repeatedly
+ // trying to convert the FixedDoubleArray.
+ MaybeObject* maybe_value_object =
+ GetHeap()->AllocateHeapNumber(old_elements->get(i), TENURED);
+ if (!maybe_value_object->ToObject(&obj)) return maybe_value_object;
+ // Force write barrier. It's not worth trying to exploit
+ // elems->GetWriteBarrierMode(), since it requires an
+ // AssertNoAllocation stack object that would have to be positioned
+ // after the HeapNumber allocation anyway.
+ new_elements->set(i, obj, UPDATE_WRITE_BARRIER);
+ }
+ }
+ set_map(new_map);
+ set_elements(new_elements);
+ break;
+ }
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+
+ // Update the length if necessary.
+ if (IsJSArray()) {
+ JSArray::cast(this)->set_length(Smi::FromInt(length));
+ }
+
+ return new_elements;
+}
+
+
+MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
+ int capacity,
+ int length) {
+ Heap* heap = GetHeap();
+ // We should never end in here with a pixel or external array.
+ ASSERT(!HasExternalArrayElements());
+
Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity);
+ { MaybeObject* maybe_obj =
+ heap->AllocateUninitializedFixedDoubleArray(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- FixedArray* elems = FixedArray::cast(obj);
+ FixedDoubleArray* elems = FixedDoubleArray::cast(obj);
- { MaybeObject* maybe_obj = map()->GetFastElementsMap();
+ { MaybeObject* maybe_obj = map()->GetFastDoubleElementsMap();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
Map* new_map = Map::cast(obj);
AssertNoAllocation no_gc;
- WriteBarrierMode mode = elems->GetWriteBarrierMode(no_gc);
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
- FixedArray* old_elements = FixedArray::cast(elements());
- uint32_t old_length = static_cast<uint32_t>(old_elements->length());
- // Fill out the new array with this content and array holes.
- for (uint32_t i = 0; i < old_length; i++) {
- elems->set(i, old_elements->get(i), mode);
- }
+ elems->Initialize(FixedArray::cast(elements()));
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS: {
+ elems->Initialize(FixedDoubleArray::cast(elements()));
break;
}
case DICTIONARY_ELEMENTS: {
- NumberDictionary* dictionary = NumberDictionary::cast(elements());
- for (int i = 0; i < dictionary->Capacity(); i++) {
- Object* key = dictionary->KeyAt(i);
- if (key->IsNumber()) {
- uint32_t entry = static_cast<uint32_t>(key->Number());
- elems->set(entry, dictionary->ValueAt(i), mode);
- }
- }
+ elems->Initialize(NumberDictionary::cast(elements()));
break;
}
default:
@@ -6820,7 +7528,9 @@
break;
}
+ ASSERT(new_map->has_fast_double_elements());
set_map(new_map);
+ ASSERT(elems->IsFixedDoubleArray());
set_elements(elems);
if (IsJSArray()) {
@@ -6842,10 +7552,8 @@
// Make sure we never try to shrink dense arrays into sparse arrays.
ASSERT(static_cast<uint32_t>(FixedArray::cast(elements())->length()) <=
new_length);
- Object* obj;
- { MaybeObject* maybe_obj = NormalizeElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
+ MaybeObject* result = NormalizeElements();
+ if (result->IsFailure()) return result;
// Update length for JSArrays.
if (IsJSArray()) JSArray::cast(this)->set_length(len);
@@ -6860,7 +7568,19 @@
}
break;
}
- default:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
+ break;
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
UNREACHABLE();
break;
}
@@ -6900,7 +7620,7 @@
static Failure* ArrayLengthRangeError(Heap* heap) {
- HandleScope scope;
+ HandleScope scope(heap->isolate());
return heap->isolate()->Throw(
*FACTORY->NewRangeError("invalid_array_length",
HandleVector<Object>(NULL, 0)));
@@ -6916,21 +7636,58 @@
if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
const int value = Smi::cast(smi_length)->value();
if (value < 0) return ArrayLengthRangeError(GetHeap());
- switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- int old_capacity = FixedArray::cast(elements())->length();
+ JSObject::ElementsKind elements_kind = GetElementsKind();
+ switch (elements_kind) {
+ case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ int old_capacity = FixedArrayBase::cast(elements())->length();
if (value <= old_capacity) {
if (IsJSArray()) {
Object* obj;
- { MaybeObject* maybe_obj = EnsureWritableFastElements();
+ if (elements_kind == FAST_ELEMENTS) {
+ MaybeObject* maybe_obj = EnsureWritableFastElements();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- int old_length = FastD2I(JSArray::cast(this)->length()->Number());
- // NOTE: We may be able to optimize this by removing the
- // last part of the elements backing storage array and
- // setting the capacity to the new size.
- for (int i = value; i < old_length; i++) {
- FixedArray::cast(elements())->set_the_hole(i);
+ if (2 * value <= old_capacity) {
+ // If more than half the elements won't be used, trim the array.
+ if (value == 0) {
+ initialize_elements();
+ } else {
+ Address filler_start;
+ int filler_size;
+ if (GetElementsKind() == FAST_ELEMENTS) {
+ FixedArray* fast_elements = FixedArray::cast(elements());
+ fast_elements->set_length(value);
+ filler_start = fast_elements->address() +
+ FixedArray::OffsetOfElementAt(value);
+ filler_size = (old_capacity - value) * kPointerSize;
+ } else {
+ ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+ FixedDoubleArray* fast_double_elements =
+ FixedDoubleArray::cast(elements());
+ fast_double_elements->set_length(value);
+ filler_start = fast_double_elements->address() +
+ FixedDoubleArray::OffsetOfElementAt(value);
+ filler_size = (old_capacity - value) * kDoubleSize;
+ }
+ GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
+ }
+ } else {
+ // Otherwise, fill the unused tail with holes.
+ int old_length = FastD2I(JSArray::cast(this)->length()->Number());
+ if (GetElementsKind() == FAST_ELEMENTS) {
+ FixedArray* fast_elements = FixedArray::cast(elements());
+ for (int i = value; i < old_length; i++) {
+ fast_elements->set_the_hole(i);
+ }
+ } else {
+ ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+ FixedDoubleArray* fast_double_elements =
+ FixedDoubleArray::cast(elements());
+ for (int i = value; i < old_length; i++) {
+ fast_double_elements->set_the_hole(i);
+ }
+ }
}
JSArray::cast(this)->set_length(Smi::cast(smi_length));
}
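The resize policy above over a plain vector: when the new length is at most half the capacity, actually give the memory back (V8 overwrites the trimmed tail with a filler object so the heap stays iterable; shrink_to_fit is the closest std:: analogue), otherwise just hole out the dropped range and keep the capacity for future growth:

#include <cstddef>
#include <optional>
#include <vector>

void SetFastLength(std::vector<std::optional<double>>* store,
                   size_t new_length) {
  if (2 * new_length <= store->capacity()) {
    store->resize(new_length);
    store->shrink_to_fit();  // non-binding, but plays the filler's role here
  } else {
    for (size_t i = new_length; i < store->size(); ++i) {
      (*store)[i] = std::nullopt;  // fill the unused tail with holes
    }
  }
}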
@@ -6940,11 +7697,15 @@
int new_capacity = value > min ? value : min;
if (new_capacity <= kMaxFastElementsLength ||
!ShouldConvertToSlowElements(new_capacity)) {
- Object* obj;
- { MaybeObject* maybe_obj =
- SetFastElementsCapacityAndLength(new_capacity, value);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ MaybeObject* result;
+ if (GetElementsKind() == FAST_ELEMENTS) {
+ result = SetFastElementsCapacityAndLength(new_capacity, value);
+ } else {
+ ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+ result = SetFastDoubleElementsCapacityAndLength(new_capacity,
+ value);
}
+ if (result->IsFailure()) return result;
return this;
}
break;
@@ -6969,7 +7730,16 @@
}
return this;
}
- default:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
UNREACHABLE();
break;
}
@@ -7000,53 +7770,74 @@
Object* Map::GetPrototypeTransition(Object* prototype) {
FixedArray* cache = prototype_transitions();
- int capacity = cache->length();
- if (capacity == 0) return NULL;
- int finger = Smi::cast(cache->get(0))->value();
- for (int i = 1; i < finger; i += 2) {
- if (cache->get(i) == prototype) return cache->get(i + 1);
+ int number_of_transitions = NumberOfProtoTransitions();
+ const int proto_offset =
+ kProtoTransitionHeaderSize + kProtoTransitionPrototypeOffset;
+ const int map_offset = kProtoTransitionHeaderSize + kProtoTransitionMapOffset;
+ const int step = kProtoTransitionElementsPerEntry;
+ for (int i = 0; i < number_of_transitions; i++) {
+ if (cache->get(proto_offset + i * step) == prototype) {
+ Object* map = cache->get(map_offset + i * step);
+ ASSERT(map->IsMap());
+ return map;
+ }
}
return NULL;
}
MaybeObject* Map::PutPrototypeTransition(Object* prototype, Map* map) {
+ ASSERT(map->IsMap());
+ ASSERT(HeapObject::cast(prototype)->map()->IsMap());
// Don't cache prototype transition if this map is shared.
if (is_shared() || !FLAG_cache_prototype_transitions) return this;
FixedArray* cache = prototype_transitions();
- int capacity = cache->length();
+ const int step = kProtoTransitionElementsPerEntry;
+ const int header = kProtoTransitionHeaderSize;
- int finger = (capacity == 0) ? 1 : Smi::cast(cache->get(0))->value();
+ int capacity = (cache->length() - header) / step;
- if (finger >= capacity) {
+ int transitions = NumberOfProtoTransitions() + 1;
+
+ if (transitions > capacity) {
if (capacity > kMaxCachedPrototypeTransitions) return this;
FixedArray* new_cache;
- { MaybeObject* maybe_cache = heap()->AllocateFixedArray(finger * 2 + 1);
+ // Grow array by factor 2 over and above what we need.
+ { MaybeObject* maybe_cache =
+ heap()->AllocateFixedArray(transitions * 2 * step + header);
if (!maybe_cache->To<FixedArray>(&new_cache)) return maybe_cache;
}
- for (int i = 1; i < capacity; i++) new_cache->set(i, cache->get(i));
+ for (int i = 0; i < capacity * step; i++) {
+ new_cache->set(i + header, cache->get(i + header));
+ }
cache = new_cache;
set_prototype_transitions(cache);
}
- cache->set(finger, prototype);
- cache->set(finger + 1, map);
- cache->set(0, Smi::FromInt(finger + 2));
+ int last = transitions - 1;
+
+ cache->set(header + last * step + kProtoTransitionPrototypeOffset, prototype);
+ cache->set(header + last * step + kProtoTransitionMapOffset, map);
+ SetNumberOfProtoTransitions(transitions);
return cache;
}
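
The rewritten cache drops the old "finger" encoding (a Smi in slot 0 pointing past the last used pair) in favor of a fixed header plus fixed-width entries addressed through the kProtoTransition* constants. A sketch of the layout and lookup, assuming a header of one slot and two slots per entry; those constants are defined elsewhere in the patch, so the numbers here are illustrative:

    // cache layout:
    //   [0]              number of transitions (header)
    //   [1 + i*2 + 0]    prototype of entry i
    //   [1 + i*2 + 1]    map reached through that prototype
    //
    // Hypothetical standalone lookup over a plain array of Object*:
    Object* Lookup(Object** cache, int transitions, Object* prototype) {
      const int header = 1, step = 2;
      for (int i = 0; i < transitions; i++) {
        if (cache[header + i * step] == prototype) {
          return cache[header + i * step + 1];  // the cached map
        }
      }
      return NULL;  // no transition recorded for this prototype
    }
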
-MaybeObject* JSObject::SetPrototype(Object* value,
- bool skip_hidden_prototypes) {
+MaybeObject* JSReceiver::SetPrototype(Object* value,
+ bool skip_hidden_prototypes) {
+#ifdef DEBUG
+ int size = Size();
+#endif
+
Heap* heap = GetHeap();
// Silently ignore the change if value is not a JSObject or null.
// SpiderMonkey behaves this way.
- if (!value->IsJSObject() && !value->IsNull()) return value;
+ if (!value->IsJSReceiver() && !value->IsNull()) return value;
// From 8.6.2 Object Internal Methods
// ...
@@ -7057,7 +7848,7 @@
// or [[Extensible]] must not violate the invariants defined in the preceding
// paragraph.
if (!this->map()->is_extensible()) {
- HandleScope scope;
+ HandleScope scope(heap->isolate());
Handle<Object> handle(this, heap->isolate());
return heap->isolate()->Throw(
*FACTORY->NewTypeError("non_extensible_proto",
@@ -7071,13 +7862,13 @@
for (Object* pt = value; pt != heap->null_value(); pt = pt->GetPrototype()) {
if (JSObject::cast(pt) == this) {
// Cycle detected.
- HandleScope scope;
+ HandleScope scope(heap->isolate());
return heap->isolate()->Throw(
*FACTORY->NewError("cyclic_proto", HandleVector<Object>(NULL, 0)));
}
}
- JSObject* real_receiver = this;
+ JSReceiver* real_receiver = this;
if (skip_hidden_prototypes) {
// Find the first object in the chain whose prototype object is not
@@ -7113,12 +7904,12 @@
real_receiver->set_map(Map::cast(new_map));
heap->ClearInstanceofCache();
-
+ ASSERT(size == Size());
return value;
}
-bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
+bool JSObject::HasElementPostInterceptor(JSReceiver* receiver, uint32_t index) {
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
uint32_t length = IsJSArray() ?
@@ -7145,7 +7936,8 @@
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS: {
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
ExternalArray* array = ExternalArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
return true;
@@ -7159,7 +7951,7 @@
}
break;
}
- default:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -7173,14 +7965,14 @@
}
-bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
+bool JSObject::HasElementWithInterceptor(JSReceiver* receiver, uint32_t index) {
Isolate* isolate = GetIsolate();
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<JSObject> receiver_handle(receiver);
+ Handle<JSReceiver> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
CustomArguments args(isolate, interceptor->data(), receiver, this);
v8::AccessorInfo info(args.end());
@@ -7273,23 +8065,68 @@
if (index < static_cast<uint32_t>(array->length())) return FAST_ELEMENT;
break;
}
+ case FAST_DOUBLE_ELEMENTS:
+ UNREACHABLE();
+ break;
case DICTIONARY_ELEMENTS: {
if (element_dictionary()->FindEntry(index) !=
- NumberDictionary::kNotFound) {
+ NumberDictionary::kNotFound) {
return DICTIONARY_ELEMENT;
}
break;
}
- default:
- UNREACHABLE();
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ // Aliased parameters and non-aliased elements in a fast backing store
+ // behave as FAST_ELEMENT. Non-aliased elements in a dictionary
+ // backing store behave as DICTIONARY_ELEMENT.
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ uint32_t length = parameter_map->length();
+ Object* probe =
+ index < (length - 2) ? parameter_map->get(index + 2) : NULL;
+ if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT;
+ // If not aliased, check the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ if (arguments->IsDictionary()) {
+ NumberDictionary* dictionary = NumberDictionary::cast(arguments);
+ if (dictionary->FindEntry(index) != NumberDictionary::kNotFound) {
+ return DICTIONARY_ELEMENT;
+ }
+ } else {
+ length = arguments->length();
+ probe = (index < length) ? arguments->get(index) : NULL;
+ if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT;
+ }
break;
+ }
}
return UNDEFINED_ELEMENT;
}
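
NON_STRICT_ARGUMENTS_ELEMENTS introduces a two-level backing store: a "parameter map" FixedArray whose first two slots hold the context and an arguments store (a fast FixedArray or a NumberDictionary), followed by one slot per mapped parameter containing either a Smi context index (the parameter is aliased to a context slot) or the hole. A condensed sketch of the read path this switch implements (a hypothetical free function; the real logic lives in the member functions above):

    // parameter_map layout:
    //   get(0)      Context holding the aliased parameter values
    //   get(1)      backing store for unmapped elements
    //   get(2 + i)  Smi context index if parameter i is aliased, else the hole
    Object* ReadArgument(FixedArray* parameter_map, uint32_t index) {
      uint32_t length = parameter_map->length();
      Object* probe =
          index < length - 2 ? parameter_map->get(index + 2) : NULL;
      if (probe != NULL && !probe->IsTheHole()) {
        // Aliased: the live value is in the function's context.
        Context* context = Context::cast(parameter_map->get(0));
        return context->get(Smi::cast(probe)->value());
      }
      // Not aliased: fall back to the arguments backing store (elided).
      return NULL;
    }
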
-bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
+bool JSObject::HasElementInElements(FixedArray* elements,
+ ElementsKind kind,
+ uint32_t index) {
+ ASSERT(kind == FAST_ELEMENTS || kind == DICTIONARY_ELEMENTS);
+ if (kind == FAST_ELEMENTS) {
+ int length = IsJSArray()
+ ? Smi::cast(JSArray::cast(this)->length())->value()
+ : elements->length();
+ if (index < static_cast<uint32_t>(length) &&
+ !elements->get(index)->IsTheHole()) {
+ return true;
+ }
+ } else {
+ if (NumberDictionary::cast(elements)->FindEntry(index) !=
+ NumberDictionary::kNotFound) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) {
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
Heap* heap = GetHeap();
@@ -7304,7 +8141,8 @@
return HasElementWithInterceptor(receiver, index);
}
- switch (GetElementsKind()) {
+ ElementsKind kind = GetElementsKind();
+ switch (kind) {
case FAST_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>
@@ -7314,6 +8152,15 @@
!FixedArray::cast(elements())->get(index)->IsTheHole()) return true;
break;
}
+ case FAST_DOUBLE_ELEMENTS: {
+ uint32_t length = IsJSArray() ?
+ static_cast<uint32_t>
+ (Smi::cast(JSArray::cast(this)->length())->value()) :
+ static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
+ if ((index < length) &&
+ !FixedDoubleArray::cast(elements())->is_the_hole(index)) return true;
+ break;
+ }
case EXTERNAL_PIXEL_ELEMENTS: {
ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
if (index < static_cast<uint32_t>(pixels->length())) {
@@ -7342,9 +8189,19 @@
}
break;
}
- default:
- UNREACHABLE();
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ uint32_t length = parameter_map->length();
+ Object* probe =
+ (index < length - 2) ? parameter_map->get(index + 2) : NULL;
+ if (probe != NULL && !probe->IsTheHole()) return true;
+
+ // Not a mapped parameter, check the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS : FAST_ELEMENTS;
+ if (HasElementInElements(arguments, kind, index)) return true;
break;
+ }
}
// Handle [] on String objects.
@@ -7410,7 +8267,7 @@
Handle<JSObject> self(JSObject::cast(receiver));
Handle<JSObject> holder_handle(JSObject::cast(holder));
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<String> key(isolate->factory()->NumberToString(number));
+ Handle<String> key = isolate->factory()->NumberToString(number);
LOG(isolate, ApiNamedPropertyAccess("load", *self, *key));
CustomArguments args(isolate, data->data(), *self, *holder_handle);
v8::AccessorInfo info(args.end());
@@ -7444,7 +8301,8 @@
MaybeObject* JSObject::SetElementWithCallback(Object* structure,
uint32_t index,
Object* value,
- JSObject* holder) {
+ JSObject* holder,
+ StrictModeFlag strict_mode) {
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
@@ -7483,10 +8341,13 @@
}
if (structure->IsFixedArray()) {
- Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
+ Handle<Object> setter(FixedArray::cast(structure)->get(kSetterIndex));
if (setter->IsJSFunction()) {
- return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
+ return SetPropertyWithDefinedSetter(JSFunction::cast(*setter), value);
} else {
+ if (strict_mode == kNonStrictMode) {
+ return value;
+ }
Handle<Object> holder_handle(holder, isolate);
Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
Handle<Object> args[2] = { key, holder_handle };
@@ -7501,6 +8362,30 @@
}
+bool JSObject::HasFastArgumentsElements() {
+ Heap* heap = GetHeap();
+ if (!elements()->IsFixedArray()) return false;
+ FixedArray* elements = FixedArray::cast(this->elements());
+ if (elements->map() != heap->non_strict_arguments_elements_map()) {
+ return false;
+ }
+ FixedArray* arguments = FixedArray::cast(elements->get(1));
+ return !arguments->IsDictionary();
+}
+
+
+bool JSObject::HasDictionaryArgumentsElements() {
+ Heap* heap = GetHeap();
+ if (!elements()->IsFixedArray()) return false;
+ FixedArray* elements = FixedArray::cast(this->elements());
+ if (elements->map() != heap->non_strict_arguments_elements_map()) {
+ return false;
+ }
+ FixedArray* arguments = FixedArray::cast(elements->get(1));
+ return arguments->IsDictionary();
+}
+
+
// Adding n elements in fast case is O(n*n).
// Note: revisit design to have dual undefined values to capture absent
// elements.
@@ -7508,27 +8393,215 @@
Object* value,
StrictModeFlag strict_mode,
bool check_prototype) {
- ASSERT(HasFastElements());
+ ASSERT(HasFastElements() || HasFastArgumentsElements());
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj = EnsureWritableFastElements();
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
+ FixedArray* backing_store = FixedArray::cast(elements());
+ if (backing_store->map() == GetHeap()->non_strict_arguments_elements_map()) {
+ backing_store = FixedArray::cast(backing_store->get(1));
+ } else {
+ Object* writable;
+ MaybeObject* maybe = EnsureWritableFastElements();
+ if (!maybe->ToObject(&writable)) return maybe;
+ backing_store = FixedArray::cast(writable);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
- uint32_t elms_length = static_cast<uint32_t>(elms->length());
+ uint32_t length = static_cast<uint32_t>(backing_store->length());
if (check_prototype &&
- (index >= elms_length || elms->get(index)->IsTheHole())) {
+ (index >= length || backing_store->get(index)->IsTheHole())) {
bool found;
- MaybeObject* result =
- SetElementWithCallbackSetterInPrototypes(index, value, &found);
+ MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
+ value,
+ &found,
+ strict_mode);
if (found) return result;
}
+ // Check whether there is extra space in the fixed array.
+ if (index < length) {
+ backing_store->set(index, value);
+ if (IsJSArray()) {
+ // Update the length of the array if needed.
+ uint32_t array_length = 0;
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
+ if (index >= array_length) {
+ JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
+ }
+ }
+ return value;
+ }
- // Check whether there is extra space in fixed array..
+ // Allow gap in fast case.
+ if ((index - length) < kMaxGap) {
+ // Try allocating extra space.
+ int new_capacity = NewElementsCapacity(index + 1);
+ if (new_capacity <= kMaxFastElementsLength ||
+ !ShouldConvertToSlowElements(new_capacity)) {
+ ASSERT(static_cast<uint32_t>(new_capacity) > index);
+ Object* new_elements;
+ MaybeObject* maybe =
+ SetFastElementsCapacityAndLength(new_capacity, index + 1);
+ if (!maybe->ToObject(&new_elements)) return maybe;
+ FixedArray::cast(new_elements)->set(index, value);
+ return value;
+ }
+ }
+
+ // Otherwise default to slow case.
+ MaybeObject* result = NormalizeElements();
+ if (result->IsFailure()) return result;
+ return SetDictionaryElement(index, value, strict_mode, check_prototype);
+}
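
The rewritten SetFastElement makes its storage decision in three tiers: write in place if the index is within the current backing store, grow the store if the gap past the current capacity stays under kMaxGap and the result would not be too sparse, and otherwise normalize to dictionary elements. A hypothetical classifier mirroring those tiers (names other than kMaxGap are invented for the sketch):

    enum StoreTier { IN_PLACE, GROW, DICTIONARY };

    // Condensed from SetFastElement above: decide where a store at
    // 'index' lands given the current backing store capacity.
    StoreTier ClassifyStore(uint32_t index, uint32_t capacity,
                            uint32_t max_gap, bool would_be_too_sparse) {
      if (index < capacity) return IN_PLACE;               // fits already
      if (index - capacity < max_gap && !would_be_too_sparse) {
        return GROW;                                       // extend the array
      }
      return DICTIONARY;                                   // normalize
    }
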
+
+
+MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
+ Object* value,
+ StrictModeFlag strict_mode,
+ bool check_prototype) {
+ ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+ Isolate* isolate = GetIsolate();
+ Heap* heap = isolate->heap();
+
+ // Insert element in the dictionary.
+ FixedArray* elements = FixedArray::cast(this->elements());
+ bool is_arguments =
+ (elements->map() == heap->non_strict_arguments_elements_map());
+ NumberDictionary* dictionary = NULL;
+ if (is_arguments) {
+ dictionary = NumberDictionary::cast(elements->get(1));
+ } else {
+ dictionary = NumberDictionary::cast(elements);
+ }
+
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
+ Object* element = dictionary->ValueAt(entry);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ if (details.type() == CALLBACKS) {
+ return SetElementWithCallback(element, index, value, this, strict_mode);
+ } else {
+ dictionary->UpdateMaxNumberKey(index);
+ // If put fails in strict mode, throw an exception.
+ if (!dictionary->ValueAtPut(entry, value) && strict_mode == kStrictMode) {
+ Handle<Object> holder(this);
+ Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
+ Handle<Object> args[2] = { number, holder };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("strict_read_only_property",
+ HandleVector(args, 2));
+ return isolate->Throw(*error);
+ }
+ }
+ } else {
+ // Index not already used. Look for an accessor in the prototype chain.
+ if (check_prototype) {
+ bool found;
+ MaybeObject* result =
+ SetElementWithCallbackSetterInPrototypes(
+ index, value, &found, strict_mode);
+ if (found) return result;
+ }
+ // When we set the is_extensible flag to false we always force the
+ // elements into dictionary mode (and force them to stay there).
+ if (!map()->is_extensible()) {
+ if (strict_mode == kNonStrictMode) {
+ return isolate->heap()->undefined_value();
+ } else {
+ Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
+ Handle<String> name = isolate->factory()->NumberToString(number);
+ Handle<Object> args[1] = { name };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("object_not_extensible",
+ HandleVector(args, 1));
+ return isolate->Throw(*error);
+ }
+ }
+ Object* new_dictionary;
+ MaybeObject* maybe = dictionary->AtNumberPut(index, value);
+ if (!maybe->ToObject(&new_dictionary)) return maybe;
+ if (dictionary != NumberDictionary::cast(new_dictionary)) {
+ if (is_arguments) {
+ elements->set(1, new_dictionary);
+ } else {
+ set_elements(HeapObject::cast(new_dictionary));
+ }
+ dictionary = NumberDictionary::cast(new_dictionary);
+ }
+ }
+
+ // Update the array length if this JSObject is an array.
+ if (IsJSArray()) {
+ MaybeObject* result =
+ JSArray::cast(this)->JSArrayUpdateLengthFromIndex(index, value);
+ if (result->IsFailure()) return result;
+ }
+
+ // Attempt to put this object back in fast case.
+ if (ShouldConvertToFastElements()) {
+ uint32_t new_length = 0;
+ if (IsJSArray()) {
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
+ } else {
+ new_length = dictionary->max_number_key() + 1;
+ }
+ MaybeObject* result = ShouldConvertToFastDoubleElements()
+ ? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
+ : SetFastElementsCapacityAndLength(new_length, new_length);
+ if (result->IsFailure()) return result;
+#ifdef DEBUG
+ if (FLAG_trace_normalization) {
+ PrintF("Object elements are fast case again:\n");
+ Print();
+ }
+#endif
+ }
+ return value;
+}
+
+
+MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
+ uint32_t index,
+ Object* value,
+ StrictModeFlag strict_mode,
+ bool check_prototype) {
+ ASSERT(HasFastDoubleElements());
+
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
+ uint32_t elms_length = static_cast<uint32_t>(elms->length());
+
+ // If storing to an element that isn't in the array, pass the store request
+ // up the prototype chain before storing in the receiver's elements.
+ if (check_prototype &&
+ (index >= elms_length || elms->is_the_hole(index))) {
+ bool found;
+ MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
+ value,
+ &found,
+ strict_mode);
+ if (found) return result;
+ }
+
+ // If the value object is not a number, switch to fast elements and try
+ // again.
+ bool value_is_smi = value->IsSmi();
+ if (!value->IsNumber()) {
+ Object* obj;
+ uint32_t length = elms_length;
+ if (IsJSArray()) {
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+ }
+ MaybeObject* maybe_obj =
+ SetFastElementsCapacityAndLength(elms_length, length);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ return SetFastElement(index, value, strict_mode, check_prototype);
+ }
+
+ double double_value = value_is_smi
+ ? static_cast<double>(Smi::cast(value)->value())
+ : HeapNumber::cast(value)->value();
+
+ // Check whether there is extra space in the fixed array.
if (index < elms_length) {
- elms->set(index, value);
+ elms->set(index, double_value);
if (IsJSArray()) {
// Update the length of the array if needed.
uint32_t array_length = 0;
@@ -7549,15 +8622,19 @@
ASSERT(static_cast<uint32_t>(new_capacity) > index);
Object* obj;
{ MaybeObject* maybe_obj =
- SetFastElementsCapacityAndLength(new_capacity, index + 1);
+ SetFastDoubleElementsCapacityAndLength(new_capacity,
+ index + 1);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- FixedArray::cast(elements())->set(index, value);
+ FixedDoubleArray::cast(elements())->set(index, double_value);
return value;
}
}
// Otherwise default to slow case.
+ ASSERT(HasFastDoubleElements());
+ ASSERT(map()->has_fast_double_elements());
+ ASSERT(elements()->IsFixedDoubleArray());
Object* obj;
{ MaybeObject* maybe_obj = NormalizeElements();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
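
SetFastDoubleElement keeps the unboxed representation only while every stored value is a number: Smi payloads are widened to double, HeapNumbers are read directly, and anything else forces the object back to FAST_ELEMENTS before the store is retried. A sketch of the unboxing step, with an illustrative JS trigger for the fallback in the comments:

    // var a = [1.5, 2.5];  // FAST_DOUBLE_ELEMENTS: raw doubles, no boxing
    // a[0] = "x";          // non-number store -> back to FAST_ELEMENTS
    //
    // Unboxing as done above; the caller has already checked IsNumber().
    double Unbox(Object* value) {
      return value->IsSmi()
          ? static_cast<double>(Smi::cast(value)->value())
          : HeapNumber::cast(value)->value();
    }
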
@@ -7575,7 +8652,7 @@
if (IsAccessCheckNeeded()) {
Heap* heap = GetHeap();
if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
- HandleScope scope;
+ HandleScope scope(heap->isolate());
Handle<Object> value_handle(value);
heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
return *value_handle;
@@ -7614,8 +8691,9 @@
Isolate* isolate = GetIsolate();
switch (GetElementsKind()) {
case FAST_ELEMENTS:
- // Fast case.
return SetFastElement(index, value, strict_mode, check_prototype);
+ case FAST_DOUBLE_ELEMENTS:
+ return SetFastDoubleElement(index, value, strict_mode, check_prototype);
case EXTERNAL_PIXEL_ELEMENTS: {
ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
return pixels->SetValue(index, value);
@@ -7655,101 +8733,30 @@
ExternalDoubleArray* array = ExternalDoubleArray::cast(elements());
return array->SetValue(index, value);
}
- case DICTIONARY_ELEMENTS: {
- // Insert element in the dictionary.
- FixedArray* elms = FixedArray::cast(elements());
- NumberDictionary* dictionary = NumberDictionary::cast(elms);
-
- int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
- PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.type() == CALLBACKS) {
- return SetElementWithCallback(element, index, value, this);
- } else {
- dictionary->UpdateMaxNumberKey(index);
- // If put fails instrict mode, throw exception.
- if (!dictionary->ValueAtPut(entry, value) &&
- strict_mode == kStrictMode) {
- Handle<Object> holder(this);
- Handle<Object> number(isolate->factory()->NewNumberFromUint(index));
- Handle<Object> args[2] = { number, holder };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("strict_read_only_property",
- HandleVector(args, 2)));
- }
- }
+ case DICTIONARY_ELEMENTS:
+ return SetDictionaryElement(index, value, strict_mode, check_prototype);
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ uint32_t length = parameter_map->length();
+ Object* probe =
+ (index < length - 2) ? parameter_map->get(index + 2) : NULL;
+ if (probe != NULL && !probe->IsTheHole()) {
+ Context* context = Context::cast(parameter_map->get(0));
+ int context_index = Smi::cast(probe)->value();
+ ASSERT(!context->get(context_index)->IsTheHole());
+ context->set(context_index, value);
+ return value;
} else {
- // Index not already used. Look for an accessor in the prototype chain.
- if (check_prototype) {
- bool found;
- MaybeObject* result =
- // Strict mode not needed. No-setter case already handled.
- SetElementWithCallbackSetterInPrototypes(index, value, &found);
- if (found) return result;
- }
- // When we set the is_extensible flag to false we always force
- // the element into dictionary mode (and force them to stay there).
- if (!map()->is_extensible()) {
- if (strict_mode == kNonStrictMode) {
- return isolate->heap()->undefined_value();
- } else {
- Handle<Object> number(isolate->factory()->NewNumberFromUint(index));
- Handle<String> index_string(
- isolate->factory()->NumberToString(number));
- Handle<Object> args[1] = { index_string };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("object_not_extensible",
- HandleVector(args, 1)));
- }
- }
- Object* result;
- { MaybeObject* maybe_result = dictionary->AtNumberPut(index, value);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- if (elms != FixedArray::cast(result)) {
- set_elements(FixedArray::cast(result));
- }
- }
-
- // Update the array length if this JSObject is an array.
- if (IsJSArray()) {
- JSArray* array = JSArray::cast(this);
- Object* return_value;
- { MaybeObject* maybe_return_value =
- array->JSArrayUpdateLengthFromIndex(index, value);
- if (!maybe_return_value->ToObject(&return_value)) {
- return maybe_return_value;
- }
- }
- }
-
- // Attempt to put this object back in fast case.
- if (ShouldConvertToFastElements()) {
- uint32_t new_length = 0;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
+ // Object is not mapped, defer to the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ if (arguments->IsDictionary()) {
+ return SetDictionaryElement(index, value, strict_mode,
+ check_prototype);
} else {
- new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
+ return SetFastElement(index, value, strict_mode, check_prototype);
}
- Object* obj;
- { MaybeObject* maybe_obj =
- SetFastElementsCapacityAndLength(new_length, new_length);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-#ifdef DEBUG
- if (FLAG_trace_normalization) {
- PrintF("Object elements are fast case again:\n");
- Print();
- }
-#endif
}
-
- return value;
}
- default:
- UNREACHABLE();
- break;
}
// All possible cases have been handled above. Add a return to avoid the
// complaints from the compiler.
@@ -7789,6 +8796,15 @@
}
break;
}
+ case FAST_DOUBLE_ELEMENTS: {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
+ if (index < static_cast<uint32_t>(elms->length())) {
+ if (!elms->is_the_hole(index)) {
+ return GetHeap()->NumberFromDouble(elms->get(index));
+ }
+ }
+ break;
+ }
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
@@ -7820,8 +8836,8 @@
}
break;
}
- default:
- UNREACHABLE();
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
break;
}
@@ -7892,6 +8908,16 @@
}
break;
}
+ case FAST_DOUBLE_ELEMENTS: {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
+ if (index < static_cast<uint32_t>(elms->length())) {
+ if (!elms->is_the_hole(index)) {
+ double double_value = elms->get(index);
+ return GetHeap()->NumberFromDouble(double_value);
+ }
+ }
+ break;
+ }
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
@@ -7923,6 +8949,40 @@
}
break;
}
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ uint32_t length = parameter_map->length();
+ Object* probe =
+ (index < length - 2) ? parameter_map->get(index + 2) : NULL;
+ if (probe != NULL && !probe->IsTheHole()) {
+ Context* context = Context::cast(parameter_map->get(0));
+ int context_index = Smi::cast(probe)->value();
+ ASSERT(!context->get(context_index)->IsTheHole());
+ return context->get(context_index);
+ } else {
+ // Object is not mapped, defer to the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ if (arguments->IsDictionary()) {
+ NumberDictionary* dictionary = NumberDictionary::cast(arguments);
+ int entry = dictionary->FindEntry(index);
+ if (entry != NumberDictionary::kNotFound) {
+ Object* element = dictionary->ValueAt(entry);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ if (details.type() == CALLBACKS) {
+ return GetElementWithCallback(receiver,
+ element,
+ index,
+ this);
+ }
+ return element;
+ }
+ } else if (index < static_cast<uint32_t>(arguments->length())) {
+ Object* value = arguments->get(index);
+ if (!value->IsTheHole()) return value;
+ }
+ }
+ break;
+ }
}
Object* pt = GetPrototype();
@@ -8011,10 +9071,14 @@
}
break;
}
+ case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case DICTIONARY_ELEMENTS:
UNREACHABLE();
break;
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
+ break;
}
return GetHeap()->undefined_value();
}
@@ -8024,12 +9088,39 @@
int capacity = 0;
int number_of_elements = 0;
+ FixedArrayBase* backing_store_base = FixedArrayBase::cast(elements());
+ FixedArray* backing_store = NULL;
switch (GetElementsKind()) {
- case FAST_ELEMENTS: {
- FixedArray* elms = FixedArray::cast(elements());
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ backing_store_base =
+ FixedArray::cast(FixedArray::cast(backing_store_base)->get(1));
+ backing_store = FixedArray::cast(backing_store_base);
+ if (backing_store->IsDictionary()) {
+ NumberDictionary* dictionary = NumberDictionary::cast(backing_store);
+ capacity = dictionary->Capacity();
+ number_of_elements = dictionary->NumberOfElements();
+ break;
+ }
+ // Fall through.
+ case FAST_ELEMENTS:
+ backing_store = FixedArray::cast(backing_store_base);
+ capacity = backing_store->length();
+ for (int i = 0; i < capacity; ++i) {
+ if (!backing_store->get(i)->IsTheHole()) ++number_of_elements;
+ }
+ break;
+ case DICTIONARY_ELEMENTS: {
+ NumberDictionary* dictionary =
+ NumberDictionary::cast(FixedArray::cast(elements()));
+ capacity = dictionary->Capacity();
+ number_of_elements = dictionary->NumberOfElements();
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS: {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
capacity = elms->length();
for (int i = 0; i < capacity; i++) {
- if (!elms->get(i)->IsTheHole()) number_of_elements++;
+ if (!elms->is_the_hole(i)) number_of_elements++;
}
break;
}
@@ -8044,43 +9135,48 @@
case EXTERNAL_DOUBLE_ELEMENTS: {
return true;
}
- case DICTIONARY_ELEMENTS: {
- NumberDictionary* dictionary = NumberDictionary::cast(elements());
- capacity = dictionary->Capacity();
- number_of_elements = dictionary->NumberOfElements();
- break;
- }
- default:
- UNREACHABLE();
- break;
}
-
- if (capacity == 0) return true;
- return (number_of_elements > (capacity / 2));
+ return (capacity == 0) || (number_of_elements > (capacity / 2));
}
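
HasDenseElements now folds the empty-store case into a single expression: a backing store counts as dense when it is empty or when more than half its capacity is occupied, and the same rule is applied uniformly to fast, double, dictionary, and arguments stores above. For example, capacity 16 with 9 live elements is dense (9 > 8), while 8 of 16 is not:

    // Dense iff empty, or more than half the capacity is in use.
    bool IsDense(int capacity, int elements) {
      return capacity == 0 || elements > capacity / 2;
    }
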
bool JSObject::ShouldConvertToSlowElements(int new_capacity) {
- ASSERT(HasFastElements());
// Keep the array in fast case if the current backing storage is
// almost filled and if the new capacity is no more than twice the
// old capacity.
- int elements_length = FixedArray::cast(elements())->length();
+ int elements_length = 0;
+ if (elements()->map() == GetHeap()->non_strict_arguments_elements_map()) {
+ FixedArray* backing_store = FixedArray::cast(elements());
+ elements_length = FixedArray::cast(backing_store->get(1))->length();
+ } else if (HasFastElements()) {
+ elements_length = FixedArray::cast(elements())->length();
+ } else if (HasFastDoubleElements()) {
+ elements_length = FixedDoubleArray::cast(elements())->length();
+ } else {
+ UNREACHABLE();
+ }
return !HasDenseElements() || ((new_capacity / 2) > elements_length);
}
bool JSObject::ShouldConvertToFastElements() {
- ASSERT(HasDictionaryElements());
- NumberDictionary* dictionary = NumberDictionary::cast(elements());
+ ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
// If the elements are sparse, we should not go back to fast case.
if (!HasDenseElements()) return false;
- // If an element has been added at a very high index in the elements
- // dictionary, we cannot go back to fast case.
- if (dictionary->requires_slow_elements()) return false;
// An object requiring access checks is never allowed to have fast
// elements. If it had fast elements we would skip security checks.
if (IsAccessCheckNeeded()) return false;
+
+ FixedArray* elements = FixedArray::cast(this->elements());
+ NumberDictionary* dictionary = NULL;
+ if (elements->map() == GetHeap()->non_strict_arguments_elements_map()) {
+ dictionary = NumberDictionary::cast(elements->get(1));
+ } else {
+ dictionary = NumberDictionary::cast(elements);
+ }
+ // If an element has been added at a very high index in the elements
+ // dictionary, we cannot go back to fast case.
+ if (dictionary->requires_slow_elements()) return false;
// If the dictionary backing storage takes up roughly half as much
// space as a fast-case backing storage would the array should have
// fast elements.
@@ -8095,6 +9191,23 @@
}
+bool JSObject::ShouldConvertToFastDoubleElements() {
+ if (FLAG_unbox_double_arrays) {
+ ASSERT(HasDictionaryElements());
+ NumberDictionary* dictionary = NumberDictionary::cast(elements());
+ for (int i = 0; i < dictionary->Capacity(); i++) {
+ Object* key = dictionary->KeyAt(i);
+ if (key->IsNumber()) {
+ if (!dictionary->ValueAt(i)->IsNumber()) return false;
+ }
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
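
The conversion check above scans every occupied dictionary slot and vetoes unboxing on the first non-number value, and only runs behind --unbox-double-arrays; for instance, {0: 1.5, 1: 2} converts while {0: 1.5, 1: "x"} does not. The same veto rule over a flat array (a hypothetical helper, not the Dictionary API):

    // True iff every value could live in a FixedDoubleArray.
    bool AllValuesAreNumbers(Object** values, int count) {
      for (int i = 0; i < count; i++) {
        if (!values[i]->IsNumber()) return false;  // one veto is enough
      }
      return true;
    }
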
+
+
// Certain compilers request function template instantiation when they
// see the definition of the other template functions in the
// class. This requires us to have the template functions put
@@ -8159,7 +9272,7 @@
MaybeObject* JSObject::GetPropertyPostInterceptor(
- JSObject* receiver,
+ JSReceiver* receiver,
String* name,
PropertyAttributes* attributes) {
// Check local property in holder, ignore interceptor.
@@ -8177,7 +9290,7 @@
MaybeObject* JSObject::GetLocalPropertyPostInterceptor(
- JSObject* receiver,
+ JSReceiver* receiver,
String* name,
PropertyAttributes* attributes) {
// Check local property in holder, ignore interceptor.
@@ -8191,13 +9304,13 @@
MaybeObject* JSObject::GetPropertyWithInterceptor(
- JSObject* receiver,
+ JSReceiver* receiver,
String* name,
PropertyAttributes* attributes) {
Isolate* isolate = GetIsolate();
InterceptorInfo* interceptor = GetNamedInterceptor();
HandleScope scope(isolate);
- Handle<JSObject> receiver_handle(receiver);
+ Handle<JSReceiver> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(name);
@@ -8283,12 +9396,15 @@
ExternalArray* array = ExternalArray::cast(elements());
return index < static_cast<uint32_t>(array->length());
}
+ case FAST_DOUBLE_ELEMENTS:
+ UNREACHABLE();
+ break;
case DICTIONARY_ELEMENTS: {
return element_dictionary()->FindEntry(index)
!= NumberDictionary::kNotFound;
}
- default:
- UNREACHABLE();
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNIMPLEMENTED();
break;
}
// All possibilities have been handled above already.
@@ -8460,7 +9576,9 @@
}
ASSERT(storage->length() >= index);
} else {
- property_dictionary()->CopyKeysTo(storage, index);
+ property_dictionary()->CopyKeysTo(storage,
+ index,
+ StringDictionary::UNSORTED);
}
}
@@ -8503,6 +9621,21 @@
ASSERT(!storage || storage->length() >= counter);
break;
}
+ case FAST_DOUBLE_ELEMENTS: {
+ int length = IsJSArray() ?
+ Smi::cast(JSArray::cast(this)->length())->value() :
+ FixedDoubleArray::cast(elements())->length();
+ for (int i = 0; i < length; i++) {
+ if (!FixedDoubleArray::cast(elements())->is_the_hole(i)) {
+ if (storage != NULL) {
+ storage->set(counter, Smi::FromInt(i));
+ }
+ counter++;
+ }
+ }
+ ASSERT(!storage || storage->length() >= counter);
+ break;
+ }
case EXTERNAL_PIXEL_ELEMENTS: {
int length = ExternalPixelArray::cast(elements())->length();
while (counter < length) {
@@ -8534,14 +9667,52 @@
}
case DICTIONARY_ELEMENTS: {
if (storage != NULL) {
- element_dictionary()->CopyKeysTo(storage, filter);
+ element_dictionary()->CopyKeysTo(storage,
+ filter,
+ NumberDictionary::SORTED);
}
- counter = element_dictionary()->NumberOfElementsFilterAttributes(filter);
+ counter += element_dictionary()->NumberOfElementsFilterAttributes(filter);
break;
}
- default:
- UNREACHABLE();
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ int mapped_length = parameter_map->length() - 2;
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ if (arguments->IsDictionary()) {
+ // Copy the keys from arguments first, because Dictionary::CopyKeysTo
+ // will insert in storage starting at index 0.
+ NumberDictionary* dictionary = NumberDictionary::cast(arguments);
+ if (storage != NULL) {
+ dictionary->CopyKeysTo(storage, filter, NumberDictionary::UNSORTED);
+ }
+ counter += dictionary->NumberOfElementsFilterAttributes(filter);
+ for (int i = 0; i < mapped_length; ++i) {
+ if (!parameter_map->get(i + 2)->IsTheHole()) {
+ if (storage != NULL) storage->set(counter, Smi::FromInt(i));
+ ++counter;
+ }
+ }
+ if (storage != NULL) storage->SortPairs(storage, counter);
+
+ } else {
+ int backing_length = arguments->length();
+ int i = 0;
+ for (; i < mapped_length; ++i) {
+ if (!parameter_map->get(i + 2)->IsTheHole()) {
+ if (storage != NULL) storage->set(counter, Smi::FromInt(i));
+ ++counter;
+ } else if (i < backing_length && !arguments->get(i)->IsTheHole()) {
+ if (storage != NULL) storage->set(counter, Smi::FromInt(i));
+ ++counter;
+ }
+ }
+ for (; i < backing_length; ++i) {
+ if (storage != NULL) storage->set(counter, Smi::FromInt(i));
+ ++counter;
+ }
+ }
break;
+ }
}
if (this->IsJSValue()) {
@@ -9008,6 +10179,40 @@
template<typename Shape, typename Key>
+MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
+ ASSERT(NumberOfElements() < new_table->Capacity());
+
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
+
+ // Copy prefix to new array.
+ for (int i = kPrefixStartIndex;
+ i < kPrefixStartIndex + Shape::kPrefixSize;
+ i++) {
+ new_table->set(i, get(i), mode);
+ }
+
+ // Rehash the elements.
+ int capacity = Capacity();
+ for (int i = 0; i < capacity; i++) {
+ uint32_t from_index = EntryToIndex(i);
+ Object* k = get(from_index);
+ if (IsKey(k)) {
+ uint32_t hash = Shape::HashForObject(key, k);
+ uint32_t insertion_index =
+ EntryToIndex(new_table->FindInsertionEntry(hash));
+ for (int j = 0; j < Shape::kEntrySize; j++) {
+ new_table->set(insertion_index + j, get(from_index + j), mode);
+ }
+ }
+ }
+ new_table->SetNumberOfElements(NumberOfElements());
+ new_table->SetNumberOfDeletedElements(0);
+ return new_table;
+}
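
Rehash factors the copy loop out of EnsureCapacity so that the new Shrink (below) can reuse it: the prefix slots are copied verbatim, then each live key is re-inserted at the insertion entry computed from its hash in the new table, moving all kEntrySize slots of an entry together. The same shape over a toy open-addressed table, with linear probing standing in for FindInsertionEntry (hypothetical, not the HashTable API):

    // Toy rehash: re-insert every live key into 'to' by a fresh probe.
    // 'to' must be pre-filled with the 'empty' marker.
    void ToyRehash(int* from, int from_cap, int* to, int to_cap, int empty) {
      for (int i = 0; i < from_cap; i++) {
        int key = from[i];
        if (key == empty) continue;              // skip holes
        int slot = key % to_cap;                 // stand-in for the hash
        while (to[slot] != empty) slot = (slot + 1) % to_cap;  // probe
        to[slot] = key;
      }
    }
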
+
+
+template<typename Shape, typename Key>
MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
int capacity = Capacity();
int nof = NumberOfElements() + n;
@@ -9029,32 +10234,36 @@
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- AssertNoAllocation no_gc;
- HashTable* table = HashTable::cast(obj);
- WriteBarrierMode mode = table->GetWriteBarrierMode(no_gc);
+ return Rehash(HashTable::cast(obj), key);
+}
- // Copy prefix to new array.
- for (int i = kPrefixStartIndex;
- i < kPrefixStartIndex + Shape::kPrefixSize;
- i++) {
- table->set(i, get(i), mode);
+
+template<typename Shape, typename Key>
+MaybeObject* HashTable<Shape, Key>::Shrink(Key key) {
+ int capacity = Capacity();
+ int nof = NumberOfElements();
+
+ // Shrink to fit the number of elements if only a quarter of the
+ // capacity is filled with elements.
+ if (nof > (capacity >> 2)) return this;
+ // Allocate a new dictionary with room for at least the current
+ // number of elements. The allocation method will make sure that
+ // there is extra room in the dictionary for additions. Don't go
+ // lower than room for 16 elements.
+ int at_least_room_for = nof;
+ if (at_least_room_for < 16) return this;
+
+ const int kMinCapacityForPretenure = 256;
+ bool pretenure =
+ (at_least_room_for > kMinCapacityForPretenure) &&
+ !GetHeap()->InNewSpace(this);
+ Object* obj;
+ { MaybeObject* maybe_obj =
+ Allocate(at_least_room_for, pretenure ? TENURED : NOT_TENURED);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- // Rehash the elements.
- for (int i = 0; i < capacity; i++) {
- uint32_t from_index = EntryToIndex(i);
- Object* k = get(from_index);
- if (IsKey(k)) {
- uint32_t hash = Shape::HashForObject(key, k);
- uint32_t insertion_index =
- EntryToIndex(table->FindInsertionEntry(hash));
- for (int j = 0; j < Shape::kEntrySize; j++) {
- table->set(insertion_index + j, get(from_index + j), mode);
- }
- }
- }
- table->SetNumberOfElements(NumberOfElements());
- table->SetNumberOfDeletedElements(0);
- return table;
+
+ return Rehash(HashTable::cast(obj), key);
}
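
Shrink fires only when at most a quarter of the capacity is live and there are at least 16 elements, pretenuring the replacement into old space when it is large (more than 256 elements) and the current table is already outside new space. Worked example: capacity 128 with 20 live entries shrinks (20 <= 32 and 20 >= 16), with 40 it does not, and with 10 it does not either. The trigger condition as a predicate (names mirror the code above):

    // Shrink when <= 25% full, but never below room for 16 elements.
    bool ShouldShrink(int capacity, int nof) {
      return nof <= (capacity >> 2) && nof >= 16;
    }
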
@@ -9101,7 +10310,9 @@
Object*);
template void Dictionary<NumberDictionaryShape, uint32_t>::CopyKeysTo(
- FixedArray*, PropertyAttributes);
+ FixedArray*,
+ PropertyAttributes,
+ Dictionary<NumberDictionaryShape, uint32_t>::SortMode);
template Object* Dictionary<StringDictionaryShape, String*>::DeleteProperty(
int, JSObject::DeleteMode);
@@ -9109,8 +10320,16 @@
template Object* Dictionary<NumberDictionaryShape, uint32_t>::DeleteProperty(
int, JSObject::DeleteMode);
+template MaybeObject* Dictionary<StringDictionaryShape, String*>::Shrink(
+ String*);
+
+template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::Shrink(
+ uint32_t);
+
template void Dictionary<StringDictionaryShape, String*>::CopyKeysTo(
- FixedArray*, int);
+ FixedArray*,
+ int,
+ Dictionary<StringDictionaryShape, String*>::SortMode);
template int
Dictionary<StringDictionaryShape, String*>::NumberOfElementsFilterAttributes(
@@ -9253,8 +10472,6 @@
// If the object is in dictionary mode, it is converted to fast elements
// mode.
MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
- ASSERT(!HasExternalArrayElements());
-
Heap* heap = GetHeap();
if (HasDictionaryElements()) {
@@ -9284,6 +10501,9 @@
set_map(new_map);
set_elements(fast_elements);
+ } else if (HasExternalArrayElements()) {
+ // External arrays cannot have holes or undefined elements.
+ return Smi::FromInt(ExternalArray::cast(elements())->length());
} else {
Object* obj;
{ MaybeObject* maybe_obj = EnsureWritableFastElements();
@@ -9615,6 +10835,7 @@
UNREACHABLE();
return NULL;
}
+
private:
uint32_t c1_;
uint32_t c2_;
@@ -9993,11 +11214,11 @@
template<typename Shape, typename Key>
Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
- JSObject::DeleteMode mode) {
+ JSReceiver::DeleteMode mode) {
Heap* heap = Dictionary<Shape, Key>::GetHeap();
PropertyDetails details = DetailsAt(entry);
// Ignore attributes if forcing a deletion.
- if (details.IsDontDelete() && mode != JSObject::FORCE_DELETION) {
+ if (details.IsDontDelete() && mode != JSReceiver::FORCE_DELETION) {
return heap->false_value();
}
SetEntry(entry, heap->null_value(), heap->null_value());
@@ -10007,6 +11228,12 @@
template<typename Shape, typename Key>
+MaybeObject* Dictionary<Shape, Key>::Shrink(Key key) {
+ return HashTable<Shape, Key>::Shrink(key);
+}
+
+
+template<typename Shape, typename Key>
MaybeObject* Dictionary<Shape, Key>::AtPut(Key key, Object* value) {
int entry = this->FindEntry(key);
@@ -10155,8 +11382,10 @@
template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage,
- PropertyAttributes filter) {
+void Dictionary<Shape, Key>::CopyKeysTo(
+ FixedArray* storage,
+ PropertyAttributes filter,
+ typename Dictionary<Shape, Key>::SortMode sort_mode) {
ASSERT(storage->length() >= NumberOfEnumElements());
int capacity = HashTable<Shape, Key>::Capacity();
int index = 0;
@@ -10169,7 +11398,9 @@
if ((attr & filter) == 0) storage->set(index++, k);
}
}
- storage->SortPairs(storage, index);
+ if (sort_mode == Dictionary<Shape, Key>::SORTED) {
+ storage->SortPairs(storage, index);
+ }
ASSERT(storage->length() >= index);
}
@@ -10197,7 +11428,8 @@
template<typename Shape, typename Key>
void Dictionary<Shape, Key>::CopyKeysTo(
FixedArray* storage,
- int index) {
+ int index,
+ typename Dictionary<Shape, Key>::SortMode sort_mode) {
ASSERT(storage->length() >= NumberOfElementsFilterAttributes(
static_cast<PropertyAttributes>(NONE)));
int capacity = HashTable<Shape, Key>::Capacity();
@@ -10209,6 +11441,9 @@
storage->set(index++, k);
}
}
+ if (sort_mode == Dictionary<Shape, Key>::SORTED) {
+ storage->SortPairs(storage, index);
+ }
ASSERT(storage->length() >= index);
}
diff --git a/src/objects.h b/src/objects.h
index 332b2e4..2aa6b4a 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -48,16 +48,19 @@
// - Object
// - Smi (immediate small integer)
// - HeapObject (superclass for everything allocated in the heap)
-// - JSObject
-// - JSArray
-// - JSRegExp
-// - JSFunction
-// - GlobalObject
-// - JSGlobalObject
-// - JSBuiltinsObject
-// - JSGlobalProxy
-// - JSValue
-// - JSMessageObject
+// - JSReceiver (suitable for property access)
+// - JSObject
+// - JSArray
+// - JSRegExp
+// - JSFunction
+// - GlobalObject
+// - JSGlobalObject
+// - JSBuiltinsObject
+// - JSGlobalProxy
+// - JSValue
+// - JSMessageObject
+// - JSProxy
+// - JSFunctionProxy
// - ByteArray
// - ExternalArray
// - ExternalPixelArray
@@ -91,7 +94,6 @@
// - Code
// - Map
// - Oddball
-// - JSProxy
// - Foreign
// - SharedFunctionInfo
// - Struct
@@ -134,7 +136,6 @@
// They are used both in property dictionaries and instance descriptors.
class PropertyDetails BASE_EMBEDDED {
public:
-
PropertyDetails(PropertyAttributes attributes,
PropertyType type,
int index = 0) {
@@ -214,6 +215,7 @@
class StorageField: public BitField<uint32_t, 8, 32-8> {};
static const int kInitialIndex = 1;
+
private:
uint32_t value_;
};
@@ -289,7 +291,6 @@
V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
\
V(HEAP_NUMBER_TYPE) \
- V(JS_PROXY_TYPE) \
V(FOREIGN_TYPE) \
V(BYTE_ARRAY_TYPE) \
/* Note: the order of these external array */ \
@@ -315,6 +316,7 @@
V(TYPE_SWITCH_INFO_TYPE) \
V(SCRIPT_TYPE) \
V(CODE_CACHE_TYPE) \
+ V(POLYMORPHIC_CODE_CACHE_TYPE) \
\
V(FIXED_ARRAY_TYPE) \
V(SHARED_FUNCTION_INFO_TYPE) \
@@ -328,9 +330,11 @@
V(JS_BUILTINS_OBJECT_TYPE) \
V(JS_GLOBAL_PROXY_TYPE) \
V(JS_ARRAY_TYPE) \
+ V(JS_PROXY_TYPE) \
V(JS_REGEXP_TYPE) \
\
V(JS_FUNCTION_TYPE) \
+ V(JS_FUNCTION_PROXY_TYPE) \
#ifdef ENABLE_DEBUGGER_SUPPORT
#define INSTANCE_TYPE_LIST_DEBUGGER(V) \
@@ -424,7 +428,8 @@
V(SIGNATURE_INFO, SignatureInfo, signature_info) \
V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
V(SCRIPT, Script, script) \
- V(CODE_CACHE, CodeCache, code_cache)
+ V(CODE_CACHE, CodeCache, code_cache) \
+ V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache)
#ifdef ENABLE_DEBUGGER_SUPPORT
#define STRUCT_LIST_DEBUGGER(V) \
@@ -518,7 +523,6 @@
// objects.
HEAP_NUMBER_TYPE,
FOREIGN_TYPE,
- JS_PROXY_TYPE,
BYTE_ARRAY_TYPE,
EXTERNAL_BYTE_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
@@ -529,6 +533,7 @@
EXTERNAL_FLOAT_ARRAY_TYPE,
EXTERNAL_DOUBLE_ARRAY_TYPE,
EXTERNAL_PIXEL_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
+ FIXED_DOUBLE_ARRAY_TYPE,
FILLER_TYPE, // LAST_DATA_TYPE
// Structs.
@@ -542,6 +547,7 @@
TYPE_SWITCH_INFO_TYPE,
SCRIPT_TYPE,
CODE_CACHE_TYPE,
+ POLYMORPHIC_CODE_CACHE_TYPE,
// The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT
// is defined. However as include/v8.h contain some of the instance type
// constants always having them avoids them getting different numbers
@@ -554,21 +560,23 @@
JS_MESSAGE_OBJECT_TYPE,
- JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
+ JS_VALUE_TYPE, // FIRST_NON_CALLABLE_OBJECT_TYPE, FIRST_JS_RECEIVER_TYPE
JS_OBJECT_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JS_GLOBAL_OBJECT_TYPE,
JS_BUILTINS_OBJECT_TYPE,
JS_GLOBAL_PROXY_TYPE,
JS_ARRAY_TYPE,
+ JS_PROXY_TYPE,
- JS_REGEXP_TYPE, // LAST_JS_OBJECT_TYPE, FIRST_FUNCTION_CLASS_TYPE
+ JS_REGEXP_TYPE, // LAST_NONCALLABLE_SPEC_OBJECT_TYPE
- JS_FUNCTION_TYPE,
+ JS_FUNCTION_TYPE, // FIRST_CALLABLE_SPEC_OBJECT_TYPE
+ JS_FUNCTION_PROXY_TYPE, // LAST_CALLABLE_SPEC_OBJECT_TYPE
// Pseudo-types
FIRST_TYPE = 0x0,
- LAST_TYPE = JS_FUNCTION_TYPE,
+ LAST_TYPE = JS_FUNCTION_PROXY_TYPE,
INVALID_TYPE = FIRST_TYPE - 1,
FIRST_NONSTRING_TYPE = MAP_TYPE,
// Boundaries for testing for an external array.
@@ -576,14 +584,22 @@
LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_PIXEL_ARRAY_TYPE,
// Boundary for promotion to old data space/old pointer space.
LAST_DATA_TYPE = FILLER_TYPE,
- // Boundaries for testing the type is a JavaScript "object". Note that
- // function objects are not counted as objects, even though they are
- // implemented as such; only values whose typeof is "object" are included.
- FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
- LAST_JS_OBJECT_TYPE = JS_REGEXP_TYPE,
- // RegExp objects have [[Class]] "function" because they are callable.
- // All types from this type and above are objects with [[Class]] "function".
- FIRST_FUNCTION_CLASS_TYPE = JS_REGEXP_TYPE
+ // Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy).
+ // Note that there is no range for JSObject or JSProxy, since their subtypes
+ // are not continuous in this enum! The enum ranges instead reflect the
+ // external class names, where proxies are treated as either ordinary objects,
+ // or functions.
+ FIRST_JS_RECEIVER_TYPE = JS_VALUE_TYPE,
+ LAST_JS_RECEIVER_TYPE = LAST_TYPE,
+ // Boundaries for testing the types for which typeof is "object".
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_VALUE_TYPE,
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
+ // Boundaries for testing the types for which typeof is "function".
+ FIRST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_TYPE,
+ LAST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_PROXY_TYPE,
+ // Boundaries for testing whether the type is a JavaScript object.
+ FIRST_SPEC_OBJECT_TYPE = FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
+ LAST_SPEC_OBJECT_TYPE = LAST_CALLABLE_SPEC_OBJECT_TYPE
};
static const int kExternalArrayTypeCount = LAST_EXTERNAL_ARRAY_TYPE -
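
The reworked boundaries turn type classification into range checks: everything from JS_VALUE_TYPE upward is a JSReceiver, the typeof "object" types end at JS_REGEXP_TYPE, and the callable types (function and function proxy) sit at the top of the enum. A sketch of the tests these constants enable (hypothetical helpers, assuming the enum values above):

    // Range-based classification over the new InstanceType layout.
    static inline bool IsJSReceiverType(InstanceType t) {
      return t >= FIRST_JS_RECEIVER_TYPE;  // LAST_JS_RECEIVER_TYPE == LAST_TYPE
    }
    static inline bool TypeofIsObject(InstanceType t) {
      return t >= FIRST_NONCALLABLE_SPEC_OBJECT_TYPE &&
             t <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE;
    }
    static inline bool TypeofIsFunction(InstanceType t) {
      return t >= FIRST_CALLABLE_SPEC_OBJECT_TYPE;  // top of the enum
    }
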
@@ -709,6 +725,7 @@
V(ExternalDoubleArray) \
V(ExternalPixelArray) \
V(ByteArray) \
+ V(JSReceiver) \
V(JSObject) \
V(JSContextExtensionObject) \
V(Map) \
@@ -716,8 +733,8 @@
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
V(FixedArray) \
+ V(FixedDoubleArray) \
V(Context) \
- V(CatchContext) \
V(GlobalContext) \
V(JSFunction) \
V(Code) \
@@ -730,6 +747,7 @@
V(Boolean) \
V(JSArray) \
V(JSProxy) \
+ V(JSFunctionProxy) \
V(JSRegExp) \
V(HashTable) \
V(Dictionary) \
@@ -738,6 +756,7 @@
V(NormalizedMapCache) \
V(CompilationCacheTable) \
V(CodeCacheHashTable) \
+ V(PolymorphicCodeCacheHashTable) \
V(MapCache) \
V(Primitive) \
V(GlobalObject) \
@@ -782,6 +801,10 @@
// Extract the number.
inline double Number();
+ // Returns true if the object is of the correct type to be used as an
+ // implementation of a JSObject's elements.
+ inline bool HasValidElements();
+
inline bool HasSpecificClassOf(String* name);
MUST_USE_RESULT MaybeObject* ToObject(); // ECMA-262 9.9.
@@ -1332,11 +1355,9 @@
};
-// The JSObject describes real heap allocated JavaScript objects with
-// properties.
-// Note that the map of JSObject changes during execution to enable inline
-// caching.
-class JSObject: public HeapObject {
+// JSReceiver includes types on which properties can be defined, i.e.,
+// JSObject and JSProxy.
+class JSReceiver: public HeapObject {
public:
enum DeleteMode {
NORMAL_DELETION,
@@ -1344,11 +1365,77 @@
FORCE_DELETION
};
+ // Casting.
+ static inline JSReceiver* cast(Object* obj);
+
+ // Can cause GC.
+ MUST_USE_RESULT MaybeObject* SetProperty(String* key,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
+ MUST_USE_RESULT MaybeObject* SetProperty(LookupResult* result,
+ String* key,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
+
+ MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
+
+ // Returns the class name ([[Class]] property in the specification).
+ String* class_name();
+
+ // Returns the constructor name (the name (possibly, inferred name) of the
+ // function that was used to instantiate the object).
+ String* constructor_name();
+
+ inline PropertyAttributes GetPropertyAttribute(String* name);
+ PropertyAttributes GetPropertyAttributeWithReceiver(JSReceiver* receiver,
+ String* name);
+ PropertyAttributes GetLocalPropertyAttribute(String* name);
+
+ // Can cause a GC.
+ inline bool HasProperty(String* name);
+ inline bool HasLocalProperty(String* name);
+
+ // Return the object's prototype (might be Heap::null_value()).
+ inline Object* GetPrototype();
+
+ // Set the object's prototype (only JSReceiver and null are allowed).
+ MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
+ bool skip_hidden_prototypes);
+
+ // Lookup a property. If found, the result is valid and has
+ // detailed information.
+ void LocalLookup(String* name, LookupResult* result);
+ void Lookup(String* name, LookupResult* result);
+
+ private:
+ PropertyAttributes GetPropertyAttribute(JSReceiver* receiver,
+ LookupResult* result,
+ String* name,
+ bool continue_search);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
+};
+
+// The JSObject describes real heap allocated JavaScript objects with
+// properties.
+// Note that the map of JSObject changes during execution to enable inline
+// caching.
+class JSObject: public JSReceiver {
+ public:
enum ElementsKind {
- // The only "fast" kind.
+ // The "fast" kind for tagged values. Must be first to make it possible
+ // to efficiently check whether maps have fast elements.
FAST_ELEMENTS,
- // All the kinds below are "slow".
+
+ // The "fast" kind for unwrapped, non-tagged double values.
+ FAST_DOUBLE_ELEMENTS,
+
+ // The "slow" kind.
DICTIONARY_ELEMENTS,
+ NON_STRICT_ARGUMENTS_ELEMENTS,
+ // The "fast" kind for external arrays
EXTERNAL_BYTE_ELEMENTS,
EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
EXTERNAL_SHORT_ELEMENTS,
@@ -1357,9 +1444,18 @@
EXTERNAL_UNSIGNED_INT_ELEMENTS,
EXTERNAL_FLOAT_ELEMENTS,
EXTERNAL_DOUBLE_ELEMENTS,
- EXTERNAL_PIXEL_ELEMENTS
+ EXTERNAL_PIXEL_ELEMENTS,
+
+ // Derived constants from ElementsKind
+ FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
+ LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
+ FIRST_ELEMENTS_KIND = FAST_ELEMENTS,
+ LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS
};
+ static const int kElementsKindCount =
+ LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
+
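
The derived constants make "is this an external array kind" a range comparison and provide kElementsKindCount for sizing per-kind tables (for example, arrays of stubs indexed by kind). A sketch (hypothetical helper, assuming the enum above):

    // Range test over ElementsKind; valid because the external kinds
    // are declared contiguously in the enum.
    static inline bool IsExternalArrayKind(JSObject::ElementsKind kind) {
      return kind >= JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
             kind <= JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
    }
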
// [properties]: Backing storage for properties.
// properties is a FixedArray in the fast case and a Dictionary in the
// slow case.
@@ -1376,18 +1472,22 @@
//
// In the fast mode elements is a FixedArray and so each element can
// be quickly accessed. This fact is used in the generated code. The
- // elements array can have one of the two maps in this mode:
- // fixed_array_map or fixed_cow_array_map (for copy-on-write
- // arrays). In the latter case the elements array may be shared by a
- // few objects and so before writing to any element the array must
- // be copied. Use EnsureWritableFastElements in this case.
+ // elements array can have one of three maps in this mode:
+ // fixed_array_map, non_strict_arguments_elements_map or
+ // fixed_cow_array_map (for copy-on-write arrays). In the latter case
+ // the elements array may be shared by a few objects and so before
+ // writing to any element the array must be copied. Use
+ // EnsureWritableFastElements in this case.
//
- // In the slow mode elements is either a NumberDictionary or an ExternalArray.
+ // In the slow mode the elements is either a NumberDictionary, an
+ // ExternalArray, or a FixedArray parameter map for a (non-strict)
+ // arguments object.
DECL_ACCESSORS(elements, HeapObject)
inline void initialize_elements();
MUST_USE_RESULT inline MaybeObject* ResetElements();
inline ElementsKind GetElementsKind();
inline bool HasFastElements();
+ inline bool HasFastDoubleElements();
inline bool HasDictionaryElements();
inline bool HasExternalPixelElements();
inline bool HasExternalArrayElements();
@@ -1399,9 +1499,12 @@
inline bool HasExternalUnsignedIntElements();
inline bool HasExternalFloatElements();
inline bool HasExternalDoubleElements();
+ bool HasFastArgumentsElements();
+ bool HasDictionaryArgumentsElements();
inline bool AllowsSetElementsLength();
inline NumberDictionary* element_dictionary(); // Gets slow elements.
- // Requires: this->HasFastElements().
+
+ // Requires: HasFastElements().
MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements();
// Collects elements starting at index 0.
@@ -1412,11 +1515,7 @@
// a dictionary, and it will stay a dictionary.
MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
- MUST_USE_RESULT MaybeObject* SetProperty(String* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetProperty(LookupResult* result,
+ MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result,
String* key,
Object* value,
PropertyAttributes attributes,
@@ -1425,11 +1524,14 @@
LookupResult* result,
String* name,
Object* value,
- bool check_prototype);
- MUST_USE_RESULT MaybeObject* SetPropertyWithCallback(Object* structure,
- String* name,
- Object* value,
- JSObject* holder);
+ bool check_prototype,
+ StrictModeFlag strict_mode);
+ MUST_USE_RESULT MaybeObject* SetPropertyWithCallback(
+ Object* structure,
+ String* name,
+ Object* value,
+ JSObject* holder,
+ StrictModeFlag strict_mode);
MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSFunction* setter,
Object* value);
MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
@@ -1465,21 +1567,22 @@
MUST_USE_RESULT MaybeObject* DeleteNormalizedProperty(String* name,
DeleteMode mode);
- // Returns the class name ([[Class]] property in the specification).
- String* class_name();
-
- // Returns the constructor name (the name (possibly, inferred name) of the
- // function that was used to instantiate the object).
- String* constructor_name();
-
// Retrieve interceptors.
InterceptorInfo* GetNamedInterceptor();
InterceptorInfo* GetIndexedInterceptor();
- inline PropertyAttributes GetPropertyAttribute(String* name);
- PropertyAttributes GetPropertyAttributeWithReceiver(JSObject* receiver,
- String* name);
- PropertyAttributes GetLocalPropertyAttribute(String* name);
+ // Used from JSReceiver.
+ PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
+ String* name,
+ bool continue_search);
+ PropertyAttributes GetPropertyAttributeWithInterceptor(JSObject* receiver,
+ String* name,
+ bool continue_search);
+ PropertyAttributes GetPropertyAttributeWithFailedAccessCheck(
+ Object* receiver,
+ LookupResult* result,
+ String* name,
+ bool continue_search);
MUST_USE_RESULT MaybeObject* DefineAccessor(String* name,
bool is_getter,
@@ -1496,14 +1599,14 @@
String* name,
PropertyAttributes* attributes);
MaybeObject* GetPropertyWithInterceptor(
- JSObject* receiver,
+ JSReceiver* receiver,
String* name,
PropertyAttributes* attributes);
MaybeObject* GetPropertyPostInterceptor(
- JSObject* receiver,
+ JSReceiver* receiver,
String* name,
PropertyAttributes* attributes);
- MaybeObject* GetLocalPropertyPostInterceptor(JSObject* receiver,
+ MaybeObject* GetLocalPropertyPostInterceptor(JSReceiver* receiver,
String* name,
PropertyAttributes* attributes);
@@ -1511,15 +1614,6 @@
// been modified since it was created. May give false positives.
bool IsDirty();
- bool HasProperty(String* name) {
- return GetPropertyAttribute(name) != ABSENT;
- }
-
- // Can cause a GC if it hits an interceptor.
- bool HasLocalProperty(String* name) {
- return GetLocalPropertyAttribute(name) != ABSENT;
- }
-
// If the receiver is a JSGlobalProxy this method will return its prototype,
// otherwise the result is the receiver itself.
inline Object* BypassGlobalProxy();
@@ -1556,17 +1650,13 @@
// storage would. In that case the JSObject should have fast
// elements.
bool ShouldConvertToFastElements();
-
- // Return the object's prototype (might be Heap::null_value()).
- inline Object* GetPrototype();
-
- // Set the object's prototype (only JSObject and null are allowed).
- MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
- bool skip_hidden_prototypes);
+ // Returns true if the elements of JSObject contains only values that can be
+ // represented in a FixedDoubleArray.
+ bool ShouldConvertToFastDoubleElements();
// Tells whether the index'th element is present.
inline bool HasElement(uint32_t index);
- bool HasElementWithReceiver(JSObject* receiver, uint32_t index);
+ bool HasElementWithReceiver(JSReceiver* receiver, uint32_t index);
// Computes the new capacity when expanding the elements of a JSObject.
static int NewElementsCapacity(int old_capacity) {
@@ -1594,20 +1684,30 @@
LocalElementType HasLocalElement(uint32_t index);
- bool HasElementWithInterceptor(JSObject* receiver, uint32_t index);
- bool HasElementPostInterceptor(JSObject* receiver, uint32_t index);
+ bool HasElementWithInterceptor(JSReceiver* receiver, uint32_t index);
+ bool HasElementPostInterceptor(JSReceiver* receiver, uint32_t index);
MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index,
Object* value,
StrictModeFlag strict_mode,
- bool check_prototype = true);
+ bool check_prototype);
+ MUST_USE_RESULT MaybeObject* SetDictionaryElement(uint32_t index,
+ Object* value,
+ StrictModeFlag strict_mode,
+ bool check_prototype);
+
+ MUST_USE_RESULT MaybeObject* SetFastDoubleElement(
+ uint32_t index,
+ Object* value,
+ StrictModeFlag strict_mode,
+ bool check_prototype = true);
// Set the index'th array element.
// A Failure object is returned if GC is needed.
MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
Object* value,
StrictModeFlag strict_mode,
- bool check_prototype = true);
+ bool check_prototype);
// Returns the index'th element.
// The undefined object if index is out of bounds.
@@ -1619,8 +1719,14 @@
// failed.
MaybeObject* GetExternalElement(uint32_t index);
+ // Replace the elements' backing store with fast elements of the given
+ // capacity. Update the length for JSArrays. Returns the new backing
+ // store.
MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity,
int length);
+ MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
+ int capacity,
+ int length);
MUST_USE_RESULT MaybeObject* SetSlowElements(Object* length);
// Lookup interceptors are used for handling properties controlled by host
@@ -1648,7 +1754,6 @@
// Lookup a property. If found, the result is valid and has
// detailed information.
void LocalLookup(String* name, LookupResult* result);
- void Lookup(String* name, LookupResult* result);
// The following lookup functions skip interceptors.
void LocalLookupRealNamedProperty(String* name, LookupResult* result);
@@ -1656,7 +1761,7 @@
void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
void LookupCallbackSetterInPrototypes(String* name, LookupResult* result);
MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
- uint32_t index, Object* value, bool* found);
+ uint32_t index, Object* value, bool* found, StrictModeFlag strict_mode);
void LookupCallback(String* name, LookupResult* result);
// Returns the number of properties on this object filtering out properties
@@ -1746,6 +1851,9 @@
MUST_USE_RESULT MaybeObject* NormalizeProperties(
PropertyNormalizationMode mode,
int expected_additional_properties);
+
+ // Convert and update the elements backing store to a NumberDictionary.
+ // Returns the backing store after conversion.
MUST_USE_RESULT MaybeObject* NormalizeElements();
MUST_USE_RESULT MaybeObject* UpdateMapCodeCache(String* name, Code* code);
@@ -1868,7 +1976,8 @@
MaybeObject* SetElementWithCallback(Object* structure,
uint32_t index,
Object* value,
- JSObject* holder);
+ JSObject* holder,
+ StrictModeFlag strict_mode);
MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(
uint32_t index,
Object* value,
@@ -1890,21 +1999,16 @@
DeleteMode mode);
MUST_USE_RESULT MaybeObject* DeleteElementWithInterceptor(uint32_t index);
- PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver,
- String* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttributeWithInterceptor(JSObject* receiver,
- String* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttributeWithFailedAccessCheck(
- Object* receiver,
- LookupResult* result,
- String* name,
- bool continue_search);
- PropertyAttributes GetPropertyAttribute(JSObject* receiver,
- LookupResult* result,
- String* name,
- bool continue_search);
+ MUST_USE_RESULT MaybeObject* DeleteFastElement(uint32_t index);
+ MUST_USE_RESULT MaybeObject* DeleteDictionaryElement(uint32_t index,
+ DeleteMode mode);
+
+ bool ReferencesObjectFromElements(FixedArray* elements,
+ ElementsKind kind,
+ Object* object);
+ bool HasElementInElements(FixedArray* elements,
+ ElementsKind kind,
+ uint32_t index);
// Returns true if most of the elements backing storage is used.
bool HasDenseElements();
@@ -1928,13 +2032,26 @@
};
-// FixedArray describes fixed-sized arrays with element type Object*.
-class FixedArray: public HeapObject {
+// Common superclass for FixedArrays that allow implementations to share
+// common accessors and some code paths.
+class FixedArrayBase: public HeapObject {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);
+ inline static FixedArrayBase* cast(Object* object);
+
+ // Layout description.
+ // Length is smi tagged when it is stored.
+ static const int kLengthOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = kLengthOffset + kPointerSize;
+};
+
+
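FixedArrayBase exists so that Object*-element and double-element arrays can share one header layout; only the element size differs between the subclasses. A minimal standalone sketch of that size arithmetic (the constant names are illustrative stand-ins, not V8's generated accessors):

#include <cstdio>

// Illustrative stand-ins for the V8 layout constants.
const int kPointerSize = sizeof(void*);
const int kDoubleSize = sizeof(double);
const int kMapOffset = 0;                             // HeapObject header: map
const int kLengthOffset = kMapOffset + kPointerSize;  // smi-tagged length
const int kArrayHeaderSize = kLengthOffset + kPointerSize;

// Only the element size differs between the two subclasses.
int FixedArraySizeFor(int length) {
  return kArrayHeaderSize + length * kPointerSize;  // element type Object*
}
int FixedDoubleArraySizeFor(int length) {
  return kArrayHeaderSize + length * kDoubleSize;   // element type double
}

int main() {
  printf("FixedArray(4): %d bytes\n", FixedArraySizeFor(4));
  printf("FixedDoubleArray(4): %d bytes\n", FixedDoubleArraySizeFor(4));
  return 0;
}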
+// FixedArray describes fixed-sized arrays with element type Object*.
+class FixedArray: public FixedArrayBase {
+ public:
// Setter and getter for elements.
inline Object* get(int index);
// Setter that uses write barrier.
@@ -1985,11 +2102,6 @@
// Casting.
static inline FixedArray* cast(Object* obj);
- // Layout description.
- // Length is smi tagged when it is stored.
- static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = kLengthOffset + kPointerSize;
-
// Maximal allowed size, in bytes, of a single FixedArray.
// Prevents overflowing size computations, as well as extreme memory
// consumption.
@@ -2037,6 +2149,60 @@
};
+// FixedDoubleArray describes fixed-sized arrays with element type double.
+class FixedDoubleArray: public FixedArrayBase {
+ public:
+ inline void Initialize(FixedArray* from);
+ inline void Initialize(FixedDoubleArray* from);
+ inline void Initialize(NumberDictionary* from);
+
+ // Setter and getter for elements.
+ inline double get(int index);
+ inline void set(int index, double value);
+ inline void set_the_hole(int index);
+
+ // Checking for the hole.
+ inline bool is_the_hole(int index);
+
+ // Garbage collection support.
+ inline static int SizeFor(int length) {
+ return kHeaderSize + length * kDoubleSize;
+ }
+
+ // Code Generation support.
+ static int OffsetOfElementAt(int index) { return SizeFor(index); }
+
+ inline static bool is_the_hole_nan(double value);
+ inline static double hole_nan_as_double();
+ inline static double canonical_not_the_hole_nan_as_double();
+
+ // Casting.
+ static inline FixedDoubleArray* cast(Object* obj);
+
+ // Maximal allowed size, in bytes, of a single FixedDoubleArray.
+ // Prevents overflowing size computations, as well as extreme memory
+ // consumption.
+ static const int kMaxSize = 512 * MB;
+ // Maximally allowed length of a FixedDoubleArray.
+ static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize;
+
+ // Dispatched behavior.
+#ifdef OBJECT_PRINT
+ inline void FixedDoubleArrayPrint() {
+ FixedDoubleArrayPrint(stdout);
+ }
+ void FixedDoubleArrayPrint(FILE* out);
+#endif
+
+#ifdef DEBUG
+ void FixedDoubleArrayVerify();
+#endif
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray);
+};
+
+
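With raw doubles there is no room for a tagged hole value, so FixedDoubleArray reserves a single NaN bit pattern for the hole and canonicalizes every other NaN it hands out; that is what the three *_nan helpers above are for. A minimal sketch of the encoding, using an arbitrary payload rather than the exact bit pattern V8 reserves:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

// Any recognizable NaN payload works for the sketch; the concrete pattern
// V8 reserves for the hole is an implementation detail, not this value.
const uint64_t kHoleNanBits = 0x7FF7A5A5A5A5A5A5ULL;

double hole_nan_as_double() {
  double result;
  std::memcpy(&result, &kHoleNanBits, sizeof(result));
  return result;
}

// Bit-wise comparison: NaN == NaN is false numerically, so compare bits.
bool is_the_hole_nan(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits == kHoleNanBits;
}

// Every NaN handed back to callers must avoid the reserved pattern.
double canonical_not_the_hole_nan_as_double() {
  return std::numeric_limits<double>::quiet_NaN();
}

int main() {
  assert(is_the_hole_nan(hole_nan_as_double()));
  assert(!is_the_hole_nan(canonical_not_the_hole_nan_as_double()));
  assert(!is_the_hole_nan(1.5));
  return 0;
}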
// DescriptorArrays are fixed arrays used to hold instance descriptors.
// The format of these objects is:
// TODO(1399): It should be possible to make room for bit_field3 in the map
@@ -2365,7 +2531,6 @@
int FindEntry(Isolate* isolate, Key key);
protected:
-
// Find the entry at which to insert element with the given key that
// has the given hash value.
uint32_t FindInsertionEntry(uint32_t hash);
@@ -2410,6 +2575,12 @@
return (last + number) & (size - 1);
}
+ // Rehashes this hash-table into the new table.
+ MUST_USE_RESULT MaybeObject* Rehash(HashTable* new_table, Key key);
+
+ // Attempt to shrink hash table after removal of key.
+ MUST_USE_RESULT MaybeObject* Shrink(Key key);
+
// Ensure enough space for n additional elements.
MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
};
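EnsureCapacity, the new Rehash, and Shrink all funnel through the same probing scheme: capacities are powers of two, so the bit-and with (size - 1) replaces a modulo, and the probe step grows with the attempt count. A toy open-addressing table sketching that contract, without V8's MaybeObject plumbing:

#include <cstdint>
#include <cstdio>
#include <vector>

// Toy open-addressing table; 0 marks an empty slot.
struct ToyTable {
  std::vector<uint32_t> slots;
  explicit ToyTable(size_t pow2_capacity) : slots(pow2_capacity, 0) {}

  size_t FindInsertionEntry(uint32_t hash) const {
    size_t size = slots.size();
    size_t entry = hash & (size - 1);               // first probe
    for (uint32_t number = 1; slots[entry] != 0; ++number) {
      entry = (entry + number) & (size - 1);        // NextProbe
    }
    return entry;
  }

  void Insert(uint32_t key) { slots[FindInsertionEntry(key)] = key; }

  // Rehash into a fresh table, e.g. to shrink after deletions.
  ToyTable Rehash(size_t new_pow2_capacity) const {
    ToyTable fresh(new_pow2_capacity);
    for (size_t i = 0; i < slots.size(); ++i) {
      if (slots[i] != 0) fresh.Insert(slots[i]);
    }
    return fresh;
  }
};

int main() {
  ToyTable table(8);
  uint32_t keys[] = {3, 11, 19};   // all collide at entry 3 for size 8
  for (int i = 0; i < 3; ++i) table.Insert(keys[i]);
  ToyTable shrunk = table.Rehash(4);
  printf("capacity after Shrink-style rehash: %zu\n", shrunk.slots.size());
  return 0;
}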
@@ -2532,7 +2703,6 @@
template <typename Shape, typename Key>
class Dictionary: public HashTable<Shape, Key> {
public:
-
static inline Dictionary<Shape, Key>* cast(Object* obj) {
return reinterpret_cast<Dictionary<Shape, Key>*>(obj);
}
@@ -2575,6 +2745,9 @@
// Delete a property from the dictionary.
Object* DeleteProperty(int entry, JSObject::DeleteMode mode);
+ // Attempt to shrink the dictionary after deletion of key.
+ MUST_USE_RESULT MaybeObject* Shrink(Key key);
+
// Returns the number of elements in the dictionary filtering out properties
// with the specified attributes.
int NumberOfElementsFilterAttributes(PropertyAttributes filter);
@@ -2582,10 +2755,13 @@
// Returns the number of enumerable elements in the dictionary.
int NumberOfEnumElements();
+ enum SortMode { UNSORTED, SORTED };
// Copies keys to preallocated fixed array.
- void CopyKeysTo(FixedArray* storage, PropertyAttributes filter);
+ void CopyKeysTo(FixedArray* storage,
+ PropertyAttributes filter,
+ SortMode sort_mode);
// Fill in details for properties into storage.
- void CopyKeysTo(FixedArray* storage, int index);
+ void CopyKeysTo(FixedArray* storage, int index, SortMode sort_mode);
// Accessors for next enumeration index.
void SetNextEnumerationIndex(int index) {
@@ -2786,11 +2962,6 @@
#ifdef DEBUG
void NormalizedMapCacheVerify();
#endif
-
- private:
- static int Hash(Map* fast);
-
- static bool CheckHit(Map* slow, Map* fast, PropertyNormalizationMode mode);
};
@@ -3235,7 +3406,7 @@
// Casting.
static inline DeoptimizationInputData* cast(Object* obj);
-#ifdef OBJECT_PRINT
+#ifdef ENABLE_DISASSEMBLER
void DeoptimizationInputDataPrint(FILE* out);
#endif
@@ -3274,7 +3445,7 @@
// Casting.
static inline DeoptimizationOutputData* cast(Object* obj);
-#ifdef OBJECT_PRINT
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
void DeoptimizationOutputDataPrint(FILE* out);
#endif
};
@@ -3429,12 +3600,6 @@
inline CheckType check_type();
inline void set_check_type(CheckType value);
- // [external array type]: For kind KEYED_EXTERNAL_ARRAY_LOAD_IC and
- // KEYED_EXTERNAL_ARRAY_STORE_IC, identifies the type of external
- // array that the code stub is specialized for.
- inline ExternalArrayType external_array_type();
- inline void set_external_array_type(ExternalArrayType value);
-
// [type-recording unary op type]: For all UNARY_OP_IC.
inline byte unary_op_type();
inline void set_unary_op_type(byte value);
@@ -3590,7 +3755,6 @@
static const int kOptimizableOffset = kKindSpecificFlagsOffset;
static const int kStackSlotsOffset = kKindSpecificFlagsOffset;
static const int kCheckTypeOffset = kKindSpecificFlagsOffset;
- static const int kExternalArrayTypeOffset = kKindSpecificFlagsOffset;
static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
static const int kUnaryOpTypeOffset = kStubMajorKeyOffset + 1;
@@ -3741,31 +3905,37 @@
inline void set_is_extensible(bool value);
inline bool is_extensible();
+ inline void set_elements_kind(JSObject::ElementsKind elements_kind) {
+ ASSERT(elements_kind < JSObject::kElementsKindCount);
+ ASSERT(JSObject::kElementsKindCount <= (1 << kElementsKindBitCount));
+ set_bit_field2((bit_field2() & ~kElementsKindMask) |
+ (elements_kind << kElementsKindShift));
+ ASSERT(this->elements_kind() == elements_kind);
+ }
+
+ inline JSObject::ElementsKind elements_kind() {
+ return static_cast<JSObject::ElementsKind>(
+ (bit_field2() & kElementsKindMask) >> kElementsKindShift);
+ }
+
// Tells whether the instance has fast elements.
// Equivalent to instance->GetElementsKind() == FAST_ELEMENTS.
- inline void set_has_fast_elements(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kHasFastElements));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kHasFastElements));
- }
- }
-
inline bool has_fast_elements() {
- return ((1 << kHasFastElements) & bit_field2()) != 0;
+ return elements_kind() == JSObject::FAST_ELEMENTS;
}
- // Tells whether an instance has pixel array elements.
- inline void set_has_external_array_elements(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kHasExternalArrayElements));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kHasExternalArrayElements));
- }
+ inline bool has_fast_double_elements() {
+ return elements_kind() == JSObject::FAST_DOUBLE_ELEMENTS;
}
inline bool has_external_array_elements() {
- return ((1 << kHasExternalArrayElements) & bit_field2()) != 0;
+ JSObject::ElementsKind kind(elements_kind());
+ return kind >= JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
+ kind <= JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
+ }
+
+ inline bool has_dictionary_elements() {
+ return elements_kind() == JSObject::DICTIONARY_ELEMENTS;
}
// Tells whether the map is attached to SharedFunctionInfo
@@ -3818,6 +3988,26 @@
DECL_ACCESSORS(prototype_transitions, FixedArray)
inline FixedArray* unchecked_prototype_transitions();
+ static const int kProtoTransitionHeaderSize = 1;
+ static const int kProtoTransitionNumberOfEntriesOffset = 0;
+ static const int kProtoTransitionElementsPerEntry = 2;
+ static const int kProtoTransitionPrototypeOffset = 0;
+ static const int kProtoTransitionMapOffset = 1;
+
+ inline int NumberOfProtoTransitions() {
+ FixedArray* cache = unchecked_prototype_transitions();
+ if (cache->length() == 0) return 0;
+ return
+ Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value();
+ }
+
+ inline void SetNumberOfProtoTransitions(int value) {
+ FixedArray* cache = unchecked_prototype_transitions();
+ ASSERT(cache->length() != 0);
+ cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset,
+ Smi::FromInt(value));
+ }
+
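The prototype-transition cache is an ordinary FixedArray: one header slot holding the entry count, then two slots per entry (prototype, map). A small check of the indexing implied by these constants:

#include <cassert>

const int kProtoTransitionHeaderSize = 1;
const int kProtoTransitionElementsPerEntry = 2;
const int kProtoTransitionPrototypeOffset = 0;
const int kProtoTransitionMapOffset = 1;

// Slot index, within the cache FixedArray, of entry i's two fields.
int PrototypeSlot(int entry) {
  return kProtoTransitionHeaderSize +
         entry * kProtoTransitionElementsPerEntry +
         kProtoTransitionPrototypeOffset;
}
int MapSlot(int entry) {
  return kProtoTransitionHeaderSize +
         entry * kProtoTransitionElementsPerEntry +
         kProtoTransitionMapOffset;
}

int main() {
  assert(PrototypeSlot(0) == 1 && MapSlot(0) == 2);  // slot 0 is the count
  assert(PrototypeSlot(1) == 3 && MapSlot(1) == 4);
  return 0;
}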
// Lookup in the map's instance descriptors and fill out the result
// with the given holder if the name is found. The holder may be
// NULL when this function is used from the compiler.
@@ -3834,18 +4024,23 @@
// instance descriptors.
MUST_USE_RESULT MaybeObject* CopyDropTransitions();
- // Returns this map if it has the fast elements bit set, otherwise
+ // Returns this map if it already has elements that are fast, otherwise
// returns a copy of the map, with all transitions dropped from the
- // descriptors and the fast elements bit set.
+ // descriptors and the ElementsKind set to FAST_ELEMENTS.
MUST_USE_RESULT inline MaybeObject* GetFastElementsMap();
- // Returns this map if it has the fast elements bit cleared,
- // otherwise returns a copy of the map, with all transitions dropped
- // from the descriptors and the fast elements bit cleared.
+ // Returns this map if it already has fast elements that are doubles,
+ // otherwise returns a copy of the map, with all transitions dropped from the
+ // descriptors and the ElementsKind set to FAST_DOUBLE_ELEMENTS.
+ MUST_USE_RESULT inline MaybeObject* GetFastDoubleElementsMap();
+
+ // Returns this map if it already has dictionary elements, otherwise returns a
+ // copy of the map, with all transitions dropped from the descriptors and the
+ // ElementsKind set to DICTIONARY_ELEMENTS.
MUST_USE_RESULT inline MaybeObject* GetSlowElementsMap();
// Returns a new map with all transitions dropped from the descriptors and the
- // external array elements bit set.
+ // ElementsKind set to the value corresponding to array_type.
MUST_USE_RESULT MaybeObject* GetExternalArrayElementsMap(
ExternalArrayType array_type,
bool safe_to_add_transition);
@@ -3894,6 +4089,21 @@
// following back pointers.
void ClearNonLiveTransitions(Heap* heap, Object* real_prototype);
+ // Computes a hash value for this map, to be used in HashTables and such.
+ int Hash();
+
+ // Compares this map to another to see if they describe equivalent objects.
+ // If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
+ // it had exactly zero inobject properties.
+ // The "shared" flags of both this map and |other| are ignored.
+ bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
+
+ // Returns true if this map and |other| describe equivalent objects.
+ // The "shared" flags of both this map and |other| are ignored.
+ bool EquivalentTo(Map* other) {
+ return EquivalentToForNormalization(other, KEEP_INOBJECT_PROPERTIES);
+ }
+
// Dispatched behavior.
#ifdef OBJECT_PRINT
inline void MapPrint() {
@@ -3986,13 +4196,21 @@
// Bit positions for bit field 2
static const int kIsExtensible = 0;
static const int kFunctionWithPrototype = 1;
- static const int kHasFastElements = 2;
- static const int kStringWrapperSafeForDefaultValueOf = 3;
- static const int kAttachedToSharedFunctionInfo = 4;
- static const int kHasExternalArrayElements = 5;
+ static const int kStringWrapperSafeForDefaultValueOf = 2;
+ static const int kAttachedToSharedFunctionInfo = 3;
+ // No bits can be used after kElementsKindShift; they are all reserved for
+ // storing the ElementsKind.
+ static const int kElementsKindShift = 4;
+ static const int kElementsKindBitCount = 4;
+
+ // Derived values from bit field 2
+ static const int kElementsKindMask = (-1 << kElementsKindShift) &
+ ((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);
+ static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
+ (JSObject::FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1;
// Bit positions for bit field 3
- static const int kIsShared = 1;
+ static const int kIsShared = 0;
// Layout of the default cache. It holds alternating name and code objects.
static const int kCodeCacheEntrySize = 2;
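kElementsKindMask is derived rather than hard-coded: kElementsKindBitCount one-bits starting at kElementsKindShift. A quick sketch verifying that arithmetic and the encode/decode round trip, written with an overflow-safe equivalent of the (-1 << shift) form used above:

#include <cassert>

const int kElementsKindShift = 4;
const int kElementsKindBitCount = 4;

// Overflow-safe equivalent of
//   (-1 << kElementsKindShift) &
//   ((1 << (kElementsKindShift + kElementsKindBitCount)) - 1).
const int kElementsKindMask =
    ((1 << kElementsKindBitCount) - 1) << kElementsKindShift;

int EncodeKind(int bit_field2, int kind) {
  return (bit_field2 & ~kElementsKindMask) | (kind << kElementsKindShift);
}
int DecodeKind(int bit_field2) {
  return (bit_field2 & kElementsKindMask) >> kElementsKindShift;
}

int main() {
  assert(kElementsKindMask == 0xF0);     // bits 4..7
  int bits = 0x0F;                       // pretend the low flags are all set
  bits = EncodeKind(bits, 5);
  assert(DecodeKind(bits) == 5);
  assert((bits & 0x0F) == 0x0F);         // neighbouring flags untouched
  return 0;
}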
@@ -4288,9 +4506,7 @@
// False if there are definitely no live objects created from this function.
// True if live objects _may_ exist (existence not guaranteed).
// May go back from true to false after GC.
- inline bool live_objects_may_exist();
-
- inline void set_live_objects_may_exist(bool value);
+ DECL_BOOLEAN_ACCESSORS(live_objects_may_exist)
// [instance class name]: class name for instances.
DECL_ACCESSORS(instance_class_name, Object)
@@ -4349,12 +4565,10 @@
inline void set_end_position(int end_position);
// Is this function a function expression in the source code.
- inline bool is_expression();
- inline void set_is_expression(bool value);
+ DECL_BOOLEAN_ACCESSORS(is_expression)
// Is this function a top-level function (scripts, evals).
- inline bool is_toplevel();
- inline void set_is_toplevel(bool value);
+ DECL_BOOLEAN_ACCESSORS(is_toplevel)
// Bit field containing various information collected by the compiler to
// drive optimization.
@@ -4381,8 +4595,7 @@
// Indicates if this function can be lazy compiled.
// This is used to determine if we can safely flush code from a function
// when doing GC if we expect that the function will no longer be used.
- inline bool allows_lazy_compilation();
- inline void set_allows_lazy_compilation(bool flag);
+ DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation)
// Indicates how many full GCs this function has survived with assigned
// code object. Used to determine when it is relatively safe to flush
@@ -4396,19 +4609,36 @@
// shared function info. If a function is repeatedly optimized or if
// we cannot optimize the function we disable optimization to avoid
// spending time attempting to optimize it again.
- inline bool optimization_disabled();
- inline void set_optimization_disabled(bool value);
+ DECL_BOOLEAN_ACCESSORS(optimization_disabled)
// Indicates whether the function is a strict mode function.
- inline bool strict_mode();
- inline void set_strict_mode(bool value);
+ DECL_BOOLEAN_ACCESSORS(strict_mode)
- // Indicates whether the function is a native ES5 function.
+ // False if the function definitely does not allocate an arguments object.
+ DECL_BOOLEAN_ACCESSORS(uses_arguments)
+
+ // True if the function has any duplicated parameter names.
+ DECL_BOOLEAN_ACCESSORS(has_duplicate_parameters)
+
+ // Indicates whether the function is a native function.
// These need special treatment in .call and .apply since
// null passed as the receiver should not be translated to the
// global object.
- inline bool es5_native();
- inline void set_es5_native(bool value);
+ DECL_BOOLEAN_ACCESSORS(native)
+
+ // Indicates that the function was created by the Function function.
+ // Though it's anonymous, toString should treat it as if it had the name
+ // "anonymous". We don't set the name itself so that the system does not
+ // see a binding for it.
+ DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
+
+ // Indicates whether the function is a bound function created using
+ // the bind function.
+ DECL_BOOLEAN_ACCESSORS(bound)
+
+ // Indicates that the function is anonymous (the name field can be set
+ // through the API, which does not change this flag).
+ DECL_BOOLEAN_ACCESSORS(is_anonymous)
// Indicates whether or not the code in the shared function support
// deoptimization.
@@ -4588,14 +4818,24 @@
static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
// Bit positions in compiler_hints.
- static const int kHasOnlySimpleThisPropertyAssignments = 0;
- static const int kAllowLazyCompilation = 1;
- static const int kLiveObjectsMayExist = 2;
- static const int kCodeAgeShift = 3;
- static const int kCodeAgeMask = 0x7;
- static const int kOptimizationDisabled = 6;
- static const int kStrictModeFunction = 7;
- static const int kES5Native = 8;
+ static const int kCodeAgeSize = 3;
+ static const int kCodeAgeMask = (1 << kCodeAgeSize) - 1;
+
+ enum CompilerHints {
+ kHasOnlySimpleThisPropertyAssignments,
+ kAllowLazyCompilation,
+ kLiveObjectsMayExist,
+ kCodeAgeShift,
+ kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
+ kStrictModeFunction,
+ kUsesArguments,
+ kHasDuplicateParameters,
+ kNative,
+ kBoundFunction,
+ kIsAnonymous,
+ kNameShouldPrintAsAnonymous,
+ kCompilerHintsCount // Pseudo entry
+ };
private:
#if V8_HOST_ARCH_32_BIT
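Replacing the hand-numbered hint constants with an enum gives each boolean hint one bit, lets the multi-bit code age reserve its range by setting the next enumerator explicitly, and makes kCompilerHintsCount available for the width check below. The same pattern in miniature, with hypothetical flag names:

#include <cassert>

const int kCodeAgeSize = 3;
const int kCodeAgeMask = (1 << kCodeAgeSize) - 1;

enum ToyHints {
  kFlagA,                                  // bit 0
  kFlagB,                                  // bit 1
  kCodeAgeShift,                           // bits 2..4: a 3-bit counter
  kFlagC = kCodeAgeShift + kCodeAgeSize,   // resumes after the counter
  kToyHintsCount                           // pseudo entry, for width checks
};

bool GetHint(int hints, int bit) { return ((hints >> bit) & 1) != 0; }
int SetHint(int hints, int bit) { return hints | (1 << bit); }
int GetCodeAge(int hints) { return (hints >> kCodeAgeShift) & kCodeAgeMask; }

int main() {
  int hints = SetHint(0, kFlagC);
  hints |= 5 << kCodeAgeShift;             // code age = 5
  assert(GetHint(hints, kFlagC));
  assert(!GetHint(hints, kFlagA));
  assert(GetCodeAge(hints) == 5);
  assert(kToyHintsCount <= 32);            // all hints fit one 32-bit field
  return 0;
}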
@@ -4608,28 +4848,31 @@
static const int kCompilerHintsSize = kIntSize;
#endif
+ STATIC_ASSERT(SharedFunctionInfo::kCompilerHintsCount <=
+ SharedFunctionInfo::kCompilerHintsSize * kBitsPerByte);
+
public:
// Constants for optimizing codegen for strict mode function and
- // es5 native tests.
+ // native tests.
// Allows the use of byte-width instructions.
static const int kStrictModeBitWithinByte =
(kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
- static const int kES5NativeBitWithinByte =
- (kES5Native + kCompilerHintsSmiTagSize) % kBitsPerByte;
+ static const int kNativeBitWithinByte =
+ (kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
#if __BYTE_ORDER == __LITTLE_ENDIAN
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
- static const int kES5NativeByteOffset = kCompilerHintsOffset +
- (kES5Native + kCompilerHintsSmiTagSize) / kBitsPerByte;
+ static const int kNativeByteOffset = kCompilerHintsOffset +
+ (kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
#elif __BYTE_ORDER == __BIG_ENDIAN
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kCompilerHintsSize - 1) -
((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
- static const int kES5NativeByteOffset = kCompilerHintsOffset +
+ static const int kNativeByteOffset = kCompilerHintsOffset +
(kCompilerHintsSize - 1) -
- ((kES5Native + kCompilerHintsSmiTagSize) / kBitsPerByte);
+ ((kNative + kCompilerHintsSmiTagSize) / kBitsPerByte);
#else
#error Unknown byte ordering
#endif
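The precomputed byte offsets let generated code test a flag with a single byte-width load instead of loading the whole hints word; on big-endian hosts the byte order within the word is mirrored, hence the (kCompilerHintsSize - 1) - ... form. A sketch of both computations, assuming a 4-byte hints field and a 1-bit smi tag as on 32-bit hosts:

#include <cassert>

const int kBitsPerByte = 8;
const int kCompilerHintsSize = 4;   // assumed: 4-byte hints field
const int kSmiTagSize = 1;          // assumed: hints are stored smi-tagged

int BitWithinByte(int bit) { return (bit + kSmiTagSize) % kBitsPerByte; }
int ByteOffsetLittleEndian(int bit) {
  return (bit + kSmiTagSize) / kBitsPerByte;
}
int ByteOffsetBigEndian(int bit) {
  return (kCompilerHintsSize - 1) - (bit + kSmiTagSize) / kBitsPerByte;
}

int main() {
  // A flag at bit 7 lands in byte 1 on little-endian, byte 2 on big-endian,
  // at bit position 0 within that byte.
  assert(ByteOffsetLittleEndian(7) == 1);
  assert(ByteOffsetBigEndian(7) == 2);
  assert(BitWithinByte(7) == 0);
  return 0;
}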
@@ -4686,9 +4929,6 @@
// recompilation.
inline bool IsMarkedForLazyRecompilation();
- // Compute a hash code for the source code of this function.
- uint32_t SourceHash();
-
// Check whether or not this function is inlineable.
bool IsInlineable();
@@ -4788,6 +5028,7 @@
// Layout of the literals array.
static const int kLiteralsPrefixSize = 1;
static const int kLiteralGlobalContextIndex = 0;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
};
@@ -4826,7 +5067,6 @@
static const int kSize = kContextOffset + kPointerSize;
private:
-
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalProxy);
};
@@ -4882,7 +5122,6 @@
// JavaScript global object.
class JSGlobalObject: public GlobalObject {
public:
-
// Casting.
static inline JSGlobalObject* cast(Object* obj);
@@ -5332,6 +5571,49 @@
};
+class PolymorphicCodeCache: public Struct {
+ public:
+ DECL_ACCESSORS(cache, Object)
+
+ MUST_USE_RESULT MaybeObject* Update(MapList* maps,
+ Code::Flags flags,
+ Code* code);
+ Object* Lookup(MapList* maps, Code::Flags flags);
+
+ static inline PolymorphicCodeCache* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+ inline void PolymorphicCodeCachePrint() {
+ PolymorphicCodeCachePrint(stdout);
+ }
+ void PolymorphicCodeCachePrint(FILE* out);
+#endif
+#ifdef DEBUG
+ void PolymorphicCodeCacheVerify();
+#endif
+
+ static const int kCacheOffset = HeapObject::kHeaderSize;
+ static const int kSize = kCacheOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PolymorphicCodeCache);
+};
+
+
+class PolymorphicCodeCacheHashTable
+ : public HashTable<CodeCacheHashTableShape, HashTableKey*> {
+ public:
+ Object* Lookup(MapList* maps, int code_kind);
+ MUST_USE_RESULT MaybeObject* Put(MapList* maps, int code_kind, Code* code);
+
+ static inline PolymorphicCodeCacheHashTable* cast(Object* obj);
+
+ static const int kInitialSize = 64;
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PolymorphicCodeCacheHashTable);
+};
+
+
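Unlike the per-map CodeCache, the polymorphic cache is keyed by a whole list of receiver maps plus the code flags. A toy model of that lookup/update contract; a std::map stands in for the hash table, and treating the map list as order-insensitive is an assumption of the sketch, not something the patch states:

#include <algorithm>
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

typedef uintptr_t MapId;                           // stands in for Map*
typedef std::pair<std::vector<MapId>, int> Key;    // (receiver maps, flags)

struct ToyPolymorphicCache {
  std::map<Key, int> cache;                        // value stands in for Code*

  static Key MakeKey(std::vector<MapId> maps, int flags) {
    std::sort(maps.begin(), maps.end());           // ignore map order
    return Key(maps, flags);
  }
  void Update(const std::vector<MapId>& maps, int flags, int code) {
    cache[MakeKey(maps, flags)] = code;
  }
  int Lookup(const std::vector<MapId>& maps, int flags) const {
    std::map<Key, int>::const_iterator it = cache.find(MakeKey(maps, flags));
    return it == cache.end() ? -1 : it->second;    // -1 plays 'undefined'
  }
};

int main() {
  ToyPolymorphicCache cache;
  std::vector<MapId> maps;
  maps.push_back(0x10);
  maps.push_back(0x20);
  cache.Update(maps, 7, 42);
  std::reverse(maps.begin(), maps.end());          // same maps, other order
  return cache.Lookup(maps, 7) == 42 ? 0 : 1;
}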
enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
@@ -5370,7 +5652,6 @@
static uint32_t MakeArrayIndexHash(uint32_t value, int length);
private:
-
uint32_t array_index() {
ASSERT(is_array_index());
return array_index_;
@@ -5427,6 +5708,7 @@
#else
inline void invalidate() { }
#endif
+
private:
uint32_t type_;
#ifdef DEBUG
@@ -5764,7 +6046,6 @@
// The SeqString abstract class captures sequential string values.
class SeqString: public String {
public:
-
// Casting.
static inline SeqString* cast(Object* obj);
@@ -6199,14 +6480,37 @@
// The JSProxy describes ECMAScript Harmony proxies.
-class JSProxy: public HeapObject {
+class JSProxy: public JSReceiver {
public:
// [handler]: The handler property.
DECL_ACCESSORS(handler, Object)
+ // [padding]: The padding slot (unused, see below).
+ DECL_ACCESSORS(padding, Object)
+
// Casting.
static inline JSProxy* cast(Object* obj);
+ bool HasPropertyWithHandler(String* name);
+
+ MUST_USE_RESULT MaybeObject* SetPropertyWithHandler(
+ String* name,
+ Object* value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
+
+ MUST_USE_RESULT MaybeObject* DeletePropertyWithHandler(
+ String* name,
+ DeleteMode mode);
+
+ MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler(
+ JSReceiver* receiver,
+ String* name,
+ bool* has_exception);
+
+ // Turn this into an (empty) JSObject.
+ void Fix();
+
// Dispatched behavior.
#ifdef OBJECT_PRINT
inline void JSProxyPrint() {
@@ -6218,9 +6522,14 @@
void JSProxyVerify();
#endif
- // Layout description.
+ // Layout description. We add padding so that a proxy has the same
+ // size as a virgin JSObject. This is essential for becoming a JSObject
+ // upon freeze.
static const int kHandlerOffset = HeapObject::kHeaderSize;
- static const int kSize = kHandlerOffset + kPointerSize;
+ static const int kPaddingOffset = kHandlerOffset + kPointerSize;
+ static const int kSize = kPaddingOffset + kPointerSize;
+
+ STATIC_CHECK(kSize == JSObject::kHeaderSize);
typedef FixedBodyDescriptor<kHandlerOffset,
kHandlerOffset + kPointerSize,
@@ -6231,6 +6540,16 @@
};
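The padding slot plus the STATIC_CHECK above encode the invariant that Fix() relies on: a proxy occupies exactly as much space as an empty JSObject, so it can be re-typed in place. The same trick in miniature, with hypothetical layouts:

#include <new>

struct ToyObject {    // hypothetical stand-in for a virgin JSObject header
  void* map;
  void* properties;
  void* elements;
};

struct ToyProxy {     // map + handler + the padding slot
  void* map;
  void* handler;
  void* padding;
};

// The miniature STATIC_CHECK: Fix() is only sound because the sizes match.
static_assert(sizeof(ToyProxy) == sizeof(ToyObject),
              "a proxy must occupy exactly one empty object's worth of space");

void Fix(void* storage) {
  // Re-type the proxy's storage in place, as JSProxy::Fix does for real.
  new (storage) ToyObject();
}

int main() {
  alignas(ToyProxy) static unsigned char storage[sizeof(ToyProxy)];
  Fix(storage);
  return 0;
}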
+// TODO(rossberg): Only a stub for now.
+class JSFunctionProxy: public JSProxy {
+ public:
+ // Casting.
+ static inline JSFunctionProxy* cast(Object* obj);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunctionProxy);
+};
+
// Foreign describes objects pointing from JavaScript to C structures.
// Since they cannot contain references to JS HeapObjects they can be
@@ -6531,6 +6850,7 @@
DECL_ACCESSORS(instance_call_handler, Object)
DECL_ACCESSORS(access_check_info, Object)
DECL_ACCESSORS(flag, Smi)
+ DECL_ACCESSORS(prototype_attributes, Smi)
// Following properties use flag bits.
DECL_BOOLEAN_ACCESSORS(hidden_prototype)
@@ -6570,7 +6890,8 @@
static const int kAccessCheckInfoOffset =
kInstanceCallHandlerOffset + kPointerSize;
static const int kFlagOffset = kAccessCheckInfoOffset + kPointerSize;
- static const int kSize = kFlagOffset + kPointerSize;
+ static const int kPrototypeAttributesOffset = kFlagOffset + kPointerSize;
+ static const int kSize = kPrototypeAttributesOffset + kPointerSize;
private:
// Bit position in the flag, from least significant bit position.
diff --git a/src/parser.cc b/src/parser.cc
index 7ad6440..ece0cfe 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -411,6 +411,7 @@
return result;
}
+
// ----------------------------------------------------------------------------
// Target is a support class to facilitate manipulation of the
// Parser's target_stack_ (the stack of potential 'break' and
@@ -536,7 +537,6 @@
LexicalScope::~LexicalScope() {
- parser_->top_scope_->Leave();
parser_->top_scope_ = previous_scope_;
parser_->lexical_scope_ = lexical_scope_parent_;
parser_->with_nesting_level_ = previous_with_nesting_level_;
@@ -592,11 +592,11 @@
FunctionLiteral* Parser::ParseProgram(Handle<String> source,
bool in_global_context,
StrictModeFlag strict_mode) {
- CompilationZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
+ ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(isolate()->counters()->parse());
isolate()->counters()->total_parse_size()->Increment(source->length());
- fni_ = new(zone()) FuncNameInferrer();
+ fni_ = new(zone()) FuncNameInferrer(isolate());
// Initialize parser state.
source->TryFlatten();
@@ -648,6 +648,7 @@
}
if (ok) {
result = new(zone()) FunctionLiteral(
+ isolate(),
no_name,
top_scope_,
body,
@@ -658,7 +659,8 @@
0,
0,
source->length(),
- false);
+ FunctionLiteral::ANONYMOUS_EXPRESSION,
+ false); // Does not have duplicate parameters.
} else if (stack_overflow_) {
isolate()->StackOverflow();
}
@@ -674,7 +676,7 @@
}
FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
- CompilationZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
+ ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(isolate()->counters()->parse_lazy());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
@@ -707,7 +709,7 @@
ASSERT(target_stack_ == NULL);
Handle<String> name(String::cast(shared_info->name()));
- fni_ = new(zone()) FuncNameInferrer();
+ fni_ = new(zone()) FuncNameInferrer(isolate());
fni_->PushEnclosingName(name);
mode_ = PARSE_EAGERLY;
@@ -717,7 +719,6 @@
{
// Parse the function literal.
- Handle<String> no_name = isolate()->factory()->empty_symbol();
Scope* scope = NewScope(top_scope_, Scope::GLOBAL_SCOPE, inside_with());
if (!info->closure().is_null()) {
scope = Scope::DeserializeScopeChain(info, scope);
@@ -728,12 +729,17 @@
top_scope_->EnableStrictMode();
}
- FunctionLiteralType type =
- shared_info->is_expression() ? EXPRESSION : DECLARATION;
+ FunctionLiteral::Type type = shared_info->is_expression()
+ ? (shared_info->is_anonymous()
+ ? FunctionLiteral::ANONYMOUS_EXPRESSION
+ : FunctionLiteral::NAMED_EXPRESSION)
+ : FunctionLiteral::DECLARATION;
bool ok = true;
result = ParseFunctionLiteral(name,
- false, // Strict mode name already checked.
- RelocInfo::kNoPosition, type, &ok);
+ false, // Strict mode name already checked.
+ RelocInfo::kNoPosition,
+ type,
+ &ok);
// Make sure the results agree.
ASSERT(ok == (result != NULL));
}
@@ -822,14 +828,24 @@
// form expr.a = ...; expr.b = ...; etc.
class InitializationBlockFinder : public ParserFinder {
public:
- InitializationBlockFinder()
- : first_in_block_(NULL), last_in_block_(NULL), block_size_(0) {}
+ // We find and mark the initialization blocks in top level
+ // non-looping code only. This is because the optimization prevents
+ // reuse of the map transitions, so it should be used only for code
+ // that will only be run once.
+ InitializationBlockFinder(Scope* top_scope, Target* target)
+ : enabled_(top_scope->DeclarationScope()->is_global_scope() &&
+ !IsLoopTarget(target)),
+ first_in_block_(NULL),
+ last_in_block_(NULL),
+ block_size_(0) {}
~InitializationBlockFinder() {
+ if (!enabled_) return;
if (InBlock()) EndBlock();
}
void Update(Statement* stat) {
+ if (!enabled_) return;
Assignment* assignment = AsAssignment(stat);
if (InBlock()) {
if (BlockContinues(assignment)) {
@@ -850,6 +866,14 @@
// the overhead exceeds the savings below this limit.
static const int kMinInitializationBlock = 3;
+ static bool IsLoopTarget(Target* target) {
+ while (target != NULL) {
+ if (target->node()->AsIterationStatement() != NULL) return true;
+ target = target->previous();
+ }
+ return false;
+ }
+
// Returns true if the expressions appear to denote the same object.
// In the context of initialization blocks, we only consider expressions
// of the form 'expr.x' or expr["x"].
@@ -912,6 +936,7 @@
bool InBlock() { return first_in_block_ != NULL; }
+ const bool enabled_;
Assignment* first_in_block_;
Assignment* last_in_block_;
int block_size_;
@@ -920,17 +945,18 @@
};
-// A ThisNamedPropertyAssigmentFinder finds and marks statements of the form
+// A ThisNamedPropertyAssignmentFinder finds and marks statements of the form
// this.x = ...;, where x is a named property. It also determines whether a
// function contains only assignments of this type.
-class ThisNamedPropertyAssigmentFinder : public ParserFinder {
+class ThisNamedPropertyAssignmentFinder : public ParserFinder {
public:
- explicit ThisNamedPropertyAssigmentFinder(Isolate* isolate)
+ explicit ThisNamedPropertyAssignmentFinder(Isolate* isolate)
: isolate_(isolate),
only_simple_this_property_assignments_(true),
- names_(NULL),
- assigned_arguments_(NULL),
- assigned_constants_(NULL) {}
+ names_(0),
+ assigned_arguments_(0),
+ assigned_constants_(0) {
+ }
void Update(Scope* scope, Statement* stat) {
// Bail out if function already has property assignments that are
@@ -957,19 +983,17 @@
// Returns a fixed array containing three elements for each assignment of the
// form this.x = y;
Handle<FixedArray> GetThisPropertyAssignments() {
- if (names_ == NULL) {
+ if (names_.is_empty()) {
return isolate_->factory()->empty_fixed_array();
}
- ASSERT(names_ != NULL);
- ASSERT(assigned_arguments_ != NULL);
- ASSERT_EQ(names_->length(), assigned_arguments_->length());
- ASSERT_EQ(names_->length(), assigned_constants_->length());
+ ASSERT_EQ(names_.length(), assigned_arguments_.length());
+ ASSERT_EQ(names_.length(), assigned_constants_.length());
Handle<FixedArray> assignments =
- isolate_->factory()->NewFixedArray(names_->length() * 3);
- for (int i = 0; i < names_->length(); i++) {
- assignments->set(i * 3, *names_->at(i));
- assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_->at(i)));
- assignments->set(i * 3 + 2, *assigned_constants_->at(i));
+ isolate_->factory()->NewFixedArray(names_.length() * 3);
+ for (int i = 0; i < names_.length(); ++i) {
+ assignments->set(i * 3, *names_[i]);
+ assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_[i]));
+ assignments->set(i * 3 + 2, *assigned_constants_[i]);
}
return assignments;
}
@@ -1026,18 +1050,37 @@
AssignmentFromSomethingElse();
}
+
+
+
+ // We will potentially reorder the property assignments, so they must be
+ // simple enough that the ordering does not matter.
void AssignmentFromParameter(Handle<String> name, int index) {
- EnsureAllocation();
- names_->Add(name);
- assigned_arguments_->Add(index);
- assigned_constants_->Add(isolate_->factory()->undefined_value());
+ EnsureInitialized();
+ for (int i = 0; i < names_.length(); ++i) {
+ if (name->Equals(*names_[i])) {
+ assigned_arguments_[i] = index;
+ assigned_constants_[i] = isolate_->factory()->undefined_value();
+ return;
+ }
+ }
+ names_.Add(name);
+ assigned_arguments_.Add(index);
+ assigned_constants_.Add(isolate_->factory()->undefined_value());
}
void AssignmentFromConstant(Handle<String> name, Handle<Object> value) {
- EnsureAllocation();
- names_->Add(name);
- assigned_arguments_->Add(-1);
- assigned_constants_->Add(value);
+ EnsureInitialized();
+ for (int i = 0; i < names_.length(); ++i) {
+ if (name->Equals(*names_[i])) {
+ assigned_arguments_[i] = -1;
+ assigned_constants_[i] = value;
+ return;
+ }
+ }
+ names_.Add(name);
+ assigned_arguments_.Add(-1);
+ assigned_constants_.Add(value);
}
void AssignmentFromSomethingElse() {
@@ -1045,22 +1088,21 @@
only_simple_this_property_assignments_ = false;
}
- void EnsureAllocation() {
- if (names_ == NULL) {
- ASSERT(assigned_arguments_ == NULL);
- ASSERT(assigned_constants_ == NULL);
- Zone* zone = isolate_->zone();
- names_ = new(zone) ZoneStringList(4);
- assigned_arguments_ = new(zone) ZoneList<int>(4);
- assigned_constants_ = new(zone) ZoneObjectList(4);
+ void EnsureInitialized() {
+ if (names_.capacity() == 0) {
+ ASSERT(assigned_arguments_.capacity() == 0);
+ ASSERT(assigned_constants_.capacity() == 0);
+ names_.Initialize(4);
+ assigned_arguments_.Initialize(4);
+ assigned_constants_.Initialize(4);
}
}
Isolate* isolate_;
bool only_simple_this_property_assignments_;
- ZoneStringList* names_;
- ZoneList<int>* assigned_arguments_;
- ZoneObjectList* assigned_constants_;
+ ZoneStringList names_;
+ ZoneList<int> assigned_arguments_;
+ ZoneObjectList assigned_constants_;
};
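Besides switching the three lists to inline ZoneLists, the finder now deduplicates by property name: a later this.x assignment replaces the earlier record instead of appending. Since the recorded assignments may be reordered, only last-write-wins bookkeeping is sound. A reduced model of that update:

#include <cassert>
#include <string>
#include <vector>

struct Record {
  std::string name;
  int argument;        // parameter index, or -1 for a constant
};

struct ToyFinder {
  std::vector<Record> records;

  // Mirrors AssignmentFromParameter: replace any existing entry for the name
  // instead of appending, so only the last assignment to a name survives.
  void AssignmentFromParameter(const std::string& name, int index) {
    for (size_t i = 0; i < records.size(); ++i) {
      if (records[i].name == name) {
        records[i].argument = index;
        return;
      }
    }
    Record record;
    record.name = name;
    record.argument = index;
    records.push_back(record);
  }
};

int main() {
  ToyFinder finder;
  finder.AssignmentFromParameter("x", 0);  // this.x = a;
  finder.AssignmentFromParameter("x", 1);  // this.x = b;  (overwrites)
  assert(finder.records.size() == 1);
  assert(finder.records[0].argument == 1);
  return 0;
}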
@@ -1077,8 +1119,8 @@
TargetScope scope(&this->target_stack_);
ASSERT(processor != NULL);
- InitializationBlockFinder block_finder;
- ThisNamedPropertyAssigmentFinder this_property_assignment_finder(isolate());
+ InitializationBlockFinder block_finder(top_scope_, target_stack_);
+ ThisNamedPropertyAssignmentFinder this_property_assignment_finder(isolate());
bool directive_prologue = true; // Parsing directive prologue.
while (peek() != end_token) {
@@ -1132,12 +1174,7 @@
}
}
- // We find and mark the initialization blocks on top level code only.
- // This is because the optimization prevents reuse of the map transitions,
- // so it should be used only for code that will only be run once.
- if (top_scope_->is_global_scope()) {
- block_finder.Update(stat);
- }
+ block_finder.Update(stat);
// Find and mark all assignments to named properties in this (this.x =)
if (top_scope_->is_function_scope()) {
this_property_assignment_finder.Update(top_scope_, stat);
@@ -1247,7 +1284,7 @@
// one must take great care not to treat it as a
// fall-through. It is much easier just to wrap the entire
// try-statement in a statement block and put the labels there
- Block* result = new(zone()) Block(labels, 1, false);
+ Block* result = new(zone()) Block(isolate(), labels, 1, false);
Target target(&this->target_stack_, result);
TryStatement* statement = ParseTryStatement(CHECK_OK);
if (statement) {
@@ -1269,9 +1306,6 @@
return ParseFunctionDeclaration(ok);
}
- case Token::NATIVE:
- return ParseNativeDeclaration(ok);
-
case Token::DEBUGGER:
stmt = ParseDebuggerStatement(ok);
break;
@@ -1304,13 +1338,14 @@
// to the calling function context.
// Similarly, strict mode eval scope does not leak variable declarations to
// the caller's scope so we declare all locals, too.
- if (top_scope_->is_function_scope() ||
- top_scope_->is_strict_mode_eval_scope()) {
+ Scope* declaration_scope = top_scope_->DeclarationScope();
+ if (declaration_scope->is_function_scope() ||
+ declaration_scope->is_strict_mode_eval_scope()) {
// Declare the variable in the function scope.
- var = top_scope_->LocalLookup(name);
+ var = declaration_scope->LocalLookup(name);
if (var == NULL) {
// Declare the name.
- var = top_scope_->DeclareLocal(name, mode, Scope::VAR_OR_CONST);
+ var = declaration_scope->DeclareLocal(name, mode);
} else {
// The name was declared before; check for conflicting
// re-declarations. If the previous declaration was a const or the
@@ -1326,7 +1361,7 @@
Expression* expression =
NewThrowTypeError(isolate()->factory()->redeclaration_symbol(),
type_string, name);
- top_scope_->SetIllegalRedeclaration(expression);
+ declaration_scope->SetIllegalRedeclaration(expression);
}
}
}
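The recurring top_scope_->DeclarationScope() calls implement var hoisting: a declaration skips any intervening with or catch scopes and lands in the nearest function, global, or strict-eval scope. A minimal sketch of that walk, with the scope kinds simplified to an enum:

#include <cassert>

enum ScopeType { GLOBAL_SCOPE, FUNCTION_SCOPE, CATCH_SCOPE, WITH_SCOPE };

struct ToyScope {
  ScopeType type;
  ToyScope* outer;

  // 'var' declarations hoist past catch/with scopes to the nearest
  // function or global scope, which is what DeclarationScope() returns.
  ToyScope* DeclarationScope() {
    ToyScope* scope = this;
    while (scope->type == CATCH_SCOPE || scope->type == WITH_SCOPE) {
      scope = scope->outer;
    }
    return scope;
  }
};

int main() {
  ToyScope global = { GLOBAL_SCOPE, 0 };
  ToyScope function = { FUNCTION_SCOPE, &global };
  ToyScope catch_scope = { CATCH_SCOPE, &function };
  // A 'var' declared inside a catch block still lands in the function.
  assert(catch_scope.DeclarationScope() == &function);
  assert(function.DeclarationScope() == &function);
  return 0;
}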
@@ -1347,14 +1382,18 @@
// semantic issue as long as we keep the source order, but it may be
// a performance issue since it may lead to repeated
// Runtime::DeclareContextSlot() calls.
- VariableProxy* proxy = top_scope_->NewUnresolved(name, inside_with());
- top_scope_->AddDeclaration(new(zone()) Declaration(proxy, mode, fun));
+ VariableProxy* proxy = declaration_scope->NewUnresolved(name, false);
+ declaration_scope->AddDeclaration(new(zone()) Declaration(proxy, mode, fun));
// For global const variables we bind the proxy to a variable.
- if (mode == Variable::CONST && top_scope_->is_global_scope()) {
+ if (mode == Variable::CONST && declaration_scope->is_global_scope()) {
ASSERT(resolve); // should be set by all callers
Variable::Kind kind = Variable::NORMAL;
- var = new(zone()) Variable(top_scope_, name, Variable::CONST, true, kind);
+ var = new(zone()) Variable(declaration_scope,
+ name,
+ Variable::CONST,
+ true,
+ kind);
}
// If requested and we have a local variable, bind the proxy to the variable
@@ -1392,13 +1431,6 @@
// declaration is resolved by looking up the function through a
// callback provided by the extension.
Statement* Parser::ParseNativeDeclaration(bool* ok) {
- if (extension_ == NULL) {
- ReportUnexpectedToken(Token::NATIVE);
- *ok = false;
- return NULL;
- }
-
- Expect(Token::NATIVE, CHECK_OK);
Expect(Token::FUNCTION, CHECK_OK);
Handle<String> name = ParseIdentifier(CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
@@ -1417,7 +1449,7 @@
// isn't lazily compiled. The extension structures are only
// accessible while parsing the first time not when reparsing
// because of lazy compilation.
- top_scope_->ForceEagerCompilation();
+ top_scope_->DeclarationScope()->ForceEagerCompilation();
// Compute the function template for the native function.
v8::Handle<v8::FunctionTemplate> fun_template =
@@ -1443,10 +1475,10 @@
// introduced dynamically when we meet their declarations, whereas
// other functions are setup when entering the surrounding scope.
SharedFunctionInfoLiteral* lit =
- new(zone()) SharedFunctionInfoLiteral(shared);
+ new(zone()) SharedFunctionInfoLiteral(isolate(), shared);
VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK);
return new(zone()) ExpressionStatement(new(zone()) Assignment(
- Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
+ isolate(), Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
}
@@ -1455,12 +1487,13 @@
// 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
int function_token_position = scanner().location().beg_pos;
- bool is_reserved = false;
- Handle<String> name = ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
+ bool is_strict_reserved = false;
+ Handle<String> name = ParseIdentifierOrStrictReservedWord(
+ &is_strict_reserved, CHECK_OK);
FunctionLiteral* fun = ParseFunctionLiteral(name,
- is_reserved,
+ is_strict_reserved,
function_token_position,
- DECLARATION,
+ FunctionLiteral::DECLARATION,
CHECK_OK);
// Even if we're not at the top-level of the global or a function
// scope, we treat it as such and introduce the function with its
@@ -1478,12 +1511,16 @@
// (ECMA-262, 3rd, 12.2)
//
// Construct block expecting 16 statements.
- Block* result = new(zone()) Block(labels, 16, false);
+ Block* result = new(zone()) Block(isolate(), labels, 16, false);
Target target(&this->target_stack_, result);
Expect(Token::LBRACE, CHECK_OK);
+ InitializationBlockFinder block_finder(top_scope_, target_stack_);
while (peek() != Token::RBRACE) {
Statement* stat = ParseStatement(NULL, CHECK_OK);
- if (stat && !stat->IsEmpty()) result->AddStatement(stat);
+ if (stat && !stat->IsEmpty()) {
+ result->AddStatement(stat);
+ block_finder.Update(stat);
+ }
}
Expect(Token::RBRACE, CHECK_OK);
return result;
@@ -1494,8 +1531,8 @@
// VariableStatement ::
// VariableDeclarations ';'
- Expression* dummy; // to satisfy the ParseVariableDeclarations() signature
- Block* result = ParseVariableDeclarations(true, &dummy, CHECK_OK);
+ Handle<String> ignore;
+ Block* result = ParseVariableDeclarations(true, &ignore, CHECK_OK);
ExpectSemicolon(CHECK_OK);
return result;
}
@@ -1513,18 +1550,19 @@
// to initialize it properly. This mechanism is used for the parsing
// of 'for-in' loops.
Block* Parser::ParseVariableDeclarations(bool accept_IN,
- Expression** var,
+ Handle<String>* out,
bool* ok) {
// VariableDeclarations ::
// ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
Variable::Mode mode = Variable::VAR;
bool is_const = false;
+ Scope* declaration_scope = top_scope_->DeclarationScope();
if (peek() == Token::VAR) {
Consume(Token::VAR);
} else if (peek() == Token::CONST) {
Consume(Token::CONST);
- if (top_scope_->is_strict_mode()) {
+ if (declaration_scope->is_strict_mode()) {
ReportMessage("strict_const", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -1548,19 +1586,19 @@
// is inside an initializer block, it is ignored.
//
// Create new block with one expected declaration.
- Block* block = new(zone()) Block(NULL, 1, true);
- VariableProxy* last_var = NULL; // the last variable declared
+ Block* block = new(zone()) Block(isolate(), NULL, 1, true);
int nvars = 0; // the number of variables declared
+ Handle<String> name;
do {
if (fni_ != NULL) fni_->Enter();
// Parse variable name.
if (nvars > 0) Consume(Token::COMMA);
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ name = ParseIdentifier(CHECK_OK);
if (fni_ != NULL) fni_->PushVariableName(name);
// Strict mode variables may not be named eval or arguments
- if (top_scope_->is_strict_mode() && IsEvalOrArguments(name)) {
+ if (declaration_scope->is_strict_mode() && IsEvalOrArguments(name)) {
ReportMessage("strict_var_name", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -1578,11 +1616,10 @@
// If we have a const declaration, in an inner scope, the proxy is always
// bound to the declared variable (independent of possibly surrounding with
// statements).
- last_var = Declare(name, mode, NULL,
- is_const /* always bound for CONST! */,
- CHECK_OK);
+ Declare(name, mode, NULL, is_const /* always bound for CONST! */,
+ CHECK_OK);
nvars++;
- if (top_scope_->num_var_or_const() > kMaxNumFunctionLocals) {
+ if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
ReportMessageAt(scanner().location(), "too_many_variables",
Vector<const char*>::empty());
*ok = false;
@@ -1598,10 +1635,10 @@
//
// var v; v = x;
//
- // In particular, we need to re-lookup 'v' as it may be a
- // different 'v' than the 'v' in the declaration (if we are inside
- // a 'with' statement that makes a object property with name 'v'
- // visible).
+ // In particular, we need to re-lookup 'v' (in top_scope_, not
+ // declaration_scope) as it may be a different 'v' than the 'v' in the
+ // declaration (e.g., if we are inside a 'with' statement or 'catch'
+ // block).
//
// However, note that const declarations are different! A const
// declaration of the form:
@@ -1616,6 +1653,7 @@
// one - there is no re-lookup (see the last parameter of the
// Declare() call above).
+ Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
Expression* value = NULL;
int position = -1;
if (peek() == Token::ASSIGN) {
@@ -1623,7 +1661,11 @@
position = scanner().location().beg_pos;
value = ParseAssignmentExpression(accept_IN, CHECK_OK);
// Don't infer if it is "a = function(){...}();"-like expression.
- if (fni_ != NULL && value->AsCall() == NULL) fni_->Infer();
+ if (fni_ != NULL &&
+ value->AsCall() == NULL &&
+ value->AsCallNew() == NULL) {
+ fni_->Infer();
+ }
}
// Make sure that 'const c' actually initializes 'c' to undefined
@@ -1652,11 +1694,11 @@
// browsers where the global object (window) has lots of
// properties defined in prototype objects.
- if (top_scope_->is_global_scope()) {
+ if (initialization_scope->is_global_scope()) {
// Compute the arguments for the runtime call.
ZoneList<Expression*>* arguments = new(zone()) ZoneList<Expression*>(3);
// We have at least 1 parameter.
- arguments->Add(new(zone()) Literal(name));
+ arguments->Add(NewLiteral(name));
CallRuntime* initialize;
if (is_const) {
@@ -1669,14 +1711,17 @@
// the number of arguments (1 or 2).
initialize =
new(zone()) CallRuntime(
- isolate()->factory()->InitializeConstGlobal_symbol(),
- Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
- arguments);
+ isolate(),
+ isolate()->factory()->InitializeConstGlobal_symbol(),
+ Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
+ arguments);
} else {
// Add strict mode.
// We may want to pass a singleton to avoid Literal allocations.
- arguments->Add(NewNumberLiteral(
- top_scope_->is_strict_mode() ? kStrictMode : kNonStrictMode));
+ StrictModeFlag flag = initialization_scope->is_strict_mode()
+ ? kStrictMode
+ : kNonStrictMode;
+ arguments->Add(NewNumberLiteral(flag));
// Be careful not to assign a value to the global variable if
// we're in a with. The initialization value should not
@@ -1693,9 +1738,10 @@
// the number of arguments (2 or 3).
initialize =
new(zone()) CallRuntime(
- isolate()->factory()->InitializeVarGlobal_symbol(),
- Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
- arguments);
+ isolate(),
+ isolate()->factory()->InitializeVarGlobal_symbol(),
+ Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
+ arguments);
}
block->AddStatement(new(zone()) ExpressionStatement(initialize));
@@ -1713,8 +1759,11 @@
// the top context for variables). Sigh...
if (value != NULL) {
Token::Value op = (is_const ? Token::INIT_CONST : Token::INIT_VAR);
+ bool in_with = is_const ? false : inside_with();
+ VariableProxy* proxy =
+ initialization_scope->NewUnresolved(name, in_with);
Assignment* assignment =
- new(zone()) Assignment(op, last_var, value, position);
+ new(zone()) Assignment(isolate(), op, proxy, value, position);
if (block) {
block->AddStatement(new(zone()) ExpressionStatement(assignment));
}
@@ -1723,10 +1772,10 @@
if (fni_ != NULL) fni_->Leave();
} while (peek() == Token::COMMA);
- if (!is_const && nvars == 1) {
- // We have a single, non-const variable.
- ASSERT(last_var != NULL);
- *var = last_var;
+ // If there was a single non-const declaration, return it in the output
+ // parameter for possible use by for/in.
+ if (nvars == 1 && !is_const) {
+ *out = name;
}
return block;
@@ -1751,7 +1800,7 @@
// Identifier ':' Statement
bool starts_with_idenfifier = peek_any_identifier();
Expression* expr = ParseExpression(true, CHECK_OK);
- if (peek() == Token::COLON && starts_with_idenfifier && expr &&
+ if (peek() == Token::COLON && starts_with_idenfifier && expr != NULL &&
expr->AsVariableProxy() != NULL &&
!expr->AsVariableProxy()->is_this()) {
// Expression is a single identifier, and not, e.g., a parenthesized
@@ -1781,6 +1830,20 @@
return ParseStatement(labels, ok);
}
+ // If we have an extension, we allow a native function declaration.
+ // A native function declaration starts with "native function" with
+ // no line-terminator between the two words.
+ if (extension_ != NULL &&
+ peek() == Token::FUNCTION &&
+ !scanner().HasAnyLineTerminatorBeforeNext() &&
+ expr != NULL &&
+ expr->AsVariableProxy() != NULL &&
+ expr->AsVariableProxy()->name()->Equals(
+ isolate()->heap()->native_symbol()) &&
+ !scanner().literal_contains_escapes()) {
+ return ParseNativeDeclaration(ok);
+ }
+
// Parsed expression statement.
ExpectSemicolon(CHECK_OK);
return new(zone()) ExpressionStatement(expr);
@@ -1803,7 +1866,8 @@
} else {
else_statement = EmptyStatement();
}
- return new(zone()) IfStatement(condition, then_statement, else_statement);
+ return new(zone()) IfStatement(
+ isolate(), condition, then_statement, else_statement);
}
@@ -1814,7 +1878,7 @@
Expect(Token::CONTINUE, CHECK_OK);
Handle<String> label = Handle<String>::null();
Token::Value tok = peek();
- if (!scanner().has_line_terminator_before_next() &&
+ if (!scanner().HasAnyLineTerminatorBeforeNext() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
label = ParseIdentifier(CHECK_OK);
}
@@ -1844,7 +1908,7 @@
Expect(Token::BREAK, CHECK_OK);
Handle<String> label;
Token::Value tok = peek();
- if (!scanner().has_line_terminator_before_next() &&
+ if (!scanner().HasAnyLineTerminatorBeforeNext() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
label = ParseIdentifier(CHECK_OK);
}
@@ -1886,14 +1950,16 @@
// function. See ECMA-262, section 12.9, page 67.
//
// To be consistent with KJS we report the syntax error at runtime.
- if (!top_scope_->is_function_scope()) {
+ Scope* declaration_scope = top_scope_->DeclarationScope();
+ if (declaration_scope->is_global_scope() ||
+ declaration_scope->is_eval_scope()) {
Handle<String> type = isolate()->factory()->illegal_return_symbol();
Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
return new(zone()) ExpressionStatement(throw_error);
}
Token::Value tok = peek();
- if (scanner().has_line_terminator_before_next() ||
+ if (scanner().HasAnyLineTerminatorBeforeNext() ||
tok == Token::SEMICOLON ||
tok == Token::RBRACE ||
tok == Token::EOS) {
@@ -1907,35 +1973,31 @@
}
-Block* Parser::WithHelper(Expression* obj,
- ZoneStringList* labels,
- bool is_catch_block,
- bool* ok) {
+Block* Parser::WithHelper(Expression* obj, ZoneStringList* labels, bool* ok) {
// Parse the statement and collect escaping labels.
- ZoneList<Label*>* target_list = new(zone()) ZoneList<Label*>(0);
- TargetCollector collector(target_list);
+ TargetCollector collector;
Statement* stat;
{ Target target(&this->target_stack_, &collector);
with_nesting_level_++;
- top_scope_->RecordWithStatement();
+ top_scope_->DeclarationScope()->RecordWithStatement();
stat = ParseStatement(labels, CHECK_OK);
with_nesting_level_--;
}
// Create resulting block with two statements.
// 1: Evaluate the with expression.
// 2: The try-finally block evaluating the body.
- Block* result = new(zone()) Block(NULL, 2, false);
+ Block* result = new(zone()) Block(isolate(), NULL, 2, false);
if (result != NULL) {
- result->AddStatement(new(zone()) WithEnterStatement(obj, is_catch_block));
+ result->AddStatement(new(zone()) EnterWithContextStatement(obj));
// Create body block.
- Block* body = new(zone()) Block(NULL, 1, false);
+ Block* body = new(zone()) Block(isolate(), NULL, 1, false);
body->AddStatement(stat);
// Create exit block.
- Block* exit = new(zone()) Block(NULL, 1, false);
- exit->AddStatement(new(zone()) WithExitStatement());
+ Block* exit = new(zone()) Block(isolate(), NULL, 1, false);
+ exit->AddStatement(new(zone()) ExitContextStatement());
// Return a try-finally statement.
TryFinallyStatement* wrapper = new(zone()) TryFinallyStatement(body, exit);
@@ -1962,7 +2024,7 @@
Expression* expr = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- return WithHelper(expr, labels, false, CHECK_OK);
+ return WithHelper(expr, labels, CHECK_OK);
}
@@ -1995,7 +2057,7 @@
statements->Add(stat);
}
- return new(zone()) CaseClause(label, statements, pos);
+ return new(zone()) CaseClause(isolate(), label, statements, pos);
}
@@ -2004,7 +2066,7 @@
// SwitchStatement ::
// 'switch' '(' Expression ')' '{' CaseClause* '}'
- SwitchStatement* statement = new(zone()) SwitchStatement(labels);
+ SwitchStatement* statement = new(zone()) SwitchStatement(isolate(), labels);
Target target(&this->target_stack_, statement);
Expect(Token::SWITCH, CHECK_OK);
@@ -2032,7 +2094,7 @@
Expect(Token::THROW, CHECK_OK);
int pos = scanner().location().beg_pos;
- if (scanner().has_line_terminator_before_next()) {
+ if (scanner().HasAnyLineTerminatorBeforeNext()) {
ReportMessage("newline_after_throw", Vector<const char*>::empty());
*ok = false;
return NULL;
@@ -2040,7 +2102,8 @@
Expression* exception = ParseExpression(true, CHECK_OK);
ExpectSemicolon(CHECK_OK);
- return new(zone()) ExpressionStatement(new(zone()) Throw(exception, pos));
+ return new(zone()) ExpressionStatement(
+ new(zone()) Throw(isolate(), exception, pos));
}
@@ -2058,18 +2121,13 @@
Expect(Token::TRY, CHECK_OK);
- ZoneList<Label*>* target_list = new(zone()) ZoneList<Label*>(0);
- TargetCollector collector(target_list);
+ TargetCollector try_collector;
Block* try_block;
- { Target target(&this->target_stack_, &collector);
+ { Target target(&this->target_stack_, &try_collector);
try_block = ParseBlock(NULL, CHECK_OK);
}
- Block* catch_block = NULL;
- Variable* catch_var = NULL;
- Block* finally_block = NULL;
-
Token::Value tok = peek();
if (tok != Token::CATCH && tok != Token::FINALLY) {
ReportMessage("no_catch_or_finally", Vector<const char*>::empty());
@@ -2078,18 +2136,19 @@
}
// If we can break out from the catch block and there is a finally block,
- // then we will need to collect jump targets from the catch block. Since
- // we don't know yet if there will be a finally block, we always collect
- // the jump targets.
- ZoneList<Label*>* catch_target_list = new(zone()) ZoneList<Label*>(0);
- TargetCollector catch_collector(catch_target_list);
- bool has_catch = false;
+ // then we will need to collect escaping targets from the catch
+ // block. Since we don't know yet if there will be a finally block, we
+ // always collect the targets.
+ TargetCollector catch_collector;
+ Scope* catch_scope = NULL;
+ Variable* catch_variable = NULL;
+ Block* catch_block = NULL;
+ Handle<String> name;
if (tok == Token::CATCH) {
- has_catch = true;
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
- Handle<String> name = ParseIdentifier(CHECK_OK);
+ name = ParseIdentifier(CHECK_OK);
if (top_scope_->is_strict_mode() && IsEvalOrArguments(name)) {
ReportMessage("strict_catch_variable", Vector<const char*>::empty());
@@ -2100,17 +2159,39 @@
Expect(Token::RPAREN, CHECK_OK);
if (peek() == Token::LBRACE) {
- // Allocate a temporary for holding the finally state while
- // executing the finally block.
- catch_var =
- top_scope_->NewTemporary(isolate()->factory()->catch_var_symbol());
- Literal* name_literal = new(zone()) Literal(name);
- VariableProxy* catch_var_use = new(zone()) VariableProxy(catch_var);
- Expression* obj =
- new(zone()) CatchExtensionObject(name_literal, catch_var_use);
+ // Rewrite the catch body B to a single statement block
+ // { try B finally { PopContext }}.
+ Block* inner_body;
+ // We need to collect escapes from the body for both the inner
+ // try/finally used to pop the catch context and any possible outer
+ // try/finally.
+ TargetCollector inner_collector;
{ Target target(&this->target_stack_, &catch_collector);
- catch_block = WithHelper(obj, NULL, true, CHECK_OK);
+ { Target target(&this->target_stack_, &inner_collector);
+ catch_scope = NewScope(top_scope_, Scope::CATCH_SCOPE, inside_with());
+ if (top_scope_->is_strict_mode()) {
+ catch_scope->EnableStrictMode();
+ }
+ catch_variable = catch_scope->DeclareLocal(name, Variable::VAR);
+
+ Scope* saved_scope = top_scope_;
+ top_scope_ = catch_scope;
+ inner_body = ParseBlock(NULL, CHECK_OK);
+ top_scope_ = saved_scope;
+ }
}
+
+ // Create exit block.
+ Block* inner_finally = new(zone()) Block(isolate(), NULL, 1, false);
+ inner_finally->AddStatement(new(zone()) ExitContextStatement());
+
+ // Create a try/finally statement.
+ TryFinallyStatement* inner_try_finally =
+ new(zone()) TryFinallyStatement(inner_body, inner_finally);
+ inner_try_finally->set_escaping_targets(inner_collector.targets());
+
+ catch_block = new(zone()) Block(isolate(), NULL, 1, false);
+ catch_block->AddStatement(inner_try_finally);
} else {
Expect(Token::LBRACE, CHECK_OK);
}
@@ -2118,45 +2199,48 @@
tok = peek();
}
- if (tok == Token::FINALLY || !has_catch) {
+ Block* finally_block = NULL;
+ if (tok == Token::FINALLY || catch_block == NULL) {
Consume(Token::FINALLY);
- // Declare a variable for holding the finally state while
- // executing the finally block.
finally_block = ParseBlock(NULL, CHECK_OK);
}
// Simplify the AST nodes by converting:
- // 'try { } catch { } finally { }'
+ // 'try B0 catch B1 finally B2'
// to:
- // 'try { try { } catch { } } finally { }'
+ // 'try { try B0 catch B1 } finally B2'
if (catch_block != NULL && finally_block != NULL) {
- VariableProxy* catch_var_defn = new(zone()) VariableProxy(catch_var);
+ // If we have both, create an inner try/catch.
+ ASSERT(catch_scope != NULL && catch_variable != NULL);
TryCatchStatement* statement =
- new(zone()) TryCatchStatement(try_block, catch_var_defn, catch_block);
- statement->set_escaping_targets(collector.targets());
- try_block = new(zone()) Block(NULL, 1, false);
+ new(zone()) TryCatchStatement(try_block,
+ catch_scope,
+ catch_variable,
+ catch_block);
+ statement->set_escaping_targets(try_collector.targets());
+ try_block = new(zone()) Block(isolate(), NULL, 1, false);
try_block->AddStatement(statement);
- catch_block = NULL;
+ catch_block = NULL; // Clear to indicate it's been handled.
}
TryStatement* result = NULL;
if (catch_block != NULL) {
ASSERT(finally_block == NULL);
- VariableProxy* catch_var_defn = new(zone()) VariableProxy(catch_var);
+ ASSERT(catch_scope != NULL && catch_variable != NULL);
result =
- new(zone()) TryCatchStatement(try_block, catch_var_defn, catch_block);
- result->set_escaping_targets(collector.targets());
+ new(zone()) TryCatchStatement(try_block,
+ catch_scope,
+ catch_variable,
+ catch_block);
} else {
ASSERT(finally_block != NULL);
result = new(zone()) TryFinallyStatement(try_block, finally_block);
- // Add the jump targets of the try block and the catch block.
- for (int i = 0; i < collector.targets()->length(); i++) {
- catch_collector.AddTarget(collector.targets()->at(i));
- }
- result->set_escaping_targets(catch_collector.targets());
+ // Combine the jump targets of the try block and the possible catch block.
+ try_collector.targets()->AddAll(*catch_collector.targets());
}
+ result->set_escaping_targets(try_collector.targets());
return result;
}
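
Note: the rewrite above guarantees that later pipeline stages only ever see a
try node with a single clause. A minimal standalone sketch of the same
splitting, using toy stand-in strings rather than V8's AST classes:

    #include <cstdio>
    #include <string>

    // Toy renderings of the two AST node shapes; only the nesting matters.
    static std::string TryCatch(const std::string& t, const std::string& c) {
      return "try " + t + " catch " + c;
    }

    static std::string TryFinally(const std::string& t, const std::string& f) {
      return "try " + t + " finally " + f;
    }

    int main() {
      // 'try B0 catch B1 finally B2' is parsed as
      // 'try { try B0 catch B1 } finally B2'.
      std::string inner = TryCatch("B0", "B1");
      std::string outer = TryFinally("{ " + inner + " }", "B2");
      std::printf("%s\n", outer.c_str());  // try { try B0 catch B1 } finally B2
      return 0;
    }
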
@@ -2166,7 +2250,7 @@
// DoStatement ::
// 'do' Statement 'while' '(' Expression ')' ';'
- DoWhileStatement* loop = new(zone()) DoWhileStatement(labels);
+ DoWhileStatement* loop = new(zone()) DoWhileStatement(isolate(), labels);
Target target(&this->target_stack_, loop);
Expect(Token::DO, CHECK_OK);
@@ -2197,7 +2281,7 @@
// WhileStatement ::
// 'while' '(' Expression ')' Statement
- WhileStatement* loop = new(zone()) WhileStatement(labels);
+ WhileStatement* loop = new(zone()) WhileStatement(isolate(), labels);
Target target(&this->target_stack_, loop);
Expect(Token::WHILE, CHECK_OK);
@@ -2221,11 +2305,13 @@
Expect(Token::LPAREN, CHECK_OK);
if (peek() != Token::SEMICOLON) {
if (peek() == Token::VAR || peek() == Token::CONST) {
- Expression* each = NULL;
+ Handle<String> name;
Block* variable_statement =
- ParseVariableDeclarations(false, &each, CHECK_OK);
- if (peek() == Token::IN && each != NULL) {
- ForInStatement* loop = new(zone()) ForInStatement(labels);
+ ParseVariableDeclarations(false, &name, CHECK_OK);
+
+ if (peek() == Token::IN && !name.is_null()) {
+ VariableProxy* each = top_scope_->NewUnresolved(name, inside_with());
+ ForInStatement* loop = new(zone()) ForInStatement(isolate(), labels);
Target target(&this->target_stack_, loop);
Expect(Token::IN, CHECK_OK);
@@ -2234,7 +2320,7 @@
Statement* body = ParseStatement(NULL, CHECK_OK);
loop->Initialize(each, enumerable, body);
- Block* result = new(zone()) Block(NULL, 2, false);
+ Block* result = new(zone()) Block(isolate(), NULL, 2, false);
result->AddStatement(variable_statement);
result->AddStatement(loop);
// Parsed for-in loop w/ variable/const declaration.
@@ -2255,7 +2341,7 @@
isolate()->factory()->invalid_lhs_in_for_in_symbol();
expression = NewThrowReferenceError(type);
}
- ForInStatement* loop = new(zone()) ForInStatement(labels);
+ ForInStatement* loop = new(zone()) ForInStatement(isolate(), labels);
Target target(&this->target_stack_, loop);
Expect(Token::IN, CHECK_OK);
@@ -2274,7 +2360,7 @@
}
// Standard 'for' loop
- ForStatement* loop = new(zone()) ForStatement(labels);
+ ForStatement* loop = new(zone()) ForStatement(isolate(), labels);
Target target(&this->target_stack_, loop);
// Parsed initializer at this point.
@@ -2310,7 +2396,8 @@
Expect(Token::COMMA, CHECK_OK);
int position = scanner().location().beg_pos;
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- result = new(zone()) BinaryOperation(Token::COMMA, result, right, position);
+ result = new(zone()) BinaryOperation(
+ isolate(), Token::COMMA, result, right, position);
}
return result;
}
@@ -2376,13 +2463,13 @@
if ((op == Token::INIT_VAR
|| op == Token::INIT_CONST
|| op == Token::ASSIGN)
- && (right->AsCall() == NULL)) {
+ && (right->AsCall() == NULL && right->AsCallNew() == NULL)) {
fni_->Infer();
}
fni_->Leave();
}
- return new(zone()) Assignment(op, expression, right, pos);
+ return new(zone()) Assignment(isolate(), op, expression, right, pos);
}
@@ -2404,8 +2491,8 @@
Expect(Token::COLON, CHECK_OK);
int right_position = scanner().peek_location().beg_pos;
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
- return new(zone()) Conditional(expression, left, right,
- left_position, right_position);
+ return new(zone()) Conditional(
+ isolate(), expression, left, right, left_position, right_position);
}
@@ -2492,12 +2579,12 @@
x = NewCompareNode(cmp, x, y, position);
if (cmp != op) {
// The comparison was negated - add a NOT.
- x = new(zone()) UnaryOperation(Token::NOT, x, position);
+ x = new(zone()) UnaryOperation(isolate(), Token::NOT, x, position);
}
} else {
// We have a "normal" binary operation.
- x = new(zone()) BinaryOperation(op, x, y, position);
+ x = new(zone()) BinaryOperation(isolate(), op, x, y, position);
}
}
}
@@ -2514,15 +2601,15 @@
bool is_strict = (op == Token::EQ_STRICT);
Literal* x_literal = x->AsLiteral();
if (x_literal != NULL && x_literal->IsNull()) {
- return new(zone()) CompareToNull(is_strict, y);
+ return new(zone()) CompareToNull(isolate(), is_strict, y);
}
Literal* y_literal = y->AsLiteral();
if (y_literal != NULL && y_literal->IsNull()) {
- return new(zone()) CompareToNull(is_strict, x);
+ return new(zone()) CompareToNull(isolate(), is_strict, x);
}
}
- return new(zone()) CompareOperation(op, x, y, position);
+ return new(zone()) CompareOperation(isolate(), op, x, y, position);
}
@@ -2545,18 +2632,26 @@
int position = scanner().location().beg_pos;
Expression* expression = ParseUnaryExpression(CHECK_OK);
- // Compute some expressions involving only number literals.
- if (expression != NULL && expression->AsLiteral() &&
- expression->AsLiteral()->handle()->IsNumber()) {
- double value = expression->AsLiteral()->handle()->Number();
- switch (op) {
- case Token::ADD:
- return expression;
- case Token::SUB:
- return NewNumberLiteral(-value);
- case Token::BIT_NOT:
- return NewNumberLiteral(~DoubleToInt32(value));
- default: break;
+ if (expression != NULL && (expression->AsLiteral() != NULL)) {
+ Handle<Object> literal = expression->AsLiteral()->handle();
+ if (op == Token::NOT) {
+ // Convert the literal to a boolean condition and negate it.
+ bool condition = literal->ToBoolean()->IsTrue();
+ Handle<Object> result(isolate()->heap()->ToBoolean(!condition));
+ return NewLiteral(result);
+ } else if (literal->IsNumber()) {
+ // Compute some expressions involving only number literals.
+ double value = literal->Number();
+ switch (op) {
+ case Token::ADD:
+ return expression;
+ case Token::SUB:
+ return NewNumberLiteral(-value);
+ case Token::BIT_NOT:
+ return NewNumberLiteral(~DoubleToInt32(value));
+ default:
+ break;
+ }
}
}
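
A standalone rendering of the folding rules above; DoubleToInt32 here is a
simplified stand-in for V8's ECMA-262 ToInt32 helper and only handles values
that fit in int64_t:

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-in for V8's DoubleToInt32; the real helper wraps
    // modulo 2^32 for all finite doubles, this one only covers small values.
    static int32_t DoubleToInt32(double value) {
      return static_cast<int32_t>(static_cast<int64_t>(value));
    }

    int main() {
      double value = 3.7;
      std::printf("+3.7 -> %g\n", value);                  // Token::ADD: literal unchanged
      std::printf("-3.7 -> %g\n", -value);                 // Token::SUB: negate the literal
      std::printf("~3.7 -> %d\n", ~DoubleToInt32(value));  // Token::BIT_NOT: ~3 == -4
      std::printf("!3.7 -> %s\n", value != 0 ? "false" : "true");  // Token::NOT: literal is truthy
      return 0;
    }
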
@@ -2570,7 +2665,7 @@
}
}
- return new(zone()) UnaryOperation(op, expression, position);
+ return new(zone()) UnaryOperation(isolate(), op, expression, position);
} else if (Token::IsCountOp(op)) {
op = Next();
@@ -2591,7 +2686,8 @@
}
int position = scanner().location().beg_pos;
- return new(zone()) CountOperation(op,
+ return new(zone()) CountOperation(isolate(),
+ op,
true /* prefix */,
expression,
position);
@@ -2607,7 +2703,7 @@
// LeftHandSideExpression ('++' | '--')?
Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner().has_line_terminator_before_next() &&
+ if (!scanner().HasAnyLineTerminatorBeforeNext() &&
Token::IsCountOp(peek())) {
// Signal a reference error if the expression is an invalid
// left-hand side expression. We could report this as a syntax
@@ -2627,7 +2723,8 @@
Token::Value next = Next();
int position = scanner().location().beg_pos;
expression =
- new(zone()) CountOperation(next,
+ new(zone()) CountOperation(isolate(),
+ next,
false /* postfix */,
expression,
position);
@@ -2653,7 +2750,7 @@
Consume(Token::LBRACK);
int pos = scanner().location().beg_pos;
Expression* index = ParseExpression(true, CHECK_OK);
- result = new(zone()) Property(result, index, pos);
+ result = new(zone()) Property(isolate(), result, index, pos);
Expect(Token::RBRACK, CHECK_OK);
break;
}
@@ -2691,7 +2788,10 @@
Consume(Token::PERIOD);
int pos = scanner().location().beg_pos;
Handle<String> name = ParseIdentifierName(CHECK_OK);
- result = new(zone()) Property(result, new(zone()) Literal(name), pos);
+ result = new(zone()) Property(isolate(),
+ result,
+ NewLiteral(name),
+ pos);
if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
@@ -2727,7 +2827,8 @@
if (!stack->is_empty()) {
int last = stack->pop();
- result = new(zone()) CallNew(result,
+ result = new(zone()) CallNew(isolate(),
+ result,
new(zone()) ZoneList<Expression*>(0),
last);
}
@@ -2758,12 +2859,19 @@
Expect(Token::FUNCTION, CHECK_OK);
int function_token_position = scanner().location().beg_pos;
Handle<String> name;
- bool is_reserved_name = false;
+ bool is_strict_reserved_name = false;
if (peek_any_identifier()) {
- name = ParseIdentifierOrReservedWord(&is_reserved_name, CHECK_OK);
+ name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
+ CHECK_OK);
}
- result = ParseFunctionLiteral(name, is_reserved_name,
- function_token_position, NESTED, CHECK_OK);
+ FunctionLiteral::Type type = name.is_null()
+ ? FunctionLiteral::ANONYMOUS_EXPRESSION
+ : FunctionLiteral::NAMED_EXPRESSION;
+ result = ParseFunctionLiteral(name,
+ is_strict_reserved_name,
+ function_token_position,
+ type,
+ CHECK_OK);
} else {
result = ParsePrimaryExpression(CHECK_OK);
}
@@ -2774,7 +2882,15 @@
Consume(Token::LBRACK);
int pos = scanner().location().beg_pos;
Expression* index = ParseExpression(true, CHECK_OK);
- result = new(zone()) Property(result, index, pos);
+ result = new(zone()) Property(isolate(), result, index, pos);
+ if (fni_ != NULL) {
+ if (index->IsPropertyName()) {
+ fni_->PushLiteralName(index->AsLiteral()->AsPropertyName());
+ } else {
+ fni_->PushLiteralName(
+ isolate()->factory()->anonymous_function_symbol());
+ }
+ }
Expect(Token::RBRACK, CHECK_OK);
break;
}
@@ -2782,7 +2898,10 @@
Consume(Token::PERIOD);
int pos = scanner().location().beg_pos;
Handle<String> name = ParseIdentifierName(CHECK_OK);
- result = new(zone()) Property(result, new(zone()) Literal(name), pos);
+ result = new(zone()) Property(isolate(),
+ result,
+ NewLiteral(name),
+ pos);
if (fni_ != NULL) fni_->PushLiteralName(name);
break;
}
@@ -2791,7 +2910,7 @@
// Consume one of the new prefixes (already parsed).
ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
int last = stack->pop();
- result = new CallNew(result, args, last);
+ result = new(zone()) CallNew(isolate(), result, args, last);
break;
}
default:
@@ -2833,6 +2952,9 @@
return ReportMessage("unexpected_token_identifier",
Vector<const char*>::empty());
case Token::FUTURE_RESERVED_WORD:
+ return ReportMessage("unexpected_reserved",
+ Vector<const char*>::empty());
+ case Token::FUTURE_STRICT_RESERVED_WORD:
return ReportMessage(top_scope_->is_strict_mode() ?
"unexpected_strict_reserved" :
"unexpected_token_identifier",
@@ -2872,28 +2994,30 @@
switch (peek()) {
case Token::THIS: {
Consume(Token::THIS);
- VariableProxy* recv = top_scope_->receiver();
- result = recv;
+ result = new(zone()) VariableProxy(isolate(), top_scope_->receiver());
break;
}
case Token::NULL_LITERAL:
Consume(Token::NULL_LITERAL);
- result = new(zone()) Literal(isolate()->factory()->null_value());
+ result = new(zone()) Literal(
+ isolate(), isolate()->factory()->null_value());
break;
case Token::TRUE_LITERAL:
Consume(Token::TRUE_LITERAL);
- result = new(zone()) Literal(isolate()->factory()->true_value());
+ result = new(zone()) Literal(
+ isolate(), isolate()->factory()->true_value());
break;
case Token::FALSE_LITERAL:
Consume(Token::FALSE_LITERAL);
- result = new(zone()) Literal(isolate()->factory()->false_value());
+ result = new(zone()) Literal(
+ isolate(), isolate()->factory()->false_value());
break;
case Token::IDENTIFIER:
- case Token::FUTURE_RESERVED_WORD: {
+ case Token::FUTURE_STRICT_RESERVED_WORD: {
Handle<String> name = ParseIdentifier(CHECK_OK);
if (fni_ != NULL) fni_->PushVariableName(name);
result = top_scope_->NewUnresolved(name,
@@ -2915,7 +3039,7 @@
case Token::STRING: {
Consume(Token::STRING);
Handle<String> symbol = GetSymbol(CHECK_OK);
- result = new(zone()) Literal(symbol);
+ result = NewLiteral(symbol);
if (fni_ != NULL) fni_->PushLiteralName(symbol);
break;
}
@@ -3042,8 +3166,8 @@
literals->set_map(isolate()->heap()->fixed_cow_array_map());
}
- return new(zone()) ArrayLiteral(literals, values,
- literal_index, is_simple, depth);
+ return new(zone()) ArrayLiteral(
+ isolate(), literals, values, literal_index, is_simple, depth);
}
@@ -3304,6 +3428,7 @@
bool is_keyword = Token::IsKeyword(next);
if (next == Token::IDENTIFIER || next == Token::NUMBER ||
next == Token::FUTURE_RESERVED_WORD ||
+ next == Token::FUTURE_STRICT_RESERVED_WORD ||
next == Token::STRING || is_keyword) {
Handle<String> name;
if (is_keyword) {
@@ -3315,7 +3440,7 @@
ParseFunctionLiteral(name,
false, // reserved words are allowed here
RelocInfo::kNoPosition,
- DECLARATION,
+ FunctionLiteral::ANONYMOUS_EXPRESSION,
CHECK_OK);
// Allow any number of parameters for compatibility with JSC.
// Specification only allows zero parameters for get and one for set.
@@ -3345,7 +3470,6 @@
ObjectLiteralPropertyChecker checker(this, top_scope_->is_strict_mode());
Expect(Token::LBRACE, CHECK_OK);
- Scanner::Location loc = scanner().location();
while (peek() != Token::RBRACE) {
if (fni_ != NULL) fni_->Enter();
@@ -3358,11 +3482,12 @@
switch (next) {
case Token::FUTURE_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
case Token::IDENTIFIER: {
bool is_getter = false;
bool is_setter = false;
Handle<String> id =
- ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
+ ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
if (fni_ != NULL) fni_->PushLiteralName(id);
if ((is_getter || is_setter) && peek() != Token::COLON) {
@@ -3386,7 +3511,7 @@
}
// Failed to parse as get/set property, so it's just a property
// called "get" or "set".
- key = new(zone()) Literal(id);
+ key = NewLiteral(id);
break;
}
case Token::STRING: {
@@ -3398,7 +3523,7 @@
key = NewNumberLiteral(index);
break;
}
- key = new(zone()) Literal(string);
+ key = NewLiteral(string);
break;
}
case Token::NUMBER: {
@@ -3414,7 +3539,7 @@
if (Token::IsKeyword(next)) {
Consume(next);
Handle<String> string = GetSymbol(CHECK_OK);
- key = new(zone()) Literal(string);
+ key = NewLiteral(string);
} else {
// Unexpected token.
Token::Value next = Next();
@@ -3467,13 +3592,14 @@
&is_simple,
&fast_elements,
&depth);
- return new(zone()) ObjectLiteral(constant_properties,
- properties,
- literal_index,
- is_simple,
- fast_elements,
- depth,
- has_function);
+ return new(zone()) ObjectLiteral(isolate(),
+ constant_properties,
+ properties,
+ literal_index,
+ is_simple,
+ fast_elements,
+ depth,
+ has_function);
}
@@ -3492,7 +3618,8 @@
Handle<String> js_flags = NextLiteralString(TENURED);
Next();
- return new(zone()) RegExpLiteral(js_pattern, js_flags, literal_index);
+ return new(zone()) RegExpLiteral(
+ isolate(), js_pattern, js_flags, literal_index);
}
@@ -3520,29 +3647,29 @@
}
-FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
- bool name_is_reserved,
+FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
+ bool name_is_strict_reserved,
int function_token_position,
- FunctionLiteralType type,
+ FunctionLiteral::Type type,
bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
- bool is_named = !var_name.is_null();
- // The name associated with this function. If it's a function expression,
- // this is the actual function name, otherwise this is the name of the
- // variable declared and initialized with the function (expression). In
- // that case, we don't have a function name (it's empty).
- Handle<String> name =
- is_named ? var_name : isolate()->factory()->empty_symbol();
- // The function name, if any.
- Handle<String> function_name = isolate()->factory()->empty_symbol();
- if (is_named && (type == EXPRESSION || type == NESTED)) {
- function_name = name;
+ // Anonymous functions were passed either the empty symbol or a null
+ // handle as the function name. Remember whether we were passed a null
+ // handle, in which case function name inference should be invoked.
+ bool should_infer_name = function_name.is_null();
+
+ // We want a non-null handle as the function name.
+ if (should_infer_name) {
+ function_name = isolate()->factory()->empty_symbol();
}
int num_parameters = 0;
- Scope* scope = NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
+ // Function declarations are hoisted.
+ Scope* scope = (type == FunctionLiteral::DECLARATION)
+ ? NewScope(top_scope_->DeclarationScope(), Scope::FUNCTION_SCOPE, false)
+ : NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(8);
int materialized_literal_count;
int expected_property_count;
@@ -3550,9 +3677,10 @@
int end_pos;
bool only_simple_this_property_assignments;
Handle<FixedArray> this_property_assignments;
+ bool has_duplicate_parameters = false;
// Parse function body.
{ LexicalScope lexical_scope(this, scope, isolate());
- top_scope_->SetScopeName(name);
+ top_scope_->SetScopeName(function_name);
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
@@ -3564,25 +3692,24 @@
bool done = (peek() == Token::RPAREN);
while (!done) {
- bool is_reserved = false;
+ bool is_strict_reserved = false;
Handle<String> param_name =
- ParseIdentifierOrReservedWord(&is_reserved, CHECK_OK);
+ ParseIdentifierOrStrictReservedWord(&is_strict_reserved,
+ CHECK_OK);
// Store locations for possible future error reports.
if (!name_loc.IsValid() && IsEvalOrArguments(param_name)) {
name_loc = scanner().location();
}
if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
+ has_duplicate_parameters = true;
dupe_loc = scanner().location();
}
- if (!reserved_loc.IsValid() && is_reserved) {
+ if (!reserved_loc.IsValid() && is_strict_reserved) {
reserved_loc = scanner().location();
}
- Variable* parameter = top_scope_->DeclareLocal(param_name,
- Variable::VAR,
- Scope::PARAMETER);
- top_scope_->AddParameter(parameter);
+ top_scope_->DeclareParameter(param_name);
num_parameters++;
if (num_parameters > kMaxNumFunctionParameters) {
ReportMessageAt(scanner().location(), "too_many_parameters",
@@ -3603,46 +3730,56 @@
// NOTE: We create a proxy and resolve it here so that in the
// future we can change the AST to only refer to VariableProxies
// instead of Variables and Proxies as is the case now.
- if (!function_name.is_null() && function_name->length() > 0) {
+ if (type == FunctionLiteral::NAMED_EXPRESSION) {
Variable* fvar = top_scope_->DeclareFunctionVar(function_name);
VariableProxy* fproxy =
top_scope_->NewUnresolved(function_name, inside_with());
fproxy->BindTo(fvar);
body->Add(new(zone()) ExpressionStatement(
- new(zone()) Assignment(Token::INIT_CONST, fproxy,
- new(zone()) ThisFunction(),
- RelocInfo::kNoPosition)));
+ new(zone()) Assignment(isolate(),
+ Token::INIT_CONST,
+ fproxy,
+ new(zone()) ThisFunction(isolate()),
+ RelocInfo::kNoPosition)));
}
- // Determine if the function will be lazily compiled. The mode can
- // only be PARSE_LAZILY if the --lazy flag is true.
+ // Determine if the function will be lazily compiled. The mode can only
+ // be PARSE_LAZILY if the --lazy flag is true. We will not lazily
+ // compile if we do not have preparser data for the function.
bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
top_scope_->outer_scope()->is_global_scope() &&
top_scope_->HasTrivialOuterContext() &&
- !parenthesized_function_);
+ !parenthesized_function_ &&
+ pre_data() != NULL);
parenthesized_function_ = false; // The bit was set for this function only.
- int function_block_pos = scanner().location().beg_pos;
- if (is_lazily_compiled && pre_data() != NULL) {
+ if (is_lazily_compiled) {
+ int function_block_pos = scanner().location().beg_pos;
FunctionEntry entry = pre_data()->GetFunctionEntry(function_block_pos);
if (!entry.is_valid()) {
- ReportInvalidPreparseData(name, CHECK_OK);
+ // There is no preparser data for the function; we will not lazily
+ // compile after all.
+ is_lazily_compiled = false;
+ } else {
+ end_pos = entry.end_pos();
+ if (end_pos <= function_block_pos) {
+ // End position greater than end of stream is safe, and hard to check.
+ ReportInvalidPreparseData(function_name, CHECK_OK);
+ }
+ isolate()->counters()->total_preparse_skipped()->Increment(
+ end_pos - function_block_pos);
+ // Seek to position just before terminal '}'.
+ scanner().SeekForward(end_pos - 1);
+ materialized_literal_count = entry.literal_count();
+ expected_property_count = entry.property_count();
+ if (entry.strict_mode()) top_scope_->EnableStrictMode();
+ only_simple_this_property_assignments = false;
+ this_property_assignments = isolate()->factory()->empty_fixed_array();
+ Expect(Token::RBRACE, CHECK_OK);
}
- end_pos = entry.end_pos();
- if (end_pos <= function_block_pos) {
- // End position greater than end of stream is safe, and hard to check.
- ReportInvalidPreparseData(name, CHECK_OK);
- }
- isolate()->counters()->total_preparse_skipped()->Increment(
- end_pos - function_block_pos);
- // Seek to position just before terminal '}'.
- scanner().SeekForward(end_pos - 1);
- materialized_literal_count = entry.literal_count();
- expected_property_count = entry.property_count();
- only_simple_this_property_assignments = false;
- this_property_assignments = isolate()->factory()->empty_fixed_array();
- Expect(Token::RBRACE, CHECK_OK);
- } else {
+ }
+
+ if (!is_lazily_compiled) {
ParseSourceElements(body, Token::RBRACE, CHECK_OK);
materialized_literal_count = lexical_scope.materialized_literal_count();
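
The branch above is the heart of lazy parsing: with a valid preparse entry the
scanner seeks straight to the closing brace instead of parsing the body. A
rough model of that decision (names illustrative, not V8's):

    #include <cstdio>

    struct Entry {
      int end_pos;
      bool is_valid;
    };

    // Skip a function body only when preparser data exists and its recorded
    // end position is sane; the real parser reports corrupt data as an error.
    static bool TrySkipFunctionBody(const Entry& entry,
                                    int function_block_pos,
                                    int* resume_pos) {
      if (!entry.is_valid) return false;                      // no data: parse eagerly
      if (entry.end_pos <= function_block_pos) return false;  // corrupt preparse data
      *resume_pos = entry.end_pos - 1;                        // just before the '}'
      return true;
    }

    int main() {
      Entry entry = {120, true};
      int resume_pos = 0;
      if (TrySkipFunctionBody(entry, 40, &resume_pos)) {
        std::printf("skipping body, resume scanning at %d\n", resume_pos);
      }
      return 0;
    }
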
@@ -3657,7 +3794,7 @@
// Validate strict mode.
if (top_scope_->is_strict_mode()) {
- if (IsEvalOrArguments(name)) {
+ if (IsEvalOrArguments(function_name)) {
int position = function_token_position != RelocInfo::kNoPosition
? function_token_position
: (start_pos > 0 ? start_pos - 1 : start_pos);
@@ -3679,7 +3816,7 @@
*ok = false;
return NULL;
}
- if (name_is_reserved) {
+ if (name_is_strict_reserved) {
int position = function_token_position != RelocInfo::kNoPosition
? function_token_position
: (start_pos > 0 ? start_pos - 1 : start_pos);
@@ -3700,7 +3837,8 @@
}
FunctionLiteral* function_literal =
- new(zone()) FunctionLiteral(name,
+ new(zone()) FunctionLiteral(isolate(),
+ function_name,
scope,
body,
materialized_literal_count,
@@ -3710,10 +3848,11 @@
num_parameters,
start_pos,
end_pos,
- (function_name->length() > 0));
+ type,
+ has_duplicate_parameters);
function_literal->set_function_token_position(function_token_position);
- if (fni_ != NULL && !is_named) fni_->AddFunction(function_literal);
+ if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
return function_literal;
}
@@ -3729,7 +3868,7 @@
if (extension_ != NULL) {
// The extension structures are only accessible while parsing the
// very first time not when reparsing because of lazy compilation.
- top_scope_->ForceEagerCompilation();
+ top_scope_->DeclarationScope()->ForceEagerCompilation();
}
const Runtime::Function* function = Runtime::FunctionForSymbol(name);
@@ -3760,14 +3899,15 @@
}
// We have a valid intrinsics call or a call to a builtin.
- return new(zone()) CallRuntime(name, function, args);
+ return new(zone()) CallRuntime(isolate(), name, function, args);
}
bool Parser::peek_any_identifier() {
Token::Value next = peek();
return next == Token::IDENTIFIER ||
- next == Token::FUTURE_RESERVED_WORD;
+ next == Token::FUTURE_RESERVED_WORD ||
+ next == Token::FUTURE_STRICT_RESERVED_WORD;
}
@@ -3805,7 +3945,7 @@
Next();
return;
}
- if (scanner().has_line_terminator_before_next() ||
+ if (scanner().HasAnyLineTerminatorBeforeNext() ||
tok == Token::RBRACE ||
tok == Token::EOS) {
return;
@@ -3815,12 +3955,12 @@
Literal* Parser::GetLiteralUndefined() {
- return new(zone()) Literal(isolate()->factory()->undefined_value());
+ return NewLiteral(isolate()->factory()->undefined_value());
}
Literal* Parser::GetLiteralTheHole() {
- return new(zone()) Literal(isolate()->factory()->the_hole_value());
+ return NewLiteral(isolate()->factory()->the_hole_value());
}
@@ -3829,22 +3969,27 @@
}
+// Parses an identifier that is valid for the current scope; in particular it
+// fails on strict mode future reserved keywords in a strict scope.
Handle<String> Parser::ParseIdentifier(bool* ok) {
- bool is_reserved;
- return ParseIdentifierOrReservedWord(&is_reserved, ok);
+ if (top_scope_->is_strict_mode()) {
+ Expect(Token::IDENTIFIER, ok);
+ } else if (!Check(Token::IDENTIFIER)) {
+ Expect(Token::FUTURE_STRICT_RESERVED_WORD, ok);
+ }
+ if (!*ok) return Handle<String>();
+ return GetSymbol(ok);
}
-Handle<String> Parser::ParseIdentifierOrReservedWord(bool* is_reserved,
- bool* ok) {
- *is_reserved = false;
- if (top_scope_->is_strict_mode()) {
- Expect(Token::IDENTIFIER, ok);
- } else {
- if (!Check(Token::IDENTIFIER)) {
- Expect(Token::FUTURE_RESERVED_WORD, ok);
- *is_reserved = true;
- }
+// Parses an identifier or a strict mode future reserved word, and indicates
+// whether it is strict mode future reserved.
+Handle<String> Parser::ParseIdentifierOrStrictReservedWord(
+ bool* is_strict_reserved, bool* ok) {
+ *is_strict_reserved = false;
+ if (!Check(Token::IDENTIFIER)) {
+ Expect(Token::FUTURE_STRICT_RESERVED_WORD, ok);
+ *is_strict_reserved = true;
}
if (!*ok) return Handle<String>();
return GetSymbol(ok);
@@ -3854,8 +3999,9 @@
Handle<String> Parser::ParseIdentifierName(bool* ok) {
Token::Value next = Next();
if (next != Token::IDENTIFIER &&
- next != Token::FUTURE_RESERVED_WORD &&
- !Token::IsKeyword(next)) {
+ next != Token::FUTURE_RESERVED_WORD &&
+ next != Token::FUTURE_STRICT_RESERVED_WORD &&
+ !Token::IsKeyword(next)) {
ReportUnexpectedToken(next);
*ok = false;
return Handle<String>();
@@ -3896,12 +4042,12 @@
}
-// This function reads an identifier and determines whether or not it
+// This function reads an identifier name and determines whether or not it
// is 'get' or 'set'.
-Handle<String> Parser::ParseIdentifierOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- Handle<String> result = ParseIdentifier(ok);
+Handle<String> Parser::ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok) {
+ Handle<String> result = ParseIdentifierName(ok);
if (!*ok) return Handle<String>();
if (scanner().is_literal_ascii() && scanner().literal_length() == 3) {
const char* token = scanner().literal_ascii_string().start();
@@ -3970,7 +4116,7 @@
Literal* Parser::NewNumberLiteral(double number) {
- return new(zone()) Literal(isolate()->factory()->NewNumber(number, TENURED));
+ return NewLiteral(isolate()->factory()->NewNumber(number, TENURED));
}
@@ -4017,10 +4163,15 @@
TENURED);
ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2);
- args->Add(new(zone()) Literal(type));
- args->Add(new(zone()) Literal(array));
- return new(zone()) Throw(new(zone()) CallRuntime(constructor, NULL, args),
- scanner().location().beg_pos);
+ args->Add(NewLiteral(type));
+ args->Add(NewLiteral(array));
+ CallRuntime* call_constructor = new(zone()) CallRuntime(isolate(),
+ constructor,
+ NULL,
+ args);
+ return new(zone()) Throw(isolate(),
+ call_constructor,
+ scanner().location().beg_pos);
}
// ----------------------------------------------------------------------------
@@ -4901,7 +5052,7 @@
bool allow_lazy,
ParserRecorder* recorder) {
Isolate* isolate = Isolate::Current();
- V8JavaScriptScanner scanner(isolate->unicode_cache());
+ JavaScriptScanner scanner(isolate->unicode_cache());
scanner.Initialize(source);
intptr_t stack_limit = isolate->stack_guard()->real_climit();
if (!preparser::PreParser::PreParseProgram(&scanner,
@@ -4976,6 +5127,7 @@
Parser parser(script, allow_natives_syntax, NULL, NULL);
result = parser.ParseLazy(info);
} else {
+ // Whether we allow %identifier(..) syntax.
bool allow_natives_syntax =
info->is_native() || FLAG_allow_natives_syntax;
ScriptDataImpl* pre_data = info->pre_parse_data();
diff --git a/src/parser.h b/src/parser.h
index a7132ce..535b639 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -72,22 +72,14 @@
FunctionEntry() : backing_(Vector<unsigned>::empty()) { }
int start_pos() { return backing_[kStartPosOffset]; }
- void set_start_pos(int value) { backing_[kStartPosOffset] = value; }
-
int end_pos() { return backing_[kEndPosOffset]; }
- void set_end_pos(int value) { backing_[kEndPosOffset] = value; }
-
int literal_count() { return backing_[kLiteralCountOffset]; }
- void set_literal_count(int value) { backing_[kLiteralCountOffset] = value; }
-
int property_count() { return backing_[kPropertyCountOffset]; }
- void set_property_count(int value) {
- backing_[kPropertyCountOffset] = value;
- }
+ bool strict_mode() { return backing_[kStrictModeOffset] != 0; }
bool is_valid() { return backing_.length() > 0; }
- static const int kSize = 4;
+ static const int kSize = 5;
private:
Vector<unsigned> backing_;
@@ -95,6 +87,7 @@
static const int kEndPosOffset = 1;
static const int kLiteralCountOffset = 2;
static const int kPropertyCountOffset = 3;
+ static const int kStrictModeOffset = 4;
};
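
With the strict-mode flag, one preparser entry is now five consecutive
unsigned slots; an illustrative layout (values hypothetical):

    #include <cstdio>

    int main() {
      // kSize == 5 unsigned values per function entry:
      unsigned backing[5] = {
        10,   // kStartPosOffset      = 0
        250,  // kEndPosOffset        = 1
        3,    // kLiteralCountOffset  = 2
        2,    // kPropertyCountOffset = 3
        1,    // kStrictModeOffset    = 4: non-zero means strict mode
      };
      std::printf("strict_mode() -> %s\n", backing[4] != 0 ? "true" : "false");
      return 0;
    }
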
@@ -443,7 +436,7 @@
const char* message,
Vector<Handle<String> > args);
- protected:
+ private:
// Limit on number of function parameters is chosen arbitrarily.
// Code::Flags uses only the low 17 bits of num-parameters to
// construct a hashable id, so if more than 2^17 are allowed, this
@@ -473,7 +466,7 @@
void ReportMessage(const char* message, Vector<const char*> args);
bool inside_with() const { return with_nesting_level_ > 0; }
- V8JavaScriptScanner& scanner() { return scanner_; }
+ JavaScriptScanner& scanner() { return scanner_; }
Mode mode() const { return mode_; }
ScriptDataImpl* pre_data() const { return pre_data_; }
@@ -491,17 +484,16 @@
Statement* ParseNativeDeclaration(bool* ok);
Block* ParseBlock(ZoneStringList* labels, bool* ok);
Block* ParseVariableStatement(bool* ok);
- Block* ParseVariableDeclarations(bool accept_IN, Expression** var, bool* ok);
+ Block* ParseVariableDeclarations(bool accept_IN,
+ Handle<String>* out,
+ bool* ok);
Statement* ParseExpressionOrLabelledStatement(ZoneStringList* labels,
bool* ok);
IfStatement* ParseIfStatement(ZoneStringList* labels, bool* ok);
Statement* ParseContinueStatement(bool* ok);
Statement* ParseBreakStatement(ZoneStringList* labels, bool* ok);
Statement* ParseReturnStatement(bool* ok);
- Block* WithHelper(Expression* obj,
- ZoneStringList* labels,
- bool is_catch_block,
- bool* ok);
+ Block* WithHelper(Expression* obj, ZoneStringList* labels, bool* ok);
Statement* ParseWithStatement(ZoneStringList* labels, bool* ok);
CaseClause* ParseCaseClause(bool* default_seen_ptr, bool* ok);
SwitchStatement* ParseSwitchStatement(ZoneStringList* labels, bool* ok);
@@ -560,17 +552,11 @@
// in the object literal boilerplate.
Handle<Object> GetBoilerplateValue(Expression* expression);
- enum FunctionLiteralType {
- EXPRESSION,
- DECLARATION,
- NESTED
- };
-
ZoneList<Expression*>* ParseArguments(bool* ok);
FunctionLiteral* ParseFunctionLiteral(Handle<String> var_name,
bool name_is_reserved,
int function_token_position,
- FunctionLiteralType type,
+ FunctionLiteral::Type type,
bool* ok);
@@ -633,11 +619,12 @@
Literal* GetLiteralNumber(double value);
Handle<String> ParseIdentifier(bool* ok);
- Handle<String> ParseIdentifierOrReservedWord(bool* is_reserved, bool* ok);
+ Handle<String> ParseIdentifierOrStrictReservedWord(
+ bool* is_strict_reserved, bool* ok);
Handle<String> ParseIdentifierName(bool* ok);
- Handle<String> ParseIdentifierOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok);
+ Handle<String> ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok);
// Strict mode validation of LValue expressions
void CheckStrictModeLValue(Expression* expression,
@@ -675,9 +662,12 @@
Expression* NewCall(Expression* expression,
ZoneList<Expression*>* arguments,
int pos) {
- return new Call(expression, arguments, pos);
+ return new(zone()) Call(isolate(), expression, arguments, pos);
}
+ inline Literal* NewLiteral(Handle<Object> handle) {
+ return new(zone()) Literal(isolate(), handle);
+ }
// Create a number literal.
Literal* NewNumberLiteral(double value);
@@ -705,7 +695,7 @@
ZoneList<Handle<String> > symbol_cache_;
Handle<Script> script_;
- V8JavaScriptScanner scanner_;
+ JavaScriptScanner scanner_;
Scope* top_scope_;
int with_nesting_level_;
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 6511328..5f283c3 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -166,23 +166,6 @@
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- mprotect(address, size, PROT_READ);
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- mprotect(address, size, prot);
-}
-
-#endif
-
-
void OS::Sleep(int milliseconds) {
unsigned int ms = static_cast<unsigned int>(milliseconds);
usleep(1000 * ms);
@@ -249,7 +232,6 @@
void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
@@ -306,7 +288,6 @@
}
free(lib_name);
fclose(fp);
-#endif
}
@@ -371,17 +352,15 @@
-Thread::Thread(Isolate* isolate, const Options& options)
+Thread::Thread(const Options& options)
: data_(new PlatformData),
- isolate_(isolate),
stack_size_(options.stack_size) {
set_name(options.name);
}
-Thread::Thread(Isolate* isolate, const char* name)
+Thread::Thread(const char* name)
: data_(new PlatformData),
- isolate_(isolate),
stack_size_(0) {
set_name(name);
}
@@ -399,7 +378,6 @@
// one) so we initialize it here too.
thread->data()->thread_ = pthread_self();
ASSERT(thread->data()->thread_ != kNoThread);
- Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return NULL;
}
@@ -594,8 +572,6 @@
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
// ----------------------------------------------------------------------------
// Cygwin profiler support.
//
@@ -631,7 +607,7 @@
class SamplerThread : public Thread {
public:
explicit SamplerThread(int interval)
- : Thread(NULL, "SamplerThread"),
+ : Thread("SamplerThread"),
interval_(interval) {}
static void AddActiveSampler(Sampler* sampler) {
@@ -649,8 +625,7 @@
ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
- instance_->Join();
+ RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
delete instance_;
instance_ = NULL;
}
@@ -773,7 +748,5 @@
SetActive(false);
}
-#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
-
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 99264d2..9d9f1b7 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -181,20 +181,6 @@
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- UNIMPLEMENTED();
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- UNIMPLEMENTED();
-}
-
-#endif
-
-
void OS::Sleep(int milliseconds) {
unsigned int ms = static_cast<unsigned int>(milliseconds);
usleep(1000 * ms);
@@ -266,15 +252,12 @@
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
static unsigned StringToLong(char* buffer) {
return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
}
-#endif
void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
static const int MAP_LENGTH = 1024;
int fd = open("/proc/self/maps", O_RDONLY);
if (fd < 0) return;
@@ -311,7 +294,6 @@
LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
}
close(fd);
-#endif
}
@@ -398,17 +380,15 @@
};
-Thread::Thread(Isolate* isolate, const Options& options)
+Thread::Thread(const Options& options)
: data_(new PlatformData),
- isolate_(isolate),
stack_size_(options.stack_size) {
set_name(options.name);
}
-Thread::Thread(Isolate* isolate, const char* name)
+Thread::Thread(const char* name)
: data_(new PlatformData),
- isolate_(isolate),
stack_size_(0) {
set_name(name);
}
@@ -426,7 +406,6 @@
// one) so we initialize it here too.
thread->data()->thread_ = pthread_self();
ASSERT(thread->data()->thread_ != kNoThread);
- Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return NULL;
}
@@ -591,8 +570,6 @@
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
static pthread_t GetThreadID() {
pthread_t thread_id = pthread_self();
return thread_id;
@@ -660,7 +637,7 @@
};
explicit SignalSender(int interval)
- : Thread(NULL, "SignalSender"),
+ : Thread("SignalSender"),
interval_(interval) {}
static void AddActiveSampler(Sampler* sampler) {
@@ -687,8 +664,7 @@
ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
- instance_->Join();
+ RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
delete instance_;
instance_ = NULL;
@@ -821,6 +797,5 @@
SetActive(false);
}
-#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index d4d772c..37330be 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -78,13 +78,33 @@
static Mutex* limit_mutex = NULL;
+static void* GetRandomMmapAddr() {
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ // Note that the current isolate isn't set up in a call path via
+ // CpuFeatures::Probe. We don't care about randomization in this case because
+ // the code page is immediately freed.
+ if (isolate != NULL) {
+#ifdef V8_TARGET_ARCH_X64
+ uint64_t rnd1 = V8::RandomPrivate(isolate);
+ uint64_t rnd2 = V8::RandomPrivate(isolate);
+ uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+ uint32_t raw_addr = V8::RandomPrivate(isolate);
+ // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+ // variety of ASLR modes (PAE kernel, NX compat mode, etc).
+ raw_addr &= 0x3ffff000;
+ raw_addr += 0x20000000;
+#endif
+ return reinterpret_cast<void*>(raw_addr);
+ }
+ return NULL;
+}
+
+
void OS::Setup() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+ // Seed the random number generator. We preserve microsecond resolution.
+ uint64_t seed = Ticks() ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
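
The same hint derivation can be tried outside V8; in this sketch libc's
random() stands in for V8::RandomPrivate and time(NULL) for Ticks(), covering
the 32-bit case:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <ctime>
    #include <unistd.h>

    int main() {
      // Seed as above: entropy from the clock XOR'ed with the shifted pid.
      srandom(static_cast<unsigned int>(time(NULL) ^ (getpid() << 16)));
      uint32_t raw_addr = static_cast<uint32_t>(random());
      raw_addr &= 0x3ffff000;  // page-aligned offset within a ~1 GB span
      raw_addr += 0x20000000;  // land in the sparse 0x20000000-0x60000000 range
      std::printf("mmap hint: %p\n",
                  reinterpret_cast<void*>(static_cast<uintptr_t>(raw_addr)));
      return 0;
    }
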
@@ -367,10 +387,10 @@
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
- // TODO(805): Port randomization of allocated executable memory to Linux.
const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ void* addr = GetRandomMmapAddr();
+ void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) {
LOG(i::Isolate::Current(),
StringEvent("OS::Allocate", "mmap failed"));
@@ -390,23 +410,6 @@
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- mprotect(address, size, PROT_READ);
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- mprotect(address, size, prot);
-}
-
-#endif
-
-
void OS::Sleep(int milliseconds) {
unsigned int ms = static_cast<unsigned int>(milliseconds);
usleep(1000 * ms);
@@ -483,7 +486,6 @@
void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
@@ -540,7 +542,6 @@
}
free(lib_name);
fclose(fp);
-#endif
}
@@ -548,7 +549,6 @@
void OS::SignalCodeMovingGC() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
// Support for ll_prof.py.
//
// The Linux profiler built into the kernel logs all mmap's with
@@ -564,7 +564,6 @@
ASSERT(addr != MAP_FAILED);
munmap(addr, size);
fclose(f);
-#endif
}
@@ -607,7 +606,7 @@
VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(NULL, size, PROT_NONE,
+ address_ = mmap(GetRandomMmapAddr(), size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
size_ = size;
@@ -653,17 +652,15 @@
pthread_t thread_; // Thread handle for pthread.
};
-Thread::Thread(Isolate* isolate, const Options& options)
+Thread::Thread(const Options& options)
: data_(new PlatformData()),
- isolate_(isolate),
stack_size_(options.stack_size) {
set_name(options.name);
}
-Thread::Thread(Isolate* isolate, const char* name)
+Thread::Thread(const char* name)
: data_(new PlatformData()),
- isolate_(isolate),
stack_size_(0) {
set_name(name);
}
@@ -684,7 +681,6 @@
0, 0, 0);
thread->data()->thread_ = pthread_self();
ASSERT(thread->data()->thread_ != kNoThread);
- Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return NULL;
}
@@ -750,7 +746,6 @@
class LinuxMutex : public Mutex {
public:
-
LinuxMutex() {
pthread_mutexattr_t attrs;
int result = pthread_mutexattr_init(&attrs);
@@ -759,6 +754,7 @@
ASSERT(result == 0);
result = pthread_mutex_init(&mutex_, &attrs);
ASSERT(result == 0);
+ USE(result);
}
virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }
@@ -863,8 +859,6 @@
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
// Android runs a fairly new Linux kernel, so signal info is there,
// but the C library doesn't have the structs defined.
@@ -975,7 +969,7 @@
};
explicit SignalSender(int interval)
- : Thread(NULL, "SignalSender"),
+ : Thread("SignalSender"),
vm_tgid_(getpid()),
interval_(interval) {}
@@ -1012,8 +1006,7 @@
ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
- instance_->Join();
+ RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
delete instance_;
instance_ = NULL;
RestoreSignalHandler();
@@ -1028,10 +1021,11 @@
bool cpu_profiling_enabled =
(state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- if (cpu_profiling_enabled && !signal_handler_installed_)
+ if (cpu_profiling_enabled && !signal_handler_installed_) {
InstallSignalHandler();
- else if (!cpu_profiling_enabled && signal_handler_installed_)
+ } else if (!cpu_profiling_enabled && signal_handler_installed_) {
RestoreSignalHandler();
+ }
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
if (!cpu_profiling_enabled) {
@@ -1152,6 +1146,5 @@
SetActive(false);
}
-#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 3e10b6a..be6e157 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -169,20 +169,6 @@
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- UNIMPLEMENTED();
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- UNIMPLEMENTED();
-}
-
-#endif
-
-
void OS::Sleep(int milliseconds) {
usleep(1000 * milliseconds);
}
@@ -248,7 +234,6 @@
void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
unsigned int images_count = _dyld_image_count();
for (unsigned int i = 0; i < images_count; ++i) {
const mach_header* header = _dyld_get_image_header(i);
@@ -270,7 +255,6 @@
LOG(Isolate::Current(),
SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
}
-#endif // ENABLE_LOGGING_AND_PROFILING
}
@@ -398,17 +382,15 @@
pthread_t thread_; // Thread handle for pthread.
};
-Thread::Thread(Isolate* isolate, const Options& options)
+Thread::Thread(const Options& options)
: data_(new PlatformData),
- isolate_(isolate),
stack_size_(options.stack_size) {
set_name(options.name);
}
-Thread::Thread(Isolate* isolate, const char* name)
+Thread::Thread(const char* name)
: data_(new PlatformData),
- isolate_(isolate),
stack_size_(0) {
set_name(name);
}
@@ -444,7 +426,6 @@
thread->data()->thread_ = pthread_self();
SetThreadName(thread->name());
ASSERT(thread->data()->thread_ != kNoThread);
- Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return NULL;
}
@@ -647,8 +628,6 @@
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
class Sampler::PlatformData : public Malloced {
public:
PlatformData() : profiled_thread_(mach_thread_self()) {}
@@ -670,7 +649,7 @@
class SamplerThread : public Thread {
public:
explicit SamplerThread(int interval)
- : Thread(NULL, "SamplerThread"),
+ : Thread("SamplerThread"),
interval_(interval) {}
static void AddActiveSampler(Sampler* sampler) {
@@ -688,8 +667,7 @@
ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
- instance_->Join();
+ RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
delete instance_;
instance_ = NULL;
}
@@ -825,6 +803,5 @@
SetActive(false);
}
-#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index 295482d..8c2a863 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -217,20 +217,11 @@
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
+void OS::Guard(void* address, const size_t size) {
UNIMPLEMENTED();
}
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- UNIMPLEMENTED();
-}
-
-#endif
-
-
void OS::Sleep(int milliseconds) {
UNIMPLEMENTED();
}
@@ -314,18 +305,16 @@
};
-Thread::Thread(Isolate* isolate, const Options& options)
+Thread::Thread(const Options& options)
: data_(new PlatformData()),
- isolate_(isolate),
stack_size_(options.stack_size) {
set_name(options.name);
UNIMPLEMENTED();
}
-Thread::Thread(Isolate* isolate, const char* name)
+Thread::Thread(const char* name)
: data_(new PlatformData()),
- isolate_(isolate),
stack_size_(0) {
set_name(name);
UNIMPLEMENTED();
@@ -439,7 +428,6 @@
return new NullSemaphore(count);
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
class ProfileSampler::PlatformData : public Malloced {
public:
@@ -474,6 +462,5 @@
UNIMPLEMENTED();
}
-#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index e90b3e8..973329b 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2006-2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -50,6 +50,7 @@
#undef MAP_TYPE
#include "v8.h"
+#include "v8threads.h"
#include "platform.h"
#include "vm-state-inl.h"
@@ -73,6 +74,9 @@
}
+static Mutex* limit_mutex = NULL;
+
+
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -81,6 +85,7 @@
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
}
@@ -129,6 +134,9 @@
static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ ASSERT(limit_mutex != NULL);
+ ScopedLock lock(limit_mutex);
+
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
@@ -164,26 +172,13 @@
void OS::Free(void* buf, const size_t length) {
+ // TODO(1240712): munmap has a return value which is ignored here.
int result = munmap(buf, length);
USE(result);
ASSERT(result == 0);
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- UNIMPLEMENTED();
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- UNIMPLEMENTED();
-}
-
-#endif
-
-
void OS::Sleep(int milliseconds) {
unsigned int ms = static_cast<unsigned int>(milliseconds);
usleep(1000 * ms);
@@ -255,15 +250,12 @@
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
static unsigned StringToLong(char* buffer) {
return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
}
-#endif
void OS::LogSharedLibraryAddresses() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
static const int MAP_LENGTH = 1024;
int fd = open("/proc/self/maps", O_RDONLY);
if (fd < 0) return;
@@ -297,10 +289,9 @@
// There may be no filename in this line. Skip to next.
if (start_of_path == NULL) continue;
buffer[bytes_read] = 0;
- LOG(SharedLibraryEvent(start_of_path, start, end));
+ LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
}
close(fd);
-#endif
}
@@ -309,8 +300,30 @@
int OS::StackWalk(Vector<OS::StackFrame> frames) {
- UNIMPLEMENTED();
- return 1;
+ int frames_size = frames.length();
+ ScopedVector<void*> addresses(frames_size);
+
+ int frames_count = backtrace(addresses.start(), frames_size);
+
+ char** symbols = backtrace_symbols(addresses.start(), frames_count);
+ if (symbols == NULL) {
+ return kStackWalkError;
+ }
+
+ for (int i = 0; i < frames_count; i++) {
+ frames[i].address = addresses[i];
+ // Format a text representation of the frame based on the information
+ // available.
+ SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
+ "%s",
+ symbols[i]);
+ // Make sure line termination is in place.
+ frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
+ }
+
+ free(symbols);
+
+ return frames_count;
}
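
For reference, a minimal standalone use of the execinfo.h facilities the new
StackWalk relies on (shipped in libc on glibc systems, via libexecinfo on the
BSDs):

    #include <execinfo.h>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      void* addresses[16];
      int frames_count = backtrace(addresses, 16);
      char** symbols = backtrace_symbols(addresses, frames_count);
      if (symbols == NULL) return 1;  // allocation failure
      for (int i = 0; i < frames_count; i++) {
        std::printf("%2d: %s\n", i, symbols[i]);
      }
      free(symbols);  // one free() releases the whole array
      return 0;
    }
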
@@ -354,30 +367,26 @@
bool VirtualMemory::Uncommit(void* address, size_t size) {
return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
class Thread::PlatformData : public Malloced {
public:
- PlatformData() : thread_(kNoThread) {}
-
pthread_t thread_; // Thread handle for pthread.
};
-Thread::Thread(Isolate* isolate, const Options& options)
- : data_(new PlatformData()),
- isolate_(isolate),
+Thread::Thread(const Options& options)
+ : data_(new PlatformData),
stack_size_(options.stack_size) {
set_name(options.name);
}
-Thread::Thread(Isolate* isolate, const char* name)
- : data_(new PlatfromData()),
- isolate_(isolate),
+Thread::Thread(const char* name)
+ : data_(new PlatformData),
stack_size_(0) {
set_name(name);
}
@@ -395,7 +404,6 @@
// one) so we initialize it here too.
thread->data()->thread_ = pthread_self();
ASSERT(thread->data()->thread_ != kNoThread);
- Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return NULL;
}
@@ -416,7 +424,7 @@
attr_ptr = &attr;
}
pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
- ASSERT(IsValid());
+ ASSERT(data_->thread_ != kNoThread);
}
@@ -461,7 +469,6 @@
class OpenBSDMutex : public Mutex {
public:
-
OpenBSDMutex() {
pthread_mutexattr_t attrs;
int result = pthread_mutexattr_init(&attrs);
@@ -484,6 +491,16 @@
return result;
}
+ virtual bool TryLock() {
+ int result = pthread_mutex_trylock(&mutex_);
+ // Return false if the lock is busy and locking failed.
+ if (result == EBUSY) {
+ return false;
+ }
+ ASSERT(result == 0); // Verify no other errors.
+ return true;
+ }
+
private:
pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
};
@@ -536,11 +553,16 @@
struct timespec ts;
TIMEVAL_TO_TIMESPEC(&end_time, &ts);
+
+ int to = ts.tv_sec;
+
while (true) {
int result = sem_trywait(&sem_);
if (result == 0) return true; // Successfully got semaphore.
- if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
+ if (!to) return false; // Timeout.
CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
+ usleep(ts.tv_nsec / 1000);
+ to--;
}
}
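
The loop above polls because sem_timedwait is unavailable here. The same
pattern in isolation, with the retry budget and poll interval made explicit
(names illustrative):

    #include <cerrno>
    #include <cstdio>
    #include <semaphore.h>
    #include <unistd.h>

    // Polling fallback for a missing sem_timedwait: retry up to 'attempts'
    // times, sleeping 'interval' microseconds between tries.
    static bool TimedWait(sem_t* sem, int attempts, useconds_t interval) {
      while (true) {
        if (sem_trywait(sem) == 0) return true;               // acquired
        if (errno != EAGAIN && errno != EINTR) return false;  // real error
        if (attempts == 0) return false;                      // timed out
        usleep(interval);
        attempts--;
      }
    }

    int main() {
      sem_t sem;
      sem_init(&sem, 0, 1);  // one unit available, so the first try succeeds
      std::printf("%s\n", TimedWait(&sem, 10, 1000) ? "acquired" : "timed out");
      sem_destroy(&sem);
      return 0;
    }
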
@@ -550,35 +572,202 @@
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-static Sampler* active_sampler_ = NULL;
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- if (active_sampler_ == NULL) return;
-
- TickSample sample;
-
- // We always sample the VM state.
- sample.state = VMState::current_state();
-
- active_sampler_->Tick(&sample);
+static pthread_t GetThreadID() {
+ pthread_t thread_id = pthread_self();
+ return thread_id;
}
class Sampler::PlatformData : public Malloced {
public:
- PlatformData() {
- signal_handler_installed_ = false;
+ PlatformData() : vm_tid_(GetThreadID()) {}
+
+ pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+ pthread_t vm_tid_;
+};
+
+
+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+ USE(info);
+ if (signal != SIGPROF) return;
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
+ // We require a fully initialized and entered isolate.
+ return;
+ }
+ if (v8::Locker::IsActive() &&
+ !isolate->thread_manager()->IsLockedByCurrentThread()) {
+ return;
}
- bool signal_handler_installed_;
- struct sigaction old_signal_handler_;
- struct itimerval old_timer_value_;
+ Sampler* sampler = isolate->logger()->sampler();
+ if (sampler == NULL || !sampler->IsActive()) return;
+
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ if (sample == NULL) sample = &sample_obj;
+
+ // Extracting the sample from the context is extremely machine dependent.
+ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+ sample->state = isolate->current_vm_state();
+#if V8_HOST_ARCH_IA32
+ sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
+ sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
+ sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp);
+#elif V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
+ sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
+ sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
+#elif V8_HOST_ARCH_ARM
+ sample->pc = reinterpret_cast<Address>(ucontext->sc_r15);
+ sample->sp = reinterpret_cast<Address>(ucontext->sc_r13);
+ sample->fp = reinterpret_cast<Address>(ucontext->sc_r11);
+#endif
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
+}
+
+
+class SignalSender : public Thread {
+ public:
+ enum SleepInterval {
+ HALF_INTERVAL,
+ FULL_INTERVAL
+ };
+
+ explicit SignalSender(int interval)
+ : Thread("SignalSender"),
+ interval_(interval) {}
+
+ static void AddActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::AddActiveSampler(sampler);
+ if (instance_ == NULL) {
+ // Install a signal handler.
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ signal_handler_installed_ =
+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+
+ // Start a thread that sends the SIGPROF signal to VM threads.
+ instance_ = new SignalSender(sampler->interval());
+ instance_->Start();
+ } else {
+ ASSERT(instance_->interval_ == sampler->interval());
+ }
+ }
+
+ static void RemoveActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::RemoveActiveSampler(sampler);
+ if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+ RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
+ delete instance_;
+ instance_ = NULL;
+
+ // Restore the old signal handler.
+ if (signal_handler_installed_) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ signal_handler_installed_ = false;
+ }
+ }
+ }
+
+ // Implement Thread::Run().
+ virtual void Run() {
+ SamplerRegistry::State state;
+ while ((state = SamplerRegistry::GetState()) !=
+ SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ // When CPU profiling is enabled, both JavaScript and C++ code is
+ // profiled. We must not suspend.
+ if (!cpu_profiling_enabled) {
+ if (rate_limiter_.SuspendIfNecessary()) continue;
+ }
+ if (cpu_profiling_enabled && runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
+ } else {
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+ this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+ NULL)) {
+ return;
+ }
+ }
+ Sleep(FULL_INTERVAL);
+ }
+ }
+ }
+
+ static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
+ if (!sampler->IsProfiling()) return;
+ SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
+ sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
+ }
+
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
+ void SendProfilingSignal(pthread_t tid) {
+ if (!signal_handler_installed_) return;
+ pthread_kill(tid, SIGPROF);
+ }
+
+ void Sleep(SleepInterval full_or_half) {
+ // Convert ms to us and subtract 100 us to compensate for delays
+ // occurring during signal delivery.
+ useconds_t interval = interval_ * 1000 - 100;
+ if (full_or_half == HALF_INTERVAL) interval /= 2;
+ int result = usleep(interval);
+#ifdef DEBUG
+ if (result != 0 && errno != EINTR) {
+ fprintf(stderr,
+ "SignalSender usleep error; interval = %u, errno = %d\n",
+ interval,
+ errno);
+ ASSERT(result == 0 || errno == EINTR);
+ }
+#endif
+ USE(result);
+ }
+
+ const int interval_;
+ RuntimeProfilerRateLimiter rate_limiter_;
+
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static SignalSender* instance_;
+ static bool signal_handler_installed_;
+ static struct sigaction old_signal_handler_;
+
+ DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
+Mutex* SignalSender::mutex_ = OS::CreateMutex();
+SignalSender* SignalSender::instance_ = NULL;
+struct sigaction SignalSender::old_signal_handler_;
+bool SignalSender::signal_handler_installed_ = false;
+
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
@@ -586,55 +775,28 @@
profiling_(false),
active_(false),
samples_taken_(0) {
- data_ = new PlatformData();
+ data_ = new PlatformData;
}
Sampler::~Sampler() {
+ ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
- // There can only be one active sampler at the time on POSIX
- // platforms.
- if (active_sampler_ != NULL) return;
-
- // Request profiling signals.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
- data_->signal_handler_installed_ = true;
-
- // Set the itimer to generate a tick for each interval.
- itimerval itimer;
- itimer.it_interval.tv_sec = interval_ / 1000;
- itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
- itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
- itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
- setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
-
- // Set this sampler as the active sampler.
- active_sampler_ = this;
- active_ = true;
+ ASSERT(!IsActive());
+ SetActive(true);
+ SignalSender::AddActiveSampler(this);
}
void Sampler::Stop() {
- // Restore old signal handler
- if (data_->signal_handler_installed_) {
- setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
- sigaction(SIGPROF, &data_->old_signal_handler_, 0);
- data_->signal_handler_installed_ = false;
- }
-
- // This sampler is no longer the active sampler.
- active_sampler_ = NULL;
- active_ = false;
+ ASSERT(IsActive());
+ SignalSender::RemoveActiveSampler(this);
+ SetActive(false);
}
-#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
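
This rewrite replaces the one-global-sampler-plus-itimer scheme with a single process-wide SignalSender thread that pthread_kill()s each registered VM thread with SIGPROF; the handler then reads PC/SP/FP out of the signal's ucontext for the current isolate. Reduced to its skeleton, the mechanism looks like this sketch (hypothetical names; registry and shutdown logic omitted):

    #include <pthread.h>
    #include <signal.h>
    #include <unistd.h>

    static void SigprofHandler(int sig, siginfo_t*, void* context) {
      if (sig != SIGPROF) return;
      // Runs on the *sampled* thread: pull the register state out of
      // the ucontext_t behind `context` and record a tick.
      // Everything here must stay async-signal-safe.
    }

    static void* SenderThread(void* arg) {
      pthread_t sampled = *static_cast<pthread_t*>(arg);
      for (;;) {  // A real sender also checks a stop condition.
        pthread_kill(sampled, SIGPROF);  // Request one sample.
        usleep(1000);                    // Sampling interval, ~1 ms.
      }
      return NULL;
    }

    void StartSampling(pthread_t* vm_thread) {
      struct sigaction sa;
      sa.sa_sigaction = SigprofHandler;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = SA_RESTART | SA_SIGINFO;
      sigaction(SIGPROF, &sa, NULL);
      pthread_t sender;
      pthread_create(&sender, NULL, SenderThread, vm_thread);
    }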
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index c4b0fb8..1ea53c8 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -33,15 +33,19 @@
#include <errno.h>
#include <time.h>
+#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/types.h>
+#include <sys/stat.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netdb.h>
+#undef MAP_TYPE
+
#if defined(ANDROID)
#define LOG_TAG "v8"
#include <utils/Log.h> // LOG_PRI_VA
@@ -54,6 +58,30 @@
namespace v8 {
namespace internal {
+
+// Maximum size of virtual memory. 0 means there is no artificial
+// limit.
+
+intptr_t OS::MaxVirtualMemory() {
+ struct rlimit limit;
+ int result = getrlimit(RLIMIT_DATA, &limit);
+ if (result != 0) return 0;
+ return limit.rlim_cur;
+}
+
+
+// Get rid of writable permission on code allocations.
+void OS::ProtectCode(void* address, const size_t size) {
+ mprotect(address, size, PROT_READ | PROT_EXEC);
+}
+
+
+// Create guard pages.
+void OS::Guard(void* address, const size_t size) {
+ mprotect(address, size, PROT_NONE);
+}
+
+
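
ProtectCode and Guard replace the old ENABLE_HEAP_PROTECTION Protect/Unprotect pair: the first enforces W^X on code pages, the second turns a page into a trap that faults on any access. A hedged usage sketch (v8::internal::OS assumed in scope; page size and mmap flags illustrative):

    #include <sys/mman.h>
    #include <unistd.h>

    void Example() {
      size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      void* mem = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANON, -1, 0);
      if (mem == MAP_FAILED) return;
      // ... emit machine code into the first page ...
      OS::ProtectCode(mem, page);  // Now readable/executable, not writable.
      OS::Guard(static_cast<char*>(mem) + page, page);  // Any access faults.
    }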
// ----------------------------------------------------------------------------
// Math functions
@@ -118,7 +146,14 @@
//
FILE* OS::FOpen(const char* path, const char* mode) {
- return fopen(path, mode);
+ FILE* file = fopen(path, mode);
+ if (file == NULL) return NULL;
+ struct stat file_stat;
+ if (fstat(fileno(file), &file_stat) != 0) return NULL;
+ bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
+ if (is_regular_file) return file;
+ fclose(file);
+ return NULL;
}
@@ -127,6 +162,11 @@
}
+FILE* OS::OpenTemporaryFile() {
+ return tmpfile();
+}
+
+
const char* const OS::LogFileOpenMode = "w";
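
FOpen now fstat()s the descriptor and keeps only regular files, so directories and device nodes can no longer be handed back to callers; OpenTemporaryFile wraps tmpfile(), which removes the file on close. A usage sketch (path hypothetical):

    FILE* log = OS::FOpen("/tmp/v8.log", OS::LogFileOpenMode);
    // log is NULL unless /tmp/v8.log is a regular file.
    FILE* scratch = OS::OpenTemporaryFile();
    if (scratch != NULL) {
      fputs("scratch data\n", scratch);
      fclose(scratch);  // The temporary file disappears here.
    }
    if (log != NULL) fclose(log);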
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 970c418..1e79f10 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -88,6 +88,7 @@
}
+static Mutex* limit_mutex = NULL;
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -96,6 +97,7 @@
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
}
@@ -145,6 +147,9 @@
static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ ASSERT(limit_mutex != NULL);
+ ScopedLock lock(limit_mutex);
+
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
@@ -187,23 +192,6 @@
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- mprotect(address, size, PROT_READ);
-}
-
-
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- // TODO(1240712): mprotect has a return value which is ignored here.
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- mprotect(address, size, prot);
-}
-
-#endif
-
-
void OS::Sleep(int milliseconds) {
useconds_t ms = static_cast<useconds_t>(milliseconds);
usleep(1000 * ms);
@@ -381,17 +369,15 @@
pthread_t thread_; // Thread handle for pthread.
};
-Thread::Thread(Isolate* isolate, const Options& options)
+Thread::Thread(const Options& options)
: data_(new PlatformData()),
- isolate_(isolate),
stack_size_(options.stack_size) {
set_name(options.name);
}
-Thread::Thread(Isolate* isolate, const char* name)
+Thread::Thread(const char* name)
: data_(new PlatformData()),
- isolate_(isolate),
stack_size_(0) {
set_name(name);
}
@@ -409,7 +395,6 @@
// one) so we initialize it here too.
thread->data()->thread_ = pthread_self();
ASSERT(thread->data()->thread_ != kNoThread);
- Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return NULL;
}
@@ -587,80 +572,171 @@
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
-static Sampler* active_sampler_ = NULL;
-static pthread_t vm_tid_ = 0;
-
-
static pthread_t GetThreadID() {
return pthread_self();
}
-
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
- if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
- if (vm_tid_ != GetThreadID()) return;
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
+ // We require a fully initialized and entered isolate.
+ return;
+ }
+ if (v8::Locker::IsActive() &&
+ !isolate->thread_manager()->IsLockedByCurrentThread()) {
+ return;
+ }
+
+ Sampler* sampler = isolate->logger()->sampler();
+ if (sampler == NULL || !sampler->IsActive()) return;
TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent();
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = Top::current_vm_state();
+ sample->state = isolate->current_vm_state();
sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
- active_sampler_->SampleStack(sample);
- active_sampler_->Tick(sample);
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
}
-
class Sampler::PlatformData : public Malloced {
public:
+ PlatformData() : vm_tid_(GetThreadID()) {}
+
+ pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+ pthread_t vm_tid_;
+};
+
+
+class SignalSender : public Thread {
+ public:
enum SleepInterval {
- FULL_INTERVAL,
- HALF_INTERVAL
+ HALF_INTERVAL,
+ FULL_INTERVAL
};
- explicit PlatformData(Sampler* sampler)
- : sampler_(sampler),
- signal_handler_installed_(false),
- vm_tgid_(getpid()),
- signal_sender_launched_(false) {
+ explicit SignalSender(int interval)
+ : Thread("SignalSender"),
+ interval_(interval) {}
+
+ static void InstallSignalHandler() {
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ signal_handler_installed_ =
+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
}
- void SignalSender() {
- while (sampler_->IsActive()) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) {
- SendProfilingSignal();
+ static void RestoreSignalHandler() {
+ if (signal_handler_installed_) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ signal_handler_installed_ = false;
+ }
+ }
+
+ static void AddActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::AddActiveSampler(sampler);
+ if (instance_ == NULL) {
+ // Start a thread that will send the SIGPROF signal to VM threads
+ // when CPU profiling is enabled.
+ instance_ = new SignalSender(sampler->interval());
+ instance_->Start();
+ } else {
+ ASSERT(instance_->interval_ == sampler->interval());
+ }
+ }
+
+ static void RemoveActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::RemoveActiveSampler(sampler);
+ if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+ RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
+ delete instance_;
+ instance_ = NULL;
+ RestoreSignalHandler();
+ }
+ }
+
+ // Implement Thread::Run().
+ virtual void Run() {
+ SamplerRegistry::State state;
+ while ((state = SamplerRegistry::GetState()) !=
+ SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ if (cpu_profiling_enabled && !signal_handler_installed_) {
+ InstallSignalHandler();
+ } else if (!cpu_profiling_enabled && signal_handler_installed_) {
+ RestoreSignalHandler();
+ }
+
+ // When CPU profiling is enabled, both JavaScript and C++ code is
+ // profiled. We must not suspend.
+ if (!cpu_profiling_enabled) {
+ if (rate_limiter_.SuspendIfNecessary()) continue;
+ }
+ if (cpu_profiling_enabled && runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
Sleep(HALF_INTERVAL);
- RuntimeProfiler::NotifyTick();
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
Sleep(HALF_INTERVAL);
} else {
- if (sampler_->IsProfiling()) SendProfilingSignal();
- if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+ this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+ NULL)) {
+ return;
+ }
+ }
Sleep(FULL_INTERVAL);
}
}
}
- void SendProfilingSignal() {
+ static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
+ if (!sampler->IsProfiling()) return;
+ SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
+ sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
+ }
+
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
+ void SendProfilingSignal(pthread_t tid) {
if (!signal_handler_installed_) return;
- pthread_kill(vm_tid_, SIGPROF);
+ pthread_kill(tid, SIGPROF);
}
void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
- useconds_t interval = sampler_->interval_ * 1000 - 100;
+ useconds_t interval = interval_ * 1000 - 100;
if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
@@ -675,22 +751,22 @@
USE(result);
}
- Sampler* sampler_;
- bool signal_handler_installed_;
- struct sigaction old_signal_handler_;
- int vm_tgid_;
- bool signal_sender_launched_;
- pthread_t signal_sender_thread_;
+ const int interval_;
RuntimeProfilerRateLimiter rate_limiter_;
+
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static SignalSender* instance_;
+ static bool signal_handler_installed_;
+ static struct sigaction old_signal_handler_;
+
+ DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
-
-static void* SenderEntry(void* arg) {
- Sampler::PlatformData* data =
- reinterpret_cast<Sampler::PlatformData*>(arg);
- data->SignalSender();
- return 0;
-}
+Mutex* SignalSender::mutex_ = OS::CreateMutex();
+SignalSender* SignalSender::instance_ = NULL;
+struct sigaction SignalSender::old_signal_handler_;
+bool SignalSender::signal_handler_installed_ = false;
Sampler::Sampler(Isolate* isolate, int interval)
@@ -699,65 +775,27 @@
profiling_(false),
active_(false),
samples_taken_(0) {
- data_ = new PlatformData(this);
+ data_ = new PlatformData;
}
Sampler::~Sampler() {
- ASSERT(!data_->signal_sender_launched_);
+ ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
- // There can only be one active sampler at the time on POSIX
- // platforms.
ASSERT(!IsActive());
- vm_tid_ = GetThreadID();
-
- // Request profiling signals.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- data_->signal_handler_installed_ =
- sigaction(SIGPROF, &sa, &data_->old_signal_handler_) == 0;
-
- // Start a thread that sends SIGPROF signal to VM thread.
- // Sending the signal ourselves instead of relying on itimer provides
- // much better accuracy.
SetActive(true);
- if (pthread_create(
- &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
- data_->signal_sender_launched_ = true;
- }
-
- // Set this sampler as the active sampler.
- active_sampler_ = this;
+ SignalSender::AddActiveSampler(this);
}
void Sampler::Stop() {
+ ASSERT(IsActive());
+ SignalSender::RemoveActiveSampler(this);
SetActive(false);
-
- // Wait for signal sender termination (it will exit after setting
- // active_ to false).
- if (data_->signal_sender_launched_) {
- Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
- pthread_join(data_->signal_sender_thread_, NULL);
- data_->signal_sender_launched_ = false;
- }
-
- // Restore old signal handler
- if (data_->signal_handler_installed_) {
- sigaction(SIGPROF, &data_->old_signal_handler_, 0);
- data_->signal_handler_installed_ = false;
- }
-
- // This sampler is no longer the active sampler.
- active_sampler_ = NULL;
}
-#endif // ENABLE_LOGGING_AND_PROFILING
-
} } // namespace v8::internal
diff --git a/src/platform-tls.h b/src/platform-tls.h
index 5649175..3251663 100644
--- a/src/platform-tls.h
+++ b/src/platform-tls.h
@@ -30,7 +30,7 @@
#ifndef V8_PLATFORM_TLS_H_
#define V8_PLATFORM_TLS_H_
-#ifdef V8_FAST_TLS
+#ifndef V8_NO_FAST_TLS
// When fast TLS is requested we include the appropriate
// implementation header.
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 390d3d9..e5df5ff 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -138,16 +138,39 @@
}
+#define _TRUNCATE 0
+#define STRUNCATE 80
+
int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
const char* format, va_list argptr) {
+ ASSERT(count == _TRUNCATE);
return _vsnprintf(buffer, sizeOfBuffer, format, argptr);
}
-#define _TRUNCATE 0
-int strncpy_s(char* strDest, size_t numberOfElements,
- const char* strSource, size_t count) {
- strncpy(strDest, strSource, count);
+int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
+ CHECK(source != NULL);
+ CHECK(dest != NULL);
+ CHECK_GT(dest_size, 0);
+
+ if (count == _TRUNCATE) {
+ while (dest_size > 0 && *source != 0) {
+ *(dest++) = *(source++);
+ --dest_size;
+ }
+ if (dest_size == 0) {
+ *(dest - 1) = 0;
+ return STRUNCATE;
+ }
+ } else {
+ while (dest_size > 0 && count > 0 && *source != 0) {
+ *(dest++) = *(source++);
+ --dest_size;
+ --count;
+ }
+ }
+ CHECK_GT(dest_size, 0);
+ *dest = 0;
return 0;
}
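
The rewritten shim now honours the MSVC strncpy_s contract it emulates: with count == _TRUNCATE it copies what fits, always NUL-terminates, and returns STRUNCATE when the source had to be cut; with an explicit count it copies at most count characters and then terminates. Expected results, as a sketch:

    char buf[4];
    int r1 = strncpy_s(buf, sizeof(buf), "abcdef", _TRUNCATE);
    // buf == "abc", r1 == STRUNCATE: source truncated to fit.
    int r2 = strncpy_s(buf, sizeof(buf), "ab", _TRUNCATE);
    // buf == "ab", r2 == 0: source fit with room to spare.
    int r3 = strncpy_s(buf, sizeof(buf), "abcdef", 2);
    // buf == "ab", r3 == 0: explicit count, then NUL terminator.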
@@ -169,6 +192,11 @@
namespace v8 {
namespace internal {
+intptr_t OS::MaxVirtualMemory() {
+ return 0;
+}
+
+
double ceiling(double x) {
return ceil(x);
}
@@ -712,6 +740,24 @@
}
+FILE* OS::OpenTemporaryFile() {
+ // tmpfile_s tries to use the root dir, so don't use it.
+ char tempPathBuffer[MAX_PATH];
+ DWORD path_result = 0;
+ path_result = GetTempPathA(MAX_PATH, tempPathBuffer);
+ if (path_result > MAX_PATH || path_result == 0) return NULL;
+ UINT name_result = 0;
+ char tempNameBuffer[MAX_PATH];
+ name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer);
+ if (name_result == 0) return NULL;
+ FILE* result = FOpen(tempNameBuffer, "w+"); // Same mode as tmpfile uses.
+ if (result != NULL) {
+ Remove(tempNameBuffer); // Delete on close.
+ }
+ return result;
+}
+
+
// Open log file in binary mode to avoid \n -> \r\n conversion.
const char* const OS::LogFileOpenMode = "wb";
@@ -911,24 +957,17 @@
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void OS::Protect(void* address, size_t size) {
- // TODO(1240712): VirtualProtect has a return value which is ignored here.
+void OS::ProtectCode(void* address, const size_t size) {
DWORD old_protect;
- VirtualProtect(address, size, PAGE_READONLY, &old_protect);
+ VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
}
-void OS::Unprotect(void* address, size_t size, bool is_executable) {
- // TODO(1240712): VirtualProtect has a return value which is ignored here.
- DWORD new_protect = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- DWORD old_protect;
- VirtualProtect(address, size, new_protect, &old_protect);
+void OS::Guard(void* address, const size_t size) {
+ DWORD oldprotect;
+ VirtualProtect(address, size, PAGE_READONLY | PAGE_GUARD, &oldprotect);
}
-#endif
-
void OS::Sleep(int milliseconds) {
::Sleep(milliseconds);
@@ -1473,10 +1512,6 @@
// convention.
static unsigned int __stdcall ThreadEntry(void* arg) {
Thread* thread = reinterpret_cast<Thread*>(arg);
- // This is also initialized by the last parameter to _beginthreadex() but we
- // don't know which thread will run first (the original thread or the new
- // one) so we initialize it here too.
- Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
thread->Run();
return 0;
}
@@ -1492,17 +1527,15 @@
// Initialize a Win32 thread object. The thread has an invalid thread
// handle until it is started.
-Thread::Thread(Isolate* isolate, const Options& options)
- : isolate_(isolate),
- stack_size_(options.stack_size) {
+Thread::Thread(const Options& options)
+ : stack_size_(options.stack_size) {
data_ = new PlatformData(kNoThread);
set_name(options.name);
}
-Thread::Thread(Isolate* isolate, const char* name)
- : isolate_(isolate),
- stack_size_(0) {
+Thread::Thread(const char* name)
+ : stack_size_(0) {
data_ = new PlatformData(kNoThread);
set_name(name);
}
@@ -1583,7 +1616,6 @@
class Win32Mutex : public Mutex {
public:
-
Win32Mutex() { InitializeCriticalSection(&cs_); }
virtual ~Win32Mutex() { DeleteCriticalSection(&cs_); }
@@ -1837,8 +1869,6 @@
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
// ----------------------------------------------------------------------------
// Win32 profiler support.
@@ -1872,7 +1902,7 @@
class SamplerThread : public Thread {
public:
explicit SamplerThread(int interval)
- : Thread(NULL, "SamplerThread"),
+ : Thread("SamplerThread"),
interval_(interval) {}
static void AddActiveSampler(Sampler* sampler) {
@@ -1890,8 +1920,7 @@
ScopedLock lock(mutex_);
SamplerRegistry::RemoveActiveSampler(sampler);
if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
- instance_->Join();
+ RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
delete instance_;
instance_ = NULL;
}
@@ -2014,6 +2043,5 @@
SetActive(false);
}
-#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
diff --git a/src/platform.h b/src/platform.h
index 725008a..6b2348c 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -177,6 +177,9 @@
static FILE* FOpen(const char* path, const char* mode);
static bool Remove(const char* path);
+ // Opens a temporary file; the file is automatically removed on close.
+ static FILE* OpenTemporaryFile();
+
// Log file open mode is platform-dependent due to line ends issues.
static const char* const LogFileOpenMode;
@@ -203,15 +206,16 @@
size_t* allocated,
bool is_executable);
static void Free(void* address, const size_t size);
+
+ // Mark code segments non-writable.
+ static void ProtectCode(void* address, const size_t size);
+
+ // Mark memory as a guard page so that any access causes an exception.
+ static void Guard(void* address, const size_t size);
+
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect a block of memory by marking it read-only/writable.
- static void Protect(void* address, size_t size);
- static void Unprotect(void* address, size_t size, bool is_executable);
-#endif
-
// Returns an indication of whether a pointer is in a space that
// has been allocated by Allocate(). This method may conservatively
// always return false, but giving more accurate information may
@@ -288,6 +292,10 @@
// positions indicated by the members of the CpuFeature enum from globals.h
static uint64_t CpuFeaturesImpliedByPlatform();
+ // Maximum size of virtual memory. 0 means there is no artificial
+ // limit.
+ static intptr_t MaxVirtualMemory();
+
// Returns the double constant NAN
static double nan_value();
@@ -384,9 +392,9 @@
int stack_size;
};
- // Create new thread (with a value for storing in the TLS isolate field).
- Thread(Isolate* isolate, const Options& options);
- Thread(Isolate* isolate, const char* name);
+ // Create new thread.
+ explicit Thread(const Options& options);
+ explicit Thread(const char* name);
virtual ~Thread();
// Start new thread by calling the Run() method in the new thread.
@@ -433,7 +441,6 @@
// A hint to the scheduler to let another thread run.
static void YieldCPU();
- Isolate* isolate() const { return isolate_; }
// The thread name length is limited to 16 based on Linux's implementation of
// prctl().
@@ -447,7 +454,6 @@
PlatformData* data_;
- Isolate* isolate_;
char name_[kMaxThreadNameLength];
int stack_size_;
@@ -601,7 +607,6 @@
bool has_external_callback : 1;
};
-#ifdef ENABLE_LOGGING_AND_PROFILING
class Sampler {
public:
// Initialize sampler.
@@ -660,8 +665,6 @@
};
-#endif // ENABLE_LOGGING_AND_PROFILING
-
} } // namespace v8::internal
#endif // V8_PLATFORM_H_
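
Dropping Isolate* from Thread is the cleanup this merge carries through every platform file: new threads no longer pre-seed the isolate TLS slot, and code that needs V8 must enter an isolate explicitly. Under the new constructors a worker looks like this sketch (hypothetical subclass):

    class WorkerThread : public Thread {
     public:
      WorkerThread() : Thread("v8:Worker") {}  // Name only, no Isolate*.
      virtual void Run() {
        // Enter an isolate explicitly here if V8 calls are needed;
        // the removed TLS pre-initialization no longer happens.
      }
    };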
diff --git a/src/preparse-data-format.h b/src/preparse-data-format.h
index 64c4f18..e64326e 100644
--- a/src/preparse-data-format.h
+++ b/src/preparse-data-format.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -37,7 +37,7 @@
public:
// Layout and constants of the preparse data exchange format.
static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 6;
+ static const unsigned kCurrentVersion = 7;
static const int kMagicOffset = 0;
static const int kVersionOffset = 1;
diff --git a/src/preparse-data.h b/src/preparse-data.h
index 551d2e8..c6503c4 100644
--- a/src/preparse-data.h
+++ b/src/preparse-data.h
@@ -48,7 +48,8 @@
virtual void LogFunction(int start,
int end,
int literals,
- int properties) = 0;
+ int properties,
+ int strict_mode) = 0;
// Logs a symbol creation of a literal or identifier.
virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
@@ -84,11 +85,16 @@
FunctionLoggingParserRecorder();
virtual ~FunctionLoggingParserRecorder() {}
- virtual void LogFunction(int start, int end, int literals, int properties) {
+ virtual void LogFunction(int start,
+ int end,
+ int literals,
+ int properties,
+ int strict_mode) {
function_store_.Add(start);
function_store_.Add(end);
function_store_.Add(literals);
function_store_.Add(properties);
+ function_store_.Add(strict_mode);
}
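
Each function entry in the preparse log grows from four ints to five: the new strict_mode flag lets a lazily compiled function be restored in the right language mode without re-scanning its body, and kCurrentVersion is bumped to 7 (in preparse-data-format.h above) so stale cached data is rejected. The record written by LogFunction, as a sketch:

    // One function entry in version-7 preparse data: five consecutive ints.
    struct PreparseFunctionEntry {
      int start;        // Start position of the function in the source.
      int end;          // End position.
      int literals;     // Materialized literal count.
      int properties;   // Expected property count.
      int strict_mode;  // New in v7: 1 if the function body is strict.
    };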
// Logs an error message and marks the log as containing an error.
diff --git a/src/preparser-api.cc b/src/preparser-api.cc
index a0d13ed..e0ab500 100644
--- a/src/preparser-api.cc
+++ b/src/preparser-api.cc
@@ -158,23 +158,6 @@
};
-class StandAloneJavaScriptScanner : public JavaScriptScanner {
- public:
- explicit StandAloneJavaScriptScanner(UnicodeCache* unicode_cache)
- : JavaScriptScanner(unicode_cache) { }
-
- void Initialize(UC16CharacterStream* source) {
- source_ = source;
- Init();
- // Skip initial whitespace allowing HTML comment ends just like
- // after a newline and scan first token.
- has_line_terminator_before_next_ = true;
- SkipWhiteSpace();
- Scan();
- }
-};
-
-
// Functions declared by allocation.h and implemented in both api.cc (for v8)
// or here (for a stand-alone preparser).
@@ -194,7 +177,7 @@
internal::InputStreamUTF16Buffer buffer(input);
uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
internal::UnicodeCache unicode_cache;
- internal::StandAloneJavaScriptScanner scanner(&unicode_cache);
+ internal::JavaScriptScanner scanner(&unicode_cache);
scanner.Initialize(&buffer);
internal::CompleteParserRecorder recorder;
preparser::PreParser::PreParseResult result =
diff --git a/src/preparser.cc b/src/preparser.cc
index 86db379..c741b46 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -38,6 +38,8 @@
#include "preparse-data.h"
#include "preparser.h"
+#include "conversions-inl.h"
+
namespace v8 {
namespace preparser {
@@ -77,9 +79,14 @@
return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
"unexpected_token_string", NULL);
case i::Token::IDENTIFIER:
- case i::Token::FUTURE_RESERVED_WORD:
return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
"unexpected_token_identifier", NULL);
+ case i::Token::FUTURE_RESERVED_WORD:
+ return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
+ "unexpected_reserved", NULL);
+ case i::Token::FUTURE_STRICT_RESERVED_WORD:
+ return ReportMessageAt(source_location.beg_pos, source_location.end_pos,
+ "unexpected_strict_reserved", NULL);
default:
const char* name = i::Token::String(token);
ReportMessageAt(source_location.beg_pos, source_location.end_pos,
@@ -209,9 +216,6 @@
case i::Token::FUNCTION:
return ParseFunctionDeclaration(ok);
- case i::Token::NATIVE:
- return ParseNativeDeclaration(ok);
-
case i::Token::DEBUGGER:
return ParseDebuggerStatement(ok);
@@ -236,7 +240,7 @@
// Strict mode violation, using either reserved word or eval/arguments
// as name of strict function.
const char* type = "strict_function_name";
- if (identifier.IsFutureReserved()) {
+ if (identifier.IsFutureStrictReserved()) {
type = "strict_reserved_word";
}
ReportMessageAt(location.beg_pos, location.end_pos, type, NULL);
@@ -246,29 +250,6 @@
}
-// Language extension which is only enabled for source files loaded
-// through the API's extension mechanism. A native function
-// declaration is resolved by looking up the function through a
-// callback provided by the extension.
-PreParser::Statement PreParser::ParseNativeDeclaration(bool* ok) {
- Expect(i::Token::NATIVE, CHECK_OK);
- Expect(i::Token::FUNCTION, CHECK_OK);
- ParseIdentifier(CHECK_OK);
- Expect(i::Token::LPAREN, CHECK_OK);
- bool done = (peek() == i::Token::RPAREN);
- while (!done) {
- ParseIdentifier(CHECK_OK);
- done = (peek() == i::Token::RPAREN);
- if (!done) {
- Expect(i::Token::COMMA, CHECK_OK);
- }
- }
- Expect(i::Token::RPAREN, CHECK_OK);
- Expect(i::Token::SEMICOLON, CHECK_OK);
- return Statement::Default();
-}
-
-
PreParser::Statement PreParser::ParseBlock(bool* ok) {
// Block ::
// '{' Statement* '}'
@@ -362,8 +343,9 @@
// Identifier ':' Statement
Expression expr = ParseExpression(true, CHECK_OK);
- if (peek() == i::Token::COLON && expr.IsRawIdentifier()) {
- if (!strict_mode() || !expr.AsIdentifier().IsFutureReserved()) {
+ if (expr.IsRawIdentifier()) {
+ if (peek() == i::Token::COLON &&
+ (!strict_mode() || !expr.AsIdentifier().IsFutureReserved())) {
Consume(i::Token::COLON);
i::Scanner::Location start_location = scanner_->peek_location();
Statement statement = ParseStatement(CHECK_OK);
@@ -375,6 +357,9 @@
}
return Statement::Default();
}
+ // Preparsing is disabled for extensions (because the extension details
+ // aren't passed to lazily compiled functions), so we don't
+ // accept "native function" in the preparser.
}
// Parsed expression statement.
ExpectSemicolon(CHECK_OK);
@@ -405,7 +390,7 @@
Expect(i::Token::CONTINUE, CHECK_OK);
i::Token::Value tok = peek();
- if (!scanner_->has_line_terminator_before_next() &&
+ if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
tok != i::Token::SEMICOLON &&
tok != i::Token::RBRACE &&
tok != i::Token::EOS) {
@@ -422,7 +407,7 @@
Expect(i::Token::BREAK, CHECK_OK);
i::Token::Value tok = peek();
- if (!scanner_->has_line_terminator_before_next() &&
+ if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
tok != i::Token::SEMICOLON &&
tok != i::Token::RBRACE &&
tok != i::Token::EOS) {
@@ -448,7 +433,7 @@
// This is not handled during preparsing.
i::Token::Value tok = peek();
- if (!scanner_->has_line_terminator_before_next() &&
+ if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
tok != i::Token::SEMICOLON &&
tok != i::Token::RBRACE &&
tok != i::Token::EOS) {
@@ -599,7 +584,7 @@
// 'throw' [no line terminator] Expression ';'
Expect(i::Token::THROW, CHECK_OK);
- if (scanner_->has_line_terminator_before_next()) {
+ if (scanner_->HasAnyLineTerminatorBeforeNext()) {
i::JavaScriptScanner::Location pos = scanner_->location();
ReportMessageAt(pos.beg_pos, pos.end_pos,
"newline_after_throw", NULL);
@@ -822,7 +807,7 @@
i::Scanner::Location before = scanner_->peek_location();
Expression expression = ParseLeftHandSideExpression(CHECK_OK);
- if (!scanner_->has_line_terminator_before_next() &&
+ if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
i::Token::IsCountOp(peek())) {
if (strict_mode() && expression.IsIdentifier() &&
expression.AsIdentifier().IsEvalOrArguments()) {
@@ -1001,7 +986,16 @@
break;
}
- case i::Token::FUTURE_RESERVED_WORD:
+ case i::Token::FUTURE_RESERVED_WORD: {
+ Next();
+ i::Scanner::Location location = scanner_->location();
+ ReportMessageAt(location.beg_pos, location.end_pos,
+ "reserved_word", NULL);
+ *ok = false;
+ return Expression::Default();
+ }
+
+ case i::Token::FUTURE_STRICT_RESERVED_WORD:
if (strict_mode()) {
Next();
i::Scanner::Location location = scanner_->location();
@@ -1100,15 +1094,17 @@
i::Token::Value next = peek();
switch (next) {
case i::Token::IDENTIFIER:
- case i::Token::FUTURE_RESERVED_WORD: {
+ case i::Token::FUTURE_RESERVED_WORD:
+ case i::Token::FUTURE_STRICT_RESERVED_WORD: {
bool is_getter = false;
bool is_setter = false;
- ParseIdentifierOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
+ ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
if ((is_getter || is_setter) && peek() != i::Token::COLON) {
i::Token::Value name = Next();
bool is_keyword = i::Token::IsKeyword(name);
if (name != i::Token::IDENTIFIER &&
name != i::Token::FUTURE_RESERVED_WORD &&
+ name != i::Token::FUTURE_STRICT_RESERVED_WORD &&
name != i::Token::NUMBER &&
name != i::Token::STRING &&
!is_keyword) {
@@ -1256,7 +1252,8 @@
int end_pos = scanner_->location().end_pos;
log_->LogFunction(function_block_pos, end_pos,
function_scope.materialized_literal_count(),
- function_scope.expected_properties());
+ function_scope.expected_properties(),
+ strict_mode() ? 1 : 0);
} else {
ParseSourceElements(i::Token::RBRACE, CHECK_OK);
Expect(i::Token::RBRACE, CHECK_OK);
@@ -1295,7 +1292,7 @@
Next();
return;
}
- if (scanner_->has_line_terminator_before_next() ||
+ if (scanner_->HasAnyLineTerminatorBeforeNext() ||
tok == i::Token::RBRACE ||
tok == i::Token::EOS) {
return;
@@ -1333,6 +1330,9 @@
LogSymbol();
if (scanner_->current_token() == i::Token::FUTURE_RESERVED_WORD) {
return Identifier::FutureReserved();
+ } else if (scanner_->current_token() ==
+ i::Token::FUTURE_STRICT_RESERVED_WORD) {
+ return Identifier::FutureStrictReserved();
}
if (scanner_->is_literal_ascii()) {
// Detect strict-mode poison words.
@@ -1350,11 +1350,22 @@
PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
- if (!Check(i::Token::FUTURE_RESERVED_WORD)) {
- Expect(i::Token::IDENTIFIER, ok);
- if (!*ok) return Identifier::Default();
+ i::Token::Value next = Next();
+ switch (next) {
+ case i::Token::FUTURE_RESERVED_WORD: {
+ i::Scanner::Location location = scanner_->location();
+ ReportMessageAt(location.beg_pos, location.end_pos,
+ "reserved_word", NULL);
+ *ok = false;
+ }
+ // FALLTHROUGH
+ case i::Token::FUTURE_STRICT_RESERVED_WORD:
+ case i::Token::IDENTIFIER:
+ return GetIdentifierSymbol();
+ default:
+ *ok = false;
+ return Identifier::Default();
}
- return GetIdentifierSymbol();
}
@@ -1394,6 +1405,8 @@
bool* ok) {
const char* type = eval_args_type;
if (identifier.IsFutureReserved()) {
+ type = "reserved_word";
+ } else if (identifier.IsFutureStrictReserved()) {
type = "strict_reserved_word";
}
if (strict_mode()) {
@@ -1416,7 +1429,8 @@
return Identifier::Default();
}
if (next == i::Token::IDENTIFIER ||
- next == i::Token::FUTURE_RESERVED_WORD) {
+ next == i::Token::FUTURE_RESERVED_WORD ||
+ next == i::Token::FUTURE_STRICT_RESERVED_WORD) {
return GetIdentifierSymbol();
}
*ok = false;
@@ -1428,10 +1442,10 @@
// This function reads an identifier and determines whether or not it
// is 'get' or 'set'.
-PreParser::Identifier PreParser::ParseIdentifierOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- Identifier result = ParseIdentifier(ok);
+PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok) {
+ Identifier result = ParseIdentifierName(ok);
if (!*ok) return Identifier::Default();
if (scanner_->is_literal_ascii() &&
scanner_->literal_length() == 3) {
@@ -1445,6 +1459,7 @@
bool PreParser::peek_any_identifier() {
i::Token::Value next = peek();
return next == i::Token::IDENTIFIER ||
- next == i::Token::FUTURE_RESERVED_WORD;
+ next == i::Token::FUTURE_RESERVED_WORD ||
+ next == i::Token::FUTURE_STRICT_RESERVED_WORD;
}
} } // v8::preparser
diff --git a/src/preparser.h b/src/preparser.h
index 2efd53e..3d72c97 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -93,15 +93,23 @@
static Identifier FutureReserved() {
return Identifier(kFutureReservedIdentifier);
}
+ static Identifier FutureStrictReserved() {
+ return Identifier(kFutureStrictReservedIdentifier);
+ }
bool IsEval() { return type_ == kEvalIdentifier; }
bool IsArguments() { return type_ == kArgumentsIdentifier; }
bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
+ bool IsFutureStrictReserved() {
+ return type_ == kFutureStrictReservedIdentifier;
+ }
bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
+
private:
enum Type {
kUnknownIdentifier,
kFutureReservedIdentifier,
+ kFutureStrictReservedIdentifier,
kEvalIdentifier,
kArgumentsIdentifier
};
@@ -372,7 +380,6 @@
SourceElements ParseSourceElements(int end_token, bool* ok);
Statement ParseStatement(bool* ok);
Statement ParseFunctionDeclaration(bool* ok);
- Statement ParseNativeDeclaration(bool* ok);
Statement ParseBlock(bool* ok);
Statement ParseVariableStatement(bool* ok);
Statement ParseVariableDeclarations(bool accept_IN, int* num_decl, bool* ok);
@@ -411,7 +418,9 @@
Identifier ParseIdentifier(bool* ok);
Identifier ParseIdentifierName(bool* ok);
- Identifier ParseIdentifierOrGetOrSet(bool* is_get, bool* is_set, bool* ok);
+ Identifier ParseIdentifierNameOrGetOrSet(bool* is_get,
+ bool* is_set,
+ bool* ok);
// Logs the currently parsed literal as a symbol in the preparser data.
void LogSymbol();
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 60288a9..f18b320 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -123,15 +123,16 @@
}
-void PrettyPrinter::VisitWithEnterStatement(WithEnterStatement* node) {
- Print("<enter with> (");
+void PrettyPrinter::VisitEnterWithContextStatement(
+ EnterWithContextStatement* node) {
+ Print("<enter with context> (");
Visit(node->expression());
Print(") ");
}
-void PrettyPrinter::VisitWithExitStatement(WithExitStatement* node) {
- Print("<exit with>");
+void PrettyPrinter::VisitExitContextStatement(ExitContextStatement* node) {
+ Print("<exit context>");
}
@@ -201,7 +202,8 @@
Print("try ");
Visit(node->try_block());
Print(" catch (");
- Visit(node->catch_var());
+ const bool quote = false;
+ PrintLiteral(node->variable()->name(), quote);
Print(") ");
Visit(node->catch_block());
}
@@ -282,15 +284,6 @@
}
-void PrettyPrinter::VisitCatchExtensionObject(CatchExtensionObject* node) {
- Print("{ ");
- Visit(node->key());
- Print(": ");
- Visit(node->value());
- Print(" }");
-}
-
-
void PrettyPrinter::VisitSlot(Slot* node) {
switch (node->type()) {
case Slot::PARAMETER:
@@ -805,13 +798,14 @@
}
-void AstPrinter::VisitWithEnterStatement(WithEnterStatement* node) {
- PrintIndentedVisit("WITH ENTER", node->expression());
+void AstPrinter::VisitEnterWithContextStatement(
+ EnterWithContextStatement* node) {
+ PrintIndentedVisit("ENTER WITH CONTEXT", node->expression());
}
-void AstPrinter::VisitWithExitStatement(WithExitStatement* node) {
- PrintIndented("WITH EXIT\n");
+void AstPrinter::VisitExitContextStatement(ExitContextStatement* node) {
+ PrintIndented("EXIT CONTEXT\n");
}
@@ -862,7 +856,9 @@
void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
IndentedScope indent(this, "TRY CATCH");
PrintIndentedVisit("TRY", node->try_block());
- PrintIndentedVisit("CATCHVAR", node->catch_var());
+ PrintLiteralWithModeIndented("CATCHVAR",
+ node->variable(),
+ node->variable()->name());
PrintIndentedVisit("CATCH", node->catch_block());
}
@@ -962,13 +958,6 @@
}
-void AstPrinter::VisitCatchExtensionObject(CatchExtensionObject* node) {
- IndentedScope indent(this, "CatchExtensionObject");
- PrintIndentedVisit("KEY", node->key());
- PrintIndentedVisit("VALUE", node->value());
-}
-
-
void AstPrinter::VisitSlot(Slot* node) {
PrintIndented("SLOT ");
PrettyPrinter::VisitSlot(node);
@@ -1205,14 +1194,15 @@
}
-void JsonAstBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
- TagScope tag(this, "WithEnterStatement");
+void JsonAstBuilder::VisitEnterWithContextStatement(
+ EnterWithContextStatement* stmt) {
+ TagScope tag(this, "EnterWithContextStatement");
Visit(stmt->expression());
}
-void JsonAstBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
- TagScope tag(this, "WithExitStatement");
+void JsonAstBuilder::VisitExitContextStatement(ExitContextStatement* stmt) {
+ TagScope tag(this, "ExitContextStatement");
}
@@ -1254,8 +1244,10 @@
void JsonAstBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
TagScope tag(this, "TryCatchStatement");
+ { AttributesScope attributes(this);
+ AddAttribute("variable", stmt->variable()->name());
+ }
Visit(stmt->try_block());
- Visit(stmt->catch_var());
Visit(stmt->catch_block());
}
@@ -1360,13 +1352,6 @@
}
-void JsonAstBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- TagScope tag(this, "CatchExtensionObject");
- Visit(expr->key());
- Visit(expr->value());
-}
-
-
void JsonAstBuilder::VisitAssignment(Assignment* expr) {
TagScope tag(this, "Assignment");
{
@@ -1386,10 +1371,6 @@
void JsonAstBuilder::VisitProperty(Property* expr) {
TagScope tag(this, "Property");
- {
- AttributesScope attributes(this);
- AddAttribute("type", expr->is_synthetic() ? "SYNTHETIC" : "NORMAL");
- }
Visit(expr->obj());
Visit(expr->key());
}
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index 747e5c7..8f4bc6c 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -28,8 +28,6 @@
#ifndef V8_PROFILE_GENERATOR_INL_H_
#define V8_PROFILE_GENERATOR_INL_H_
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
#include "profile-generator.h"
namespace v8 {
@@ -123,6 +121,4 @@
} } // namespace v8::internal
-#endif // ENABLE_LOGGING_AND_PROFILING
-
#endif // V8_PROFILE_GENERATOR_INL_H_
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index c954c4f..07426f2 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
#include "v8.h"
#include "profile-generator-inl.h"
@@ -1096,7 +1094,7 @@
: retained_size_(0) {
}
- int reained_size() const { return retained_size_; }
+ int retained_size() const { return retained_size_; }
void Apply(HeapEntry** entry_ptr) {
if ((*entry_ptr)->painted_reachable()) {
@@ -1137,7 +1135,7 @@
RetainedSizeCalculator ret_size_calc;
snapshot()->IterateEntries(&ret_size_calc);
- retained_size_ = ret_size_calc.reained_size();
+ retained_size_ = ret_size_calc.retained_size();
ASSERT((retained_size_ & kExactRetainedSizeTag) == 0);
retained_size_ |= kExactRetainedSizeTag;
}
@@ -1602,6 +1600,28 @@
}
+const char* HeapObjectsSet::GetTag(Object* obj) {
+ HeapObject* object = HeapObject::cast(obj);
+ HashMap::Entry* cache_entry =
+ entries_.Lookup(object, HeapEntriesMap::Hash(object), false);
+ if (cache_entry != NULL
+ && cache_entry->value != HeapEntriesMap::kHeapEntryPlaceholder) {
+ return reinterpret_cast<const char*>(cache_entry->value);
+ } else {
+ return NULL;
+ }
+}
+
+
+void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
+ if (!obj->IsHeapObject()) return;
+ HeapObject* object = HeapObject::cast(obj);
+ HashMap::Entry* cache_entry =
+ entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
+ cache_entry->value = const_cast<char*>(tag);
+}
+
+
HeapObject *const V8HeapExplorer::kInternalRootObject =
reinterpret_cast<HeapObject*>(
static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
@@ -1613,7 +1633,8 @@
V8HeapExplorer::V8HeapExplorer(
HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress)
- : snapshot_(snapshot),
+ : heap_(Isolate::Current()->heap()),
+ snapshot_(snapshot),
collection_(snapshot_->collection()),
progress_(progress),
filler_(NULL) {
@@ -1639,6 +1660,18 @@
return snapshot_->AddRootEntry(children_count);
} else if (object == kGcRootsObject) {
return snapshot_->AddGcRootsEntry(children_count, retainers_count);
+ } else if (object->IsJSGlobalObject()) {
+ const char* tag = objects_tags_.GetTag(object);
+ const char* name = collection_->names()->GetName(
+ GetConstructorNameForHeapProfile(JSObject::cast(object)));
+ if (tag != NULL) {
+ name = collection_->names()->GetFormatted("%s / %s", name, tag);
+ }
+ return AddEntry(object,
+ HeapEntry::kObject,
+ name,
+ children_count,
+ retainers_count);
} else if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(object);
SharedFunctionInfo* shared = func->shared();
@@ -1691,10 +1724,14 @@
: "",
children_count,
retainers_count);
- } else if (object->IsFixedArray() || object->IsByteArray()) {
+ } else if (object->IsFixedArray() ||
+ object->IsFixedDoubleArray() ||
+ object->IsByteArray() ||
+ object->IsExternalArray()) {
+ const char* tag = objects_tags_.GetTag(object);
return AddEntry(object,
HeapEntry::kArray,
- "",
+ tag != NULL ? tag : "",
children_count,
retainers_count);
} else if (object->IsHeapNumber()) {
@@ -1780,6 +1817,7 @@
ASSERT(Memory::Object_at(field)->IsHeapObject());
*field |= kFailureTag;
}
+
private:
bool CheckVisitedAndUnmark(Object** field) {
if ((*field)->IsFailure()) {
@@ -1801,15 +1839,13 @@
HeapEntry* entry = GetEntry(obj);
if (entry == NULL) return; // No interest in this object.
+ bool extract_indexed_refs = true;
if (obj->IsJSGlobalProxy()) {
// We need to reference JS global objects from snapshot's root.
// We use JSGlobalProxy because this is what embedder (e.g. browser)
// uses for the global object.
JSGlobalProxy* proxy = JSGlobalProxy::cast(obj);
SetRootShortcutReference(proxy->map()->prototype());
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
} else if (obj->IsJSObject()) {
JSObject* js_obj = JSObject::cast(obj);
ExtractClosureReferences(js_obj, entry);
@@ -1817,7 +1853,7 @@
ExtractElementReferences(js_obj, entry);
ExtractInternalReferences(js_obj, entry);
SetPropertyReference(
- obj, entry, HEAP->Proto_symbol(), js_obj->GetPrototype());
+ obj, entry, heap_->Proto_symbol(), js_obj->GetPrototype());
if (obj->IsJSFunction()) {
JSFunction* js_fun = JSFunction::cast(js_obj);
Object* proto_or_map = js_fun->prototype_or_initial_map();
@@ -1825,39 +1861,49 @@
if (!proto_or_map->IsMap()) {
SetPropertyReference(
obj, entry,
- HEAP->prototype_symbol(), proto_or_map,
+ heap_->prototype_symbol(), proto_or_map,
JSFunction::kPrototypeOrInitialMapOffset);
} else {
SetPropertyReference(
obj, entry,
- HEAP->prototype_symbol(), js_fun->prototype());
+ heap_->prototype_symbol(), js_fun->prototype());
}
}
SetInternalReference(js_fun, entry,
"shared", js_fun->shared(),
JSFunction::kSharedFunctionInfoOffset);
+ TagObject(js_fun->unchecked_context(), "(context)");
SetInternalReference(js_fun, entry,
"context", js_fun->unchecked_context(),
JSFunction::kContextOffset);
+ TagObject(js_fun->literals(), "(function literals)");
SetInternalReference(js_fun, entry,
"literals", js_fun->literals(),
JSFunction::kLiteralsOffset);
}
+ TagObject(js_obj->properties(), "(object properties)");
SetInternalReference(obj, entry,
"properties", js_obj->properties(),
JSObject::kPropertiesOffset);
+ TagObject(js_obj->elements(), "(object elements)");
SetInternalReference(obj, entry,
"elements", js_obj->elements(),
JSObject::kElementsOffset);
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
} else if (obj->IsString()) {
if (obj->IsConsString()) {
ConsString* cs = ConsString::cast(obj);
SetInternalReference(obj, entry, 1, cs->first());
SetInternalReference(obj, entry, 2, cs->second());
}
+ extract_indexed_refs = false;
+ } else if (obj->IsGlobalContext()) {
+ Context* context = Context::cast(obj);
+ TagObject(context->jsfunction_result_caches(),
+ "(context func. result caches)");
+ TagObject(context->normalized_map_cache(), "(context norm. map cache)");
+ TagObject(context->runtime_context(), "(runtime context)");
+ TagObject(context->map_cache(), "(context map cache)");
+ TagObject(context->data(), "(context data)");
} else if (obj->IsMap()) {
Map* map = Map::cast(obj);
SetInternalReference(obj, entry,
@@ -1866,6 +1912,7 @@
"constructor", map->constructor(),
Map::kConstructorOffset);
if (!map->instance_descriptors()->IsEmpty()) {
+ TagObject(map->instance_descriptors(), "(map descriptors)");
SetInternalReference(obj, entry,
"descriptors", map->instance_descriptors(),
Map::kInstanceDescriptorsOrBitField3Offset);
@@ -1873,9 +1920,6 @@
SetInternalReference(obj, entry,
"code_cache", map->code_cache(),
Map::kCodeCacheOffset);
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
} else if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
SetInternalReference(obj, entry,
@@ -1884,16 +1928,61 @@
SetInternalReference(obj, entry,
"code", shared->unchecked_code(),
SharedFunctionInfo::kCodeOffset);
+ TagObject(shared->scope_info(), "(function scope info)");
+ SetInternalReference(obj, entry,
+ "scope_info", shared->scope_info(),
+ SharedFunctionInfo::kScopeInfoOffset);
SetInternalReference(obj, entry,
"instance_class_name", shared->instance_class_name(),
SharedFunctionInfo::kInstanceClassNameOffset);
SetInternalReference(obj, entry,
"script", shared->script(),
SharedFunctionInfo::kScriptOffset);
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
- } else {
+ } else if (obj->IsScript()) {
+ Script* script = Script::cast(obj);
+ SetInternalReference(obj, entry,
+ "source", script->source(),
+ Script::kSourceOffset);
+ SetInternalReference(obj, entry,
+ "name", script->name(),
+ Script::kNameOffset);
+ SetInternalReference(obj, entry,
+ "data", script->data(),
+ Script::kDataOffset);
+ SetInternalReference(obj, entry,
+ "context_data", script->context_data(),
+ Script::kContextOffset);
+ TagObject(script->line_ends(), "(script line ends)");
+ SetInternalReference(obj, entry,
+ "line_ends", script->line_ends(),
+ Script::kLineEndsOffset);
+ } else if (obj->IsDescriptorArray()) {
+ DescriptorArray* desc_array = DescriptorArray::cast(obj);
+ if (desc_array->length() > DescriptorArray::kContentArrayIndex) {
+ Object* content_array =
+ desc_array->get(DescriptorArray::kContentArrayIndex);
+ TagObject(content_array, "(map descriptor content)");
+ SetInternalReference(obj, entry,
+ "content", content_array,
+ FixedArray::OffsetOfElementAt(
+ DescriptorArray::kContentArrayIndex));
+ }
+ } else if (obj->IsCodeCache()) {
+ CodeCache* code_cache = CodeCache::cast(obj);
+ TagObject(code_cache->default_cache(), "(default code cache)");
+ SetInternalReference(obj, entry,
+ "default_cache", code_cache->default_cache(),
+ CodeCache::kDefaultCacheOffset);
+ TagObject(code_cache->normal_type_cache(), "(code type cache)");
+ SetInternalReference(obj, entry,
+ "type_cache", code_cache->normal_type_cache(),
+ CodeCache::kNormalTypeCacheOffset);
+ } else if (obj->IsCode()) {
+ Code* code = Code::cast(obj);
+ TagObject(code->unchecked_relocation_info(), "(code relocation info)");
+ TagObject(code->unchecked_deoptimization_data(), "(code deopt data)");
+ }
+ if (extract_indexed_refs) {
SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
IndexedReferencesExtractor refs_extractor(this, obj, entry);
obj->Iterate(&refs_extractor);
@@ -2051,7 +2140,7 @@
}
SetRootGcRootsReference();
RootsReferencesExtractor extractor(this);
- HEAP->IterateRoots(&extractor, VISIT_ALL);
+ heap_->IterateRoots(&extractor, VISIT_ALL);
filler_ = NULL;
return progress_->ProgressReport(false);
}
@@ -2206,6 +2295,76 @@
}
+void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
+ if (obj->IsHeapObject() &&
+ !obj->IsOddball() &&
+ obj != heap_->raw_unchecked_empty_byte_array() &&
+ obj != heap_->raw_unchecked_empty_fixed_array() &&
+ obj != heap_->raw_unchecked_empty_fixed_double_array() &&
+ obj != heap_->raw_unchecked_empty_descriptor_array()) {
+ objects_tags_.SetTag(obj, tag);
+ }
+}
+
+
+class GlobalObjectsEnumerator : public ObjectVisitor {
+ public:
+ virtual void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) {
+ if ((*p)->IsGlobalContext()) {
+ Context* context = Context::cast(*p);
+ JSObject* proxy = context->global_proxy();
+ if (proxy->IsJSGlobalProxy()) {
+ Object* global = proxy->map()->prototype();
+ if (global->IsJSGlobalObject()) {
+ objects_.Add(Handle<JSGlobalObject>(JSGlobalObject::cast(global)));
+ }
+ }
+ }
+ }
+ }
+ int count() { return objects_.length(); }
+ Handle<JSGlobalObject>& at(int i) { return objects_[i]; }
+
+ private:
+ List<Handle<JSGlobalObject> > objects_;
+};
+
+
+// Modifies heap. Must not be run during heap traversal.
+void V8HeapExplorer::TagGlobalObjects() {
+ Isolate* isolate = Isolate::Current();
+ GlobalObjectsEnumerator enumerator;
+ isolate->global_handles()->IterateAllRoots(&enumerator);
+ Handle<String> document_string =
+ isolate->factory()->NewStringFromAscii(CStrVector("document"));
+ Handle<String> url_string =
+ isolate->factory()->NewStringFromAscii(CStrVector("URL"));
+ const char** urls = NewArray<const char*>(enumerator.count());
+ for (int i = 0, l = enumerator.count(); i < l; ++i) {
+ urls[i] = NULL;
+ Handle<JSGlobalObject> global_obj = enumerator.at(i);
+ Object* obj_document;
+ if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
+ obj_document->IsJSObject()) {
+ JSObject* document = JSObject::cast(obj_document);
+ Object* obj_url;
+ if (document->GetProperty(*url_string)->ToObject(&obj_url) &&
+ obj_url->IsString()) {
+ urls[i] = collection_->names()->GetName(String::cast(obj_url));
+ }
+ }
+ }
+
+ AssertNoAllocation no_allocation;
+ for (int i = 0, l = enumerator.count(); i < l; ++i) {
+ objects_tags_.SetTag(*enumerator.at(i), urls[i]);
+ }
+
+ DeleteArray(urls);
+}
+
+
class GlobalHandlesExtractor : public ObjectVisitor {
public:
explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer)
@@ -2448,6 +2607,7 @@
HeapEntry*) {
entries_->CountReference(parent_ptr, child_ptr);
}
+
private:
HeapEntriesMap* entries_;
};
@@ -2519,6 +2679,7 @@
child_entry,
retainer_index);
}
+
private:
HeapSnapshot* snapshot_;
HeapSnapshotsCollection* collection_;
@@ -2527,6 +2688,8 @@
bool HeapSnapshotGenerator::GenerateSnapshot() {
+ v8_heap_explorer_.TagGlobalObjects();
+
AssertNoAllocation no_alloc;
SetProgressTotal(4); // 2 passes + dominators + sizes.
@@ -3094,5 +3257,3 @@
}
} } // namespace v8::internal
-
-#endif // ENABLE_LOGGING_AND_PROFILING
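Note on the profile-generator.cc changes above: TagGlobalObjects deliberately works in two phases. The property reads ("document", then its "URL") go through GetProperty, which can allocate and therefore move objects; only after all labels are collected does the code enter the AssertNoAllocation scope and record tags against raw object pointers. A minimal standalone sketch of the same pattern (not V8 code; Object and ComputeLabel are stand-ins):

    #include <string>
    #include <utility>
    #include <vector>

    struct Object {};  // stand-in for a movable heap object

    // Stand-in for the "document.URL" lookup, which may allocate and
    // therefore trigger a moving GC.
    std::string ComputeLabel(Object*) { return "http://example.com"; }

    // Phase 1 may allocate; phase 2 must not, so the Object* keys stay
    // valid while they are recorded.
    void TagAll(const std::vector<Object*>& objects,
                std::vector<std::pair<Object*, std::string> >* tags) {
      std::vector<std::string> labels;
      labels.reserve(objects.size());
      for (size_t i = 0; i < objects.size(); ++i)
        labels.push_back(ComputeLabel(objects[i]));
      // -- the real code holds an AssertNoAllocation scope from here --
      for (size_t i = 0; i < objects.size(); ++i)
        tags->push_back(std::make_pair(objects[i], labels[i]));
    }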
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 5b789ac..d1c2b38 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -28,8 +28,6 @@
#ifndef V8_PROFILE_GENERATOR_H_
#define V8_PROFILE_GENERATOR_H_
-#ifdef ENABLE_LOGGING_AND_PROFILING
-
#include "allocation.h"
#include "hashmap.h"
#include "../include/v8-profiler.h"
@@ -638,8 +636,7 @@
class HeapSnapshot {
public:
enum Type {
- kFull = v8::HeapSnapshot::kFull,
- kAggregated = v8::HeapSnapshot::kAggregated
+ kFull = v8::HeapSnapshot::kFull
};
HeapSnapshot(HeapSnapshotsCollection* collection,
@@ -859,6 +856,8 @@
void Clear();
bool Contains(Object* object);
void Insert(Object* obj);
+ const char* GetTag(Object* obj);
+ void SetTag(Object* obj, const char* tag);
private:
HashMap entries_;
@@ -920,6 +919,7 @@
void AddRootEntries(SnapshotFillerInterface* filler);
int EstimateObjectsCount();
bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+ void TagGlobalObjects();
static HeapObject* const kInternalRootObject;
@@ -971,13 +971,16 @@
void SetRootShortcutReference(Object* child);
void SetRootGcRootsReference();
void SetGcRootsReference(Object* child);
+ void TagObject(Object* obj, const char* tag);
HeapEntry* GetEntry(Object* obj);
+ Heap* heap_;
HeapSnapshot* snapshot_;
HeapSnapshotsCollection* collection_;
SnapshottingProgressReportingInterface* progress_;
SnapshotFillerInterface* filler_;
+ HeapObjectsSet objects_tags_;
static HeapObject* const kGcRootsObject;
@@ -1121,6 +1124,4 @@
} } // namespace v8::internal
-#endif // ENABLE_LOGGING_AND_PROFILING
-
#endif // V8_PROFILE_GENERATOR_H_
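The header side introduces GetTag/SetTag on HeapObjectsSet and an objects_tags_ set on the explorer, keyed by object address. A rough model of that storage (a sketch, not V8's HashMap-based implementation):

    #include <unordered_map>

    // Object address -> tag string; the tag is expected to outlive the
    // map (the explorer interns URLs via the collection's names table).
    class ObjectTagMap {
     public:
      void SetTag(const void* obj, const char* tag) { entries_[obj] = tag; }
      const char* GetTag(const void* obj) const {
        std::unordered_map<const void*, const char*>::const_iterator it =
            entries_.find(obj);
        return it == entries_.end() ? nullptr : it->second;
      }
     private:
      std::unordered_map<const void*, const char*> entries_;
    };

Because the keys are raw addresses, the tags are only meaningful while the GC cannot move the objects, which is why the tagging above happens under AssertNoAllocation.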
diff --git a/src/property.h b/src/property.h
index 87f9ea3..ddecc92 100644
--- a/src/property.h
+++ b/src/property.h
@@ -206,6 +206,7 @@
lookup_type_ = HANDLER_TYPE;
holder_ = NULL;
details_ = PropertyDetails(NONE, HANDLER);
+ cacheable_ = false;
}
void InterceptorResult(JSObject* holder) {
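The one-line property.h change marks proxy handler lookups as non-cacheable. That is the safe default: a handler trap is arbitrary JavaScript and may answer differently on every access, so its result must not be memoized by inline caches the way an ordinary map-based lookup can be. In sketch form (not the real LookupResult API):

    // Results produced by a proxy handler opt out of IC caching.
    struct LookupOutcome {
      bool cacheable;
    };

    LookupOutcome HandlerResult() {
      LookupOutcome result;
      result.cacheable = false;  // traps may answer differently each time
      return result;
    }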
diff --git a/src/proxy.js b/src/proxy.js
index c11852b..27524bd 100644
--- a/src/proxy.js
+++ b/src/proxy.js
@@ -60,7 +60,9 @@
}
$Proxy.create = function(handler, proto) {
- if (!IS_SPEC_OBJECT(proto)) proto = $Object.prototype
+ if (!IS_SPEC_OBJECT(handler))
+ throw MakeTypeError("handler_non_object", ["create"])
+ if (!IS_SPEC_OBJECT(proto)) proto = null // Mozilla does this...
return %CreateJSProxy(handler, proto)
}
@@ -73,11 +75,75 @@
function DerivedGetTrap(receiver, name) {
var desc = this.getPropertyDescriptor(name)
- if (IS_UNDEFINED(desc)) { return desc; }
+ if (IS_UNDEFINED(desc)) { return desc }
if ('value' in desc) {
return desc.value
} else {
- if (IS_UNDEFINED(desc.get)) { return desc.get; }
- return desc.get.call(receiver) // The proposal says so...
+ if (IS_UNDEFINED(desc.get)) { return desc.get }
+ // The proposal says: desc.get.call(receiver)
+ return %_CallFunction(receiver, desc.get)
}
}
+
+function DerivedSetTrap(receiver, name, val) {
+ var desc = this.getOwnPropertyDescriptor(name)
+ if (desc) {
+ if ('writable' in desc) {
+ if (desc.writable) {
+ desc.value = val
+ this.defineProperty(name, desc)
+ return true
+ } else {
+ return false
+ }
+ } else { // accessor
+ if (desc.set) {
+ // The proposal says: desc.set.call(receiver, val)
+ %_CallFunction(receiver, val, desc.set)
+ return true
+ } else {
+ return false
+ }
+ }
+ }
+ desc = this.getPropertyDescriptor(name)
+ if (desc) {
+ if ('writable' in desc) {
+ if (desc.writable) {
+ // fall through
+ } else {
+ return false
+ }
+ } else { // accessor
+ if (desc.set) {
+ // The proposal says: desc.set.call(receiver, val)
+ %_CallFunction(receiver, val, desc.set)
+ return true
+ } else {
+ return false
+ }
+ }
+ }
+ this.defineProperty(name, {
+ value: val,
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ return true;
+}
+
+function DerivedHasTrap(name) {
+ return !!this.getPropertyDescriptor(name)
+}
+
+function DerivedKeysTrap() {
+ var names = this.getOwnPropertyNames()
+ var enumerableNames = []
+ for (var i = 0, count = 0; i < names.length; ++i) {
+ var name = names[i]
+ if (this.getOwnPropertyDescriptor(TO_STRING_INLINE(name)).enumerable) {
+ enumerableNames[count++] = names[i]
+ }
+ }
+ return enumerableNames
+}
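The new Derived*Trap functions give handlers written against the old Harmony proxy proposal their fallback behavior: supply the fundamental traps and get/set/has/keys are synthesized from property descriptors. DerivedSetTrap consults the own descriptor first, then an inherited one, and finally defines a fresh writable, enumerable, configurable property. A rough usage example against this pre-ES6 API (requires the harmony proxies flag; `target` is our own name, and which derived traps fire depends on which traps the handler omits):

    var target = {}
    var handler = {
      getOwnPropertyDescriptor: function(name) {
        return Object.getOwnPropertyDescriptor(target, name)
      },
      getPropertyDescriptor: function(name) {
        // a complete handler would also walk target's prototype chain
        return Object.getOwnPropertyDescriptor(target, name)
      },
      getOwnPropertyNames: function() {
        return Object.getOwnPropertyNames(target)
      },
      defineProperty: function(name, desc) {
        Object.defineProperty(target, name, desc)
      }
    }
    var p = Proxy.create(handler)
    p.x = 1    // derived set: no own descriptor yet, so defineProperty
    'x' in p   // derived has: !!getPropertyDescriptor('x')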
diff --git a/src/rewriter.cc b/src/rewriter.cc
index efe8044..e8ca5b9 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -66,9 +66,13 @@
Expression* SetResult(Expression* value) {
result_assigned_ = true;
- VariableProxy* result_proxy = new VariableProxy(result_);
- return new Assignment(Token::ASSIGN, result_proxy, value,
- RelocInfo::kNoPosition);
+ Zone* zone = isolate()->zone();
+ VariableProxy* result_proxy = new(zone) VariableProxy(isolate(), result_);
+ return new(zone) Assignment(isolate(),
+ Token::ASSIGN,
+ result_proxy,
+ value,
+ RelocInfo::kNoPosition);
}
// Node visitors.
@@ -197,137 +201,18 @@
void Processor::VisitDeclaration(Declaration* node) {}
void Processor::VisitEmptyStatement(EmptyStatement* node) {}
void Processor::VisitReturnStatement(ReturnStatement* node) {}
-void Processor::VisitWithEnterStatement(WithEnterStatement* node) {}
-void Processor::VisitWithExitStatement(WithExitStatement* node) {}
+void Processor::VisitEnterWithContextStatement(
+ EnterWithContextStatement* node) {
+}
+void Processor::VisitExitContextStatement(ExitContextStatement* node) {}
void Processor::VisitDebuggerStatement(DebuggerStatement* node) {}
// Expressions are never visited yet.
-void Processor::VisitFunctionLiteral(FunctionLiteral* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitConditional(Conditional* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitVariableProxy(VariableProxy* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitLiteral(Literal* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitRegExpLiteral(RegExpLiteral* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitArrayLiteral(ArrayLiteral* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitObjectLiteral(ObjectLiteral* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCatchExtensionObject(CatchExtensionObject* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitAssignment(Assignment* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitThrow(Throw* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitProperty(Property* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCall(Call* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCallNew(CallNew* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCallRuntime(CallRuntime* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitUnaryOperation(UnaryOperation* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCountOperation(CountOperation* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitBinaryOperation(BinaryOperation* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCompareOperation(CompareOperation* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitCompareToNull(CompareToNull* node) {
- USE(node);
- UNREACHABLE();
-}
-
-
-void Processor::VisitThisFunction(ThisFunction* node) {
- USE(node);
- UNREACHABLE();
-}
+#define DEF_VISIT(type) \
+ void Processor::Visit##type(type* expr) { UNREACHABLE(); }
+EXPRESSION_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
// Assumes code has been parsed and scopes have been analyzed. Mutates the
@@ -337,7 +222,7 @@
ASSERT(function != NULL);
Scope* scope = function->scope();
ASSERT(scope != NULL);
- if (scope->is_function_scope()) return true;
+ if (!scope->is_global_scope() && !scope->is_eval_scope()) return true;
ZoneList<Statement*>* body = function->body();
if (!body->is_empty()) {
@@ -348,8 +233,10 @@
if (processor.HasStackOverflow()) return false;
if (processor.result_assigned()) {
- VariableProxy* result_proxy = new VariableProxy(result);
- body->Add(new ReturnStatement(result_proxy));
+ Isolate* isolate = info->isolate();
+ Zone* zone = isolate->zone();
+ VariableProxy* result_proxy = new(zone) VariableProxy(isolate, result);
+ body->Add(new(zone) ReturnStatement(result_proxy));
}
}
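The rewriter now allocates AST nodes with `new(zone)` into the isolate's zone rather than the C++ heap, so nodes are reclaimed wholesale when the zone dies and the constructors carry the isolate. A minimal arena sketch of that allocation pattern (the real Zone carves from large segments and never allocates per node):

    #include <cstddef>
    #include <vector>

    class Zone {
     public:
      ~Zone() {
        for (size_t i = 0; i < chunks_.size(); ++i) delete[] chunks_[i];
      }
      void* Allocate(size_t size) {
        chunks_.push_back(new char[size]);  // per-node only to stay short
        return chunks_.back();
      }
     private:
      std::vector<char*> chunks_;
    };

    inline void* operator new(size_t size, Zone* zone) {
      return zone->Allocate(size);
    }

    struct AstNode {
      explicit AstNode(int position) : position_(position) {}
      int position_;
    };

    // Usage: AstNode* node = new(zone) AstNode(42);
    // No per-node delete ever runs; the zone destructor frees all
    // storage, which is fine for trivially destructible nodes.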
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index ce9a308..917f6d0 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -43,32 +43,6 @@
namespace internal {
-class PendingListNode : public Malloced {
- public:
- explicit PendingListNode(JSFunction* function);
- ~PendingListNode() { Destroy(); }
-
- PendingListNode* next() const { return next_; }
- void set_next(PendingListNode* node) { next_ = node; }
- Handle<JSFunction> function() { return Handle<JSFunction>::cast(function_); }
-
- // If the function is garbage collected before we've had the chance
- // to optimize it the weak handle will be null.
- bool IsValid() { return !function_.is_null(); }
-
- // Returns the number of microseconds this node has been pending.
- int Delay() const { return static_cast<int>(OS::Ticks() - start_); }
-
- private:
- void Destroy();
- static void WeakCallback(v8::Persistent<v8::Value> object, void* data);
-
- PendingListNode* next_;
- Handle<Object> function_; // Weak handle.
- int64_t start_;
-};
-
-
// Optimization sampler constants.
static const int kSamplerFrameCount = 2;
static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
@@ -80,39 +54,14 @@
static const int kSamplerThresholdDelta = 1;
static const int kSamplerThresholdSizeFactorInit = 3;
-static const int kSamplerThresholdSizeFactorMin = 1;
-static const int kSamplerThresholdSizeFactorDelta = 1;
static const int kSizeLimit = 1500;
-PendingListNode::PendingListNode(JSFunction* function) : next_(NULL) {
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- function_ = global_handles->Create(function);
- start_ = OS::Ticks();
- global_handles->MakeWeak(function_.location(), this, &WeakCallback);
-}
-
-
-void PendingListNode::Destroy() {
- if (!IsValid()) return;
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
- global_handles->Destroy(function_.location());
- function_= Handle<Object>::null();
-}
-
-
-void PendingListNode::WeakCallback(v8::Persistent<v8::Value>, void* data) {
- reinterpret_cast<PendingListNode*>(data)->Destroy();
-}
-
-
Atomic32 RuntimeProfiler::state_ = 0;
// TODO(isolates): Create the semaphore lazily and clean it up when no
// longer required.
-#ifdef ENABLE_LOGGING_AND_PROFILING
Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
-#endif
#ifdef DEBUG
bool RuntimeProfiler::has_been_globally_setup_ = false;
@@ -125,16 +74,8 @@
sampler_threshold_(kSamplerThresholdInit),
sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
sampler_ticks_until_threshold_adjustment_(
- kSamplerTicksBetweenThresholdAdjustment),
- js_ratio_(0),
- sampler_window_position_(0),
- optimize_soon_list_(NULL),
- state_window_position_(0),
- state_window_ticks_(0) {
- state_counts_[IN_NON_JS_STATE] = kStateWindowSize;
- state_counts_[IN_JS_STATE] = 0;
- STATIC_ASSERT(IN_NON_JS_STATE == 0);
- memset(state_window_, 0, sizeof(state_window_));
+ kSamplerTicksBetweenThresholdAdjustment),
+ sampler_window_position_(0) {
ClearSampleBuffer();
}
@@ -148,16 +89,13 @@
}
-void RuntimeProfiler::Optimize(JSFunction* function, bool eager, int delay) {
+void RuntimeProfiler::Optimize(JSFunction* function) {
ASSERT(function->IsOptimizable());
if (FLAG_trace_opt) {
- PrintF("[marking (%s) ", eager ? "eagerly" : "lazily");
+ PrintF("[marking ");
function->PrintName();
PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
PrintF(" for recompilation");
- if (delay > 0) {
- PrintF(" (delayed %0.3f ms)", static_cast<double>(delay) / 1000);
- }
PrintF("]\n");
}
@@ -185,7 +123,7 @@
// We are not prepared to do OSR for a function that already has an
// allocated arguments object. The optimized code would bypass it for
// arguments accesses, which is unsound. Don't try OSR.
- if (shared->scope_info()->HasArgumentsShadow()) return;
+ if (shared->uses_arguments()) return;
// We're using on-stack replacement: patch the unoptimized code so that
// any back edge in any unoptimized frame will trigger on-stack
@@ -243,20 +181,6 @@
void RuntimeProfiler::OptimizeNow() {
HandleScope scope(isolate_);
- PendingListNode* current = optimize_soon_list_;
- while (current != NULL) {
- PendingListNode* next = current->next();
- if (current->IsValid()) {
- Handle<JSFunction> function = current->function();
- int delay = current->Delay();
- if (function->IsOptimizable()) {
- Optimize(*function, true, delay);
- }
- }
- delete current;
- current = next;
- }
- optimize_soon_list_ = NULL;
// Run through the JavaScript frames and collect them. If we already
// have a sample of the function, we mark it for optimizations
@@ -303,24 +227,9 @@
: 1;
int threshold = sampler_threshold_ * threshold_size_factor;
- int current_js_ratio = NoBarrier_Load(&js_ratio_);
-
- // Adjust threshold depending on the ratio of time spent
- // in JS code.
- if (current_js_ratio < 20) {
- // If we spend less than 20% of the time in JS code,
- // do not optimize.
- continue;
- } else if (current_js_ratio < 75) {
- // Below 75% of time spent in JS code, only optimize very
- // frequently used functions.
- threshold *= 3;
- }
if (LookupSample(function) >= threshold) {
- Optimize(function, false, 0);
- isolate_->compilation_cache()->MarkForEagerOptimizing(
- Handle<JSFunction>(function));
+ Optimize(function);
}
}
@@ -333,42 +242,8 @@
}
-void RuntimeProfiler::OptimizeSoon(JSFunction* function) {
- if (!function->IsOptimizable()) return;
- PendingListNode* node = new PendingListNode(function);
- node->set_next(optimize_soon_list_);
- optimize_soon_list_ = node;
-}
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void RuntimeProfiler::UpdateStateRatio(SamplerState current_state) {
- SamplerState old_state = state_window_[state_window_position_];
- state_counts_[old_state]--;
- state_window_[state_window_position_] = current_state;
- state_counts_[current_state]++;
- ASSERT(IsPowerOf2(kStateWindowSize));
- state_window_position_ = (state_window_position_ + 1) &
- (kStateWindowSize - 1);
- // Note: to calculate correct ratio we have to track how many valid
- // ticks are actually in the state window, because on profiler
- // startup this number can be less than the window size.
- state_window_ticks_ = Min(kStateWindowSize, state_window_ticks_ + 1);
- NoBarrier_Store(&js_ratio_, state_counts_[IN_JS_STATE] * 100 /
- state_window_ticks_);
-}
-#endif
-
-
void RuntimeProfiler::NotifyTick() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // Record state sample.
- SamplerState state = IsSomeIsolateInJS()
- ? IN_JS_STATE
- : IN_NON_JS_STATE;
- UpdateStateRatio(state);
isolate_->stack_guard()->RequestRuntimeProfilerTick();
-#endif
}
@@ -416,7 +291,6 @@
void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
// The profiler thread must still be waiting.
ASSERT(NoBarrier_Load(&state_) >= 0);
// In IsolateEnteredJS we have already incremented the counter and
@@ -424,8 +298,6 @@
// to get the right count of active isolates.
NoBarrier_AtomicIncrement(&state_, 1);
semaphore_->Signal();
- isolate->ResetEagerOptimizingData();
-#endif
}
@@ -435,20 +307,33 @@
bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
ASSERT(old_state >= -1);
if (old_state != 0) return false;
semaphore_->Wait();
-#endif
return true;
}
-void RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- semaphore_->Signal();
-#endif
+void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
+ // Do a fake increment. If the profiler is waiting on the semaphore,
+ // the returned state is 0, which can be left as an initial state in
+ // case profiling is restarted later. If the profiler is not
+ // waiting, the increment will prevent it from waiting, but has to
+ // be undone after the profiler is stopped.
+ Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
+ ASSERT(new_state >= 0);
+ if (new_state == 0) {
+ // The profiler thread is waiting. Wake it up. It must check for
+ // stop conditions before attempting to wait again.
+ semaphore_->Signal();
+ }
+ thread->Join();
+ // The profiler thread is now stopped. Undo the increment in case it
+ // was not waiting.
+ if (new_state != 0) {
+ NoBarrier_AtomicIncrement(&state_, -1);
+ }
}
@@ -470,18 +355,9 @@
bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- static const int kNonJSTicksThreshold = 100;
- if (RuntimeProfiler::IsSomeIsolateInJS()) {
- non_js_ticks_ = 0;
- } else {
- if (non_js_ticks_ < kNonJSTicksThreshold) {
- ++non_js_ticks_;
- } else {
- return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
- }
+ if (!RuntimeProfiler::IsSomeIsolateInJS()) {
+ return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
}
-#endif
return false;
}
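The new shutdown path replaces "just signal the semaphore" with a handshake that also repairs the isolate count. A compact model of both sides, using std::atomic and std::counting_semaphore (C++20) in place of V8's primitives:

    #include <atomic>
    #include <semaphore>
    #include <thread>

    // state is -1 while the profiler thread is parked on the semaphore;
    // otherwise it counts isolates currently running JavaScript.
    std::atomic<int> state{0};
    std::counting_semaphore<> sem{0};

    // Profiler-thread side, mirroring WaitForSomeIsolateToEnterJS.
    bool WaitForJS() {
      int expected = 0;
      if (!state.compare_exchange_strong(expected, -1)) return false;
      sem.acquire();
      return true;
    }

    // Shutdown side, mirroring StopRuntimeProfilerThreadBeforeShutdown.
    void StopProfiler(std::thread* profiler_thread) {
      // Fake increment: if the thread was parked (state was -1), the
      // result is 0 and it must be woken to see the stop condition.
      int new_state = state.fetch_add(1) + 1;
      if (new_state == 0) sem.release();
      profiler_thread->join();
      // If the thread was not parked, undo the increment so a later
      // restart begins from a consistent count.
      if (new_state != 0) state.fetch_sub(1);
    }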
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index 692b4ff..3f3ab07 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -37,7 +37,6 @@
class Isolate;
class JSFunction;
class Object;
-class PendingListNode;
class Semaphore;
class RuntimeProfiler {
@@ -52,7 +51,6 @@
}
void OptimizeNow();
- void OptimizeSoon(JSFunction* function);
void NotifyTick();
@@ -86,10 +84,9 @@
static bool IsSomeIsolateInJS();
static bool WaitForSomeIsolateToEnterJS();
- // When shutting down we join the profiler thread. Doing so while
- // it's waiting on a semaphore will cause a deadlock, so we have to
- // wake it up first.
- static void WakeUpRuntimeProfilerThreadBeforeShutdown();
+ // Stops the runtime profiler thread when profiling support is being
+ // turned off.
+ static void StopRuntimeProfilerThreadBeforeShutdown(Thread* thread);
void UpdateSamplesAfterScavenge();
void RemoveDeadSamples();
@@ -106,7 +103,7 @@
static void HandleWakeUp(Isolate* isolate);
- void Optimize(JSFunction* function, bool eager, int delay);
+ void Optimize(JSFunction* function);
void AttemptOnStackReplacement(JSFunction* function);
@@ -118,31 +115,16 @@
void AddSample(JSFunction* function, int weight);
-#ifdef ENABLE_LOGGING_AND_PROFILING
- void UpdateStateRatio(SamplerState current_state);
-#endif
-
Isolate* isolate_;
int sampler_threshold_;
int sampler_threshold_size_factor_;
int sampler_ticks_until_threshold_adjustment_;
- // The ratio of ticks spent in JS code in percent.
- Atomic32 js_ratio_;
-
Object* sampler_window_[kSamplerWindowSize];
int sampler_window_position_;
int sampler_window_weight_[kSamplerWindowSize];
- // Support for pending 'optimize soon' requests.
- PendingListNode* optimize_soon_list_;
-
- SamplerState state_window_[kStateWindowSize];
- int state_window_position_;
- int state_window_ticks_;
- int state_counts_[2];
-
// Possible state values:
// -1 => the profiler thread is waiting on the semaphore
// 0 or positive => the number of isolates running JavaScript code.
@@ -159,7 +141,7 @@
// Rate limiter intended to be used in the profiler thread.
class RuntimeProfilerRateLimiter BASE_EMBEDDED {
public:
- RuntimeProfilerRateLimiter() : non_js_ticks_(0) { }
+ RuntimeProfilerRateLimiter() {}
// Suspends the current thread (which must be the profiler thread)
// when not executing JavaScript to minimize CPU usage. Returns
@@ -170,8 +152,6 @@
bool SuspendIfNecessary();
private:
- int non_js_ticks_;
-
DISALLOW_COPY_AND_ASSIGN(RuntimeProfilerRateLimiter);
};
diff --git a/src/runtime.cc b/src/runtime.cc
index 77b3f66..8f14565 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -45,6 +45,7 @@
#include "json-parser.h"
#include "liveedit.h"
#include "liveobjectlist-inl.h"
+#include "misc-intrinsics.h"
#include "parser.h"
#include "platform.h"
#include "runtime-profiler.h"
@@ -81,19 +82,19 @@
RUNTIME_ASSERT(obj->IsBoolean()); \
bool name = (obj)->IsTrue();
-// Cast the given object to a Smi and store its value in an int variable
-// with the given name. If the object is not a Smi call IllegalOperation
+// Cast the given argument to a Smi and store its value in an int variable
+// with the given name. If the argument is not a Smi call IllegalOperation
// and return.
-#define CONVERT_SMI_CHECKED(name, obj) \
- RUNTIME_ASSERT(obj->IsSmi()); \
- int name = Smi::cast(obj)->value();
+#define CONVERT_SMI_ARG_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsSmi()); \
+ int name = args.smi_at(index);
-// Cast the given object to a double and store it in a variable with
-// the given name. If the object is not a number (as opposed to
+// Cast the given argument to a double and store it in a variable with
+// the given name. If the argument is not a number (as opposed to
// the number not-a-number) call IllegalOperation and return.
-#define CONVERT_DOUBLE_CHECKED(name, obj) \
- RUNTIME_ASSERT(obj->IsNumber()); \
- double name = (obj)->Number();
+#define CONVERT_DOUBLE_ARG_CHECKED(name, index) \
+ RUNTIME_ASSERT(args[index]->IsNumber()); \
+ double name = args.number_at(index);
// Call the specified converter on the object and store the result in
// a variable of the specified type with the given name. If the
@@ -482,7 +483,7 @@
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
+ CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_CHECKED(FixedArray, elements, 2);
Handle<Object> object =
@@ -499,9 +500,9 @@
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
+ CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
- CONVERT_SMI_CHECKED(flags, args[3]);
+ CONVERT_SMI_ARG_CHECKED(flags, 3);
bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
@@ -525,9 +526,9 @@
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
+ CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
- CONVERT_SMI_CHECKED(flags, args[3]);
+ CONVERT_SMI_ARG_CHECKED(flags, 3);
bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
@@ -551,7 +552,7 @@
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
+ CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_CHECKED(FixedArray, elements, 2);
// Check if boilerplate exists. If not, create it first.
@@ -570,7 +571,7 @@
HandleScope scope(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_CHECKED(literals_index, args[1]);
+ CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_CHECKED(FixedArray, elements, 2);
// Check if boilerplate exists. If not, create it first.
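The CONVERT_SMI_CHECKED to CONVERT_SMI_ARG_CHECKED rename (and its double counterpart) switches the macros from taking an object expression to taking an argument index, routing extraction through the typed args.smi_at / args.number_at accessors. A simplified model (the real macros use RUNTIME_ASSERT, which bails out with an illegal-operation failure instead of aborting):

    #include <cassert>

    struct Arguments {
      bool smi[8];
      int value[8];
      bool IsSmi(int i) const { return smi[i]; }
      int smi_at(int i) const { return value[i]; }
    };

    #define CONVERT_SMI_ARG_CHECKED(name, index) \
      assert(args.IsSmi(index));                 \
      int name = args.smi_at(index)

    int LiteralsIndex(const Arguments& args) {
      CONVERT_SMI_ARG_CHECKED(literals_index, 1);
      return literals_index;
    }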
@@ -594,34 +595,30 @@
Object* handler = args[0];
Object* prototype = args[1];
Object* used_prototype =
- (prototype->IsJSObject() || prototype->IsJSProxy()) ? prototype
- : isolate->heap()->null_value();
+ prototype->IsJSReceiver() ? prototype : isolate->heap()->null_value();
return isolate->heap()->AllocateJSProxy(handler, used_prototype);
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCatchExtensionObject) {
- ASSERT(args.length() == 2);
- CONVERT_CHECKED(String, key, args[0]);
- Object* value = args[1];
- ASSERT(!value->IsFailure());
- // Create a catch context extension object.
- JSFunction* constructor =
- isolate->context()->global_context()->
- context_extension_function();
- Object* object;
- { MaybeObject* maybe_object = isolate->heap()->AllocateJSObject(constructor);
- if (!maybe_object->ToObject(&object)) return maybe_object;
- }
- // Assign the exception value to the catch variable and make sure
- // that the catch variable is DontDelete.
- { MaybeObject* maybe_value =
- // Passing non-strict per ECMA-262 5th Ed. 12.14. Catch, bullet #4.
- JSObject::cast(object)->SetProperty(
- key, value, DONT_DELETE, kNonStrictMode);
- if (!maybe_value->ToObject(&value)) return maybe_value;
- }
- return object;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSProxy) {
+ ASSERT(args.length() == 1);
+ Object* obj = args[0];
+ return isolate->heap()->ToBoolean(obj->IsJSProxy());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) {
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(JSProxy, proxy, args[0]);
+ return proxy->handler();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(JSProxy, proxy, args[0]);
+ proxy->Fix();
+ return proxy;
}
@@ -634,6 +631,28 @@
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(JSReceiver, input_obj, args[0]);
+ Object* obj = input_obj;
+ // We don't expect access checks to be needed on JSProxy objects.
+ ASSERT(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
+ do {
+ if (obj->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(JSObject::cast(obj),
+ isolate->heap()->Proto_symbol(),
+ v8::ACCESS_GET)) {
+ isolate->ReportFailedAccessCheck(JSObject::cast(obj), v8::ACCESS_GET);
+ return isolate->heap()->undefined_value();
+ }
+ obj = obj->GetPrototype();
+ } while (obj->IsJSObject() &&
+ JSObject::cast(obj)->map()->is_hidden_prototype());
+ return obj;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
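Runtime_GetPrototype skips prototypes whose map carries is_hidden_prototype, so engine-internal prototype objects never become visible to script. The walk in miniature (a toy object model; V8 reads the flag from the object's Map):

    struct Obj {
      Obj* prototype;
      bool hidden;  // true for prototypes scripts must not observe
    };

    Obj* VisiblePrototype(Obj* obj) {
      Obj* current = obj->prototype;
      while (current != nullptr && current->hidden) {
        current = current->prototype;
      }
      return current;
    }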
@@ -874,7 +893,13 @@
ASSERT(proto->IsJSGlobalObject());
holder = Handle<JSObject>(JSObject::cast(proto));
}
- NumberDictionary* dictionary = holder->element_dictionary();
+ FixedArray* elements = FixedArray::cast(holder->elements());
+ NumberDictionary* dictionary = NULL;
+ if (elements->map() == heap->non_strict_arguments_elements_map()) {
+ dictionary = NumberDictionary::cast(elements->get(1));
+ } else {
+ dictionary = NumberDictionary::cast(elements);
+ }
int entry = dictionary->FindEntry(index);
ASSERT(entry != NumberDictionary::kNotFound);
PropertyDetails details = dictionary->DetailsAt(entry);
@@ -973,8 +998,7 @@
ASSERT(proto->IsJSGlobalObject());
obj = JSObject::cast(proto);
}
- return obj->map()->is_extensible() ? isolate->heap()->true_value()
- : isolate->heap()->false_value();
+ return isolate->heap()->ToBoolean(obj->map()->is_extensible());
}
@@ -1040,8 +1064,7 @@
Map::cast(new_map)->set_is_access_check_needed(false);
object->set_map(Map::cast(new_map));
}
- return needs_access_checks ? isolate->heap()->true_value()
- : isolate->heap()->false_value();
+ return isolate->heap()->ToBoolean(needs_access_checks);
}
@@ -1084,9 +1107,8 @@
Handle<Context> context = args.at<Context>(0);
CONVERT_ARG_CHECKED(FixedArray, pairs, 1);
- bool is_eval = Smi::cast(args[2])->value() == 1;
- StrictModeFlag strict_mode =
- static_cast<StrictModeFlag>(Smi::cast(args[3])->value());
+ bool is_eval = args.smi_at(2) == 1;
+ StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
// Compute the property attributes. According to ECMA-262, section
@@ -1225,13 +1247,12 @@
CONVERT_ARG_CHECKED(Context, context, 0);
Handle<String> name(String::cast(args[1]));
- PropertyAttributes mode =
- static_cast<PropertyAttributes>(Smi::cast(args[2])->value());
+ PropertyAttributes mode = static_cast<PropertyAttributes>(args.smi_at(2));
RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
Handle<Object> initial_value(args[3], isolate);
- // Declarations are always done in the function context.
- context = Handle<Context>(context->fcontext());
+ // Declarations are always done in a function or global context.
+ context = Handle<Context>(context->declaration_context());
int index;
PropertyAttributes attributes;
@@ -1285,7 +1306,7 @@
Handle<JSObject> context_ext;
if (context->has_extension()) {
// The function context's extension context exists - use it.
- context_ext = Handle<JSObject>(context->extension());
+ context_ext = Handle<JSObject>(JSObject::cast(context->extension()));
} else {
// The function context's extension context does not exist - allocate
// it.
@@ -1339,8 +1360,7 @@
CONVERT_ARG_CHECKED(String, name, 0);
GlobalObject* global = isolate->context()->global();
RUNTIME_ASSERT(args[1]->IsSmi());
- StrictModeFlag strict_mode =
- static_cast<StrictModeFlag>(Smi::cast(args[1])->value());
+ StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(1));
ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
// According to ECMA-262, section 12.2, page 62, the property must
@@ -1519,8 +1539,8 @@
CONVERT_ARG_CHECKED(Context, context, 1);
Handle<String> name(String::cast(args[2]));
- // Initializations are always done in the function context.
- context = Handle<Context>(context->fcontext());
+ // Initializations are always done in a function or global context.
+ context = Handle<Context>(context->declaration_context());
int index;
PropertyAttributes attributes;
@@ -1541,14 +1561,12 @@
// In that case, the initialization behaves like a normal assignment
// to property 'x'.
if (index >= 0) {
- // Property was found in a context.
if (holder->IsContext()) {
- // The holder cannot be the function context. If it is, there
- // should have been a const redeclaration error when declaring
- // the const property.
- ASSERT(!holder.is_identical_to(context));
- if ((attributes & READ_ONLY) == 0) {
- Handle<Context>::cast(holder)->set(index, *value);
+ // Property was found in a context. Perform the assignment if we
+ // found some non-constant or an uninitialized constant.
+ Handle<Context> context = Handle<Context>::cast(holder);
+ if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
+ context->set(index, *value);
}
} else {
// The holder is an arguments object.
@@ -1622,7 +1640,7 @@
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, object, 0);
- CONVERT_SMI_CHECKED(properties, args[1]);
+ CONVERT_SMI_ARG_CHECKED(properties, 1);
if (object->HasFastProperties()) {
NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
}
@@ -1637,7 +1655,7 @@
CONVERT_ARG_CHECKED(String, subject, 1);
// Due to the way the JS calls are constructed this must be less than the
// length of a string, i.e. it is always a Smi. We check anyway for security.
- CONVERT_SMI_CHECKED(index, args[2]);
+ CONVERT_SMI_ARG_CHECKED(index, 2);
CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
RUNTIME_ASSERT(last_match_info->HasFastElements());
RUNTIME_ASSERT(index >= 0);
@@ -1654,7 +1672,7 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
ASSERT(args.length() == 3);
- CONVERT_SMI_CHECKED(elements_count, args[0]);
+ CONVERT_SMI_ARG_CHECKED(elements_count, 0);
if (elements_count > JSArray::kMaxFastElementsLength) {
return isolate->ThrowIllegalOperation();
}
@@ -1809,7 +1827,7 @@
HandleScope scope(isolate);
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- int index = Smi::cast(args[1])->value();
+ int index = args.smi_at(1);
Handle<String> pattern = args.at<String>(2);
Handle<String> flags = args.at<String>(3);
@@ -1855,6 +1873,33 @@
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(JSFunction, f, args[0]);
+ return isolate->heap()->ToBoolean(
+ f->shared()->name_should_print_as_anonymous());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(JSFunction, f, args[0]);
+ f->shared()->set_name_should_print_as_anonymous(true);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetBound) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+
+ CONVERT_CHECKED(JSFunction, fun, args[0]);
+ fun->shared()->set_bound(true);
+ return isolate->heap()->undefined_value();
+}
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -1953,8 +1998,7 @@
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, f, args[0]);
- return f->shared()->IsApiFunction() ? isolate->heap()->true_value()
- : isolate->heap()->false_value();
+ return isolate->heap()->ToBoolean(f->shared()->IsApiFunction());
}
@@ -1963,8 +2007,7 @@
ASSERT(args.length() == 1);
CONVERT_CHECKED(JSFunction, f, args[0]);
- return f->IsBuiltin() ? isolate->heap()->true_value() :
- isolate->heap()->false_value();
+ return isolate->heap()->ToBoolean(f->IsBuiltin());
}
@@ -2035,7 +2078,7 @@
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- CONVERT_SMI_CHECKED(num, args[1]);
+ CONVERT_SMI_ARG_CHECKED(num, 1);
RUNTIME_ASSERT(num >= 0);
SetExpectedNofProperties(function, num);
return isolate->heap()->undefined_value();
@@ -2259,24 +2302,24 @@
Handle<String> joined_string;
if (is_ascii_) {
- joined_string = NewRawAsciiString(character_count_);
+ Handle<SeqAsciiString> seq = NewRawAsciiString(character_count_);
AssertNoAllocation no_alloc;
- SeqAsciiString* seq = SeqAsciiString::cast(*joined_string);
char* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
char_buffer,
*array_builder_.array(),
array_builder_.length());
+ joined_string = Handle<String>::cast(seq);
} else {
// Non-ASCII.
- joined_string = NewRawTwoByteString(character_count_);
+ Handle<SeqTwoByteString> seq = NewRawTwoByteString(character_count_);
AssertNoAllocation no_alloc;
- SeqTwoByteString* seq = SeqTwoByteString::cast(*joined_string);
uc16* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
char_buffer,
*array_builder_.array(),
array_builder_.length());
+ joined_string = Handle<String>::cast(seq);
}
return joined_string;
}
@@ -2294,15 +2337,13 @@
}
private:
- Handle<String> NewRawAsciiString(int size) {
- CALL_HEAP_FUNCTION(heap_->isolate(),
- heap_->AllocateRawAsciiString(size), String);
+ Handle<SeqAsciiString> NewRawAsciiString(int length) {
+ return heap_->isolate()->factory()->NewRawAsciiString(length);
}
- Handle<String> NewRawTwoByteString(int size) {
- CALL_HEAP_FUNCTION(heap_->isolate(),
- heap_->AllocateRawTwoByteString(size), String);
+ Handle<SeqTwoByteString> NewRawTwoByteString(int length) {
+ return heap_->isolate()->factory()->NewRawTwoByteString(length);
}
@@ -2338,6 +2379,7 @@
int parts() {
return parts_.length();
}
+
private:
enum PartType {
SUBJECT_PREFIX = 1,
@@ -2614,7 +2656,7 @@
int capture_count = regexp_handle->CaptureCount();
// CompiledReplacement uses zone allocation.
- CompilationZoneScope zone(isolate, DELETE_ON_EXIT);
+ ZoneScope zone(isolate, DELETE_ON_EXIT);
CompiledReplacement compiled_replacement;
compiled_replacement.Compile(replacement_handle,
capture_count,
@@ -3088,17 +3130,17 @@
ASSERT(args.length() == 3);
CONVERT_CHECKED(String, value, args[0]);
- Object* from = args[1];
- Object* to = args[2];
int start, end;
// We have a fast integer-only case here to avoid a conversion to double in
// the common case where from and to are Smis.
- if (from->IsSmi() && to->IsSmi()) {
- start = Smi::cast(from)->value();
- end = Smi::cast(to)->value();
+ if (args[1]->IsSmi() && args[2]->IsSmi()) {
+ CONVERT_SMI_ARG_CHECKED(from_number, 1);
+ CONVERT_SMI_ARG_CHECKED(to_number, 2);
+ start = from_number;
+ end = to_number;
} else {
- CONVERT_DOUBLE_CHECKED(from_number, from);
- CONVERT_DOUBLE_CHECKED(to_number, to);
+ CONVERT_DOUBLE_ARG_CHECKED(from_number, 1);
+ CONVERT_DOUBLE_ARG_CHECKED(to_number, 2);
start = FastD2I(from_number);
end = FastD2I(to_number);
}
@@ -3128,11 +3170,11 @@
}
int length = subject->length();
- CompilationZoneScope zone_space(isolate, DELETE_ON_EXIT);
+ ZoneScope zone_space(isolate, DELETE_ON_EXIT);
ZoneList<int> offsets(8);
+ int start;
+ int end;
do {
- int start;
- int end;
{
AssertNoAllocation no_alloc;
FixedArray* elements = FixedArray::cast(regexp_info->elements());
@@ -3141,20 +3183,23 @@
}
offsets.Add(start);
offsets.Add(end);
- int index = start < end ? end : end + 1;
- if (index > length) break;
- match = RegExpImpl::Exec(regexp, subject, index, regexp_info);
+ if (start == end && ++end > length) break;
+ match = RegExpImpl::Exec(regexp, subject, end, regexp_info);
if (match.is_null()) {
return Failure::Exception();
}
} while (!match->IsNull());
int matches = offsets.length() / 2;
Handle<FixedArray> elements = isolate->factory()->NewFixedArray(matches);
- for (int i = 0; i < matches ; i++) {
+ Handle<String> substring = isolate->factory()->
+ NewSubString(subject, offsets.at(0), offsets.at(1));
+ elements->set(0, *substring);
+ for (int i = 1; i < matches ; i++) {
int from = offsets.at(i * 2);
int to = offsets.at(i * 2 + 1);
- Handle<String> match = isolate->factory()->NewSubString(subject, from, to);
- elements->set(i, *match);
+ Handle<String> substring = isolate->factory()->
+ NewProperSubString(subject, from, to);
+ elements->set(i, *substring);
}
Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(elements);
result->set_length(Smi::FromInt(matches));
@@ -3304,6 +3349,7 @@
OffsetsVector registers(required_registers);
Vector<int32_t> register_vector(registers.vector(), registers.length());
int subject_length = subject->length();
+ bool first = true;
for (;;) { // Break on failure, return on exception.
RegExpImpl::IrregexpResult result =
@@ -3321,9 +3367,15 @@
}
match_end = register_vector[1];
HandleScope loop_scope(isolate);
- builder->Add(*isolate->factory()->NewSubString(subject,
- match_start,
- match_end));
+ if (!first) {
+ builder->Add(*isolate->factory()->NewProperSubString(subject,
+ match_start,
+ match_end));
+ } else {
+ builder->Add(*isolate->factory()->NewSubString(subject,
+ match_start,
+ match_end));
+ }
if (match_start != match_end) {
pos = match_end;
} else {
@@ -3336,6 +3388,7 @@
ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION);
return result;
}
+ first = false;
}
if (match_start >= 0) {
@@ -3387,7 +3440,7 @@
// at the end, so we have two vectors that we swap between.
OffsetsVector registers2(required_registers);
Vector<int> prev_register_vector(registers2.vector(), registers2.length());
-
+ bool first = true;
do {
int match_start = register_vector[0];
builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch);
@@ -3405,18 +3458,30 @@
// subject, i.e., 3 + capture count in total.
Handle<FixedArray> elements =
isolate->factory()->NewFixedArray(3 + capture_count);
- Handle<String> match = isolate->factory()->NewSubString(subject,
- match_start,
- match_end);
+ Handle<String> match;
+ if (!first) {
+ match = isolate->factory()->NewProperSubString(subject,
+ match_start,
+ match_end);
+ } else {
+ match = isolate->factory()->NewSubString(subject,
+ match_start,
+ match_end);
+ }
elements->set(0, *match);
for (int i = 1; i <= capture_count; i++) {
int start = register_vector[i * 2];
if (start >= 0) {
int end = register_vector[i * 2 + 1];
ASSERT(start <= end);
- Handle<String> substring = isolate->factory()->NewSubString(subject,
- start,
- end);
+ Handle<String> substring;
+ if (!first) {
+ substring = isolate->factory()->NewProperSubString(subject,
+ start,
+ end);
+ } else {
+ substring = isolate->factory()->NewSubString(subject, start, end);
+ }
elements->set(i, *substring);
} else {
ASSERT(register_vector[i * 2 + 1] < 0);
@@ -3446,6 +3511,7 @@
subject,
pos,
register_vector);
+ first = false;
} while (result == RegExpImpl::RE_SUCCESS);
if (result != RegExpImpl::RE_EXCEPTION) {
@@ -3492,7 +3558,8 @@
if (result_array->HasFastElements()) {
result_elements =
Handle<FixedArray>(FixedArray::cast(result_array->elements()));
- } else {
+ }
+ if (result_elements.is_null() || result_elements->length() < 16) {
result_elements = isolate->factory()->NewFixedArrayWithHoles(16);
}
FixedArrayBuilder builder(result_elements);
@@ -3534,13 +3601,13 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
+ CONVERT_SMI_ARG_CHECKED(radix, 1);
+ RUNTIME_ASSERT(2 <= radix && radix <= 36);
// Fast case where the result is a one character string.
- if (args[0]->IsSmi() && args[1]->IsSmi()) {
- int value = Smi::cast(args[0])->value();
- int radix = Smi::cast(args[1])->value();
+ if (args[0]->IsSmi()) {
+ int value = args.smi_at(0);
if (value >= 0 && value < radix) {
- RUNTIME_ASSERT(radix <= 36);
// Character array used for conversion.
static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
return isolate->heap()->
@@ -3549,7 +3616,7 @@
}
// Slow case.
- CONVERT_DOUBLE_CHECKED(value, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
@@ -3559,9 +3626,6 @@
}
return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
- CONVERT_DOUBLE_CHECKED(radix_number, args[1]);
- int radix = FastD2I(radix_number);
- RUNTIME_ASSERT(2 <= radix && radix <= 36);
char* str = DoubleToRadixCString(value, radix);
MaybeObject* result =
isolate->heap()->AllocateStringFromAscii(CStrVector(str));
@@ -3574,7 +3638,7 @@
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(value, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
@@ -3584,7 +3648,7 @@
}
return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
- CONVERT_DOUBLE_CHECKED(f_number, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2I(f_number);
RUNTIME_ASSERT(f >= 0);
char* str = DoubleToFixedCString(value, f);
@@ -3599,7 +3663,7 @@
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(value, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
@@ -3609,7 +3673,7 @@
}
return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
- CONVERT_DOUBLE_CHECKED(f_number, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2I(f_number);
RUNTIME_ASSERT(f >= -1 && f <= 20);
char* str = DoubleToExponentialCString(value, f);
@@ -3624,7 +3688,7 @@
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(value, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
}
@@ -3634,7 +3698,7 @@
}
return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
}
- CONVERT_DOUBLE_CHECKED(f_number, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2I(f_number);
RUNTIME_ASSERT(f >= 1 && f <= 21);
char* str = DoubleToPrecisionCString(value, f);
@@ -3724,8 +3788,7 @@
if (name->AsArrayIndex(&index)) {
return GetElementOrCharAt(isolate, object, index);
} else {
- PropertyAttributes attr;
- return object->GetProperty(*name, &attr);
+ return object->GetProperty(*name);
}
}
@@ -3797,7 +3860,7 @@
// Fast case for string indexing using [] with a smi index.
HandleScope scope(isolate);
Handle<String> str = args.at<String>(0);
- int index = Smi::cast(args[1])->value();
+ int index = args.smi_at(1);
if (index >= 0 && index < str->length()) {
Handle<Object> result = GetCharAt(str, index);
return *result;
@@ -3840,7 +3903,7 @@
|| result.type() == CONSTANT_FUNCTION)) {
Object* ok;
{ MaybeObject* maybe_ok =
- obj->DeleteProperty(name, JSObject::NORMAL_DELETION);
+ obj->DeleteProperty(name, JSReceiver::NORMAL_DELETION);
if (!maybe_ok->ToObject(&ok)) return maybe_ok;
}
}
@@ -3896,17 +3959,25 @@
return isolate->Throw(*error);
}
- NormalizeElements(js_object);
- Handle<NumberDictionary> dictionary(js_object->element_dictionary());
+ Handle<NumberDictionary> dictionary = NormalizeElements(js_object);
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
PropertyDetails details = PropertyDetails(attr, NORMAL);
- NumberDictionarySet(dictionary, index, obj_value, details);
+ Handle<NumberDictionary> extended_dictionary =
+ NumberDictionarySet(dictionary, index, obj_value, details);
+ if (*extended_dictionary != *dictionary) {
+ if (js_object->GetElementsKind() ==
+ JSObject::NON_STRICT_ARGUMENTS_ELEMENTS) {
+ FixedArray::cast(js_object->elements())->set(1, *extended_dictionary);
+ } else {
+ js_object->set_elements(*extended_dictionary);
+ }
+ }
return *obj_value;
}
LookupResult result;
- js_object->LookupRealNamedProperty(*name, &result);
+ js_object->LocalLookupRealNamedProperty(*name, &result);
// To be compatible with Safari we do not change the value on API objects
// in defineProperty. Firefox disagrees here, and actually changes the value.
@@ -3946,6 +4017,28 @@
}
+// Special case for elements if any of the flags are true.
+// If elements are in fast case we always implicitly assume that:
+// DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
+static MaybeObject* NormalizeObjectSetElement(Isolate* isolate,
+ Handle<JSObject> js_object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attr) {
+ // Normalize the elements to enable attributes on the property.
+ Handle<NumberDictionary> dictionary = NormalizeElements(js_object);
+ // Make sure that we never go back to fast case.
+ dictionary->set_requires_slow_elements();
+ PropertyDetails details = PropertyDetails(attr, NORMAL);
+ Handle<NumberDictionary> extended_dictionary =
+ NumberDictionarySet(dictionary, index, value, details);
+ if (*extended_dictionary != *dictionary) {
+ js_object->set_elements(*extended_dictionary);
+ }
+ return *value;
+}
+
+
MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
@@ -3981,6 +4074,10 @@
return *value;
}
+ if (((attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0)) {
+ return NormalizeObjectSetElement(isolate, js_object, index, value, attr);
+ }
+
Handle<Object> result = SetElement(js_object, index, value, strict_mode);
if (result.is_null()) return Failure::Exception();
return *value;
@@ -3989,6 +4086,13 @@
if (key->IsString()) {
Handle<Object> result;
if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
+ if (((attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0)) {
+ return NormalizeObjectSetElement(isolate,
+ js_object,
+ index,
+ value,
+ attr);
+ }
result = SetElement(js_object, index, value, strict_mode);
} else {
Handle<String> key_string = Handle<String>::cast(key);
@@ -4006,7 +4110,7 @@
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- return js_object->SetElement(index, *value, strict_mode);
+ return js_object->SetElement(index, *value, strict_mode, true);
} else {
return js_object->SetProperty(*name, *value, attr, strict_mode);
}
@@ -4034,12 +4138,12 @@
return *value;
}
- return js_object->SetElement(index, *value, kNonStrictMode);
+ return js_object->SetElement(index, *value, kNonStrictMode, true);
}
if (key->IsString()) {
if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
- return js_object->SetElement(index, *value, kNonStrictMode);
+ return js_object->SetElement(index, *value, kNonStrictMode, true);
} else {
Handle<String> key_string = Handle<String>::cast(key);
key_string->TryFlatten();
@@ -4056,7 +4160,7 @@
Handle<String> name = Handle<String>::cast(converted);
if (name->AsArrayIndex(&index)) {
- return js_object->SetElement(index, *value, kNonStrictMode);
+ return js_object->SetElement(index, *value, kNonStrictMode, true);
} else {
return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr);
}
@@ -4064,24 +4168,25 @@
MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
- Handle<JSObject> js_object,
+ Handle<JSReceiver> receiver,
Handle<Object> key) {
HandleScope scope(isolate);
// Check if the given key is an array index.
uint32_t index;
- if (key->ToArrayIndex(&index)) {
+ if (receiver->IsJSObject() && key->ToArrayIndex(&index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the
// characters of a string using [] notation. In the case of a
// String object we just need to redirect the deletion to the
// underlying string if the index is in range. Since the
// underlying string does nothing with the deletion, we can ignore
// such deletions.
- if (js_object->IsStringObjectWithCharacterAt(index)) {
+ if (receiver->IsStringObjectWithCharacterAt(index)) {
return isolate->heap()->true_value();
}
- return js_object->DeleteElement(index, JSObject::FORCE_DELETION);
+ return JSObject::cast(*receiver)->DeleteElement(
+ index, JSReceiver::FORCE_DELETION);
}
Handle<String> key_string;
@@ -4096,7 +4201,7 @@
}
key_string->TryFlatten();
- return js_object->DeleteProperty(*key_string, JSObject::FORCE_DELETION);
+ return receiver->DeleteProperty(*key_string, JSReceiver::FORCE_DELETION);
}
@@ -4107,7 +4212,7 @@
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- CONVERT_SMI_CHECKED(unchecked_attributes, args[3]);
+ CONVERT_SMI_ARG_CHECKED(unchecked_attributes, 3);
RUNTIME_ASSERT(
(unchecked_attributes & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
// Compute attributes.
@@ -4116,7 +4221,7 @@
StrictModeFlag strict_mode = kNonStrictMode;
if (args.length() == 5) {
- CONVERT_SMI_CHECKED(strict_unchecked, args[4]);
+ CONVERT_SMI_ARG_CHECKED(strict_unchecked, 4);
RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
strict_unchecked == kNonStrictMode);
strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
@@ -4131,10 +4236,10 @@
}
-// Set the ES5 native flag on the function.
+// Set the native flag on the function.
// This is used to decide if we should transform null and undefined
// into the global object when doing call and apply.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetES5Flag) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
NoHandleAllocation ha;
RUNTIME_ASSERT(args.length() == 1);
@@ -4142,7 +4247,7 @@
if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(*object);
- func->shared()->set_es5_native(true);
+ func->shared()->set_native(true);
}
return isolate->heap()->undefined_value();
}
@@ -4175,12 +4280,12 @@
NoHandleAllocation ha;
ASSERT(args.length() == 3);
- CONVERT_CHECKED(JSObject, object, args[0]);
+ CONVERT_CHECKED(JSReceiver, object, args[0]);
CONVERT_CHECKED(String, key, args[1]);
- CONVERT_SMI_CHECKED(strict, args[2]);
+ CONVERT_SMI_ARG_CHECKED(strict, 2);
return object->DeleteProperty(key, (strict == kStrictMode)
- ? JSObject::STRICT_DELETION
- : JSObject::NORMAL_DELETION);
+ ? JSReceiver::STRICT_DELETION
+ : JSReceiver::NORMAL_DELETION);
}
@@ -4244,11 +4349,11 @@
NoHandleAllocation na;
ASSERT(args.length() == 2);
- // Only JS objects can have properties.
- if (args[0]->IsJSObject()) {
- JSObject* object = JSObject::cast(args[0]);
+ // Only JS receivers can have properties.
+ if (args[0]->IsJSReceiver()) {
+ JSReceiver* receiver = JSReceiver::cast(args[0]);
CONVERT_CHECKED(String, key, args[1]);
- if (object->HasProperty(key)) return isolate->heap()->true_value();
+ if (receiver->HasProperty(key)) return isolate->heap()->true_value();
}
return isolate->heap()->false_value();
}
@@ -4649,7 +4754,7 @@
}
ASSERT(heap_obj->IsUndefined());
return isolate->heap()->undefined_symbol();
- case JS_FUNCTION_TYPE: case JS_REGEXP_TYPE:
+ case JS_FUNCTION_TYPE:
return isolate->heap()->function_symbol();
default:
// For any kind of object not handled above, the spec rule for
@@ -5023,6 +5128,8 @@
// Doing JSON quoting cannot make the string more than this many times larger.
static const int kJsonQuoteWorstCaseBlowup = 6;
+static const int kSpaceForQuotesAndComma = 3;
+static const int kSpaceForBrackets = 2;
// Covers the entire ASCII range (all other characters are unchanged by JSON
// quoting).
@@ -5110,13 +5217,51 @@
}
+template <typename SinkChar, typename SourceChar>
+static inline SinkChar* WriteQuoteJsonString(
+ Isolate* isolate,
+ SinkChar* write_cursor,
+ Vector<const SourceChar> characters) {
+ // SinkChar is only char if SourceChar is guaranteed to be char.
+ ASSERT(sizeof(SinkChar) >= sizeof(SourceChar));
+ const SourceChar* read_cursor = characters.start();
+ const SourceChar* end = read_cursor + characters.length();
+ *(write_cursor++) = '"';
+ while (read_cursor < end) {
+ SourceChar c = *(read_cursor++);
+ if (sizeof(SourceChar) > 1u &&
+ static_cast<unsigned>(c) >= kQuoteTableLength) {
+ *(write_cursor++) = static_cast<SinkChar>(c);
+ } else {
+ int len = JsonQuoteLengths[static_cast<unsigned>(c)];
+ const char* replacement = JsonQuotes +
+ static_cast<unsigned>(c) * kJsonQuotesCharactersPerEntry;
+ write_cursor[0] = replacement[0];
+ if (len > 1) {
+ write_cursor[1] = replacement[1];
+ if (len > 2) {
+ ASSERT(len == 6);
+ write_cursor[2] = replacement[2];
+ write_cursor[3] = replacement[3];
+ write_cursor[4] = replacement[4];
+ write_cursor[5] = replacement[5];
+ }
+ }
+ write_cursor += len;
+ }
+ }
+ *(write_cursor++) = '"';
+ return write_cursor;
+}
+
+
template <typename Char, typename StringType, bool comma>
static MaybeObject* QuoteJsonString(Isolate* isolate,
Vector<const Char> characters) {
int length = characters.length();
isolate->counters()->quote_json_char_count()->Increment(length);
- const int kSpaceForQuotes = 2 + (comma ? 1 :0);
- int worst_case_length = length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotes;
+ int worst_case_length =
+ length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotesAndComma;
if (worst_case_length > kMaxGuaranteedNewSpaceString) {
return SlowQuoteJsonString<Char, StringType, comma>(isolate, characters);
}
@@ -5141,34 +5286,9 @@
Char* write_cursor = reinterpret_cast<Char*>(
new_string->address() + SeqAsciiString::kHeaderSize);
if (comma) *(write_cursor++) = ',';
- *(write_cursor++) = '"';
-
- const Char* read_cursor = characters.start();
- const Char* end = read_cursor + length;
- while (read_cursor < end) {
- Char c = *(read_cursor++);
- if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
- *(write_cursor++) = c;
- } else {
- int len = JsonQuoteLengths[static_cast<unsigned>(c)];
- const char* replacement = JsonQuotes +
- static_cast<unsigned>(c) * kJsonQuotesCharactersPerEntry;
- write_cursor[0] = replacement[0];
- if (len > 1) {
- write_cursor[1] = replacement[1];
- if (len > 2) {
- ASSERT(len == 6);
- write_cursor[2] = replacement[2];
- write_cursor[3] = replacement[3];
- write_cursor[4] = replacement[4];
- write_cursor[5] = replacement[5];
- }
- }
- write_cursor += len;
- }
- }
- *(write_cursor++) = '"';
-
+ write_cursor = WriteQuoteJsonString<Char, Char>(isolate,
+ write_cursor,
+ characters);
int final_length = static_cast<int>(
write_cursor - reinterpret_cast<Char*>(
new_string->address() + SeqAsciiString::kHeaderSize));
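The quoting fast path preallocates for the worst case and shrinks afterwards: every input character may expand to a six-character escape such as \u0012 (kJsonQuoteWorstCaseBlowup == 6), plus the two quotes and an optional comma (kSpaceForQuotesAndComma == 3). As a plain function:

    // Upper bound on the quoted length of one string, mirroring the
    // constants above.
    int WorstCaseQuotedLength(int input_length) {
      return input_length * 6 + 3;  // "..." plus a possible leading ','
    }

The single allocation is then trimmed with ShrinkStringAtAllocationBoundary once the real length is known.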
@@ -5222,11 +5342,106 @@
}
}
+
+template <typename Char, typename StringType>
+static MaybeObject* QuoteJsonStringArray(Isolate* isolate,
+ FixedArray* array,
+ int worst_case_length) {
+ int length = array->length();
+
+ MaybeObject* new_alloc = AllocateRawString<StringType>(isolate,
+ worst_case_length);
+ Object* new_object;
+ if (!new_alloc->ToObject(&new_object)) {
+ return new_alloc;
+ }
+ if (!isolate->heap()->new_space()->Contains(new_object)) {
+ // Even if our string is small enough to fit in new space we still have to
+ // handle it being allocated in old space as may happen in the third
+ // attempt. See CALL_AND_RETRY in heap-inl.h and similar code in
+ // CEntryStub::GenerateCore.
+ return isolate->heap()->undefined_value();
+ }
+ AssertNoAllocation no_gc;
+ StringType* new_string = StringType::cast(new_object);
+ ASSERT(isolate->heap()->new_space()->Contains(new_string));
+
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ Char* write_cursor = reinterpret_cast<Char*>(
+ new_string->address() + SeqAsciiString::kHeaderSize);
+ *(write_cursor++) = '[';
+ for (int i = 0; i < length; i++) {
+ if (i != 0) *(write_cursor++) = ',';
+ String* str = String::cast(array->get(i));
+ if (str->IsTwoByteRepresentation()) {
+ write_cursor = WriteQuoteJsonString<Char, uc16>(isolate,
+ write_cursor,
+ str->ToUC16Vector());
+ } else {
+ write_cursor = WriteQuoteJsonString<Char, char>(isolate,
+ write_cursor,
+ str->ToAsciiVector());
+ }
+ }
+ *(write_cursor++) = ']';
+
+ int final_length = static_cast<int>(
+ write_cursor - reinterpret_cast<Char*>(
+ new_string->address() + SeqAsciiString::kHeaderSize));
+ isolate->heap()->new_space()->
+ template ShrinkStringAtAllocationBoundary<StringType>(
+ new_string, final_length);
+ return new_string;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(JSArray, array, args[0]);
+
+ if (!array->HasFastElements()) return isolate->heap()->undefined_value();
+ FixedArray* elements = FixedArray::cast(array->elements());
+ int n = elements->length();
+ bool ascii = true;
+ int total_length = 0;
+
+ for (int i = 0; i < n; i++) {
+ Object* elt = elements->get(i);
+ if (!elt->IsString()) return isolate->heap()->undefined_value();
+ String* element = String::cast(elt);
+ if (!element->IsFlat()) return isolate->heap()->undefined_value();
+ total_length += element->length();
+ if (ascii && element->IsTwoByteRepresentation()) {
+ ascii = false;
+ }
+ }
+
+ int worst_case_length =
+ kSpaceForBrackets + n * kSpaceForQuotesAndComma
+ + total_length * kJsonQuoteWorstCaseBlowup;
+
+ if (worst_case_length > kMaxGuaranteedNewSpaceString) {
+ return isolate->heap()->undefined_value();
+ }
+
+ if (ascii) {
+ return QuoteJsonStringArray<char, SeqAsciiString>(isolate,
+ elements,
+ worst_case_length);
+ } else {
+ return QuoteJsonStringArray<uc16, SeqTwoByteString>(isolate,
+ elements,
+ worst_case_length);
+ }
+}
+
+
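// The sizing above reserves for the worst case before anything is written.
// Assuming kSpaceForBrackets == 2, kSpaceForQuotesAndComma == 3 (two quotes
// plus a separating comma per element) and kJsonQuoteWorstCaseBlowup == 6
// (every character needing a \uXXXX escape), the bound is:
static int WorstCaseQuotedArrayLength(int n, int total_length) {
  return 2 + n * 3 + total_length * 6;
}
// e.g. three one-character strings reserve 2 + 9 + 18 = 29 characters; the
// result is then shrunk back to its actual length at the allocation
// boundary.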
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
NoHandleAllocation ha;
CONVERT_CHECKED(String, s, args[0]);
- CONVERT_SMI_CHECKED(radix, args[1]);
+ CONVERT_SMI_ARG_CHECKED(radix, 1);
s->TryFlatten();
@@ -5573,6 +5788,27 @@
}
+void FindAsciiStringIndices(Vector<const char> subject,
+ char pattern,
+ ZoneList<int>* indices,
+ unsigned int limit) {
+ ASSERT(limit > 0);
+ // Collect indices of pattern in subject using memchr.
+ // Stop after finding at most limit values.
+ const char* subject_start = reinterpret_cast<const char*>(subject.start());
+ const char* subject_end = subject_start + subject.length();
+ const char* pos = subject_start;
+ while (limit > 0) {
+ pos = reinterpret_cast<const char*>(
+ memchr(pos, pattern, subject_end - pos));
+ if (pos == NULL) return;
+ indices->Add(static_cast<int>(pos - subject_start));
+ pos++;
+ limit--;
+ }
+}
+
+
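// A standalone sketch of the same memchr scan, with std::vector standing in
// for the zone-allocated ZoneList<int>; the offset arithmetic matches the
// function above.
#include <string.h>
#include <vector>

static void FindByteIndices(const char* subject, size_t length, char pattern,
                            unsigned int limit, std::vector<int>* indices) {
  const char* end = subject + length;
  const char* pos = subject;
  while (limit > 0) {
    pos = static_cast<const char*>(memchr(pos, pattern, end - pos));
    if (pos == NULL) return;  // No further occurrences.
    indices->push_back(static_cast<int>(pos - subject));
    pos++;  // Resume scanning one character past the match.
    limit--;
  }
}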
template <typename SubjectChar, typename PatternChar>
void FindStringIndices(Isolate* isolate,
Vector<const SubjectChar> subject,
@@ -5580,11 +5816,11 @@
ZoneList<int>* indices,
unsigned int limit) {
ASSERT(limit > 0);
- // Collect indices of pattern in subject, and the end-of-string index.
+ // Collect indices of pattern in subject.
// Stop after finding at most limit values.
- StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
int pattern_length = pattern.length();
int index = 0;
+ StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
while (limit > 0) {
index = search.Search(subject, index);
if (index < 0) return;
@@ -5627,11 +5863,19 @@
if (subject->IsAsciiRepresentation()) {
Vector<const char> subject_vector = subject->ToAsciiVector();
if (pattern->IsAsciiRepresentation()) {
- FindStringIndices(isolate,
- subject_vector,
- pattern->ToAsciiVector(),
- &indices,
- limit);
+ Vector<const char> pattern_vector = pattern->ToAsciiVector();
+ if (pattern_vector.length() == 1) {
+ FindAsciiStringIndices(subject_vector,
+ pattern_vector[0],
+ &indices,
+ limit);
+ } else {
+ FindStringIndices(isolate,
+ subject_vector,
+ pattern_vector,
+ &indices,
+ limit);
+ }
} else {
FindStringIndices(isolate,
subject_vector,
@@ -5682,7 +5926,7 @@
HandleScope local_loop_handle;
int part_end = indices.at(i);
Handle<String> substring =
- isolate->factory()->NewSubString(subject, part_start, part_end);
+ isolate->factory()->NewProperSubString(subject, part_start, part_end);
elements->set(i, *substring);
part_start = part_end + pattern_length;
}
@@ -5815,7 +6059,7 @@
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- CONVERT_DOUBLE_CHECKED(number, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(number, 0);
// We do not include 0 so that we don't have to treat +0 / -0 cases.
if (number > 0 && number <= Smi::kMaxValue) {
@@ -5829,7 +6073,7 @@
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- CONVERT_DOUBLE_CHECKED(number, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(number, 0);
// We do not include 0 so that we don't have to treat +0 / -0 cases.
if (number > 0 && number <= Smi::kMaxValue) {
@@ -5857,7 +6101,7 @@
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- CONVERT_DOUBLE_CHECKED(number, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(number, 0);
// We do not include 0 so that we don't have to treat +0 / -0 cases.
if (number > 0 && number <= Smi::kMaxValue) {
@@ -5899,8 +6143,8 @@
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
return isolate->heap()->NumberFromDouble(x + y);
}
@@ -5909,8 +6153,8 @@
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
return isolate->heap()->NumberFromDouble(x - y);
}
@@ -5919,8 +6163,8 @@
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
return isolate->heap()->NumberFromDouble(x * y);
}
@@ -5929,7 +6173,7 @@
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return isolate->heap()->NumberFromDouble(-x);
}
@@ -5946,8 +6190,8 @@
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
return isolate->heap()->NumberFromDouble(x / y);
}
@@ -5956,8 +6200,8 @@
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
x = modulo(x, y);
// NumberFromDouble may return a Smi instead of a Number object
@@ -6022,7 +6266,7 @@
isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
- int array_length = Smi::cast(args[1])->value();
+ int array_length = args.smi_at(1);
CONVERT_CHECKED(String, special, args[2]);
// This assumption is used by the slice encoding in one or two smis.
@@ -6135,7 +6379,7 @@
isolate->context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
- int array_length = Smi::cast(args[1])->value();
+ int array_length = args.smi_at(1);
CONVERT_CHECKED(String, separator, args[2]);
if (!array->HasFastElements()) {
@@ -6413,8 +6657,8 @@
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
if (isnan(x)) return Smi::FromInt(NOT_EQUAL);
if (isnan(y)) return Smi::FromInt(NOT_EQUAL);
if (x == y) return Smi::FromInt(EQUAL);
@@ -6450,8 +6694,8 @@
NoHandleAllocation ha;
ASSERT(args.length() == 3);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
if (isnan(x) || isnan(y)) return args[2];
if (x == y) return Smi::FromInt(EQUAL);
if (isless(x, y)) return Smi::FromInt(LESS);
@@ -6474,50 +6718,69 @@
// If the integers are equal so are the string representations.
if (x_value == y_value) return Smi::FromInt(EQUAL);
- // If one of the integers are zero the normal integer order is the
+ // If one of the integers is zero the normal integer order is the
// same as the lexicographic order of the string representations.
- if (x_value == 0 || y_value == 0) return Smi::FromInt(x_value - y_value);
+ if (x_value == 0 || y_value == 0)
+ return Smi::FromInt(x_value < y_value ? LESS : GREATER);
// If only one of the integers is negative the negative number is
// smallest because the char code of '-' is less than the char code
// of any digit. Otherwise, we make both values positive.
+
+ // Use unsigned values; otherwise the logic is incorrect for -MIN_INT on
+ // architectures using 32-bit Smis.
+ uint32_t x_scaled = x_value;
+ uint32_t y_scaled = y_value;
if (x_value < 0 || y_value < 0) {
if (y_value >= 0) return Smi::FromInt(LESS);
if (x_value >= 0) return Smi::FromInt(GREATER);
- x_value = -x_value;
- y_value = -y_value;
+ x_scaled = -x_value;
+ y_scaled = -y_value;
}
- // Arrays for the individual characters of the two Smis. Smis are
- // 31 bit integers and 10 decimal digits are therefore enough.
- // TODO(isolates): maybe we should simply allocate 20 bytes on the stack.
- int* x_elms = isolate->runtime_state()->smi_lexicographic_compare_x_elms();
- int* y_elms = isolate->runtime_state()->smi_lexicographic_compare_y_elms();
+ static const uint32_t kPowersOf10[] = {
+ 1, 10, 100, 1000, 10*1000, 100*1000,
+ 1000*1000, 10*1000*1000, 100*1000*1000,
+ 1000*1000*1000
+ };
+ // If the integers have the same number of decimal digits they can be
+ // compared directly as the numeric order is the same as the
+ // lexicographic order. If one integer has fewer digits, it is scaled
+ // by some power of 10 to have the same number of digits as the longer
+ // integer. If the scaled integers are equal it means the shorter
+ // integer comes first in the lexicographic order.
- // Convert the integers to arrays of their decimal digits.
- int x_index = 0;
- int y_index = 0;
- while (x_value > 0) {
- x_elms[x_index++] = x_value % 10;
- x_value /= 10;
- }
- while (y_value > 0) {
- y_elms[y_index++] = y_value % 10;
- y_value /= 10;
+ // From http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
+ int x_log2 = IntegerLog2(x_scaled);
+ int x_log10 = ((x_log2 + 1) * 1233) >> 12;
+ x_log10 -= x_scaled < kPowersOf10[x_log10];
+
+ int y_log2 = IntegerLog2(y_scaled);
+ int y_log10 = ((y_log2 + 1) * 1233) >> 12;
+ y_log10 -= y_scaled < kPowersOf10[y_log10];
+
+ int tie = EQUAL;
+
+ if (x_log10 < y_log10) {
+ // X has fewer digits. We would like to simply scale up X but that
+ // might overflow, e.g. when comparing 9 with 1_000_000_000, 9 would
+ // be scaled up to 9_000_000_000. So we scale up by the next
+ // smallest power and scale down Y to drop one digit. It is OK to
+ // drop one digit from the longer integer since the final digit is
+ // past the length of the shorter integer.
+ x_scaled *= kPowersOf10[y_log10 - x_log10 - 1];
+ y_scaled /= 10;
+ tie = LESS;
+ } else if (y_log10 < x_log10) {
+ y_scaled *= kPowersOf10[x_log10 - y_log10 - 1];
+ x_scaled /= 10;
+ tie = GREATER;
}
- // Loop through the arrays of decimal digits finding the first place
- // where they differ.
- while (--x_index >= 0 && --y_index >= 0) {
- int diff = x_elms[x_index] - y_elms[y_index];
- if (diff != 0) return Smi::FromInt(diff);
- }
-
- // If one array is a suffix of the other array, the longest array is
- // the representation of the largest of the Smis in the
- // lexicographic ordering.
- return Smi::FromInt(x_index - y_index);
+ if (x_scaled < y_scaled) return Smi::FromInt(LESS);
+ if (x_scaled > y_scaled) return Smi::FromInt(GREATER);
+ return Smi::FromInt(tie);
}
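// The digit-count step can be checked in isolation. IntegerLog2 is sketched
// portably here (V8's is a platform helper); the 1233/4096 factor
// approximates log10(2). For the 9 vs. 1_000_000_000 example from the
// comment: 9 scales up to 900_000_000 and 1_000_000_000 scales down to
// 100_000_000, so "9" compares GREATER, matching lexicographic order.
#include <stdint.h>

static int IntegerLog2Sketch(uint32_t v) {
  int log = 0;
  while (v >>= 1) log++;
  return log;
}

static int Log10Sketch(uint32_t v) {
  static const uint32_t kPow10[] = {
    1, 10, 100, 1000, 10000, 100000,
    1000000, 10000000, 100000000, 1000000000
  };
  int log10 = ((IntegerLog2Sketch(v) + 1) * 1233) >> 12;
  return log10 - (v < kPow10[log10]);
}
// Log10Sketch(9) == 0 and Log10Sketch(1000000000) == 9, so x is multiplied
// by kPowersOf10[9 - 0 - 1] and y is divided by 10 before the final
// unsigned comparison.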
@@ -6624,7 +6887,7 @@
ASSERT(args.length() == 1);
isolate->counters()->math_acos()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return isolate->transcendental_cache()->Get(TranscendentalCache::ACOS, x);
}
@@ -6634,7 +6897,7 @@
ASSERT(args.length() == 1);
isolate->counters()->math_asin()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return isolate->transcendental_cache()->Get(TranscendentalCache::ASIN, x);
}
@@ -6644,7 +6907,7 @@
ASSERT(args.length() == 1);
isolate->counters()->math_atan()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return isolate->transcendental_cache()->Get(TranscendentalCache::ATAN, x);
}
@@ -6657,8 +6920,8 @@
ASSERT(args.length() == 2);
isolate->counters()->math_atan2()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
double result;
if (isinf(x) && isinf(y)) {
// Make sure that the result in case of two infinite arguments
@@ -6680,7 +6943,7 @@
ASSERT(args.length() == 1);
isolate->counters()->math_ceil()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return isolate->heap()->NumberFromDouble(ceiling(x));
}
@@ -6690,7 +6953,7 @@
ASSERT(args.length() == 1);
isolate->counters()->math_cos()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return isolate->transcendental_cache()->Get(TranscendentalCache::COS, x);
}
@@ -6700,7 +6963,7 @@
ASSERT(args.length() == 1);
isolate->counters()->math_exp()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return isolate->transcendental_cache()->Get(TranscendentalCache::EXP, x);
}
@@ -6710,7 +6973,7 @@
ASSERT(args.length() == 1);
isolate->counters()->math_floor()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return isolate->heap()->NumberFromDouble(floor(x));
}
@@ -6720,7 +6983,7 @@
ASSERT(args.length() == 1);
isolate->counters()->math_log()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
}
@@ -6730,16 +6993,16 @@
ASSERT(args.length() == 2);
isolate->counters()->math_pow()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
// If the second argument is a smi, it is much faster to call the
// custom powi() function than the generic pow().
if (args[1]->IsSmi()) {
- int y = Smi::cast(args[1])->value();
+ int y = args.smi_at(1);
return isolate->heap()->NumberFromDouble(power_double_int(x, y));
}
- CONVERT_DOUBLE_CHECKED(y, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
return isolate->heap()->AllocateHeapNumber(power_double_double(x, y));
}
@@ -6748,8 +7011,8 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
- CONVERT_DOUBLE_CHECKED(y, args[1]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
+ CONVERT_DOUBLE_ARG_CHECKED(y, 1);
if (y == 0) {
return Smi::FromInt(1);
} else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
@@ -6808,7 +7071,7 @@
ASSERT(args.length() == 1);
isolate->counters()->math_sin()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return isolate->transcendental_cache()->Get(TranscendentalCache::SIN, x);
}
@@ -6818,7 +7081,7 @@
ASSERT(args.length() == 1);
isolate->counters()->math_sqrt()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return isolate->heap()->AllocateHeapNumber(sqrt(x));
}
@@ -6828,7 +7091,7 @@
ASSERT(args.length() == 1);
isolate->counters()->math_tan()->Increment();
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return isolate->transcendental_cache()->Get(TranscendentalCache::TAN, x);
}
@@ -6882,9 +7145,9 @@
NoHandleAllocation ha;
ASSERT(args.length() == 3);
- CONVERT_SMI_CHECKED(year, args[0]);
- CONVERT_SMI_CHECKED(month, args[1]);
- CONVERT_SMI_CHECKED(date, args[2]);
+ CONVERT_SMI_ARG_CHECKED(year, 0);
+ CONVERT_SMI_ARG_CHECKED(month, 1);
+ CONVERT_SMI_ARG_CHECKED(date, 2);
return Smi::FromInt(MakeDay(year, month, date));
}
@@ -7181,7 +7444,7 @@
NoHandleAllocation ha;
ASSERT(args.length() == 2);
- CONVERT_DOUBLE_CHECKED(t, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(t, 0);
CONVERT_CHECKED(JSArray, res_array, args[1]);
int year, month, day;
@@ -7201,12 +7464,109 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 3);
+
+ Handle<JSFunction> callee = args.at<JSFunction>(0);
+ Object** parameters = reinterpret_cast<Object**>(args[1]);
+ const int argument_count = Smi::cast(args[2])->value();
+
+ Handle<JSObject> result =
+ isolate->factory()->NewArgumentsObject(callee, argument_count);
+ // Allocate the elements if needed.
+ int parameter_count = callee->shared()->formal_parameter_count();
+ if (argument_count > 0) {
+ if (parameter_count > 0) {
+ int mapped_count = Min(argument_count, parameter_count);
+ Handle<FixedArray> parameter_map =
+ isolate->factory()->NewFixedArray(mapped_count + 2, NOT_TENURED);
+ parameter_map->set_map(
+ isolate->heap()->non_strict_arguments_elements_map());
+
+ Handle<Map> old_map(result->map());
+ Handle<Map> new_map =
+ isolate->factory()->CopyMapDropTransitions(old_map);
+ new_map->set_elements_kind(JSObject::NON_STRICT_ARGUMENTS_ELEMENTS);
+
+ result->set_map(*new_map);
+ result->set_elements(*parameter_map);
+
+ // Store the context and the arguments array at the beginning of the
+ // parameter map.
+ Handle<Context> context(isolate->context());
+ Handle<FixedArray> arguments =
+ isolate->factory()->NewFixedArray(argument_count, NOT_TENURED);
+ parameter_map->set(0, *context);
+ parameter_map->set(1, *arguments);
+
+ // Loop over the actual parameters backwards.
+ int index = argument_count - 1;
+ while (index >= mapped_count) {
+ // These go directly in the arguments array and have no
+ // corresponding slot in the parameter map.
+ arguments->set(index, *(parameters - index - 1));
+ --index;
+ }
+
+ ScopeInfo<> scope_info(callee->shared()->scope_info());
+ while (index >= 0) {
+ // Detect duplicate names to the right in the parameter list.
+ Handle<String> name = scope_info.parameter_name(index);
+ int context_slot_count = scope_info.number_of_context_slots();
+ bool duplicate = false;
+ for (int j = index + 1; j < parameter_count; ++j) {
+ if (scope_info.parameter_name(j).is_identical_to(name)) {
+ duplicate = true;
+ break;
+ }
+ }
+
+ if (duplicate) {
+ // This goes directly in the arguments array with a hole in the
+ // parameter map.
+ arguments->set(index, *(parameters - index - 1));
+ parameter_map->set_the_hole(index + 2);
+ } else {
+ // The context index goes in the parameter map with a hole in the
+ // arguments array.
+ int context_index = -1;
+ for (int j = Context::MIN_CONTEXT_SLOTS;
+ j < context_slot_count;
+ ++j) {
+ if (scope_info.context_slot_name(j).is_identical_to(name)) {
+ context_index = j;
+ break;
+ }
+ }
+ ASSERT(context_index >= 0);
+ arguments->set_the_hole(index);
+ parameter_map->set(index + 2, Smi::FromInt(context_index));
+ }
+
+ --index;
+ }
+ } else {
+ // If there is no aliasing, the arguments object elements are not
+ // special in any way.
+ Handle<FixedArray> elements =
+ isolate->factory()->NewFixedArray(argument_count, NOT_TENURED);
+ result->set_elements(*elements);
+ for (int i = 0; i < argument_count; ++i) {
+ elements->set(i, *(parameters - i - 1));
+ }
+ }
+ }
+ return *result;
+}
+
+
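// A toy model of the lookup this layout enables, with the two header slots
// (context, backing arguments array) dropped and plain vectors in place of
// FixedArray; all names here are hypothetical. Parameter i is "mapped" when
// the map holds a context slot index for it; otherwise the value lives in
// the backing array.
#include <vector>

struct ToyMappedArguments {
  static const int kHole = -1;
  std::vector<int> context_slots;  // Stands in for the function context.
  std::vector<int> backing;        // Unmapped (duplicate-name) values.
  std::vector<int> map;            // map[i]: context slot index or kHole.

  int Get(int i) const {
    return map[i] == kHole ? backing[i] : context_slots[map[i]];
  }
};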
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
JSFunction* callee = JSFunction::cast(args[0]);
Object** parameters = reinterpret_cast<Object**>(args[1]);
- const int length = Smi::cast(args[2])->value();
+ const int length = args.smi_at(2);
Object* result;
{ MaybeObject* maybe_result =
@@ -7243,10 +7603,8 @@
CONVERT_ARG_CHECKED(SharedFunctionInfo, shared, 1);
CONVERT_BOOLEAN_CHECKED(pretenure, args[2]);
- // Allocate global closures in old space and allocate local closures
- // in new space. Additionally pretenure closures that are assigned
+ // The caller ensures that we pretenure closures that are assigned
// directly to properties.
- pretenure = pretenure || (context->global_context() == *context);
PretenureFlag pretenure_flag = pretenure ? TENURED : NOT_TENURED;
Handle<JSFunction> result =
isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
@@ -7366,7 +7724,7 @@
// If function should not have prototype, construction is not allowed. In this
// case generated code bailouts here, since function has no initial_map.
- if (!function->should_have_prototype()) {
+ if (!function->should_have_prototype() && !function->shared()->bound()) {
Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
Handle<Object> type_error =
isolate->factory()->NewTypeError("not_constructor", arguments);
@@ -7507,7 +7865,7 @@
ASSERT(args.length() == 1);
RUNTIME_ASSERT(args[0]->IsSmi());
Deoptimizer::BailoutType type =
- static_cast<Deoptimizer::BailoutType>(Smi::cast(args[0])->value());
+ static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
ASSERT(isolate->heap()->IsAllocationAllowed());
int frames = deoptimizer->output_count();
@@ -7538,7 +7896,6 @@
}
}
- isolate->compilation_cache()->MarkForLazyOptimizing(function);
if (type == Deoptimizer::EAGER) {
RUNTIME_ASSERT(function->IsOptimized());
} else {
@@ -7594,6 +7951,15 @@
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
+#if defined(USE_SIMULATOR)
+ return isolate->heap()->true_value();
+#else
+ return isolate->heap()->false_value();
+#endif
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -7633,7 +7999,7 @@
CONVERT_ARG_CHECKED(JSFunction, function, 0);
// We're not prepared to handle a function with arguments object.
- ASSERT(!function->shared()->scope_info()->HasArgumentsShadow());
+ ASSERT(!function->shared()->uses_arguments());
// We have hit a back edge in an unoptimized frame for a function that was
// selected for on-stack replacement. Find the unoptimized code object.
@@ -7757,7 +8123,7 @@
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewContext) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -7775,50 +8141,73 @@
}
-MUST_USE_RESULT static MaybeObject* PushContextHelper(Isolate* isolate,
- Object* object,
- bool is_catch_context) {
- // Convert the object to a proper JavaScript object.
- Object* js_object = object;
- if (!js_object->IsJSObject()) {
- MaybeObject* maybe_js_object = js_object->ToObject();
- if (!maybe_js_object->ToObject(&js_object)) {
- if (!Failure::cast(maybe_js_object)->IsInternalError()) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+ JSObject* extension_object;
+ if (args[0]->IsJSObject()) {
+ extension_object = JSObject::cast(args[0]);
+ } else {
+ // Convert the object to a proper JavaScript object.
+ MaybeObject* maybe_js_object = args[0]->ToObject();
+ if (!maybe_js_object->To(&extension_object)) {
+ if (Failure::cast(maybe_js_object)->IsInternalError()) {
+ HandleScope scope(isolate);
+ Handle<Object> handle = args.at<Object>(0);
+ Handle<Object> result =
+ isolate->factory()->NewTypeError("with_expression",
+ HandleVector(&handle, 1));
+ return isolate->Throw(*result);
+ } else {
return maybe_js_object;
}
- HandleScope scope(isolate);
- Handle<Object> handle(object, isolate);
- Handle<Object> result =
- isolate->factory()->NewTypeError("with_expression",
- HandleVector(&handle, 1));
- return isolate->Throw(*result);
}
}
- Object* result;
- { MaybeObject* maybe_result = isolate->heap()->AllocateWithContext(
- isolate->context(), JSObject::cast(js_object), is_catch_context);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ JSFunction* function;
+ if (args[1]->IsSmi()) {
+ // A smi sentinel indicates a context nested inside global code rather
+ // than some function. There is a canonical empty function that can be
+ // gotten from the global context.
+ function = isolate->context()->global_context()->closure();
+ } else {
+ function = JSFunction::cast(args[1]);
}
- Context* context = Context::cast(result);
+ Context* context;
+ MaybeObject* maybe_context =
+ isolate->heap()->AllocateWithContext(function,
+ isolate->context(),
+ extension_object);
+ if (!maybe_context->To(&context)) return maybe_context;
isolate->set_context(context);
-
- return result;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PushContext) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- return PushContextHelper(isolate, args[0], false);
+ return context;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- return PushContextHelper(isolate, args[0], true);
+ ASSERT(args.length() == 3);
+ String* name = String::cast(args[0]);
+ Object* thrown_object = args[1];
+ JSFunction* function;
+ if (args[2]->IsSmi()) {
+ // A smi sentinel indicates a context nested inside global code rather
+ // than some function. There is a canonical empty function that can be
+ // gotten from the global context.
+ function = isolate->context()->global_context()->closure();
+ } else {
+ function = JSFunction::cast(args[2]);
+ }
+ Context* context;
+ MaybeObject* maybe_context =
+ isolate->heap()->AllocateCatchContext(function,
+ isolate->context(),
+ name,
+ thrown_object);
+ if (!maybe_context->To(&context)) return maybe_context;
+ isolate->set_context(context);
+ return context;
}
@@ -7853,9 +8242,9 @@
// index is non-negative.
Handle<JSObject> object = Handle<JSObject>::cast(holder);
if (index >= 0) {
- return object->DeleteElement(index, JSObject::NORMAL_DELETION);
+ return object->DeleteElement(index, JSReceiver::NORMAL_DELETION);
} else {
- return object->DeleteProperty(*name, JSObject::NORMAL_DELETION);
+ return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
}
}
@@ -8008,7 +8397,7 @@
Handle<Object> value(args[0], isolate);
CONVERT_ARG_CHECKED(Context, context, 1);
CONVERT_ARG_CHECKED(String, name, 2);
- CONVERT_SMI_CHECKED(strict_unchecked, args[3]);
+ CONVERT_SMI_ARG_CHECKED(strict_unchecked, 3);
RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
strict_unchecked == kNonStrictMode);
StrictModeFlag strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
@@ -8322,7 +8711,7 @@
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
const char* zone = OS::LocalTimezone(x);
return isolate->heap()->AllocateStringFromUtf8(CStrVector(zone));
}
@@ -8340,7 +8729,7 @@
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- CONVERT_DOUBLE_CHECKED(x, args[0]);
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return isolate->heap()->NumberFromDouble(OS::DaylightSavingsOffset(x));
}
@@ -8358,7 +8747,14 @@
ASSERT_EQ(1, args.length());
CONVERT_ARG_CHECKED(String, source, 0);
- Handle<Object> result = JsonParser::Parse(source);
+ source = Handle<String>(source->TryFlattenGetString());
+ // Optimized fast case where we only have ascii characters.
+ Handle<Object> result;
+ if (source->IsSeqAsciiString()) {
+ result = JsonParser<true>::Parse(source);
+ } else {
+ result = JsonParser<false>::Parse(source);
+ }
if (result.is_null()) {
// Syntax error or stack overflow in scanner.
ASSERT(isolate->has_pending_exception());
@@ -8474,12 +8870,7 @@
// Stop search when eval is found or when the global context is
// reached.
if (attributes != ABSENT || context->IsGlobalContext()) break;
- if (context->is_function_context()) {
- context = Handle<Context>(Context::cast(context->closure()->context()),
- isolate);
- } else {
- context = Handle<Context>(context->previous(), isolate);
- }
+ context = Handle<Context>(context->previous(), isolate);
}
// If eval could not be resolved, it has been deleted and we need to
@@ -8512,8 +8903,7 @@
return CompileGlobalEval(isolate,
args.at<String>(1),
args.at<Object>(2),
- static_cast<StrictModeFlag>(
- Smi::cast(args[3])->value()));
+ static_cast<StrictModeFlag>(args.smi_at(3)));
}
@@ -8534,8 +8924,7 @@
return CompileGlobalEval(isolate,
args.at<String>(1),
args.at<Object>(2),
- static_cast<StrictModeFlag>(
- Smi::cast(args[3])->value()));
+ static_cast<StrictModeFlag>(args.smi_at(3)));
}
@@ -8596,8 +8985,8 @@
}
Object* obj;
// Strict not needed. Used for cycle detection in Array join implementation.
- { MaybeObject* maybe_obj = array->SetFastElement(length, element,
- kNonStrictMode);
+ { MaybeObject* maybe_obj =
+ array->SetFastElement(length, element, kNonStrictMode, true);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
return isolate->heap()->true_value();
@@ -9497,9 +9886,7 @@
details->set(0, *value);
details->set(1, property_details);
if (hasJavaScriptAccessors) {
- details->set(2,
- caught_exception ? isolate->heap()->true_value()
- : isolate->heap()->false_value());
+ details->set(2, isolate->heap()->ToBoolean(caught_exception));
details->set(3, FixedArray::cast(*result_callback_obj)->get(0));
details->set(4, FixedArray::cast(*result_callback_obj)->get(1));
}
@@ -9623,11 +10010,79 @@
// If there is no JavaScript stack frame, count is 0.
return Smi::FromInt(0);
}
- for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) n++;
+
+ for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) {
+ n += it.frame()->GetInlineCount();
+ }
return Smi::FromInt(n);
}
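// One physical optimized frame can cover several JavaScript frames once
// inlining kicks in, which is why the count sums GetInlineCount() instead
// of adding one per physical frame. The same arithmetic in isolation:
static int CountLogicalFrames(const int* inline_counts, int physical_frames) {
  int n = 0;
  for (int i = 0; i < physical_frames; i++) n += inline_counts[i];
  return n;
}
// e.g. inline counts {3, 1} report four logical frames: two functions
// inlined into a third in the optimized frame, plus one ordinary frame.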
+class FrameInspector {
+ public:
+ FrameInspector(JavaScriptFrame* frame,
+ int inlined_frame_index,
+ Isolate* isolate)
+ : frame_(frame), deoptimized_frame_(NULL), isolate_(isolate) {
+ // Calculate the deoptimized frame.
+ if (frame->is_optimized()) {
+ deoptimized_frame_ = Deoptimizer::DebuggerInspectableFrame(
+ frame, inlined_frame_index, isolate);
+ }
+ has_adapted_arguments_ = frame_->has_adapted_arguments();
+ is_optimized_ = frame_->is_optimized();
+ }
+
+ ~FrameInspector() {
+ // Get rid of the calculated deoptimized frame if any.
+ if (deoptimized_frame_ != NULL) {
+ Deoptimizer::DeleteDebuggerInspectableFrame(deoptimized_frame_,
+ isolate_);
+ }
+ }
+
+ int GetParametersCount() {
+ return is_optimized_
+ ? deoptimized_frame_->parameters_count()
+ : frame_->ComputeParametersCount();
+ }
+ int expression_count() { return deoptimized_frame_->expression_count(); }
+ Object* GetFunction() {
+ return is_optimized_
+ ? deoptimized_frame_->GetFunction()
+ : frame_->function();
+ }
+ Object* GetParameter(int index) {
+ return is_optimized_
+ ? deoptimized_frame_->GetParameter(index)
+ : frame_->GetParameter(index);
+ }
+ Object* GetExpression(int index) {
+ return is_optimized_
+ ? deoptimized_frame_->GetExpression(index)
+ : frame_->GetExpression(index);
+ }
+
+ // To inspect all the provided arguments, the frame might need to be
+ // replaced with the arguments frame.
+ void SetArgumentsFrame(JavaScriptFrame* frame) {
+ ASSERT(has_adapted_arguments_);
+ frame_ = frame;
+ is_optimized_ = frame_->is_optimized();
+ ASSERT(!is_optimized_);
+ }
+
+ private:
+ JavaScriptFrame* frame_;
+ DeoptimizedFrameInfo* deoptimized_frame_;
+ Isolate* isolate_;
+ bool is_optimized_;
+ bool has_adapted_arguments_;
+
+ DISALLOW_COPY_AND_ASSIGN(FrameInspector);
+};
+
+
static const int kFrameDetailsFrameIdIndex = 0;
static const int kFrameDetailsReceiverIndex = 1;
static const int kFrameDetailsFunctionIndex = 2;
@@ -9636,7 +10091,7 @@
static const int kFrameDetailsSourcePositionIndex = 5;
static const int kFrameDetailsConstructCallIndex = 6;
static const int kFrameDetailsAtReturnIndex = 7;
-static const int kFrameDetailsDebuggerFrameIndex = 8;
+static const int kFrameDetailsFlagsIndex = 8;
static const int kFrameDetailsFirstDynamicIndex = 9;
// Return an array with frame details
@@ -9652,7 +10107,7 @@
// 5: Source position
// 6: Constructor call
// 7: Is at return
-// 8: Debugger frame
+// 8: Flags
// Arguments name, value
// Locals name, value
// Return value if any
@@ -9675,16 +10130,22 @@
// If there are no JavaScript stack frames, return undefined.
return heap->undefined_value();
}
+
+ int inlined_frame_index = 0; // Inlined frame index in optimized frame.
+
int count = 0;
JavaScriptFrameIterator it(isolate, id);
for (; !it.done(); it.Advance()) {
- if (count == index) break;
- count++;
+ if (index < count + it.frame()->GetInlineCount()) break;
+ count += it.frame()->GetInlineCount();
}
if (it.done()) return heap->undefined_value();
- bool is_optimized_frame =
- it.frame()->LookupCode()->kind() == Code::OPTIMIZED_FUNCTION;
+ if (it.frame()->is_optimized()) {
+ inlined_frame_index =
+ it.frame()->GetInlineCount() - (index - count) - 1;
+ }
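+ // Worked example of the mapping above: a physical frame whose
+ // GetInlineCount() is 3 covers logical indices count .. count + 2;
+ // index == count yields inlined_frame_index 2 and index == count + 2
+ // yields 0, i.e. the numbering is reversed within the physical frame.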
+ FrameInspector frame_inspector(it.frame(), inlined_frame_index, isolate);
// Traverse the saved contexts chain to find the active context for the
// selected frame.
@@ -9701,17 +10162,18 @@
int position =
it.frame()->LookupCode()->SourcePosition(it.frame()->pc());
- // Check for constructor frame.
- bool constructor = it.frame()->IsConstructor();
+ // Check for constructor frame. Inlined frames cannot be construct calls.
+ bool inlined_frame =
+ it.frame()->is_optimized() && inlined_frame_index != 0;
+ bool constructor = !inlined_frame && it.frame()->IsConstructor();
// Get scope info and read from it for local variable information.
Handle<JSFunction> function(JSFunction::cast(it.frame()->function()));
- Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<SerializedScopeInfo> scope_info(shared->scope_info());
+ ASSERT(*scope_info != SerializedScopeInfo::Empty());
ScopeInfo<> info(*scope_info);
- // Get the context.
- Handle<Context> context(Context::cast(it.frame()->context()));
-
// Get the locals names and values into a temporary array.
//
// TODO(1240907): Hide compiler-introduced stack variables
@@ -9720,31 +10182,20 @@
Handle<FixedArray> locals =
isolate->factory()->NewFixedArray(info.NumberOfLocals() * 2);
- // Fill in the names of the locals.
- for (int i = 0; i < info.NumberOfLocals(); i++) {
- locals->set(i * 2, *info.LocalName(i));
- }
-
// Fill in the values of the locals.
- for (int i = 0; i < info.NumberOfLocals(); i++) {
- if (is_optimized_frame) {
- // If we are inspecting an optimized frame use undefined as the
- // value for all locals.
- //
- // TODO(1140): We should be able to get the correct values
- // for locals in optimized frames.
- locals->set(i * 2 + 1, isolate->heap()->undefined_value());
- } else if (i < info.number_of_stack_slots()) {
- // Get the value from the stack.
- locals->set(i * 2 + 1, it.frame()->GetExpression(i));
- } else {
- // Traverse the context chain to the function context as all local
- // variables stored in the context will be on the function context.
+ int i = 0;
+ for (; i < info.number_of_stack_slots(); ++i) {
+ // Use the value from the stack.
+ locals->set(i * 2, *info.LocalName(i));
+ locals->set(i * 2 + 1, frame_inspector.GetExpression(i));
+ }
+ if (i < info.NumberOfLocals()) {
+ // Get the context containing declarations.
+ Handle<Context> context(
+ Context::cast(it.frame()->context())->declaration_context());
+ for (; i < info.NumberOfLocals(); ++i) {
Handle<String> name = info.LocalName(i);
- while (!context->is_function_context()) {
- context = Handle<Context>(context->previous());
- }
- ASSERT(context->is_function_context());
+ locals->set(i * 2, *name);
locals->set(i * 2 + 1,
context->get(scope_info->ContextSlotIndex(*name, NULL)));
}
@@ -9753,7 +10204,7 @@
// Check whether this frame is positioned at return. If not top
// frame or if the frame is optimized it cannot be at a return.
bool at_return = false;
- if (!is_optimized_frame && index == 0) {
+ if (!it.frame()->is_optimized() && index == 0) {
at_return = isolate->debug()->IsBreakAtReturn(it.frame());
}
@@ -9793,14 +10244,22 @@
// the provided parameters, whereas the function frame always has the number
// of arguments matching the function's parameters. The rest of the
// information (except for what is collected above) is the same.
- it.AdvanceToArgumentsFrame();
+ if (it.frame()->has_adapted_arguments()) {
+ it.AdvanceToArgumentsFrame();
+ frame_inspector.SetArgumentsFrame(it.frame());
+ }
// Find the number of arguments to fill. At least fill the number of
// parameters for the function and fill more if more parameters are provided.
int argument_count = info.number_of_parameters();
- if (argument_count < it.frame()->ComputeParametersCount()) {
- argument_count = it.frame()->ComputeParametersCount();
+ if (argument_count < frame_inspector.GetParametersCount()) {
+ argument_count = frame_inspector.GetParametersCount();
}
+#ifdef DEBUG
+ if (it.frame()->is_optimized()) {
+ ASSERT_EQ(argument_count, frame_inspector.GetParametersCount());
+ }
+#endif
// Calculate the size of the result.
int details_size = kFrameDetailsFirstDynamicIndex +
@@ -9812,7 +10271,7 @@
details->set(kFrameDetailsFrameIdIndex, *frame_id);
// Add the function (same as in function frame).
- details->set(kFrameDetailsFunctionIndex, it.frame()->function());
+ details->set(kFrameDetailsFunctionIndex, frame_inspector.GetFunction());
// Add the arguments count.
details->set(kFrameDetailsArgumentCountIndex, Smi::FromInt(argument_count));
@@ -9834,10 +10293,19 @@
// Add the at return information.
details->set(kFrameDetailsAtReturnIndex, heap->ToBoolean(at_return));
- // Add information on whether this frame is invoked in the debugger context.
- details->set(kFrameDetailsDebuggerFrameIndex,
- heap->ToBoolean(*save->context() ==
- *isolate->debug()->debug_context()));
+ // Add a flags word describing this frame:
+ // bit 0: invoked in the debugger context.
+ // bit 1: optimized frame.
+ // bits 2+: index of the inlined frame in an optimized frame.
+ int flags = 0;
+ if (*save->context() == *isolate->debug()->debug_context()) {
+ flags |= 1 << 0;
+ }
+ if (it.frame()->is_optimized()) {
+ flags |= 1 << 1;
+ flags |= inlined_frame_index << 2;
+ }
+ details->set(kFrameDetailsFlagsIndex, Smi::FromInt(flags));
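+ // Consumers can recover the pieces with plain masks, e.g. (a sketch
+ // mirroring the bit layout above, not a V8 API):
+ //   debugger_context = (flags & 1) != 0;
+ //   optimized        = (flags & 2) != 0;
+ //   inlined_index    = flags >> 2;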
// Fill the dynamic part.
int details_index = kFrameDetailsFirstDynamicIndex;
@@ -9851,14 +10319,10 @@
details->set(details_index++, heap->undefined_value());
}
- // Parameter value. If we are inspecting an optimized frame, use
- // undefined as the value.
- //
- // TODO(3141533): We should be able to get the actual parameter
- // value for optimized frames.
- if (!is_optimized_frame &&
- (i < it.frame()->ComputeParametersCount())) {
- details->set(details_index++, it.frame()->GetParameter(i));
+ // Parameter value.
+ if (i < it.frame()->ComputeParametersCount()) {
+ // Get the value from the stack.
+ details->set(details_index++, frame_inspector.GetParameter(i));
} else {
details->set(details_index++, heap->undefined_value());
}
@@ -9878,10 +10342,11 @@
// THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
// THE FRAME ITERATOR TO WRAP THE RECEIVER.
Handle<Object> receiver(it.frame()->receiver(), isolate);
- if (!receiver->IsJSObject()) {
- // If the receiver is NOT a JSObject we have hit an optimization
- // where a value object is not converted into a wrapped JS objects.
- // To hide this optimization from the debugger, we wrap the receiver
+ if (!receiver->IsJSObject() && !shared->strict_mode() && !shared->native()) {
+ // If the receiver is not a JSObject and the function is neither a
+ // builtin nor strict mode, we have hit an optimization where a
+ // value object is not converted into a wrapped JS object. To
+ // hide this optimization from the debugger, we wrap the receiver
// by creating correct wrapper object based on the calling frame's
// global context.
it.Advance();
@@ -9911,18 +10376,14 @@
int context_index = serialized_scope_info->ContextSlotIndex(
*scope_info.context_slot_name(i), NULL);
- // Don't include the arguments shadow (.arguments) context variable.
- if (*scope_info.context_slot_name(i) !=
- isolate->heap()->arguments_shadow_symbol()) {
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(scope_object,
- scope_info.context_slot_name(i),
- Handle<Object>(context->get(context_index), isolate),
- NONE,
- kNonStrictMode),
- false);
- }
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
+ SetProperty(scope_object,
+ scope_info.context_slot_name(i),
+ Handle<Object>(context->get(context_index), isolate),
+ NONE,
+ kNonStrictMode),
+ false);
}
return true;
@@ -9931,12 +10392,15 @@
// Create a plain JSObject which materializes the local scope for the specified
// frame.
-static Handle<JSObject> MaterializeLocalScope(Isolate* isolate,
- JavaScriptFrame* frame) {
+static Handle<JSObject> MaterializeLocalScope(
+ Isolate* isolate,
+ JavaScriptFrame* frame,
+ int inlined_frame_index) {
Handle<JSFunction> function(JSFunction::cast(frame->function()));
Handle<SharedFunctionInfo> shared(function->shared());
Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
ScopeInfo<> scope_info(*serialized_scope_info);
+ FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
// Allocate and initialize a JSObject with all the arguments, stack locals,
// heap locals and extension properties of the debugged function.
@@ -9949,55 +10413,58 @@
isolate,
SetProperty(local_scope,
scope_info.parameter_name(i),
- Handle<Object>(frame->GetParameter(i), isolate),
+ Handle<Object>(frame_inspector.GetParameter(i)),
NONE,
kNonStrictMode),
Handle<JSObject>());
}
// Second fill all stack locals.
- for (int i = 0; i < scope_info.number_of_stack_slots(); i++) {
+ for (int i = 0; i < scope_info.number_of_stack_slots(); ++i) {
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
SetProperty(local_scope,
scope_info.stack_slot_name(i),
- Handle<Object>(frame->GetExpression(i), isolate),
+ Handle<Object>(frame_inspector.GetExpression(i)),
NONE,
kNonStrictMode),
Handle<JSObject>());
}
- // Third fill all context locals.
- Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context(frame_context->fcontext());
- if (!CopyContextLocalsToScopeObject(isolate,
- serialized_scope_info, scope_info,
- function_context, local_scope)) {
- return Handle<JSObject>();
- }
+ if (scope_info.number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
+ // Third fill all context locals.
+ Handle<Context> frame_context(Context::cast(frame->context()));
+ Handle<Context> function_context(frame_context->declaration_context());
+ if (!CopyContextLocalsToScopeObject(isolate,
+ serialized_scope_info, scope_info,
+ function_context, local_scope)) {
+ return Handle<JSObject>();
+ }
- // Finally copy any properties from the function context extension. This will
- // be variables introduced by eval.
- if (function_context->closure() == *function) {
- if (function_context->has_extension() &&
- !function_context->IsGlobalContext()) {
- Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
- for (int i = 0; i < keys->length(); i++) {
- // Names of variables introduced by eval are strings.
- ASSERT(keys->get(i)->IsString());
- Handle<String> key(String::cast(keys->get(i)));
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(local_scope,
- key,
- GetProperty(ext, key),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
+ // Finally copy any properties from the function context extension.
+ // These will be variables introduced by eval.
+ if (function_context->closure() == *function) {
+ if (function_context->has_extension() &&
+ !function_context->IsGlobalContext()) {
+ Handle<JSObject> ext(JSObject::cast(function_context->extension()));
+ Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
+ for (int i = 0; i < keys->length(); i++) {
+ // Names of variables introduced by eval are strings.
+ ASSERT(keys->get(i)->IsString());
+ Handle<String> key(String::cast(keys->get(i)));
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
+ SetProperty(local_scope,
+ key,
+ GetProperty(ext, key),
+ NONE,
+ kNonStrictMode),
+ Handle<JSObject>());
+ }
}
}
}
+
return local_scope;
}
@@ -10006,7 +10473,7 @@
// context.
static Handle<JSObject> MaterializeClosure(Isolate* isolate,
Handle<Context> context) {
- ASSERT(context->is_function_context());
+ ASSERT(context->IsFunctionContext());
Handle<SharedFunctionInfo> shared(context->closure()->shared());
Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
@@ -10017,29 +10484,6 @@
Handle<JSObject> closure_scope =
isolate->factory()->NewJSObject(isolate->object_function());
- // Check whether the arguments shadow object exists.
- int arguments_shadow_index =
- shared->scope_info()->ContextSlotIndex(
- isolate->heap()->arguments_shadow_symbol(), NULL);
- if (arguments_shadow_index >= 0) {
- // In this case all the arguments are available in the arguments shadow
- // object.
- Handle<JSObject> arguments_shadow(
- JSObject::cast(context->get(arguments_shadow_index)));
- for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
- // We don't expect exception-throwing getters on the arguments shadow.
- Object* element = arguments_shadow->GetElement(i)->ToObjectUnchecked();
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(closure_scope,
- scope_info.parameter_name(i),
- Handle<Object>(element, isolate),
- NONE,
- kNonStrictMode),
- Handle<JSObject>());
- }
- }
-
// Fill all context locals to the context extension.
if (!CopyContextLocalsToScopeObject(isolate,
serialized_scope_info, scope_info,
@@ -10071,6 +10515,23 @@
}
+// Create a plain JSObject which materializes the scope for the specified
+// catch context.
+static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
+ Handle<Context> context) {
+ ASSERT(context->IsCatchContext());
+ Handle<String> name(String::cast(context->extension()));
+ Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX));
+ Handle<JSObject> catch_scope =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
+ SetProperty(catch_scope, name, thrown_object, NONE, kNonStrictMode),
+ Handle<JSObject>());
+ return catch_scope;
+}
+
+
// Iterate over the actual scopes visible from a stack frame. All scopes are
// backed by an actual context except the local scope, which is inserted
// "artifically" in the context chain.
@@ -10081,16 +10542,15 @@
ScopeTypeLocal,
ScopeTypeWith,
ScopeTypeClosure,
- // Every catch block contains an implicit with block (its parameter is
- // a JSContextExtensionObject) that extends current scope with a variable
- // holding exception object. Such with blocks are treated as scopes of their
- // own type.
ScopeTypeCatch
};
- ScopeIterator(Isolate* isolate, JavaScriptFrame* frame)
+ ScopeIterator(Isolate* isolate,
+ JavaScriptFrame* frame,
+ int inlined_frame_index)
: isolate_(isolate),
frame_(frame),
+ inlined_frame_index_(inlined_frame_index),
function_(JSFunction::cast(frame->function())),
context_(Context::cast(frame->context())),
local_done_(false),
@@ -10105,11 +10565,11 @@
int index = function_->shared()->scope_info()->
StackSlotIndex(isolate_->heap()->result_symbol());
at_local_ = index < 0;
- } else if (context_->is_function_context()) {
+ } else if (context_->IsFunctionContext()) {
at_local_ = true;
} else if (context_->closure() != *function_) {
- // The context_ is a with block from the outer function.
- ASSERT(context_->has_extension());
+ // The context_ is a with or catch block from the outer function.
+ ASSERT(context_->IsWithContext() || context_->IsCatchContext());
at_local_ = true;
}
}
@@ -10139,16 +10599,12 @@
}
// Move to the next context.
- if (context_->is_function_context()) {
- context_ = Handle<Context>(Context::cast(context_->closure()->context()));
- } else {
- context_ = Handle<Context>(context_->previous());
- }
+ context_ = Handle<Context>(context_->previous(), isolate_);
// If we have passed the local scope, indicate that the current scope is
// now the local scope.
if (!local_done_ &&
- (context_->IsGlobalContext() || (context_->is_function_context()))) {
+ (context_->IsGlobalContext() || context_->IsFunctionContext())) {
at_local_ = true;
}
}
@@ -10162,18 +10618,13 @@
ASSERT(context_->global()->IsGlobalObject());
return ScopeTypeGlobal;
}
- if (context_->is_function_context()) {
+ if (context_->IsFunctionContext()) {
return ScopeTypeClosure;
}
- ASSERT(context_->has_extension());
- // Current scope is either an explicit with statement or a with statement
- // implicitely generated for a catch block.
- // If the extension object here is a JSContextExtensionObject then
- // current with statement is one frome a catch block otherwise it's a
- // regular with statement.
- if (context_->extension()->IsJSContextExtensionObject()) {
+ if (context_->IsCatchContext()) {
return ScopeTypeCatch;
}
+ ASSERT(context_->IsWithContext());
return ScopeTypeWith;
}
@@ -10182,20 +10633,17 @@
switch (Type()) {
case ScopeIterator::ScopeTypeGlobal:
return Handle<JSObject>(CurrentContext()->global());
- break;
case ScopeIterator::ScopeTypeLocal:
// Materialize the content of the local scope into a JSObject.
- return MaterializeLocalScope(isolate_, frame_);
- break;
+ return MaterializeLocalScope(isolate_, frame_, inlined_frame_index_);
case ScopeIterator::ScopeTypeWith:
- case ScopeIterator::ScopeTypeCatch:
// Return the with object.
- return Handle<JSObject>(CurrentContext()->extension());
- break;
+ return Handle<JSObject>(JSObject::cast(CurrentContext()->extension()));
+ case ScopeIterator::ScopeTypeCatch:
+ return MaterializeCatchScope(isolate_, CurrentContext());
case ScopeIterator::ScopeTypeClosure:
// Materialize the content of the closure scope into a JSObject.
return MaterializeClosure(isolate_, CurrentContext());
- break;
}
UNREACHABLE();
return Handle<JSObject>();
@@ -10226,8 +10674,7 @@
if (!CurrentContext().is_null()) {
CurrentContext()->Print();
if (CurrentContext()->has_extension()) {
- Handle<JSObject> extension =
- Handle<JSObject>(CurrentContext()->extension());
+ Handle<Object> extension(CurrentContext()->extension());
if (extension->IsJSContextExtensionObject()) {
extension->Print();
}
@@ -10236,34 +10683,27 @@
break;
}
- case ScopeIterator::ScopeTypeWith: {
+ case ScopeIterator::ScopeTypeWith:
PrintF("With:\n");
- Handle<JSObject> extension =
- Handle<JSObject>(CurrentContext()->extension());
- extension->Print();
+ CurrentContext()->extension()->Print();
break;
- }
- case ScopeIterator::ScopeTypeCatch: {
+ case ScopeIterator::ScopeTypeCatch:
PrintF("Catch:\n");
- Handle<JSObject> extension =
- Handle<JSObject>(CurrentContext()->extension());
- extension->Print();
+ CurrentContext()->extension()->Print();
+ CurrentContext()->get(Context::THROWN_OBJECT_INDEX)->Print();
break;
- }
- case ScopeIterator::ScopeTypeClosure: {
+ case ScopeIterator::ScopeTypeClosure:
PrintF("Closure:\n");
CurrentContext()->Print();
if (CurrentContext()->has_extension()) {
- Handle<JSObject> extension =
- Handle<JSObject>(CurrentContext()->extension());
+ Handle<Object> extension(CurrentContext()->extension());
if (extension->IsJSContextExtensionObject()) {
extension->Print();
}
}
break;
- }
default:
UNREACHABLE();
@@ -10275,6 +10715,7 @@
private:
Isolate* isolate_;
JavaScriptFrame* frame_;
+ int inlined_frame_index_;
Handle<JSFunction> function_;
Handle<Context> context_;
bool local_done_;
@@ -10303,7 +10744,9 @@
// Count the visible scopes.
int n = 0;
- for (ScopeIterator it(isolate, frame); !it.Done(); it.Next()) {
+ for (ScopeIterator it(isolate, frame, 0);
+ !it.Done();
+ it.Next()) {
n++;
}
@@ -10318,14 +10761,15 @@
// Return an array with scope details
// args[0]: number: break id
// args[1]: number: frame index
-// args[2]: number: scope index
+// args[2]: number: inlined frame index
+// args[3]: number: scope index
//
// The array returned contains the following information:
// 0: Scope type
// 1: Scope object
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) {
HandleScope scope(isolate);
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 4);
// Check arguments.
Object* check;
@@ -10334,7 +10778,8 @@
if (!maybe_check->ToObject(&check)) return maybe_check;
}
CONVERT_CHECKED(Smi, wrapped_id, args[1]);
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[2]);
+ CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]);
+ CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
// Get the frame where the debugging is performed.
StackFrame::Id id = UnwrapFrameId(wrapped_id);
@@ -10343,7 +10788,7 @@
// Find the requested scope.
int n = 0;
- ScopeIterator it(isolate, frame);
+ ScopeIterator it(isolate, frame, inlined_frame_index);
for (; !it.Done() && n < index; it.Next()) {
n++;
}
@@ -10373,7 +10818,9 @@
// Print the scopes for the top frame.
StackFrameLocator locator;
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- for (ScopeIterator it(isolate, frame); !it.Done(); it.Next()) {
+ for (ScopeIterator it(isolate, frame, 0);
+ !it.Done();
+ it.Next()) {
it.DebugPrint();
}
#endif
@@ -10730,19 +11177,35 @@
// Creates a copy of the with context chain. The copy of the context chain is
// linked to the function context supplied.
-static Handle<Context> CopyWithContextChain(Handle<Context> context_chain,
- Handle<Context> function_context) {
- // At the bottom of the chain. Return the function context to link to.
- if (context_chain->is_function_context()) {
- return function_context;
+static Handle<Context> CopyWithContextChain(Isolate* isolate,
+ Handle<JSFunction> function,
+ Handle<Context> current,
+ Handle<Context> base) {
+ // At the end of the chain. Return the base context to link to.
+ if (current->IsFunctionContext() || current->IsGlobalContext()) {
+ return base;
}
- // Recursively copy the with contexts.
- Handle<Context> previous(context_chain->previous());
- Handle<JSObject> extension(JSObject::cast(context_chain->extension()));
- Handle<Context> context = CopyWithContextChain(previous, function_context);
- return context->GetIsolate()->factory()->NewWithContext(
- context, extension, context_chain->IsCatchContext());
+ // Recursively copy the with and catch contexts.
+ HandleScope scope(isolate);
+ Handle<Context> previous(current->previous());
+ Handle<Context> new_previous =
+ CopyWithContextChain(isolate, function, previous, base);
+ Handle<Context> new_current;
+ if (current->IsCatchContext()) {
+ Handle<String> name(String::cast(current->extension()));
+ Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX));
+ new_current =
+ isolate->factory()->NewCatchContext(function,
+ new_previous,
+ name,
+ thrown_object);
+ } else {
+ Handle<JSObject> extension(JSObject::cast(current->extension()));
+ new_current =
+ isolate->factory()->NewWithContext(function, new_previous, extension);
+ }
+ return scope.CloseAndEscape(new_current);
}
@@ -10750,6 +11213,7 @@
// Runtime_DebugEvaluate.
static Handle<Object> GetArgumentsObject(Isolate* isolate,
JavaScriptFrame* frame,
+ int inlined_frame_index,
Handle<JSFunction> function,
Handle<SerializedScopeInfo> scope_info,
const ScopeInfo<>* sinfo,
@@ -10773,7 +11237,9 @@
}
}
- const int length = frame->ComputeParametersCount();
+ FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
+
+ int length = frame_inspector.GetParametersCount();
Handle<JSObject> arguments =
isolate->factory()->NewArgumentsObject(function, length);
Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
@@ -10781,7 +11247,7 @@
AssertNoAllocation no_gc;
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
for (int i = 0; i < length; i++) {
- array->set(i, frame->GetParameter(i), mode);
+ array->set(i, frame_inspector.GetParameter(i), mode);
}
arguments->set_elements(*array);
return arguments;
@@ -10808,7 +11274,7 @@
// Check the execution state and decode arguments frame and source to be
// evaluated.
- ASSERT(args.length() == 5);
+ ASSERT(args.length() == 6);
Object* check_result;
{ MaybeObject* maybe_check_result = Runtime_CheckExecutionState(
RUNTIME_ARGUMENTS(isolate, args));
@@ -10817,9 +11283,10 @@
}
}
CONVERT_CHECKED(Smi, wrapped_id, args[1]);
- CONVERT_ARG_CHECKED(String, source, 2);
- CONVERT_BOOLEAN_CHECKED(disable_break, args[3]);
- Handle<Object> additional_context(args[4]);
+ CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]);
+ CONVERT_ARG_CHECKED(String, source, 3);
+ CONVERT_BOOLEAN_CHECKED(disable_break, args[4]);
+ Handle<Object> additional_context(args[5]);
// Handle the processing of break.
DisableBreak disable_break_save(disable_break);
@@ -10859,7 +11326,8 @@
#endif
// Materialize the content of the local scope into a JSObject.
- Handle<JSObject> local_scope = MaterializeLocalScope(isolate, frame);
+ Handle<JSObject> local_scope = MaterializeLocalScope(
+ isolate, frame, inlined_frame_index);
RETURN_IF_EMPTY_HANDLE(isolate, local_scope);
// Allocate a new context for the debug evaluation and set the extension
@@ -10870,12 +11338,13 @@
context->set_extension(*local_scope);
// Copy any with contexts present and chain them in front of this context.
Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context(frame_context->fcontext());
- context = CopyWithContextChain(frame_context, context);
+ Handle<Context> function_context(frame_context->declaration_context());
+ context = CopyWithContextChain(isolate, go_between, frame_context, context);
if (additional_context->IsJSObject()) {
- context = isolate->factory()->NewWithContext(context,
- Handle<JSObject>::cast(additional_context), false);
+ Handle<JSObject> extension = Handle<JSObject>::cast(additional_context);
+ context =
+ isolate->factory()->NewWithContext(go_between, context, extension);
}
// Wrap the evaluation statement in a new function compiled in the newly
@@ -10907,7 +11376,8 @@
&has_pending_exception);
if (has_pending_exception) return Failure::Exception();
- Handle<Object> arguments = GetArgumentsObject(isolate, frame,
+ Handle<Object> arguments = GetArgumentsObject(isolate,
+ frame, inlined_frame_index,
function, scope_info,
&sinfo, function_context);
@@ -11597,10 +12067,9 @@
// Deletes the specified live object list.
RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteLOL) {
#ifdef LIVE_OBJECT_LIST
- CONVERT_SMI_CHECKED(id, args[0]);
+ CONVERT_SMI_ARG_CHECKED(id, 0);
bool success = LiveObjectList::Delete(id);
- return success ? isolate->heap()->true_value() :
- isolate->heap()->false_value();
+ return isolate->heap()->ToBoolean(success);
#else
return isolate->heap()->undefined_value();
#endif
@@ -11615,10 +12084,10 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_DumpLOL) {
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
- CONVERT_SMI_CHECKED(id1, args[0]);
- CONVERT_SMI_CHECKED(id2, args[1]);
- CONVERT_SMI_CHECKED(start, args[2]);
- CONVERT_SMI_CHECKED(count, args[3]);
+ CONVERT_SMI_ARG_CHECKED(id1, 0);
+ CONVERT_SMI_ARG_CHECKED(id2, 1);
+ CONVERT_SMI_ARG_CHECKED(start, 2);
+ CONVERT_SMI_ARG_CHECKED(count, 3);
CONVERT_ARG_CHECKED(JSObject, filter_obj, 4);
EnterDebugger enter_debugger;
return LiveObjectList::Dump(id1, id2, start, count, filter_obj);
@@ -11632,7 +12101,7 @@
// This is only used for obj ids shown in live object lists.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObj) {
#ifdef LIVE_OBJECT_LIST
- CONVERT_SMI_CHECKED(obj_id, args[0]);
+ CONVERT_SMI_ARG_CHECKED(obj_id, 0);
Object* result = LiveObjectList::GetObj(obj_id);
return result;
#else
@@ -11659,7 +12128,7 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjRetainers) {
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
- CONVERT_SMI_CHECKED(obj_id, args[0]);
+ CONVERT_SMI_ARG_CHECKED(obj_id, 0);
RUNTIME_ASSERT(args[1]->IsUndefined() || args[1]->IsJSObject());
RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsBoolean());
RUNTIME_ASSERT(args[3]->IsUndefined() || args[3]->IsSmi());
@@ -11676,11 +12145,11 @@
}
int start = 0;
if (args[3]->IsSmi()) {
- start = Smi::cast(args[3])->value();
+ start = args.smi_at(3);
}
int limit = Smi::kMaxValue;
if (args[4]->IsSmi()) {
- limit = Smi::cast(args[4])->value();
+ limit = args.smi_at(4);
}
return LiveObjectList::GetObjRetainers(obj_id,
@@ -11699,8 +12168,8 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLPath) {
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
- CONVERT_SMI_CHECKED(obj_id1, args[0]);
- CONVERT_SMI_CHECKED(obj_id2, args[1]);
+ CONVERT_SMI_ARG_CHECKED(obj_id1, 0);
+ CONVERT_SMI_ARG_CHECKED(obj_id2, 1);
RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsJSObject());
Handle<JSObject> instance_filter;
@@ -11721,8 +12190,8 @@
// previously captured live object lists.
RUNTIME_FUNCTION(MaybeObject*, Runtime_InfoLOL) {
#ifdef LIVE_OBJECT_LIST
- CONVERT_SMI_CHECKED(start, args[0]);
- CONVERT_SMI_CHECKED(count, args[1]);
+ CONVERT_SMI_ARG_CHECKED(start, 0);
+ CONVERT_SMI_ARG_CHECKED(count, 1);
return LiveObjectList::Info(start, count);
#else
return isolate->heap()->undefined_value();
@@ -11735,7 +12204,7 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_PrintLOLObj) {
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
- CONVERT_SMI_CHECKED(obj_id, args[0]);
+ CONVERT_SMI_ARG_CHECKED(obj_id, 0);
Object* result = LiveObjectList::PrintObj(obj_id);
return result;
#else
@@ -11763,8 +12232,8 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_SummarizeLOL) {
#ifdef LIVE_OBJECT_LIST
HandleScope scope;
- CONVERT_SMI_CHECKED(id1, args[0]);
- CONVERT_SMI_CHECKED(id2, args[1]);
+ CONVERT_SMI_ARG_CHECKED(id1, 0);
+ CONVERT_SMI_ARG_CHECKED(id2, 1);
CONVERT_ARG_CHECKED(JSObject, filter_obj, 2);
EnterDebugger enter_debugger;
@@ -11777,29 +12246,19 @@
#endif // ENABLE_DEBUGGER_SUPPORT
-#ifdef ENABLE_LOGGING_AND_PROFILING
RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) {
NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(Smi, smi_modules, args[0]);
- CONVERT_CHECKED(Smi, smi_tag, args[1]);
- v8::V8::ResumeProfilerEx(smi_modules->value(), smi_tag->value());
+ v8::V8::ResumeProfiler();
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) {
NoHandleAllocation ha;
- ASSERT(args.length() == 2);
-
- CONVERT_CHECKED(Smi, smi_modules, args[0]);
- CONVERT_CHECKED(Smi, smi_tag, args[1]);
- v8::V8::PauseProfilerEx(smi_modules->value(), smi_tag->value());
+ v8::V8::PauseProfiler();
return isolate->heap()->undefined_value();
}
-#endif // ENABLE_LOGGING_AND_PROFILING
// Finds the script object from the script data. NOTE: This operation uses
// heap traversal to find the function generated for the source position
@@ -11856,8 +12315,9 @@
// call to this function is encountered it is skipped. The seen_caller
// in/out parameter is used to remember if the caller has been seen
// yet.
-static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller,
- bool* seen_caller) {
+static bool ShowFrameInStackTrace(StackFrame* raw_frame,
+ Object* caller,
+ bool* seen_caller) {
// Only display JS frames.
if (!raw_frame->is_java_script())
return false;
@@ -11870,11 +12330,25 @@
*seen_caller = true;
return false;
}
- // Skip all frames until we've seen the caller. Also, skip the most
- // obvious builtin calls. Some builtin calls (such as Number.ADD
- // which is invoked using 'call') are very difficult to recognize
- // so we're leaving them in for now.
- return *seen_caller && !frame->receiver()->IsJSBuiltinsObject();
+ // Skip all frames until we've seen the caller.
+ if (!(*seen_caller)) return false;
+ // Also, skip the most obvious builtin calls. We recognize builtins
+ // as (1) functions called with the builtins object as the receiver and
+ // as (2) functions from native scripts called with undefined as the
+ // receiver (direct calls to helper functions in the builtins
+ // code). Some builtin calls (such as Number.ADD which is invoked
+ // using 'call') are very difficult to recognize so we're leaving
+ // them in for now.
+ if (frame->receiver()->IsJSBuiltinsObject()) {
+ return false;
+ }
+ JSFunction* fun = JSFunction::cast(raw_fun);
+ Object* raw_script = fun->shared()->script();
+ if (frame->receiver()->IsUndefined() && raw_script->IsScript()) {
+ int script_type = Script::cast(raw_script)->type()->value();
+ return script_type != Script::TYPE_NATIVE;
+ }
+ return true;
}
@@ -11954,8 +12428,8 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
ASSERT(args.length() == 2);
- OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) +
- Smi::cast(args[1])->value());
+ OS::PrintError("abort: %s\n",
+ reinterpret_cast<char*>(args[0]) + args.smi_at(1));
isolate->PrintStack();
OS::Abort();
UNREACHABLE();
@@ -12157,6 +12631,28 @@
}
+#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
+ RUNTIME_FUNCTION(MaybeObject*, Runtime_Has##Name) { \
+ CONVERT_CHECKED(JSObject, obj, args[0]); \
+ return isolate->heap()->ToBoolean(obj->Has##Name()); \
+ }
+
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalPixelElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalByteElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedByteElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalShortElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedShortElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalIntElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalUnsignedIntElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalFloatElements)
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalDoubleElements)
+
+#undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
+
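
For reference, instantiating the macro for a single elements kind expands, modulo whitespace, to:

  RUNTIME_FUNCTION(MaybeObject*, Runtime_HasFastElements) {
    CONVERT_CHECKED(JSObject, obj, args[0]);
    return isolate->heap()->ToBoolean(obj->HasFastElements());
  }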
// ----------------------------------------------------------------------------
// Implementation of Runtime
diff --git a/src/runtime.h b/src/runtime.h
index d3223d1..9a2cf1d 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -67,6 +67,7 @@
F(SpecialArrayFunctions, 1, 1) \
F(GetGlobalReceiver, 0, 1) \
\
+ F(GetPrototype, 1, 1) \
F(IsInPrototypeChain, 2, 1) \
F(SetHiddenPrototype, 2, 1) \
\
@@ -81,18 +82,20 @@
F(GetFunctionDelegate, 1, 1) \
F(GetConstructorDelegate, 1, 1) \
F(NewArgumentsFast, 3, 1) \
+ F(NewStrictArgumentsFast, 3, 1) \
F(LazyCompile, 1, 1) \
F(LazyRecompile, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
+ F(RunningInSimulator, 0, 1) \
F(OptimizeFunctionOnNextCall, 1, 1) \
F(GetOptimizationStatus, 1, 1) \
F(GetOptimizationCount, 1, 1) \
F(CompileForOnStackReplacement, 1, 1) \
F(SetNewFunctionAttributes, 1, 1) \
F(AllocateInNewSpace, 1, 1) \
- F(SetES5Flag, 1, 1) \
+ F(SetNativeFlag, 1, 1) \
\
/* Array join support */ \
F(PushIfAbsent, 2, 1) \
@@ -114,6 +117,7 @@
F(URIUnescape, 1, 1) \
F(QuoteJSONString, 1, 1) \
F(QuoteJSONStringComma, 1, 1) \
+ F(QuoteJSONStringArray, 1, 1) \
\
F(NumberToString, 1, 1) \
F(NumberToStringSkipCache, 1, 1) \
@@ -208,6 +212,9 @@
F(FunctionSetPrototype, 2, 1) \
F(FunctionGetName, 1, 1) \
F(FunctionSetName, 2, 1) \
+ F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
+ F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \
+ F(FunctionSetBound, 1, 1) \
F(FunctionRemovePrototype, 1, 1) \
F(FunctionGetSourceCode, 1, 1) \
F(FunctionGetScript, 1, 1) \
@@ -277,9 +284,9 @@
\
/* Harmony proxies */ \
F(CreateJSProxy, 2, 1) \
- \
- /* Catch context extension objects */ \
- F(CreateCatchExtensionObject, 2, 1) \
+ F(IsJSProxy, 1, 1) \
+ F(GetHandler, 1, 1) \
+ F(Fix, 1, 1) \
\
/* Statements */ \
F(NewClosure, 3, 1) \
@@ -293,9 +300,9 @@
F(PromoteScheduledException, 0, 1) \
\
/* Contexts */ \
- F(NewContext, 1, 1) \
- F(PushContext, 1, 1) \
- F(PushCatchContext, 1, 1) \
+ F(NewFunctionContext, 1, 1) \
+ F(PushWithContext, 2, 1) \
+ F(PushCatchContext, 3, 1) \
F(DeleteContextSlot, 2, 1) \
F(LoadContextSlot, 2, 2) \
F(LoadContextSlotNoReferenceError, 2, 2) \
@@ -330,7 +337,26 @@
F(MessageGetScript, 1, 1) \
\
/* Pseudo functions - handled as macros by parser */ \
- F(IS_VAR, 1, 1)
+ F(IS_VAR, 1, 1) \
+ \
+ /* expose boolean functions from objects-inl.h */ \
+ F(HasFastElements, 1, 1) \
+ F(HasFastDoubleElements, 1, 1) \
+ F(HasDictionaryElements, 1, 1) \
+ F(HasExternalPixelElements, 1, 1) \
+ F(HasExternalArrayElements, 1, 1) \
+ F(HasExternalByteElements, 1, 1) \
+ F(HasExternalUnsignedByteElements, 1, 1) \
+ F(HasExternalShortElements, 1, 1) \
+ F(HasExternalUnsignedShortElements, 1, 1) \
+ F(HasExternalIntElements, 1, 1) \
+ F(HasExternalUnsignedIntElements, 1, 1) \
+ F(HasExternalFloatElements, 1, 1) \
+ F(HasExternalDoubleElements, 1, 1) \
+ /* profiler */ \
+ F(ProfilerResume, 0, 1) \
+ F(ProfilerPause, 0, 1)
+
#ifdef ENABLE_DEBUGGER_SUPPORT
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
@@ -349,7 +375,7 @@
F(GetFrameCount, 1, 1) \
F(GetFrameDetails, 2, 1) \
F(GetScopeCount, 2, 1) \
- F(GetScopeDetails, 3, 1) \
+ F(GetScopeDetails, 4, 1) \
F(DebugPrintScopes, 0, 1) \
F(GetThreadCount, 1, 1) \
F(GetThreadDetails, 2, 1) \
@@ -362,7 +388,7 @@
F(IsBreakOnException, 1, 1) \
F(PrepareStep, 3, 1) \
F(ClearStepping, 0, 1) \
- F(DebugEvaluate, 5, 1) \
+ F(DebugEvaluate, 6, 1) \
F(DebugEvaluateGlobal, 4, 1) \
F(DebugGetLoadedScripts, 0, 1) \
F(DebugReferencedBy, 3, 1) \
@@ -407,14 +433,6 @@
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
-#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F) \
- F(ProfilerResume, 2, 1) \
- F(ProfilerPause, 2, 1)
-#else
-#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
-#endif
-
#ifdef DEBUG
#define RUNTIME_FUNCTION_LIST_DEBUG(F) \
/* Testing */ \
@@ -432,8 +450,7 @@
RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
RUNTIME_FUNCTION_LIST_DEBUG(F) \
- RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
- RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
+ RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
// ----------------------------------------------------------------------------
// INLINE_FUNCTION_LIST defines all inlined functions accessed
@@ -466,7 +483,8 @@
F(IsRegExpEquivalent, 2, 1) \
F(HasCachedArrayIndex, 1, 1) \
F(GetCachedArrayIndex, 1, 1) \
- F(FastAsciiArrayJoin, 2, 1)
+ F(FastAsciiArrayJoin, 2, 1) \
+ F(IsNativeOrStrictMode, 1, 1)
// ----------------------------------------------------------------------------
@@ -494,7 +512,6 @@
class RuntimeState {
public:
-
StaticResource<StringInputBuffer>* string_input_buffer() {
return &string_input_buffer_;
}
@@ -516,12 +533,6 @@
StringInputBuffer* string_locale_compare_buf2() {
return &string_locale_compare_buf2_;
}
- int* smi_lexicographic_compare_x_elms() {
- return smi_lexicographic_compare_x_elms_;
- }
- int* smi_lexicographic_compare_y_elms() {
- return smi_lexicographic_compare_y_elms_;
- }
private:
RuntimeState() {}
@@ -533,8 +544,6 @@
StringInputBuffer string_input_buffer_compare_bufy_;
StringInputBuffer string_locale_compare_buf1_;
StringInputBuffer string_locale_compare_buf2_;
- int smi_lexicographic_compare_x_elms_[10];
- int smi_lexicographic_compare_y_elms_[10];
friend class Isolate;
friend class Runtime;
@@ -630,7 +639,7 @@
MUST_USE_RESULT static MaybeObject* ForceDeleteObjectProperty(
Isolate* isolate,
- Handle<JSObject> object,
+ Handle<JSReceiver> object,
Handle<Object> key);
MUST_USE_RESULT static MaybeObject* GetObjectProperty(
diff --git a/src/runtime.js b/src/runtime.js
index 77b97ae..4b600df 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -354,7 +354,8 @@
if (!IS_SPEC_OBJECT(x)) {
throw %MakeTypeError('invalid_in_operator_use', [this, x]);
}
- return %_IsNonNegativeSmi(this) ? %HasElement(x, this) : %HasProperty(x, %ToString(this));
+ return %_IsNonNegativeSmi(this) && !%IsJSProxy(x) ?
+ %HasElement(x, this) : %HasProperty(x, %ToString(this));
}
diff --git a/src/scanner-base.cc b/src/scanner-base.cc
index e15ef41..16f8db5 100644
--- a/src/scanner-base.cc
+++ b/src/scanner-base.cc
@@ -77,9 +77,22 @@
: Scanner(scanner_contants), octal_pos_(Location::invalid()) { }
+void JavaScriptScanner::Initialize(UC16CharacterStream* source) {
+ source_ = source;
+ // Need to capture identifiers in order to recognize "get" and "set"
+ // in object literals.
+ Init();
+ // Skip initial whitespace allowing HTML comment ends just like
+ // after a newline and scan first token.
+ has_line_terminator_before_next_ = true;
+ SkipWhiteSpace();
+ Scan();
+}
+
Token::Value JavaScriptScanner::Next() {
current_ = next_;
has_line_terminator_before_next_ = false;
+ has_multiline_comment_before_next_ = false;
Scan();
return current_.token;
}
@@ -144,7 +157,7 @@
// to be part of the single-line comment; it is recognized
// separately by the lexical grammar and becomes part of the
// stream of input elements for the syntactic grammar (see
- // ECMA-262, section 7.4, page 12).
+ // ECMA-262, section 7.4).
while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
Advance();
}
@@ -160,13 +173,14 @@
while (c0_ >= 0) {
char ch = c0_;
Advance();
+ if (unicode_cache_->IsLineTerminator(ch)) {
+ // Following ECMA-262, section 7.4, a comment containing
+ // a newline will make the comment count as a line-terminator.
+ has_multiline_comment_before_next_ = true;
+ }
// If we have reached the end of the multi-line comment, we
// consume the '/' and insert a whitespace. This way all
- // multi-line comments are treated as whitespace - even the ones
- // containing line terminators. This contradicts ECMA-262, section
- // 7.4, page 12, that says that multi-line comments containing
- // line terminators should be treated as a line terminator, but it
- // matches the behaviour of SpiderMonkey and KJS.
+ // multi-line comments are treated as whitespace.
if (ch == '*' && c0_ == '/') {
c0_ = ' ';
return Token::WHITESPACE;
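
The new has_multiline_comment_before_next_ flag exists so that HasAnyLineTerminatorBeforeNext() also reports a newline hidden inside a /* ... */ comment, which is what ECMA-262 section 7.4 requires for automatic semicolon insertion. A toy, self-contained version of that check (not the V8 scanner):

  #include <cassert>
  #include <cstddef>
  #include <string>

  // Does the gap between two tokens contain a line terminator, counting
  // newlines that occur inside /* ... */ comments?
  bool HasAnyLineTerminator(const std::string& gap) {
    bool in_comment = false;
    for (size_t i = 0; i < gap.size(); ++i) {
      if (!in_comment && gap[i] == '/' && i + 1 < gap.size() &&
          gap[i + 1] == '*') {
        in_comment = true;
        ++i;
      } else if (in_comment && gap[i] == '*' && i + 1 < gap.size() &&
                 gap[i + 1] == '/') {
        in_comment = false;
        ++i;
      } else if (gap[i] == '\n') {
        return true;  // counts whether or not it is inside a comment
      }
    }
    return false;
  }

  int main() {
    assert(HasAnyLineTerminator("/* first \n second */"));  // hidden newline
    assert(!HasAnyLineTerminator("/* no newline */ "));
    return 0;
  }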
@@ -448,6 +462,7 @@
// of the end of a function (at the "}" token). It doesn't matter
// whether there was a line terminator in the part we skip.
has_line_terminator_before_next_ = false;
+ has_multiline_comment_before_next_ = false;
}
Scan();
}
@@ -784,7 +799,7 @@
{ NULL, I, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
- { "let", KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD },
+ { "let", KEYWORD_PREFIX, Token::FUTURE_STRICT_RESERVED_WORD },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
{ NULL, N, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
@@ -797,7 +812,7 @@
{ NULL, V, Token::ILLEGAL },
{ NULL, W, Token::ILLEGAL },
{ NULL, UNMATCHABLE, Token::ILLEGAL },
- { "yield", KEYWORD_PREFIX, Token::FUTURE_RESERVED_WORD }
+ { "yield", KEYWORD_PREFIX, Token::FUTURE_STRICT_RESERVED_WORD }
};
@@ -834,7 +849,7 @@
case C:
if (MatchState(input, 'a', CA)) return;
if (MatchKeywordStart(input, "class", 1,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_RESERVED_WORD)) return;
if (MatchState(input, 'o', CO)) return;
break;
case CA:
@@ -860,14 +875,14 @@
case E:
if (MatchKeywordStart(input, "else", 1, Token::ELSE)) return;
if (MatchKeywordStart(input, "enum", 1,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_RESERVED_WORD)) return;
if (MatchState(input, 'x', EX)) return;
break;
case EX:
if (MatchKeywordStart(input, "export", 2,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_RESERVED_WORD)) return;
if (MatchKeywordStart(input, "extends", 2,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_RESERVED_WORD)) return;
break;
case F:
if (MatchKeywordStart(input, "false", 1, Token::FALSE_LITERAL)) return;
@@ -885,41 +900,40 @@
break;
case IMP:
if (MatchKeywordStart(input, "implements", 3,
- Token::FUTURE_RESERVED_WORD )) return;
+ Token::FUTURE_STRICT_RESERVED_WORD )) return;
if (MatchKeywordStart(input, "import", 3,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_RESERVED_WORD)) return;
break;
case IN:
token_ = Token::IDENTIFIER;
if (MatchKeywordStart(input, "interface", 2,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_STRICT_RESERVED_WORD)) return;
if (MatchKeywordStart(input, "instanceof", 2, Token::INSTANCEOF)) return;
break;
case N:
- if (MatchKeywordStart(input, "native", 1, Token::NATIVE)) return;
if (MatchKeywordStart(input, "new", 1, Token::NEW)) return;
if (MatchKeywordStart(input, "null", 1, Token::NULL_LITERAL)) return;
break;
case P:
if (MatchKeywordStart(input, "package", 1,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_STRICT_RESERVED_WORD)) return;
if (MatchState(input, 'r', PR)) return;
if (MatchKeywordStart(input, "public", 1,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_STRICT_RESERVED_WORD)) return;
break;
case PR:
if (MatchKeywordStart(input, "private", 2,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_STRICT_RESERVED_WORD)) return;
if (MatchKeywordStart(input, "protected", 2,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_STRICT_RESERVED_WORD)) return;
break;
case S:
if (MatchKeywordStart(input, "static", 1,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_STRICT_RESERVED_WORD)) return;
if (MatchKeywordStart(input, "super", 1,
- Token::FUTURE_RESERVED_WORD)) return;
+ Token::FUTURE_RESERVED_WORD)) return;
if (MatchKeywordStart(input, "switch", 1,
- Token::SWITCH)) return;
+ Token::SWITCH)) return;
break;
case T:
if (MatchState(input, 'h', TH)) return;
diff --git a/src/scanner-base.h b/src/scanner-base.h
index 02566dd..3d67d4e 100644
--- a/src/scanner-base.h
+++ b/src/scanner-base.h
@@ -471,12 +471,16 @@
explicit JavaScriptScanner(UnicodeCache* scanner_contants);
+ void Initialize(UC16CharacterStream* source);
+
// Returns the next token.
Token::Value Next();
- // Returns true if there was a line terminator before the peek'ed token.
- bool has_line_terminator_before_next() const {
- return has_line_terminator_before_next_;
+ // Returns true if there was a line terminator before the peek'ed token,
+ // possibly inside a multi-line comment.
+ bool HasAnyLineTerminatorBeforeNext() const {
+ return has_line_terminator_before_next_ ||
+ has_multiline_comment_before_next_;
}
// Scans the input as a regular expression pattern, previous
@@ -529,7 +533,13 @@
// Start position of the octal literal last scanned.
Location octal_pos_;
+ // Whether there is a line terminator whitespace character after
+ // the current token, and before the next. Does not count newlines
+ // inside multiline comments.
bool has_line_terminator_before_next_;
+ // Whether there is a multi-line comment that contains a
+ // line-terminator after the current token, and before the next.
+ bool has_multiline_comment_before_next_;
};
@@ -539,14 +549,26 @@
class KeywordMatcher {
// Incrementally recognize keywords.
//
-// Recognized keywords:
-// break case catch const* continue debugger* default delete do else
-// finally false for function if in instanceof native* new null
-// return switch this throw true try typeof var void while with
+// We distinguish between normal future reserved words and words that are
+// considered to be future reserved words only in strict mode as required by
+// ECMA-262 7.6.1.2.
//
-// *: Actually "future reserved keywords". These are the only ones we
-// recognize, the remaining are allowed as identifiers.
-// In ES5 strict mode, we should disallow all reserved keywords.
+// Recognized as keywords:
+// break, case, catch, const*, continue, debugger, default, delete, do,
+// else, finally, false, for, function, if, in, instanceof, new, null,
+// return, switch, this, throw, true, try, typeof, var, void, while, with.
+//
+// Recognized as Future Reserved Keywords:
+// class, enum, export, extends, import, super.
+//
+// Recognized as Future Reserved Keywords (strict mode only):
+// implements, interface, let, package, private, protected, public,
+// static, yield.
+//
+// *: Actually a "future reserved keyword". It's the only one we
+// recognize outside of ES5 strict mode; the remaining are allowed
+// as identifiers.
+//
public:
KeywordMatcher()
: state_(INITIAL),
diff --git a/src/scanner.cc b/src/scanner.cc
index 21a0c2d..5919073 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -324,22 +324,4 @@
complete_ = true;
}
-
-// ----------------------------------------------------------------------------
-// V8JavaScriptScanner
-
-
-void V8JavaScriptScanner::Initialize(UC16CharacterStream* source) {
- source_ = source;
- // Need to capture identifiers in order to recognize "get" and "set"
- // in object literals.
- Init();
- // Skip initial whitespace allowing HTML comment ends just like
- // after a newline and scan first token.
- has_line_terminator_before_next_ = true;
- SkipWhiteSpace();
- Scan();
-}
-
-
} } // namespace v8::internal
diff --git a/src/scanner.h b/src/scanner.h
index 804fac8..e66dd60 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -126,21 +126,6 @@
const uc16* raw_data_; // Pointer to the actual array of characters.
};
-
-// ----------------------------------------------------------------------------
-// V8JavaScriptScanner
-// JavaScript scanner getting its input from either a V8 String or a unicode
-// CharacterStream.
-
-class V8JavaScriptScanner : public JavaScriptScanner {
- public:
- explicit V8JavaScriptScanner(UnicodeCache* unicode_cache)
- : JavaScriptScanner(unicode_cache) {}
-
- void Initialize(UC16CharacterStream* source);
-};
-
-
} } // namespace v8::internal
#endif // V8_SCANNER_H_
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index ccc2cc8..3e18368 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -32,6 +32,8 @@
#include "scopeinfo.h"
#include "scopes.h"
+#include "allocation-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index ff72013..86c33f6 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -117,11 +117,6 @@
// Is this scope a strict mode scope?
bool IsStrictMode();
- // Does this scope have an arguments shadow?
- bool HasArgumentsShadow() {
- return StackSlotIndex(GetHeap()->arguments_shadow_symbol()) >= 0;
- }
-
// Return the number of stack slots for code.
int NumberOfStackSlots();
diff --git a/src/scopes.cc b/src/scopes.cc
index 6102442..390a0b6 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -34,6 +34,8 @@
#include "prettyprinter.h"
#include "scopeinfo.h"
+#include "allocation-inl.h"
+
namespace v8 {
namespace internal {
@@ -114,70 +116,72 @@
// Dummy constructor
Scope::Scope(Type type)
- : inner_scopes_(0),
- variables_(false),
- temps_(0),
- params_(0),
- unresolved_(0),
- decls_(0) {
+ : isolate_(Isolate::Current()),
+ inner_scopes_(0),
+ variables_(false),
+ temps_(0),
+ params_(0),
+ unresolved_(0),
+ decls_(0),
+ already_resolved_(false) {
SetDefaults(type, NULL, Handle<SerializedScopeInfo>::null());
- ASSERT(!resolved());
}
Scope::Scope(Scope* outer_scope, Type type)
- : inner_scopes_(4),
- variables_(),
- temps_(4),
- params_(4),
- unresolved_(16),
- decls_(4) {
+ : isolate_(Isolate::Current()),
+ inner_scopes_(4),
+ variables_(),
+ temps_(4),
+ params_(4),
+ unresolved_(16),
+ decls_(4),
+ already_resolved_(false) {
SetDefaults(type, outer_scope, Handle<SerializedScopeInfo>::null());
// At some point we might want to provide outer scopes to
// eval scopes (by walking the stack and reading the scope info).
// In that case, the ASSERT below needs to be adjusted.
ASSERT((type == GLOBAL_SCOPE || type == EVAL_SCOPE) == (outer_scope == NULL));
ASSERT(!HasIllegalRedeclaration());
- ASSERT(!resolved());
}
Scope::Scope(Scope* inner_scope, Handle<SerializedScopeInfo> scope_info)
- : inner_scopes_(4),
- variables_(),
- temps_(4),
- params_(4),
- unresolved_(16),
- decls_(4) {
+ : isolate_(Isolate::Current()),
+ inner_scopes_(4),
+ variables_(),
+ temps_(4),
+ params_(4),
+ unresolved_(16),
+ decls_(4),
+ already_resolved_(true) {
ASSERT(!scope_info.is_null());
SetDefaults(FUNCTION_SCOPE, NULL, scope_info);
- ASSERT(resolved());
if (scope_info->HasHeapAllocatedLocals()) {
num_heap_slots_ = scope_info_->NumberOfContextSlots();
}
-
AddInnerScope(inner_scope);
+}
- // This scope's arguments shadow (if present) is context-allocated if an inner
- // scope accesses this one's parameters. Allocate the arguments_shadow_
- // variable if necessary.
- Isolate* isolate = Isolate::Current();
- Variable::Mode mode;
- int arguments_shadow_index =
- scope_info_->ContextSlotIndex(
- isolate->heap()->arguments_shadow_symbol(), &mode);
- if (arguments_shadow_index >= 0) {
- ASSERT(mode == Variable::INTERNAL);
- arguments_shadow_ = new Variable(
- this,
- isolate->factory()->arguments_shadow_symbol(),
- Variable::INTERNAL,
- true,
- Variable::ARGUMENTS);
- arguments_shadow_->set_rewrite(
- new Slot(arguments_shadow_, Slot::CONTEXT, arguments_shadow_index));
- arguments_shadow_->set_is_used(true);
- }
+
+Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name)
+ : isolate_(Isolate::Current()),
+ inner_scopes_(1),
+ variables_(),
+ temps_(0),
+ params_(0),
+ unresolved_(0),
+ decls_(0),
+ already_resolved_(true) {
+ SetDefaults(CATCH_SCOPE, NULL, Handle<SerializedScopeInfo>::null());
+ AddInnerScope(inner_scope);
+ ++num_var_or_const_;
+ Variable* variable = variables_.Declare(this,
+ catch_variable_name,
+ Variable::VAR,
+ true, // Valid left-hand side.
+ Variable::NORMAL);
+ AllocateHeapSlot(variable);
}
@@ -186,12 +190,11 @@
Handle<SerializedScopeInfo> scope_info) {
outer_scope_ = outer_scope;
type_ = type;
- scope_name_ = FACTORY->empty_symbol();
+ scope_name_ = isolate_->factory()->empty_symbol();
dynamics_ = NULL;
receiver_ = NULL;
function_ = NULL;
arguments_ = NULL;
- arguments_shadow_ = NULL;
illegal_redecl_ = NULL;
scope_inside_with_ = false;
scope_contains_with_ = false;
@@ -212,30 +215,43 @@
Scope* Scope::DeserializeScopeChain(CompilationInfo* info,
Scope* global_scope) {
+ // Reconstruct the outer scope chain from a closure's context chain.
ASSERT(!info->closure().is_null());
- // If we have a serialized scope info, reuse it.
+ Context* context = info->closure()->context();
+ Scope* current_scope = NULL;
Scope* innermost_scope = NULL;
- Scope* scope = NULL;
-
- SerializedScopeInfo* scope_info = info->closure()->shared()->scope_info();
- if (scope_info != SerializedScopeInfo::Empty()) {
- JSFunction* current = *info->closure();
- do {
- current = current->context()->closure();
- Handle<SerializedScopeInfo> scope_info(current->shared()->scope_info());
- if (*scope_info != SerializedScopeInfo::Empty()) {
- scope = new Scope(scope, scope_info);
- if (innermost_scope == NULL) innermost_scope = scope;
- } else {
- ASSERT(current->context()->IsGlobalContext());
+ bool contains_with = false;
+ while (!context->IsGlobalContext()) {
+ if (context->IsWithContext()) {
+ // All the inner scopes are inside a with.
+ contains_with = true;
+ for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
+ s->scope_inside_with_ = true;
}
- } while (!current->context()->IsGlobalContext());
+ } else {
+ if (context->IsFunctionContext()) {
+ SerializedScopeInfo* scope_info =
+ context->closure()->shared()->scope_info();
+ current_scope =
+ new Scope(current_scope, Handle<SerializedScopeInfo>(scope_info));
+ } else {
+ ASSERT(context->IsCatchContext());
+ String* name = String::cast(context->extension());
+ current_scope = new Scope(current_scope, Handle<String>(name));
+ }
+ if (contains_with) current_scope->RecordWithStatement();
+ if (innermost_scope == NULL) innermost_scope = current_scope;
+ }
+
+ // Forget about a with when we move to a context for a different function.
+ if (context->previous()->closure() != context->closure()) {
+ contains_with = false;
+ }
+ context = context->previous();
}
- global_scope->AddInnerScope(scope);
- if (innermost_scope == NULL) innermost_scope = global_scope;
-
- return innermost_scope;
+ global_scope->AddInnerScope(current_scope);
+ return (innermost_scope == NULL) ? global_scope : innermost_scope;
}
@@ -260,7 +276,7 @@
void Scope::Initialize(bool inside_with) {
- ASSERT(!resolved());
+ ASSERT(!already_resolved());
// Add this scope as a new inner scope of the outer scope.
if (outer_scope_ != NULL) {
@@ -278,73 +294,64 @@
// instead load them directly from the stack. Currently, the only
// such parameter is 'this' which is passed on the stack when
// invoking scripts
- Variable* var =
- variables_.Declare(this, FACTORY->this_symbol(), Variable::VAR,
- false, Variable::THIS);
- var->set_rewrite(new Slot(var, Slot::PARAMETER, -1));
- receiver_ = var;
+ if (is_catch_scope()) {
+ ASSERT(outer_scope() != NULL);
+ receiver_ = outer_scope()->receiver();
+ } else {
+ Variable* var =
+ variables_.Declare(this,
+ isolate_->factory()->this_symbol(),
+ Variable::VAR,
+ false,
+ Variable::THIS);
+ var->set_rewrite(NewSlot(var, Slot::PARAMETER, -1));
+ receiver_ = var;
+ }
if (is_function_scope()) {
// Declare 'arguments' variable which exists in all functions.
// Note that it might never be accessed, in which case it won't be
// allocated during variable allocation.
- variables_.Declare(this, FACTORY->arguments_symbol(), Variable::VAR,
- true, Variable::ARGUMENTS);
+ variables_.Declare(this,
+ isolate_->factory()->arguments_symbol(),
+ Variable::VAR,
+ true,
+ Variable::ARGUMENTS);
}
}
Variable* Scope::LocalLookup(Handle<String> name) {
Variable* result = variables_.Lookup(name);
- if (result != NULL || !resolved()) {
+ if (result != NULL || scope_info_.is_null()) {
return result;
}
- // If the scope is resolved, we can find a variable in serialized scope info.
-
- // We should never lookup 'arguments' in this scope
- // as it is implicitly present in any scope.
- ASSERT(*name != *FACTORY->arguments_symbol());
-
- // Assert that there is no local slot with the given name.
+ // If we have a serialized scope info, we might find the variable there.
+ //
+  // We should never look up 'arguments' in this scope as it is implicitly
+ // present in every scope.
+ ASSERT(*name != *isolate_->factory()->arguments_symbol());
+ // There should be no local slot with the given name.
ASSERT(scope_info_->StackSlotIndex(*name) < 0);
// Check context slot lookup.
Variable::Mode mode;
int index = scope_info_->ContextSlotIndex(*name, &mode);
- if (index >= 0) {
- Variable* var =
- variables_.Declare(this, name, mode, true, Variable::NORMAL);
- var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
- return var;
+ if (index < 0) {
+ // Check parameters.
+ mode = Variable::VAR;
+ index = scope_info_->ParameterIndex(*name);
+ if (index < 0) {
+ // Check the function name.
+ index = scope_info_->FunctionContextSlotIndex(*name);
+ if (index < 0) return NULL;
+ }
}
- index = scope_info_->ParameterIndex(*name);
- if (index >= 0) {
- // ".arguments" must be present in context slots.
- ASSERT(arguments_shadow_ != NULL);
- Variable* var =
- variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
- Property* rewrite =
- new Property(new VariableProxy(arguments_shadow_),
- new Literal(Handle<Object>(Smi::FromInt(index))),
- RelocInfo::kNoPosition,
- Property::SYNTHETIC);
- rewrite->set_is_arguments_access(true);
- var->set_rewrite(rewrite);
- return var;
- }
-
- index = scope_info_->FunctionContextSlotIndex(*name);
- if (index >= 0) {
- // Check that there is no local slot with the given name.
- ASSERT(scope_info_->StackSlotIndex(*name) < 0);
- Variable* var =
- variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
- var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
- return var;
- }
-
- return NULL;
+ Variable* var =
+ variables_.Declare(this, name, mode, true, Variable::NORMAL);
+ var->set_rewrite(NewSlot(var, Slot::CONTEXT, index));
+ return var;
}
@@ -366,17 +373,22 @@
}
-Variable* Scope::DeclareLocal(Handle<String> name,
- Variable::Mode mode,
- LocalType type) {
- // DYNAMIC variables are introduces during variable allocation,
- // INTERNAL variables are allocated explicitly, and TEMPORARY
- // variables are allocated via NewTemporary().
- ASSERT(!resolved());
+void Scope::DeclareParameter(Handle<String> name) {
+ ASSERT(!already_resolved());
+ ASSERT(is_function_scope());
+ Variable* var =
+ variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
+ params_.Add(var);
+}
+
+
+Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
+ ASSERT(!already_resolved());
+ // This function handles VAR and CONST modes. DYNAMIC variables are
+  // introduced during variable allocation, INTERNAL variables are allocated
+ // explicitly, and TEMPORARY variables are allocated via NewTemporary().
ASSERT(mode == Variable::VAR || mode == Variable::CONST);
- if (type == VAR_OR_CONST) {
- num_var_or_const_++;
- }
+ ++num_var_or_const_;
return variables_.Declare(this, name, mode, true, Variable::NORMAL);
}
@@ -388,21 +400,15 @@
}
-void Scope::AddParameter(Variable* var) {
- ASSERT(is_function_scope());
- ASSERT(LocalLookup(var->name()) == var);
- params_.Add(var);
-}
-
-
VariableProxy* Scope::NewUnresolved(Handle<String> name,
bool inside_with,
int position) {
// Note that we must not share the unresolved variables with
// the same name because they may be removed selectively via
// RemoveUnresolved().
- ASSERT(!resolved());
- VariableProxy* proxy = new VariableProxy(name, false, inside_with, position);
+ ASSERT(!already_resolved());
+ VariableProxy* proxy = new(isolate_->zone()) VariableProxy(
+ isolate_, name, false, inside_with, position);
unresolved_.Add(proxy);
return proxy;
}
@@ -421,7 +427,7 @@
Variable* Scope::NewTemporary(Handle<String> name) {
- ASSERT(!resolved());
+ ASSERT(!already_resolved());
Variable* var =
new Variable(this, name, Variable::TEMPORARY, true, Variable::NORMAL);
temps_.Add(var);
@@ -551,12 +557,22 @@
}
+Scope* Scope::DeclarationScope() {
+ Scope* scope = this;
+ while (scope->is_catch_scope()) {
+ scope = scope->outer_scope();
+ }
+ return scope;
+}
+
+
#ifdef DEBUG
static const char* Header(Scope::Type type) {
switch (type) {
case Scope::EVAL_SCOPE: return "eval";
case Scope::FUNCTION_SCOPE: return "function";
case Scope::GLOBAL_SCOPE: return "global";
+ case Scope::CATCH_SCOPE: return "catch";
}
UNREACHABLE();
return NULL;
@@ -692,7 +708,7 @@
// Declare a new non-local.
var = map->Declare(NULL, name, mode, true, Variable::NORMAL);
// Allocate it by giving it a dynamic lookup.
- var->set_rewrite(new Slot(var, Slot::LOOKUP, -1));
+ var->set_rewrite(NewSlot(var, Slot::LOOKUP, -1));
}
return var;
}
@@ -907,8 +923,10 @@
// visible name.
if ((var->is_this() || var->name()->length() > 0) &&
(var->is_accessed_from_inner_scope() ||
- scope_calls_eval_ || inner_scope_calls_eval_ ||
- scope_contains_with_)) {
+ scope_calls_eval_ ||
+ inner_scope_calls_eval_ ||
+ scope_contains_with_ ||
+ is_catch_scope())) {
var->set_is_used(true);
}
// Global variables do not need to be allocated.
@@ -917,73 +935,60 @@
bool Scope::MustAllocateInContext(Variable* var) {
- // If var is accessed from an inner scope, or if there is a
- // possibility that it might be accessed from the current or an inner
- // scope (through an eval() call), it must be allocated in the
- // context. Exception: temporary variables are not allocated in the
+ // If var is accessed from an inner scope, or if there is a possibility
+ // that it might be accessed from the current or an inner scope (through
+ // an eval() call or a runtime with lookup), it must be allocated in the
// context.
- return
- var->mode() != Variable::TEMPORARY &&
- (var->is_accessed_from_inner_scope() ||
- scope_calls_eval_ || inner_scope_calls_eval_ ||
- scope_contains_with_ || var->is_global());
+ //
+ // Exceptions: temporary variables are never allocated in a context;
+ // catch-bound variables are always allocated in a context.
+ if (var->mode() == Variable::TEMPORARY) return false;
+ if (is_catch_scope()) return true;
+ return var->is_accessed_from_inner_scope() ||
+ scope_calls_eval_ ||
+ inner_scope_calls_eval_ ||
+ scope_contains_with_ ||
+ var->is_global();
}
bool Scope::HasArgumentsParameter() {
for (int i = 0; i < params_.length(); i++) {
- if (params_[i]->name().is_identical_to(FACTORY->arguments_symbol()))
+ if (params_[i]->name().is_identical_to(
+ isolate_->factory()->arguments_symbol())) {
return true;
+ }
}
return false;
}
void Scope::AllocateStackSlot(Variable* var) {
- var->set_rewrite(new Slot(var, Slot::LOCAL, num_stack_slots_++));
+ var->set_rewrite(NewSlot(var, Slot::LOCAL, num_stack_slots_++));
}
void Scope::AllocateHeapSlot(Variable* var) {
- var->set_rewrite(new Slot(var, Slot::CONTEXT, num_heap_slots_++));
+ var->set_rewrite(NewSlot(var, Slot::CONTEXT, num_heap_slots_++));
}
void Scope::AllocateParameterLocals() {
ASSERT(is_function_scope());
- Variable* arguments = LocalLookup(FACTORY->arguments_symbol());
+ Variable* arguments = LocalLookup(isolate_->factory()->arguments_symbol());
ASSERT(arguments != NULL); // functions have 'arguments' declared implicitly
- // Parameters are rewritten to arguments[i] if 'arguments' is used in
- // a non-strict mode function. Strict mode code doesn't alias arguments.
- bool rewrite_parameters = false;
+ bool uses_nonstrict_arguments = false;
if (MustAllocate(arguments) && !HasArgumentsParameter()) {
// 'arguments' is used. Unless there is also a parameter called
- // 'arguments', we must be conservative and access all parameters via
- // the arguments object: The i'th parameter is rewritten into
- // '.arguments[i]' (*). If we have a parameter named 'arguments', a
- // (new) value is always assigned to it via the function
- // invocation. Then 'arguments' denotes that specific parameter value
- // and cannot be used to access the parameters, which is why we don't
- // need to rewrite in that case.
- //
- // (*) Instead of having a parameter called 'arguments', we may have an
- // assignment to 'arguments' in the function body, at some arbitrary
- // point in time (possibly through an 'eval()' call!). After that
- // assignment any re-write of parameters would be invalid (was bug
- // 881452). Thus, we introduce a shadow '.arguments'
- // variable which also points to the arguments object. For rewrites we
- // use '.arguments' which remains valid even if we assign to
- // 'arguments'. To summarize: If we need to rewrite, we allocate an
- // 'arguments' object dynamically upon function invocation. The compiler
- // introduces 2 local variables 'arguments' and '.arguments', both of
- // which originally point to the arguments object that was
- // allocated. All parameters are rewritten into property accesses via
- // the '.arguments' variable. Thus, any changes to properties of
- // 'arguments' are reflected in the variables and vice versa. If the
- // 'arguments' variable is changed, '.arguments' still points to the
- // correct arguments object and the rewrites still work.
+ // 'arguments', we must be conservative and allocate all parameters to
+ // the context assuming they will be captured by the arguments object.
+ // If we have a parameter named 'arguments', a (new) value is always
+ // assigned to it via the function invocation. Then 'arguments' denotes
+ // that specific parameter value and cannot be used to access the
+ // parameters, which is why we don't need to allocate an arguments
+ // object in that case.
// We are using 'arguments'. Tell the code generator that it needs to
// allocate the arguments object by setting 'arguments_'.
@@ -992,76 +997,32 @@
// In strict mode 'arguments' does not alias formal parameters.
// Therefore in strict mode we allocate parameters as if 'arguments'
// were not used.
- rewrite_parameters = !is_strict_mode();
+ uses_nonstrict_arguments = !is_strict_mode();
}
- if (rewrite_parameters) {
- // We also need the '.arguments' shadow variable. Declare it and create
- // and bind the corresponding proxy. It's ok to declare it only now
- // because it's a local variable that is allocated after the parameters
- // have been allocated.
- //
- // Note: This is "almost" at temporary variable but we cannot use
- // NewTemporary() because the mode needs to be INTERNAL since this
- // variable may be allocated in the heap-allocated context (temporaries
- // are never allocated in the context).
- arguments_shadow_ = new Variable(this,
- FACTORY->arguments_shadow_symbol(),
- Variable::INTERNAL,
- true,
- Variable::ARGUMENTS);
- arguments_shadow_->set_is_used(true);
- temps_.Add(arguments_shadow_);
-
- // Allocate the parameters by rewriting them into '.arguments[i]' accesses.
- for (int i = 0; i < params_.length(); i++) {
- Variable* var = params_[i];
- ASSERT(var->scope() == this);
- if (MustAllocate(var)) {
- if (MustAllocateInContext(var)) {
- // It is ok to set this only now, because arguments is a local
- // variable that is allocated after the parameters have been
- // allocated.
- arguments_shadow_->MarkAsAccessedFromInnerScope();
- }
- Property* rewrite =
- new Property(new VariableProxy(arguments_shadow_),
- new Literal(Handle<Object>(Smi::FromInt(i))),
- RelocInfo::kNoPosition,
- Property::SYNTHETIC);
- rewrite->set_is_arguments_access(true);
- var->set_rewrite(rewrite);
- }
+ // The same parameter may occur multiple times in the parameters_ list.
+ // If it does, and if it is not copied into the context object, it must
+ // receive the highest parameter index for that parameter; thus iteration
+ // order is relevant!
+ for (int i = params_.length() - 1; i >= 0; --i) {
+ Variable* var = params_[i];
+ ASSERT(var->scope() == this);
+ if (uses_nonstrict_arguments) {
+ // Give the parameter a use from an inner scope, to force allocation
+ // to the context.
+ var->MarkAsAccessedFromInnerScope();
}
- } else {
- // The arguments object is not used, so we can access parameters directly.
- // The same parameter may occur multiple times in the parameters_ list.
- // If it does, and if it is not copied into the context object, it must
- // receive the highest parameter index for that parameter; thus iteration
- // order is relevant!
- for (int i = 0; i < params_.length(); i++) {
- Variable* var = params_[i];
- ASSERT(var->scope() == this);
- if (MustAllocate(var)) {
- if (MustAllocateInContext(var)) {
- ASSERT(var->rewrite() == NULL ||
- (var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::CONTEXT));
- if (var->rewrite() == NULL) {
- // Only set the heap allocation if the parameter has not
- // been allocated yet.
- AllocateHeapSlot(var);
- }
- } else {
- ASSERT(var->rewrite() == NULL ||
- (var->AsSlot() != NULL &&
- var->AsSlot()->type() == Slot::PARAMETER));
- // Set the parameter index always, even if the parameter
- // was seen before! (We need to access the actual parameter
- // supplied for the last occurrence of a multiply declared
- // parameter.)
- var->set_rewrite(new Slot(var, Slot::PARAMETER, i));
+ if (MustAllocate(var)) {
+ if (MustAllocateInContext(var)) {
+ ASSERT(var->rewrite() == NULL || var->IsContextSlot());
+ if (var->rewrite() == NULL) {
+ AllocateHeapSlot(var);
+ }
+ } else {
+ ASSERT(var->rewrite() == NULL || var->IsParameter());
+ if (var->rewrite() == NULL) {
+ var->set_rewrite(NewSlot(var, Slot::PARAMETER, i));
}
}
}
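
The "iteration order is relevant" note above is easiest to see in isolation: with duplicate parameter names, the rightmost occurrence must win, and iterating right-to-left with a first-write-wins rule guarantees exactly that. A standalone sketch (plain C++, not V8's data structures):

  #include <cassert>
  #include <map>
  #include <string>
  #include <vector>

  // Assign each parameter name a slot index; for duplicates the rightmost
  // (highest-index) occurrence must win, as in JS 'function f(x, x) {...}'.
  std::map<std::string, int> AssignSlots(
      const std::vector<std::string>& params) {
    std::map<std::string, int> slots;
    for (int i = static_cast<int>(params.size()) - 1; i >= 0; --i) {
      slots.insert({params[i], i});  // insert() keeps the first (highest) index
    }
    return slots;
  }

  int main() {
    std::map<std::string, int> slots = AssignSlots({"x", "y", "x"});
    assert(slots["x"] == 2);  // the duplicate 'x' resolves to the last one
    assert(slots["y"] == 1);
    return 0;
  }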
@@ -1072,8 +1033,9 @@
void Scope::AllocateNonParameterLocal(Variable* var) {
ASSERT(var->scope() == this);
ASSERT(var->rewrite() == NULL ||
- (!var->IsVariable(FACTORY->result_symbol())) ||
- (var->AsSlot() == NULL || var->AsSlot()->type() != Slot::LOCAL));
+ !var->IsVariable(isolate_->factory()->result_symbol()) ||
+ var->AsSlot() == NULL ||
+ var->AsSlot()->type() != Slot::LOCAL);
if (var->rewrite() == NULL && MustAllocate(var)) {
if (MustAllocateInContext(var)) {
AllocateHeapSlot(var);
@@ -1115,7 +1077,7 @@
// If scope is already resolved, we still need to allocate
// variables in inner scopes which might not have been resolved yet.
- if (resolved()) return;
+ if (already_resolved()) return;
// The number of slots required for variables.
num_stack_slots_ = 0;
num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
diff --git a/src/scopes.h b/src/scopes.h
index faa6fd9..e76fb50 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -90,20 +90,14 @@
// Construction
enum Type {
- EVAL_SCOPE, // the top-level scope for an 'eval' source
- FUNCTION_SCOPE, // the top-level scope for a function
- GLOBAL_SCOPE // the top-level scope for a program or a top-level eval
- };
-
- enum LocalType {
- PARAMETER,
- VAR_OR_CONST
+ EVAL_SCOPE, // The top-level scope for an eval source.
+ FUNCTION_SCOPE, // The top-level scope for a function.
+ GLOBAL_SCOPE, // The top-level scope for a program or a top-level eval.
+ CATCH_SCOPE // The scope introduced by catch.
};
Scope(Scope* outer_scope, Type type);
- virtual ~Scope() { }
-
// Compute top scope and allocate variables. For lazy compilation the top
// scope only contains the single lazily compiled function, so this
// doesn't re-allocate variables repeatedly.
@@ -115,33 +109,31 @@
// The scope name is only used for printing/debugging.
void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
- virtual void Initialize(bool inside_with);
-
- // Called just before leaving a scope.
- virtual void Leave() {
- // No cleanup or fixup necessary.
- }
+ void Initialize(bool inside_with);
// ---------------------------------------------------------------------------
// Declarations
// Lookup a variable in this scope. Returns the variable or NULL if not found.
- virtual Variable* LocalLookup(Handle<String> name);
+ Variable* LocalLookup(Handle<String> name);
// Lookup a variable in this scope or outer scopes.
// Returns the variable or NULL if not found.
- virtual Variable* Lookup(Handle<String> name);
+ Variable* Lookup(Handle<String> name);
// Declare the function variable for a function literal. This variable
// is in an intermediate scope between this function scope and the
// outer scope. Only possible for function scopes; at most one variable.
Variable* DeclareFunctionVar(Handle<String> name);
+  // Declare a parameter in this scope. When there are duplicated
+  // parameters, the rightmost one 'wins'. However, the implementation
+  // expects all parameters to be declared, from left to right.
+ void DeclareParameter(Handle<String> name);
+
// Declare a local variable in this scope. If the variable has been
// declared before, the previously declared variable is returned.
- virtual Variable* DeclareLocal(Handle<String> name,
- Variable::Mode mode,
- LocalType type);
+ Variable* DeclareLocal(Handle<String> name, Variable::Mode mode);
// Declare an implicit global variable in this scope which must be a
// global scope. The variable was introduced (possibly from an inner
@@ -149,16 +141,10 @@
// with statements or eval calls.
Variable* DeclareGlobal(Handle<String> name);
- // Add a parameter to the parameter list. The parameter must have been
- // declared via Declare. The same parameter may occur more than once in
- // the parameter list; they must be added in source order, from left to
- // right.
- void AddParameter(Variable* var);
-
// Create a new unresolved variable.
- virtual VariableProxy* NewUnresolved(Handle<String> name,
- bool inside_with,
- int position = RelocInfo::kNoPosition);
+ VariableProxy* NewUnresolved(Handle<String> name,
+ bool inside_with,
+ int position = RelocInfo::kNoPosition);
// Remove an unresolved variable. During parsing, an unresolved variable
// may have been added optimistically, but then only the variable name
@@ -172,7 +158,7 @@
// for printing and cannot be used to find the variable. In particular,
// the only way to get hold of the temporary is by keeping the Variable*
// around.
- virtual Variable* NewTemporary(Handle<String> name);
+ Variable* NewTemporary(Handle<String> name);
// Adds the specific declaration node to the list of declarations in
// this scope. The declarations are processed as part of entering
@@ -217,6 +203,7 @@
bool is_eval_scope() const { return type_ == EVAL_SCOPE; }
bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
+ bool is_catch_scope() const { return type_ == CATCH_SCOPE; }
bool is_strict_mode() const { return strict_mode_; }
bool is_strict_mode_eval_scope() const {
return is_eval_scope() && is_strict_mode();
@@ -240,13 +227,8 @@
// ---------------------------------------------------------------------------
// Accessors.
- // A new variable proxy corresponding to the (function) receiver.
- VariableProxy* receiver() const {
- VariableProxy* proxy =
- new VariableProxy(FACTORY->this_symbol(), true, false);
- proxy->BindTo(receiver_);
- return proxy;
- }
+  // The variable corresponding to the 'this' value.
+ Variable* receiver() { return receiver_; }
// The variable holding the function literal for named function
// literals, or NULL.
@@ -266,18 +248,12 @@
int num_parameters() const { return params_.length(); }
// The local variable 'arguments' if we need to allocate it; NULL otherwise.
- // If arguments() exist, arguments_shadow() exists, too.
Variable* arguments() const { return arguments_; }
- // The '.arguments' shadow variable if we need to allocate it; NULL otherwise.
- // If arguments_shadow() exist, arguments() exists, too.
- Variable* arguments_shadow() const { return arguments_shadow_; }
-
// Declarations list.
ZoneList<Declaration*>* declarations() { return &decls_; }
-
// ---------------------------------------------------------------------------
// Variable allocation.
@@ -309,11 +285,15 @@
bool AllowsLazyCompilation() const;
// True if the outer context of this scope is always the global context.
- virtual bool HasTrivialOuterContext() const;
+ bool HasTrivialOuterContext() const;
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
+ // Find the first function, global, or eval scope. This is the scope
+ // where var declarations will be hoisted to in the implementation.
+ Scope* DeclarationScope();
+
// ---------------------------------------------------------------------------
// Strict mode support.
bool IsDeclared(Handle<String> name) {
@@ -339,6 +319,8 @@
explicit Scope(Type type);
+ Isolate* const isolate_;
+
// Scope tree.
Scope* outer_scope_; // the immediately enclosing outer scope, or NULL
ZoneList<Scope*> inner_scopes_; // the immediately enclosed inner scopes
@@ -371,8 +353,6 @@
Variable* function_;
// Convenience variable; function scopes only.
Variable* arguments_;
- // Convenience variable; function scopes only.
- Variable* arguments_shadow_;
// Illegal redeclaration.
Expression* illegal_redecl_;
@@ -390,6 +370,10 @@
bool outer_scope_is_eval_scope_;
bool force_eager_compilation_;
+ // True if it doesn't need scope resolution (e.g., if the scope was
+ // constructed based on a serialized scope info or a catch context).
+ bool already_resolved_;
+
// Computed as variables are declared.
int num_var_or_const_;
@@ -399,7 +383,7 @@
// Serialized scopes support.
Handle<SerializedScopeInfo> scope_info_;
- bool resolved() { return !scope_info_.is_null(); }
+ bool already_resolved() { return already_resolved_; }
// Create a non-local variable with a given name.
// These variables are looked up dynamically at runtime.
@@ -435,8 +419,16 @@
void AllocateVariablesRecursively();
private:
+ // Construct a function scope based on the scope info.
Scope(Scope* inner_scope, Handle<SerializedScopeInfo> scope_info);
+ // Construct a catch scope with a binding for the name.
+ Scope(Scope* inner_scope, Handle<String> catch_variable_name);
+
+ inline Slot* NewSlot(Variable* var, Slot::Type type, int index) {
+ return new(isolate_->zone()) Slot(isolate_, var, type, index);
+ }
+
void AddInnerScope(Scope* inner_scope) {
if (inner_scope != NULL) {
inner_scopes_.Add(inner_scope);
@@ -449,59 +441,6 @@
Handle<SerializedScopeInfo> scope_info);
};
-
-// Scope used during pre-parsing.
-class DummyScope : public Scope {
- public:
- DummyScope()
- : Scope(GLOBAL_SCOPE),
- nesting_level_(1), // Allows us to Leave the initial scope.
- inside_with_level_(kNotInsideWith) {
- outer_scope_ = this;
- scope_inside_with_ = false;
- }
-
- virtual void Initialize(bool inside_with) {
- nesting_level_++;
- if (inside_with && inside_with_level_ == kNotInsideWith) {
- inside_with_level_ = nesting_level_;
- }
- ASSERT(inside_with_level_ <= nesting_level_);
- }
-
- virtual void Leave() {
- nesting_level_--;
- ASSERT(nesting_level_ >= 0);
- if (nesting_level_ < inside_with_level_) {
- inside_with_level_ = kNotInsideWith;
- }
- ASSERT(inside_with_level_ <= nesting_level_);
- }
-
- virtual Variable* Lookup(Handle<String> name) { return NULL; }
-
- virtual VariableProxy* NewUnresolved(Handle<String> name,
- bool inside_with,
- int position = RelocInfo::kNoPosition) {
- return NULL;
- }
-
- virtual Variable* NewTemporary(Handle<String> name) { return NULL; }
-
- virtual bool HasTrivialOuterContext() const {
- return (nesting_level_ == 0 || inside_with_level_ <= 0);
- }
-
- private:
- static const int kNotInsideWith = -1;
- // Number of surrounding scopes of the current scope.
- int nesting_level_;
- // Nesting level of outermost scope that is contained in a with statement,
- // or kNotInsideWith if there are no with's around the current scope.
- int inside_with_level_;
-};
-
-
} } // namespace v8::internal
#endif // V8_SCOPES_H_
diff --git a/src/serialize.cc b/src/serialize.cc
index a64fba3..8cde580 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -284,7 +284,6 @@
const char* AddressNames[] = {
#define C(name) "Isolate::" #name,
ISOLATE_ADDRESS_LIST(C)
- ISOLATE_ADDRESS_LIST_PROF(C)
NULL
#undef C
};
@@ -1017,10 +1016,11 @@
case kNativesStringResource: {
int index = source_->Get();
- Vector<const char> source_vector = Natives::GetScriptSource(index);
+ Vector<const char> source_vector = Natives::GetRawScriptSource(index);
NativesExternalStringResource* resource =
- new NativesExternalStringResource(
- isolate->bootstrapper(), source_vector.start());
+ new NativesExternalStringResource(isolate->bootstrapper(),
+ source_vector.start(),
+ source_vector.length());
*current++ = reinterpret_cast<Object*>(resource);
break;
}
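The deserializer hunk above switches from GetScriptSource to GetRawScriptSource and passes an explicit byte length to the resource, which matters if the embedded natives are no longer guaranteed to be NUL-terminated C strings (e.g. when stored compressed). A sketch of the assumed resource shape, for illustration only:

    // Assumed shape (not part of this diff): the resource now carries its
    // length instead of relying on a NUL-terminated source string.
    class NativesExternalStringResource
        : public v8::String::ExternalAsciiStringResource {
     public:
      NativesExternalStringResource(Bootstrapper* bootstrapper,
                                    const char* source,
                                    size_t length)
          : data_(source), length_(length) {
        // The real constructor presumably also registers with the
        // bootstrapper so the resource is freed during teardown.
      }
      const char* data() const { return data_; }
      size_t length() const { return length_; }
     private:
      const char* data_;
      size_t length_;
    };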
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index f5f6654..0eb827d 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -379,35 +379,6 @@
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void MemoryAllocator::Protect(Address start, size_t size) {
- OS::Protect(start, size);
-}
-
-
-void MemoryAllocator::Unprotect(Address start,
- size_t size,
- Executability executable) {
- OS::Unprotect(start, size, executable);
-}
-
-
-void MemoryAllocator::ProtectChunkFromPage(Page* page) {
- int id = GetChunkId(page);
- OS::Protect(chunks_[id].address(), chunks_[id].size());
-}
-
-
-void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
- int id = GetChunkId(page);
- OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
- chunks_[id].owner()->executable() == EXECUTABLE);
-}
-
-#endif
-
-
// --------------------------------------------------------------------------
// PagedSpace
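With ENABLE_HEAP_PROTECTION retired, the Protect/Unprotect wrappers deleted above have no remaining callers; the only page protection left in this patch is the OS::Guard call on executable chunks in spaces.cc below.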
diff --git a/src/spaces.cc b/src/spaces.cc
index b494d24..0f80496 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -402,7 +402,9 @@
size_t length,
Executability executable) {
#ifdef DEBUG
- ZapBlock(reinterpret_cast<Address>(mem), length);
+ // Do not try to zap the guard page.
+ size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
+ ZapBlock(reinterpret_cast<Address>(mem) + guard_size, length - guard_size);
#endif
if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
isolate_->code_range()->FreeRawMemory(mem, length);
@@ -504,14 +506,28 @@
LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
*allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
+
// We may 'lose' a page due to alignment.
ASSERT(*allocated_pages >= kPagesPerChunk - 1);
- if (*allocated_pages == 0) {
- FreeRawMemory(chunk, chunk_size, owner->executable());
+
+ size_t guard_size = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0;
+
+ // Check that we got at least one page that we can use.
+ if (*allocated_pages <= ((guard_size != 0) ? 1 : 0)) {
+ FreeRawMemory(chunk,
+ chunk_size,
+ owner->executable());
LOG(isolate_, DeleteEvent("PagedChunk", chunk));
return Page::FromAddress(NULL);
}
+ if (guard_size != 0) {
+ OS::Guard(chunk, guard_size);
+ chunk_size -= guard_size;
+ chunk = static_cast<Address>(chunk) + guard_size;
+ --*allocated_pages;
+ }
+
int chunk_id = Pop();
chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
@@ -681,7 +697,8 @@
LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
size_t size = c.size();
- FreeRawMemory(c.address(), size, c.executable());
+ size_t guard_size = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0;
+ FreeRawMemory(c.address() - guard_size, size + guard_size, c.executable());
PerformAllocationCallback(space, kAllocationActionFree, size);
}
c.init(NULL, 0, NULL);
@@ -868,30 +885,6 @@
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void PagedSpace::Protect() {
- Page* page = first_page_;
- while (page->is_valid()) {
- Isolate::Current()->memory_allocator()->ProtectChunkFromPage(page);
- page = Isolate::Current()->memory_allocator()->
- FindLastPageInSameChunk(page)->next_page();
- }
-}
-
-
-void PagedSpace::Unprotect() {
- Page* page = first_page_;
- while (page->is_valid()) {
- Isolate::Current()->memory_allocator()->UnprotectChunkFromPage(page);
- page = Isolate::Current()->memory_allocator()->
- FindLastPageInSameChunk(page)->next_page();
- }
-}
-
-#endif
-
-
void PagedSpace::MarkAllPagesClean() {
PageIterator it(this, PageIterator::ALL_PAGES);
while (it.has_next()) {
@@ -1196,7 +1189,6 @@
ASSERT(IsPowerOf2(maximum_semispace_capacity));
// Allocate and setup the histogram arrays if necessary.
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
@@ -1204,7 +1196,6 @@
promoted_histogram_[name].set_name(#name);
INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
-#endif
ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
ASSERT(IsAddressAligned(start, size, 0));
@@ -1236,7 +1227,6 @@
void NewSpace::TearDown() {
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
if (allocated_histogram_) {
DeleteArray(allocated_histogram_);
allocated_histogram_ = NULL;
@@ -1245,7 +1235,6 @@
DeleteArray(promoted_histogram_);
promoted_histogram_ = NULL;
}
-#endif
start_ = NULL;
allocation_info_.top = NULL;
@@ -1258,24 +1247,6 @@
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void NewSpace::Protect() {
- heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity());
- heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity());
-}
-
-
-void NewSpace::Unprotect() {
- heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(),
- to_space_.executable());
- heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(),
- from_space_.executable());
-}
-
-#endif
-
-
void NewSpace::Flip() {
SemiSpace tmp = from_space_;
from_space_ = to_space_;
@@ -1638,7 +1609,6 @@
// Support for statistics gathering for --heap-stats and --log-gc.
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void NewSpace::ClearHistograms() {
for (int i = 0; i <= LAST_TYPE; i++) {
allocated_histogram_[i].clear();
@@ -1648,9 +1618,7 @@
// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
-// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
-// set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
-// flag is set.
+// This only happens when the --log-gc flag is set.
void NewSpace::CollectStatistics() {
ClearHistograms();
SemiSpaceIterator it(this);
@@ -1659,7 +1627,6 @@
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
static void DoReportStatistics(Isolate* isolate,
HistogramInfo* info, const char* description) {
LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
@@ -1686,7 +1653,6 @@
}
LOG(isolate, HeapSampleEndEvent("NewSpace", description));
}
-#endif // ENABLE_LOGGING_AND_PROFILING
void NewSpace::ReportStatistics() {
@@ -1709,13 +1675,11 @@
}
#endif // DEBUG
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_gc) {
Isolate* isolate = ISOLATE;
DoReportStatistics(isolate, allocated_histogram_, "allocated");
DoReportStatistics(isolate, promoted_histogram_, "promoted");
}
-#endif // ENABLE_LOGGING_AND_PROFILING
}
@@ -1733,7 +1697,6 @@
promoted_histogram_[type].increment_number(1);
promoted_histogram_[type].increment_bytes(obj->Size());
}
-#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// -----------------------------------------------------------------------------
@@ -2726,9 +2689,10 @@
Executability executable) {
size_t requested = ChunkSizeFor(size_in_bytes);
size_t size;
+ size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
Isolate* isolate = Isolate::Current();
void* mem = isolate->memory_allocator()->AllocateRawMemory(
- requested, &size, executable);
+ requested + guard_size, &size, executable);
if (mem == NULL) return NULL;
// The start of the chunk may be overlaid with a page so we have to
@@ -2736,13 +2700,19 @@
ASSERT((size & Page::kPageFlagMask) == 0);
LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
- if (size < requested) {
+ if (size < requested + guard_size) {
isolate->memory_allocator()->FreeRawMemory(
mem, size, executable);
LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
return NULL;
}
+ if (guard_size != 0) {
+ OS::Guard(mem, guard_size);
+ size -= guard_size;
+ mem = static_cast<Address>(mem) + guard_size;
+ }
+
ObjectSpace space = (executable == EXECUTABLE)
? kObjectSpaceCodeSpace
: kObjectSpaceLoSpace;
@@ -2796,9 +2766,11 @@
ObjectSpace space = kObjectSpaceLoSpace;
if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
size_t size = chunk->size();
- heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
- size,
- executable);
+ size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
+ heap()->isolate()->memory_allocator()->FreeRawMemory(
+ chunk->address() - guard_size,
+ size + guard_size,
+ executable);
heap()->isolate()->memory_allocator()->PerformAllocationCallback(
space, kAllocationActionFree, size);
}
@@ -2809,31 +2781,6 @@
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void LargeObjectSpace::Protect() {
- LargeObjectChunk* chunk = first_chunk_;
- while (chunk != NULL) {
- heap()->isolate()->memory_allocator()->Protect(chunk->address(),
- chunk->size());
- chunk = chunk->next();
- }
-}
-
-
-void LargeObjectSpace::Unprotect() {
- LargeObjectChunk* chunk = first_chunk_;
- while (chunk != NULL) {
- bool is_code = chunk->GetObject()->IsCode();
- heap()->isolate()->memory_allocator()->Unprotect(chunk->address(),
- chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE);
- chunk = chunk->next();
- }
-}
-
-#endif
-
-
MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
int object_size,
Executability executable) {
@@ -3020,10 +2967,15 @@
objects_size_ -= object->Size();
page_count_--;
ObjectSpace space = kObjectSpaceLoSpace;
- if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
- heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
- chunk_size,
- executable);
+ size_t guard_size = 0;
+ if (executable == EXECUTABLE) {
+ space = kObjectSpaceCodeSpace;
+ guard_size = Page::kPageSize;
+ }
+ heap()->isolate()->memory_allocator()->FreeRawMemory(
+ chunk_address - guard_size,
+ chunk_size + guard_size,
+ executable);
heap()->isolate()->memory_allocator()->PerformAllocationCallback(
space, kAllocationActionFree, size_);
LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
@@ -3070,7 +3022,7 @@
// strings), fixed arrays, and byte arrays in large object space.
ASSERT(object->IsCode() || object->IsSeqString() ||
object->IsExternalString() || object->IsFixedArray() ||
- object->IsByteArray());
+ object->IsFixedDoubleArray() || object->IsByteArray());
// The object itself should look OK.
object->Verify();
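The recurring pattern in the spaces.cc hunks above: executable chunks are allocated with one extra page at the front, that page is made inaccessible with OS::Guard, and the visible chunk starts just past it, so every corresponding free must shift the address back and re-add the guard size. A standalone sketch of the invariant (illustration, not V8 code; kPageSize stands in for Page::kPageSize):

    #include <cstddef>

    static const size_t kPageSize = 4096;
    struct Chunk { char* base; size_t size; };

    // Allocation side: OS::Guard(raw, kPageSize) makes the first page
    // inaccessible and callers only ever see the region after it.
    Chunk TakeExecutableChunk(char* raw, size_t raw_size) {
      Chunk c;
      c.base = raw + kPageSize;
      c.size = raw_size - kPageSize;
      return c;
    }

    // Free side: undo the shift and the size adjustment before handing the
    // block back, exactly as the FreeRawMemory call sites above now do.
    void ReleaseExecutableChunk(Chunk c) {
      char* raw = c.base - kPageSize;
      size_t raw_size = c.size + kPageSize;
      // FreeRawMemory(raw, raw_size, EXECUTABLE) in the real code; note
      // that the DEBUG-only ZapBlock likewise has to skip the guard page.
      (void)raw;
      (void)raw_size;
    }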
diff --git a/src/spaces.h b/src/spaces.h
index 4024387..ac5d998 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -380,12 +380,6 @@
// (e.g. see LargeObjectSpace).
virtual intptr_t SizeOfObjects() { return Size(); }
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect the space by marking it read-only/writable.
- virtual void Protect() = 0;
- virtual void Unprotect() = 0;
-#endif
-
#ifdef DEBUG
virtual void Print() = 0;
#endif
@@ -641,17 +635,6 @@
Page** last_page,
Page** last_page_in_use);
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect a block of memory by marking it read-only/writable.
- inline void Protect(Address start, size_t size);
- inline void Unprotect(Address start, size_t size,
- Executability executable);
-
- // Protect/unprotect a chunk given a page in the chunk.
- inline void ProtectChunkFromPage(Page* page);
- inline void UnprotectChunkFromPage(Page* page);
-#endif
-
#ifdef DEBUG
// Reports statistic info of the space.
void ReportStatistics();
@@ -664,13 +647,11 @@
#ifdef V8_TARGET_ARCH_X64
static const int kPagesPerChunk = 32;
// On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
- static const int kPagesPerChunkLog2 = 5;
static const int kChunkTableLevels = 4;
static const int kChunkTableBitsPerLevel = 12;
#else
static const int kPagesPerChunk = 16;
// On 32 bit the chunk table consists of 2 levels of 256-entry tables.
- static const int kPagesPerChunkLog2 = 4;
static const int kChunkTableLevels = 2;
static const int kChunkTableBitsPerLevel = 8;
#endif
@@ -679,7 +660,6 @@
MemoryAllocator();
static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
- static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;
// Maximum space size in bytes.
intptr_t capacity_;
@@ -1157,12 +1137,6 @@
// Ensures that the capacity is at least 'capacity'. Returns false on failure.
bool EnsureCapacity(int capacity);
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect the space by marking it read-only/writable.
- void Protect();
- void Unprotect();
-#endif
-
#ifdef DEBUG
// Print meta info and objects in this space.
virtual void Print();
@@ -1270,7 +1244,6 @@
};
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
class NumberAndSizeInfo BASE_EMBEDDED {
public:
NumberAndSizeInfo() : number_(0), bytes_(0) {}
@@ -1293,9 +1266,7 @@
// HistogramInfo class for recording a single "bar" of a histogram. This
-// class is used for collecting statistics to print to stdout (when compiled
-// with DEBUG) or to the log file (when compiled with
-// ENABLE_LOGGING_AND_PROFILING).
+// class is used for collecting statistics to print to the log file.
class HistogramInfo: public NumberAndSizeInfo {
public:
HistogramInfo() : NumberAndSizeInfo() {}
@@ -1306,7 +1277,6 @@
private:
const char* name_;
};
-#endif
// -----------------------------------------------------------------------------
@@ -1392,12 +1362,6 @@
bool Commit();
bool Uncommit();
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect the space by marking it read-only/writable.
- virtual void Protect() {}
- virtual void Unprotect() {}
-#endif
-
#ifdef DEBUG
virtual void Print();
virtual void Verify();
@@ -1628,12 +1592,6 @@
template <typename StringType>
inline void ShrinkStringAtAllocationBoundary(String* string, int len);
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect the space by marking it read-only/writable.
- virtual void Protect();
- virtual void Unprotect();
-#endif
-
#ifdef DEBUG
// Verify the active semispace.
virtual void Verify();
@@ -1641,7 +1599,6 @@
virtual void Print() { to_space_.Print(); }
#endif
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Iterates the active semispace to collect statistics.
void CollectStatistics();
// Reports previously collected statistics of the active semispace.
@@ -1654,7 +1611,6 @@
// to space during a scavenge GC.
void RecordAllocation(HeapObject* obj);
void RecordPromotion(HeapObject* obj);
-#endif
// Return whether the operation succeeded.
bool CommitFromSpaceIfNeeded() {
@@ -1683,10 +1639,8 @@
AllocationInfo allocation_info_;
AllocationInfo mc_forwarding_info_;
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
-#endif
// Implementation of AllocateRaw and MCAllocateRaw.
MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
@@ -2296,12 +2250,6 @@
// may use some memory, leaving less for large objects.
virtual bool ReserveSpace(int bytes);
-#ifdef ENABLE_HEAP_PROTECTION
- // Protect/unprotect the space by marking it read-only/writable.
- void Protect();
- void Unprotect();
-#endif
-
#ifdef DEBUG
virtual void Verify();
virtual void Print();
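The spaces.h hunks mirror the .cc removals. The dropped kPagesPerChunkLog2 and kChunkSizeLog2 constants appear to have no remaining users after this cleanup, which is presumably why they go too (an inference; the patch itself does not say).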
diff --git a/src/string-stream.cc b/src/string-stream.cc
index aea1420..9002593 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,6 +30,8 @@
#include "factory.h"
#include "string-stream.h"
+#include "allocation-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/src/string.js b/src/string.js
index f24862c..600b131 100644
--- a/src/string.js
+++ b/src/string.js
@@ -251,7 +251,9 @@
// Compute the string to replace with.
if (IS_FUNCTION(replace)) {
- builder.add(%_CallFunction(%GetGlobalReceiver(),
+ var receiver =
+ %_IsNativeOrStrictMode(replace) ? void 0 : %GetGlobalReceiver();
+ builder.add(%_CallFunction(receiver,
search,
start,
subject,
@@ -418,7 +420,8 @@
if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
var match_start = 0;
var override = new InternalArray(null, 0, subject);
- var receiver = %GetGlobalReceiver();
+ var receiver =
+ %_IsNativeOrStrictMode(replace) ? void 0 : %GetGlobalReceiver();
while (i < len) {
var elem = res[i];
if (%_IsSmi(elem)) {
@@ -475,8 +478,10 @@
// No captures, only the match, which is always valid.
var s = SubString(subject, index, endOfMatch);
// Don't call directly to avoid exposing the built-in global object.
+ var receiver =
+ %_IsNativeOrStrictMode(replace) ? void 0 : %GetGlobalReceiver();
replacement =
- %_CallFunction(%GetGlobalReceiver(), s, index, subject, replace);
+ %_CallFunction(receiver, s, index, subject, replace);
} else {
var parameters = new InternalArray(m + 2);
for (var j = 0; j < m; j++) {
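The three string.js hunks apply one rule: a replace callback that is a built-in or a strict-mode function now receives void 0 as its receiver instead of the global object, matching ES5 semantics. For example, 'x'.replace(/x/, function() { "use strict"; return typeof this; }) now evaluates to "undefined" rather than "object".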
diff --git a/src/strtod.cc b/src/strtod.cc
index cedbff9..568531e 100644
--- a/src/strtod.cc
+++ b/src/strtod.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -26,10 +26,10 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdarg.h>
-#include <limits.h>
+#include <math.h>
-#include "v8.h"
-
+#include "platform.h"
+#include "utils.h"
#include "strtod.h"
#include "bignum.h"
#include "cached-powers.h"
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 8c6d84c..79cd7a0 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -29,6 +29,7 @@
#include "api.h"
#include "arguments.h"
+#include "code-stubs.h"
#include "gdb-jit.h"
#include "ic-inl.h"
#include "stub-cache.h"
@@ -485,86 +486,7 @@
}
-namespace {
-
-ExternalArrayType ElementsKindToExternalArrayType(JSObject::ElementsKind kind) {
- switch (kind) {
- case JSObject::EXTERNAL_BYTE_ELEMENTS:
- return kExternalByteArray;
- case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- return kExternalUnsignedByteArray;
- case JSObject::EXTERNAL_SHORT_ELEMENTS:
- return kExternalShortArray;
- case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- return kExternalUnsignedShortArray;
- case JSObject::EXTERNAL_INT_ELEMENTS:
- return kExternalIntArray;
- case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
- return kExternalUnsignedIntArray;
- case JSObject::EXTERNAL_FLOAT_ELEMENTS:
- return kExternalFloatArray;
- case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
- return kExternalDoubleArray;
- case JSObject::EXTERNAL_PIXEL_ELEMENTS:
- return kExternalPixelArray;
- default:
- UNREACHABLE();
- return static_cast<ExternalArrayType>(0);
- }
-}
-
-} // anonymous namespace
-
-
-MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray(
- JSObject* receiver,
- bool is_store,
- StrictModeFlag strict_mode) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(
- is_store ? Code::KEYED_STORE_IC :
- Code::KEYED_LOAD_IC,
- NORMAL,
- strict_mode);
- ExternalArrayType array_type =
- ElementsKindToExternalArrayType(receiver->GetElementsKind());
- String* name = is_store
- ? isolate()->heap()->KeyedStoreSpecializedMonomorphic_symbol()
- : isolate()->heap()->KeyedLoadSpecializedMonomorphic_symbol();
- Object* maybe_code = receiver->map()->FindInCodeCache(name, flags);
- if (!maybe_code->IsUndefined()) return Code::cast(maybe_code);
-
- MaybeObject* maybe_new_code = NULL;
- if (is_store) {
- ExternalArrayStoreStubCompiler compiler(strict_mode);
- maybe_new_code = compiler.CompileStore(receiver, array_type);
- } else {
- ExternalArrayLoadStubCompiler compiler(strict_mode);
- maybe_new_code = compiler.CompileLoad(receiver, array_type);
- }
- Code* code;
- if (!maybe_new_code->To(&code)) return maybe_new_code;
- code->set_external_array_type(array_type);
- if (is_store) {
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_EXTERNAL_ARRAY_STORE_IC_TAG,
- Code::cast(code), 0));
- } else {
- PROFILE(isolate_,
- CodeCreateEvent(Logger::KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG,
- Code::cast(code), 0));
- }
- ASSERT(code->IsCode());
- Object* result;
- { MaybeObject* maybe_result =
- receiver->UpdateMapCodeCache(name, Code::cast(code));
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- return code;
-}
-
-
-MaybeObject* StubCache::ComputeKeyedLoadOrStoreFastElement(
+MaybeObject* StubCache::ComputeKeyedLoadOrStoreElement(
JSObject* receiver,
bool is_store,
StrictModeFlag strict_mode) {
@@ -575,18 +497,19 @@
NORMAL,
strict_mode);
String* name = is_store
- ? isolate()->heap()->KeyedStoreSpecializedMonomorphic_symbol()
- : isolate()->heap()->KeyedLoadSpecializedMonomorphic_symbol();
+ ? isolate()->heap()->KeyedStoreElementMonomorphic_symbol()
+ : isolate()->heap()->KeyedLoadElementMonomorphic_symbol();
Object* maybe_code = receiver->map()->FindInCodeCache(name, flags);
if (!maybe_code->IsUndefined()) return Code::cast(maybe_code);
MaybeObject* maybe_new_code = NULL;
+ Map* receiver_map = receiver->map();
if (is_store) {
KeyedStoreStubCompiler compiler(strict_mode);
- maybe_new_code = compiler.CompileStoreFastElement(receiver->map());
+ maybe_new_code = compiler.CompileStoreElement(receiver_map);
} else {
KeyedLoadStubCompiler compiler;
- maybe_new_code = compiler.CompileLoadFastElement(receiver->map());
+ maybe_new_code = compiler.CompileLoadElement(receiver_map);
}
Code* code;
if (!maybe_new_code->To(&code)) return maybe_new_code;
@@ -1117,6 +1040,26 @@
}
+MaybeObject* StubCache::ComputeCallArguments(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind) {
+ ASSERT(kind == Code::KEYED_CALL_IC);
+ Code::Flags flags = Code::ComputeFlags(kind,
+ in_loop,
+ MEGAMORPHIC,
+ Code::kNoExtraICState,
+ NORMAL,
+ argc);
+ Object* probe;
+ { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
+ if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+ }
+ if (!probe->IsUndefined()) return probe;
+ StubCompiler compiler;
+ return FillCache(isolate_, compiler.CompileCallArguments(flags));
+}
+
+
MaybeObject* StubCache::ComputeCallMegamorphic(
int argc,
InLoopFlag in_loop,
@@ -1447,8 +1390,7 @@
JSObject* recv = JSObject::cast(args[0]);
String* name = String::cast(args[1]);
Object* value = args[2];
- StrictModeFlag strict_mode =
- static_cast<StrictModeFlag>(Smi::cast(args[3])->value());
+ StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
ASSERT(recv->HasNamedInterceptor());
PropertyAttributes attr = NONE;
@@ -1460,8 +1402,8 @@
RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor) {
JSObject* receiver = JSObject::cast(args[0]);
- ASSERT(Smi::cast(args[1])->value() >= 0);
- uint32_t index = Smi::cast(args[1])->value();
+ ASSERT(args.smi_at(1) >= 0);
+ uint32_t index = args.smi_at(1);
return receiver->GetElementWithInterceptor(receiver, index);
}
@@ -1573,6 +1515,26 @@
}
+MaybeObject* StubCompiler::CompileCallArguments(Code::Flags flags) {
+ HandleScope scope(isolate());
+ int argc = Code::ExtractArgumentsCountFromFlags(flags);
+ KeyedCallIC::GenerateNonStrictArguments(masm(), argc);
+ Code::Kind kind = Code::ExtractKindFromFlags(flags);
+ Object* result;
+ { MaybeObject* maybe_result =
+ GetCodeWithFlags(flags, "CompileCallArguments");
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Code* code = Code::cast(result);
+ USE(code);
+ PROFILE(isolate(),
+ CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
+ code, code->arguments_count()));
+ GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
+ return result;
+}
+
+
MaybeObject* StubCompiler::CompileCallMiss(Code::Flags flags) {
HandleScope scope(isolate());
int argc = Code::ExtractArgumentsCountFromFlags(flags);
@@ -1760,6 +1722,12 @@
}
+void KeyedStoreStubCompiler::GenerateStoreDictionaryElement(
+ MacroAssembler* masm) {
+ KeyedStoreIC::GenerateSlow(masm);
+}
+
+
CallStubCompiler::CallStubCompiler(int argc,
InLoopFlag in_loop,
Code::Kind kind,
@@ -1923,38 +1891,4 @@
}
-MaybeObject* ExternalArrayLoadStubCompiler::GetCode() {
- Object* result;
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC,
- NORMAL,
- strict_mode_);
- { MaybeObject* maybe_result = GetCodeWithFlags(flags,
- "ExternalArrayLoadStub");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Code* code = Code::cast(result);
- USE(code);
- PROFILE(isolate(),
- CodeCreateEvent(Logger::STUB_TAG, code, "ExternalArrayLoadStub"));
- return result;
-}
-
-
-MaybeObject* ExternalArrayStoreStubCompiler::GetCode() {
- Object* result;
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC,
- NORMAL,
- strict_mode_);
- { MaybeObject* maybe_result = GetCodeWithFlags(flags,
- "ExternalArrayStoreStub");
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- Code* code = Code::cast(result);
- USE(code);
- PROFILE(isolate(),
- CodeCreateEvent(Logger::STUB_TAG, code, "ExternalArrayStoreStub"));
- return result;
-}
-
-
} } // namespace v8::internal
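ComputeCallArguments above follows the stub cache's usual probe-then-fill shape; condensed (error plumbing kept, logging elided):

    Object* probe;
    { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
      if (!maybe_probe->ToObject(&probe)) return maybe_probe;  // e.g. OOM
    }
    if (!probe->IsUndefined()) return probe;  // cache hit: reuse the stub
    StubCompiler compiler;
    return FillCache(isolate_, compiler.CompileCallArguments(flags));

The interceptor runtime functions in the same file also pick up the terser args.smi_at(i) accessor in place of Smi::cast(args[i])->value().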
diff --git a/src/stub-cache.h b/src/stub-cache.h
index a1243c2..ffe4241 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -183,15 +183,11 @@
Map* transition,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreExternalArray(
+ MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreElement(
JSObject* receiver,
bool is_store,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreFastElement(
- JSObject* receiver,
- bool is_store,
- StrictModeFlag strict_mode);
// ---
MUST_USE_RESULT MaybeObject* ComputeCallField(
@@ -265,6 +261,10 @@
Code::Kind kind,
Code::ExtraICState state);
+ MUST_USE_RESULT MaybeObject* ComputeCallArguments(int argc,
+ InLoopFlag in_loop,
+ Code::Kind kind);
+
MUST_USE_RESULT MaybeObject* ComputeCallMegamorphic(int argc,
InLoopFlag in_loop,
Code::Kind kind,
@@ -429,6 +429,7 @@
MUST_USE_RESULT MaybeObject* CompileCallPreMonomorphic(Code::Flags flags);
MUST_USE_RESULT MaybeObject* CompileCallNormal(Code::Flags flags);
MUST_USE_RESULT MaybeObject* CompileCallMegamorphic(Code::Flags flags);
+ MUST_USE_RESULT MaybeObject* CompileCallArguments(Code::Flags flags);
MUST_USE_RESULT MaybeObject* CompileCallMiss(Code::Flags flags);
#ifdef ENABLE_DEBUGGER_SUPPORT
MUST_USE_RESULT MaybeObject* CompileCallDebugBreak(Code::Flags flags);
@@ -650,17 +651,21 @@
MUST_USE_RESULT MaybeObject* CompileLoadStringLength(String* name);
MUST_USE_RESULT MaybeObject* CompileLoadFunctionPrototype(String* name);
- MUST_USE_RESULT MaybeObject* CompileLoadFastElement(Map* receiver_map);
+ MUST_USE_RESULT MaybeObject* CompileLoadElement(Map* receiver_map);
MUST_USE_RESULT MaybeObject* CompileLoadMegamorphic(
MapList* receiver_maps,
CodeList* handler_ics);
static void GenerateLoadExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type);
+ JSObject::ElementsKind elements_kind);
static void GenerateLoadFastElement(MacroAssembler* masm);
+ static void GenerateLoadFastDoubleElement(MacroAssembler* masm);
+
+ static void GenerateLoadDictionaryElement(MacroAssembler* masm);
+
private:
MaybeObject* GetCode(PropertyType type,
String* name,
@@ -705,7 +710,7 @@
Map* transition,
String* name);
- MUST_USE_RESULT MaybeObject* CompileStoreFastElement(Map* receiver_map);
+ MUST_USE_RESULT MaybeObject* CompileStoreElement(Map* receiver_map);
MUST_USE_RESULT MaybeObject* CompileStoreMegamorphic(
MapList* receiver_maps,
@@ -714,8 +719,13 @@
static void GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array);
+ static void GenerateStoreFastDoubleElement(MacroAssembler* masm,
+ bool is_js_array);
+
static void GenerateStoreExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type);
+ JSObject::ElementsKind elements_kind);
+
+ static void GenerateStoreDictionaryElement(MacroAssembler* masm);
private:
MaybeObject* GetCode(PropertyType type,
@@ -894,35 +904,6 @@
CallHandlerInfo* api_call_info_;
};
-class ExternalArrayLoadStubCompiler: public StubCompiler {
- public:
- explicit ExternalArrayLoadStubCompiler(StrictModeFlag strict_mode)
- : strict_mode_(strict_mode) { }
-
- MUST_USE_RESULT MaybeObject* CompileLoad(
- JSObject* receiver, ExternalArrayType array_type);
-
- private:
- MaybeObject* GetCode();
-
- StrictModeFlag strict_mode_;
-};
-
-
-class ExternalArrayStoreStubCompiler: public StubCompiler {
- public:
- explicit ExternalArrayStoreStubCompiler(StrictModeFlag strict_mode)
- : strict_mode_(strict_mode) {}
-
- MUST_USE_RESULT MaybeObject* CompileStore(
- JSObject* receiver, ExternalArrayType array_type);
-
- private:
- MaybeObject* GetCode();
-
- StrictModeFlag strict_mode_;
-};
-
} } // namespace v8::internal
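With ExternalArrayLoadStubCompiler and ExternalArrayStoreStubCompiler gone, CompileStoreElement presumably selects a generator from the receiver map's elements kind. An assumed dispatch, for illustration only (the real switch lives in the platform-specific stub compilers, which this diff does not show):

    bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
    switch (receiver_map->elements_kind()) {
      case JSObject::FAST_ELEMENTS:
        GenerateStoreFastElement(masm, is_js_array);
        break;
      case JSObject::FAST_DOUBLE_ELEMENTS:
        GenerateStoreFastDoubleElement(masm, is_js_array);
        break;
      case JSObject::DICTIONARY_ELEMENTS:
        GenerateStoreDictionaryElement(masm);  // backed by the slow IC
        break;
      default:  // the external-array kinds
        GenerateStoreExternalArray(masm, receiver_map->elements_kind());
        break;
    }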
diff --git a/src/third_party/valgrind/valgrind.h b/src/third_party/valgrind/valgrind.h
index a94dc58..7a3ee2f 100644
--- a/src/third_party/valgrind/valgrind.h
+++ b/src/third_party/valgrind/valgrind.h
@@ -12,7 +12,7 @@
This file is part of Valgrind, a dynamic binary instrumentation
framework.
- Copyright (C) 2000-2007 Julian Seward. All rights reserved.
+ Copyright (C) 2000-2010 Julian Seward. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -73,6 +73,25 @@
#ifndef __VALGRIND_H
#define __VALGRIND_H
+
+/* ------------------------------------------------------------------ */
+/* VERSION NUMBER OF VALGRIND */
+/* ------------------------------------------------------------------ */
+
+/* Specify Valgrind's version number, so that user code can
+ conditionally compile based on our version number. Note that these
+ were introduced at version 3.6 and so do not exist in version 3.5
+ or earlier. The recommended way to use them to check for "version
+ X.Y or later" is (eg)
+
+#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
+ && (__VALGRIND_MAJOR__ > 3 \
+ || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
+*/
+#define __VALGRIND_MAJOR__ 3
+#define __VALGRIND_MINOR__ 6
+
+
#include <stdarg.h>
#include <stdint.h>
@@ -85,34 +104,44 @@
identifying architectures, which are different to the ones we use
within the rest of Valgrind. Note, __powerpc__ is active for both
32 and 64-bit PPC, whereas __powerpc64__ is only active for the
- latter (on Linux, that is). */
+ latter (on Linux, that is).
+
+ Misc note: how to find out what's predefined in gcc by default:
+ gcc -Wp,-dM somefile.c
+*/
+#undef PLAT_x86_darwin
+#undef PLAT_amd64_darwin
+#undef PLAT_x86_win32
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
#undef PLAT_ppc64_linux
-#undef PLAT_ppc32_aix5
-#undef PLAT_ppc64_aix5
+#undef PLAT_arm_linux
+#undef PLAT_s390x_linux
-#if !defined(_AIX) && defined(__i386__)
+
+#if defined(__APPLE__) && defined(__i386__)
+# define PLAT_x86_darwin 1
+#elif defined(__APPLE__) && defined(__x86_64__)
+# define PLAT_amd64_darwin 1
+#elif defined(__MINGW32__) || defined(__CYGWIN32__) \
+ || (defined(_WIN32) && defined(_M_IX86))
+# define PLAT_x86_win32 1
+#elif defined(__linux__) && defined(__i386__)
# define PLAT_x86_linux 1
-#elif !defined(_AIX) && defined(__x86_64__)
+#elif defined(__linux__) && defined(__x86_64__)
# define PLAT_amd64_linux 1
-#elif !defined(_AIX) && defined(__powerpc__) && !defined(__powerpc64__)
+#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
# define PLAT_ppc32_linux 1
-#elif !defined(_AIX) && defined(__powerpc__) && defined(__powerpc64__)
+#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__)
# define PLAT_ppc64_linux 1
-#elif defined(_AIX) && defined(__64BIT__)
-# define PLAT_ppc64_aix5 1
-#elif defined(_AIX) && !defined(__64BIT__)
-# define PLAT_ppc32_aix5 1
-#endif
-
-
+#elif defined(__linux__) && defined(__arm__)
+# define PLAT_arm_linux 1
+#elif defined(__linux__) && defined(__s390__) && defined(__s390x__)
+# define PLAT_s390x_linux 1
+#else
/* If we're not compiling for our target platform, don't generate
any inline asms. */
-#if !defined(PLAT_x86_linux) && !defined(PLAT_amd64_linux) \
- && !defined(PLAT_ppc32_linux) && !defined(PLAT_ppc64_linux) \
- && !defined(PLAT_ppc32_aix5) && !defined(PLAT_ppc64_aix5)
# if !defined(NVALGRIND)
# define NVALGRIND 1
# endif
@@ -124,17 +153,31 @@
/* in here of use to end-users -- skip to the next section. */
/* ------------------------------------------------------------------ */
+/*
+ * VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client
+ * request. Accepts both pointers and integers as arguments.
+ *
+ * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
+ * client request and whose value equals the client request result. Accepts
+ * both pointers and integers as arguments.
+ */
+
+#define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \
+ _zzq_request, _zzq_arg1, _zzq_arg2, \
+ _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \
+ (_zzq_request), (_zzq_arg1), (_zzq_arg2), \
+ (_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); }
+
#if defined(NVALGRIND)
/* Define NVALGRIND to completely remove the Valgrind magic sequence
from the compiled code (analogous to NDEBUG's effects on
assert()) */
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- { \
- (_zzq_rlval) = (_zzq_default); \
- }
+ (_zzq_default)
#else /* ! NVALGRIND */
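The expression form is what makes one-liner probes possible. A usage sketch (VG_USERREQ__RUNNING_ON_VALGRIND is defined further down in this header; natively the request is a no-op and the default comes back):

    unsigned running = VALGRIND_DO_CLIENT_REQUEST_EXPR(
        0 /* default when not under Valgrind */,
        VG_USERREQ__RUNNING_ON_VALGRIND, 0, 0, 0, 0, 0);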
@@ -173,9 +216,10 @@
inline asm stuff to be useful.
*/
-/* ------------------------- x86-linux ------------------------- */
+/* ------------------------- x86-{linux,darwin} ---------------- */
-#if defined(PLAT_x86_linux)
+#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
+ || (defined(PLAT_x86_win32) && defined(__GNUC__))
typedef
struct {
@@ -187,10 +231,11 @@
"roll $3, %%edi ; roll $13, %%edi\n\t" \
"roll $29, %%edi ; roll $19, %%edi\n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- { volatile unsigned int _zzq_args[6]; \
+ __extension__ \
+ ({volatile unsigned int _zzq_args[6]; \
volatile unsigned int _zzq_result; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
@@ -205,8 +250,8 @@
: "a" (&_zzq_args[0]), "0" (_zzq_default) \
: "cc", "memory" \
); \
- _zzq_rlval = _zzq_result; \
- }
+ _zzq_result; \
+ })
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
@@ -225,11 +270,77 @@
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%EAX */ \
"xchgl %%edx,%%edx\n\t"
-#endif /* PLAT_x86_linux */
+#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) */
-/* ------------------------ amd64-linux ------------------------ */
+/* ------------------------- x86-Win32 ------------------------- */
-#if defined(PLAT_amd64_linux)
+#if defined(PLAT_x86_win32) && !defined(__GNUC__)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#if defined(_MSC_VER)
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ __asm rol edi, 3 __asm rol edi, 13 \
+ __asm rol edi, 29 __asm rol edi, 19
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \
+ (uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \
+ (uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \
+ (uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5))
+
+static __inline uintptr_t
+valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request,
+ uintptr_t _zzq_arg1, uintptr_t _zzq_arg2,
+ uintptr_t _zzq_arg3, uintptr_t _zzq_arg4,
+ uintptr_t _zzq_arg5)
+{
+ volatile uintptr_t _zzq_args[6];
+ volatile unsigned int _zzq_result;
+ _zzq_args[0] = (uintptr_t)(_zzq_request);
+ _zzq_args[1] = (uintptr_t)(_zzq_arg1);
+ _zzq_args[2] = (uintptr_t)(_zzq_arg2);
+ _zzq_args[3] = (uintptr_t)(_zzq_arg3);
+ _zzq_args[4] = (uintptr_t)(_zzq_arg4);
+ _zzq_args[5] = (uintptr_t)(_zzq_arg5);
+ __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default
+ __SPECIAL_INSTRUCTION_PREAMBLE
+ /* %EDX = client_request ( %EAX ) */
+ __asm xchg ebx,ebx
+ __asm mov _zzq_result, edx
+ }
+ return _zzq_result;
+}
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned int __addr; \
+ __asm { __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EAX = guest_NRADDR */ \
+ __asm xchg ecx,ecx \
+ __asm mov __addr, eax \
+ } \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_EAX ERROR
+
+#else
+#error Unsupported compiler.
+#endif
+
+#endif /* PLAT_x86_win32 */
+
+/* ------------------------ amd64-{linux,darwin} --------------- */
+
+#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
typedef
struct {
@@ -241,10 +352,11 @@
"rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
"rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- { volatile uint64_t _zzq_args[6]; \
+ __extension__ \
+ ({ volatile uint64_t _zzq_args[6]; \
volatile uint64_t _zzq_result; \
_zzq_args[0] = (uint64_t)(_zzq_request); \
_zzq_args[1] = (uint64_t)(_zzq_arg1); \
@@ -259,8 +371,8 @@
: "a" (&_zzq_args[0]), "0" (_zzq_default) \
: "cc", "memory" \
); \
- _zzq_rlval = _zzq_result; \
- }
+ _zzq_result; \
+ })
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
@@ -279,7 +391,7 @@
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%RAX */ \
"xchgq %%rdx,%%rdx\n\t"
-#endif /* PLAT_amd64_linux */
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
/* ------------------------ ppc32-linux ------------------------ */
@@ -295,11 +407,12 @@
"rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
"rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
- { unsigned int _zzq_args[6]; \
+ __extension__ \
+ ({ unsigned int _zzq_args[6]; \
unsigned int _zzq_result; \
unsigned int* _zzq_ptr; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
@@ -318,8 +431,8 @@
: "=b" (_zzq_result) \
: "b" (_zzq_default), "b" (_zzq_ptr) \
: "cc", "memory", "r3", "r4"); \
- _zzq_rlval = _zzq_result; \
- }
+ _zzq_result; \
+ })
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
@@ -356,11 +469,12 @@
"rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
"rotldi 0,0,61 ; rotldi 0,0,51\n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
- { uint64_t _zzq_args[6]; \
+ __extension__ \
+ ({ uint64_t _zzq_args[6]; \
register uint64_t _zzq_result __asm__("r3"); \
register uint64_t* _zzq_ptr __asm__("r4"); \
_zzq_args[0] = (uint64_t)(_zzq_request); \
@@ -376,8 +490,8 @@
: "=r" (_zzq_result) \
: "0" (_zzq_default), "r" (_zzq_ptr) \
: "cc", "memory"); \
- _zzq_rlval = _zzq_result; \
- }
+ _zzq_result; \
+ })
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
@@ -407,149 +521,135 @@
#endif /* PLAT_ppc64_linux */
-/* ------------------------ ppc32-aix5 ------------------------- */
+/* ------------------------- arm-linux ------------------------- */
-#if defined(PLAT_ppc32_aix5)
+#if defined(PLAT_arm_linux)
typedef
struct {
unsigned int nraddr; /* where's the code? */
- unsigned int r2; /* what tocptr do we need? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
- "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+ "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \
+ "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
- { unsigned int _zzq_args[7]; \
- register unsigned int _zzq_result; \
- register unsigned int* _zzq_ptr; \
+ __extension__ \
+ ({volatile unsigned int _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
_zzq_args[2] = (unsigned int)(_zzq_arg2); \
_zzq_args[3] = (unsigned int)(_zzq_arg3); \
_zzq_args[4] = (unsigned int)(_zzq_arg4); \
_zzq_args[5] = (unsigned int)(_zzq_arg5); \
- _zzq_args[6] = (unsigned int)(_zzq_default); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("mr 4,%1\n\t" \
- "lwz 3, 24(4)\n\t" \
+ __asm__ volatile("mov r3, %1\n\t" /*default*/ \
+ "mov r4, %2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1\n\t" \
- "mr %0,3" \
- : "=b" (_zzq_result) \
- : "b" (_zzq_ptr) \
- : "r3", "r4", "cc", "memory"); \
- _zzq_rlval = _zzq_result; \
- }
+ /* R3 = client_request ( R4 ) */ \
+ "orr r10, r10, r10\n\t" \
+ "mov %0, r3" /*result*/ \
+ : "=r" (_zzq_result) \
+ : "r" (_zzq_default), "r" (&_zzq_args[0]) \
+ : "cc","memory", "r3", "r4"); \
+ _zzq_result; \
+ })
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- register unsigned int __addr; \
+ unsigned int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
+ /* R3 = guest_NRADDR */ \
+ "orr r11, r11, r11\n\t" \
+ "mov %0, r3" \
+ : "=r" (__addr) \
: \
- : "r3", "cc", "memory" \
+ : "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR_GPR2 */ \
- "or 4,4,4\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "r3", "cc", "memory" \
- ); \
- _zzq_orig->r2 = __addr; \
}
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
__SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
- "or 3,3,3\n\t"
+ /* branch-and-link-to-noredir *%R4 */ \
+ "orr r12, r12, r12\n\t"
-#endif /* PLAT_ppc32_aix5 */
+#endif /* PLAT_arm_linux */
-/* ------------------------ ppc64-aix5 ------------------------- */
+/* ------------------------ s390x-linux ------------------------ */
-#if defined(PLAT_ppc64_aix5)
+#if defined(PLAT_s390x_linux)
typedef
- struct {
- uint64_t nraddr; /* where's the code? */
- uint64_t r2; /* what tocptr do we need? */
- }
- OrigFn;
-
-#define __SPECIAL_INSTRUCTION_PREAMBLE \
- "rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
- "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
-
-#define VALGRIND_DO_CLIENT_REQUEST( \
- _zzq_rlval, _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- \
- { uint64_t _zzq_args[7]; \
- register uint64_t _zzq_result; \
- register uint64_t* _zzq_ptr; \
- _zzq_args[0] = (unsigned int long long)(_zzq_request); \
- _zzq_args[1] = (unsigned int long long)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int long long)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int long long)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int long long)(_zzq_arg4); \
- _zzq_args[5] = (unsigned int long long)(_zzq_arg5); \
- _zzq_args[6] = (unsigned int long long)(_zzq_default); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("mr 4,%1\n\t" \
- "ld 3, 48(4)\n\t" \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = client_request ( %R4 ) */ \
- "or 1,1,1\n\t" \
- "mr %0,3" \
- : "=b" (_zzq_result) \
- : "b" (_zzq_ptr) \
- : "r3", "r4", "cc", "memory"); \
- _zzq_rlval = _zzq_result; \
+ struct {
+ uint64_t nraddr; /* where's the code? */
}
+ OrigFn;
-#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
- { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- register uint64_t __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR */ \
- "or 2,2,2\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "r3", "cc", "memory" \
- ); \
- _zzq_orig->nraddr = __addr; \
- __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
- /* %R3 = guest_NRADDR_GPR2 */ \
- "or 4,4,4\n\t" \
- "mr %0,3" \
- : "=b" (__addr) \
- : \
- : "r3", "cc", "memory" \
- ); \
- _zzq_orig->r2 = __addr; \
- }
+/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific
+ * code. This detection is implemented in platform specific toIR.c
+ * (e.g. VEX/priv/guest_s390_decoder.c).
+ */
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "lr 15,15\n\t" \
+ "lr 1,1\n\t" \
+ "lr 2,2\n\t" \
+ "lr 3,3\n\t"
-#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- __SPECIAL_INSTRUCTION_PREAMBLE \
- /* branch-and-link-to-noredir *%R11 */ \
- "or 3,3,3\n\t"
+#define __CLIENT_REQUEST_CODE "lr 2,2\n\t"
+#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t"
+#define __CALL_NO_REDIR_CODE "lr 4,4\n\t"
-#endif /* PLAT_ppc64_aix5 */
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ __extension__ \
+ ({volatile uint64_t _zzq_args[6]; \
+ volatile uint64_t _zzq_result; \
+ _zzq_args[0] = (uint64_t)(_zzq_request); \
+ _zzq_args[1] = (uint64_t)(_zzq_arg1); \
+ _zzq_args[2] = (uint64_t)(_zzq_arg2); \
+ _zzq_args[3] = (uint64_t)(_zzq_arg3); \
+ _zzq_args[4] = (uint64_t)(_zzq_arg4); \
+ _zzq_args[5] = (uint64_t)(_zzq_arg5); \
+ __asm__ volatile(/* r2 = args */ \
+ "lgr 2,%1\n\t" \
+ /* r3 = default */ \
+ "lgr 3,%2\n\t" \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ __CLIENT_REQUEST_CODE \
+ /* results = r3 */ \
+ "lgr %0, 3\n\t" \
+ : "=d" (_zzq_result) \
+ : "a" (&_zzq_args[0]), "0" (_zzq_default) \
+ : "cc", "2", "3", "memory" \
+ ); \
+ _zzq_result; \
+ })
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile uint64_t __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ __GET_NR_CONTEXT_CODE \
+ "lgr %0, 3\n\t" \
+ : "=a" (__addr) \
+ : \
+ : "cc", "3", "memory" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_R1 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ __CALL_NO_REDIR_CODE
+
+#endif /* PLAT_s390x_linux */
/* Insert assembly code for other platforms here... */
@@ -582,11 +682,15 @@
/* Use these to write the name of your wrapper. NOTE: duplicates
VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
+/* Use an extra level of macroisation so as to ensure the soname/fnname
+ args are fully macro-expanded before pasting them together. */
+#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
+
#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
- _vgwZU_##soname##_##fnname
+ VG_CONCAT4(_vgwZU_,soname,_,fnname)
#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
- _vgwZZ_##soname##_##fnname
+ VG_CONCAT4(_vgwZZ_,soname,_,fnname)
/* Use this macro from within a wrapper function to collect the
context (address and possibly other info) of the original function.
@@ -613,9 +717,25 @@
do { volatile unsigned long _junk; \
CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
-/* ------------------------- x86-linux ------------------------- */
+#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
-#if defined(PLAT_x86_linux)
+#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
+
+#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
+
+#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
+
+/* ------------------------- x86-{linux,darwin} ---------------- */
+
+#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin)
/* These regs are trashed by the hidden call. No need to mention eax
as gcc can already see that, plus causes gcc to bomb. */
@@ -648,10 +768,11 @@
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
+ "subl $12, %%esp\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $4, %%esp\n" \
+ "addl $16, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
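The subl/addl pairs threaded through the x86 CALL_FN_W_* macros here and below keep %esp 16-byte aligned at the call, which the newly supported Darwin i386 ABI requires. The pattern, in comment form (an illustration, not a macro from this header):

    /* For a call with N stack arguments:
         pad = (16 - (4*N % 16)) % 16;   -- the "subl $pad, %%esp"
         push the N arguments (4*N bytes)
         call through %eax
         addl $(pad + 4*N), %%esp        -- always a multiple of 16
       e.g. N=1 gives subl $12 plus one 4-byte pushl, cleaned up with
       addl $16, exactly as in the hunk above. */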
@@ -668,11 +789,12 @@
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
+ "subl $8, %%esp\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $8, %%esp\n" \
+ "addl $16, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -690,12 +812,13 @@
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
+ "subl $4, %%esp\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $12, %%esp\n" \
+ "addl $16, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -740,6 +863,7 @@
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
+ "subl $12, %%esp\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
@@ -747,7 +871,7 @@
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $20, %%esp\n" \
+ "addl $32, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -768,6 +892,7 @@
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
+ "subl $8, %%esp\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
@@ -776,7 +901,7 @@
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $24, %%esp\n" \
+ "addl $32, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -799,6 +924,7 @@
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
+ "subl $4, %%esp\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
@@ -808,7 +934,7 @@
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $28, %%esp\n" \
+ "addl $32, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -867,6 +993,7 @@
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
+ "subl $12, %%esp\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 32(%%eax)\n\t" \
"pushl 28(%%eax)\n\t" \
@@ -878,7 +1005,7 @@
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $36, %%esp\n" \
+ "addl $48, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -904,6 +1031,7 @@
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
+ "subl $8, %%esp\n\t" \
"pushl 40(%%eax)\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 32(%%eax)\n\t" \
@@ -916,7 +1044,7 @@
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $40, %%esp\n" \
+ "addl $48, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -944,6 +1072,7 @@
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
+ "subl $4, %%esp\n\t" \
"pushl 44(%%eax)\n\t" \
"pushl 40(%%eax)\n\t" \
"pushl 36(%%eax)\n\t" \
@@ -957,7 +1086,7 @@
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $44, %%esp\n" \
+ "addl $48, %%esp\n" \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
@@ -1008,11 +1137,11 @@
lval = (__typeof__(lval)) _res; \
} while (0)
-#endif /* PLAT_x86_linux */
+#endif /* PLAT_x86_linux || PLAT_x86_darwin */
-/* ------------------------ amd64-linux ------------------------ */
+/* ------------------------ amd64-{linux,darwin} --------------- */
-#if defined(PLAT_amd64_linux)
+#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
@@ -1020,6 +1149,78 @@
#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
"rdi", "r8", "r9", "r10", "r11"
+/* This is all pretty complex. It's so as to make stack unwinding
+ work reliably. See bug 243270. The basic problem is the sub and
+ add of 128 of %rsp in all of the following macros. If gcc believes
+ the CFA is in %rsp, then unwinding may fail, because what's at the
+ CFA is not what gcc "expected" when it constructs the CFIs for the
+ places where the macros are instantiated.
+
+ But we can't just add a CFI annotation to increase the CFA offset
+ by 128, to match the sub of 128 from %rsp, because we don't know
+ whether gcc has chosen %rsp as the CFA at that point, or whether it
+ has chosen some other register (eg, %rbp). In the latter case,
+ adding a CFI annotation to change the CFA offset is simply wrong.
+
+ So the solution is to get hold of the CFA using
+ __builtin_dwarf_cfa(), put it in a known register, and add a
+ CFI annotation to say what the register is. We choose %rbp for
+ this (perhaps perversely), because:
+
+ (1) %rbp is already subject to unwinding. If a new register was
+ chosen then the unwinder would have to unwind it in all stack
+ traces, which is expensive, and
+
+ (2) %rbp is already subject to precise exception updates in the
+ JIT. If a new register was chosen, we'd have to have precise
+ exceptions for it too, which reduces performance of the
+ generated code.
+
+ However .. one extra complication. We can't just whack the result
+ of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
+ list of trashed registers at the end of the inline assembly
+ fragments; gcc won't allow %rbp to appear in that list. Hence
+ instead we need to stash %rbp in %r15 for the duration of the asm,
+ and say that %r15 is trashed instead. gcc seems happy to go with
+ that.
+
+ Oh .. and this all needs to be conditionalised so that it is
+ unchanged from before this commit, when compiled with older gccs
+ that don't support __builtin_dwarf_cfa. Furthermore, since
+ this header file is freestanding, it has to be independent of
+ config.h, and so the following conditionalisation cannot depend on
+ configure time checks.
+
+ Although it's not clear from
+ 'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
+ this expression excludes Darwin.
+ .cfi directives in Darwin assembly appear to be completely
+ different and I haven't investigated how they work.
+
+ For even more entertainment value, note we have to use the
+ completely undocumented __builtin_dwarf_cfa(), which appears to
+ really compute the CFA, whereas __builtin_frame_address(0) claims
+ to but actually doesn't. See
+ https://bugs.kde.org/show_bug.cgi?id=243270#c47
+*/
+#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
+# define __FRAME_POINTER \
+ ,"r"(__builtin_dwarf_cfa())
+# define VALGRIND_CFI_PROLOGUE \
+ "movq %%rbp, %%r15\n\t" \
+ "movq %2, %%rbp\n\t" \
+ ".cfi_remember_state\n\t" \
+ ".cfi_def_cfa rbp, 0\n\t"
+# define VALGRIND_CFI_EPILOGUE \
+ "movq %%r15, %%rbp\n\t" \
+ ".cfi_restore_state\n\t"
+#else
+# define __FRAME_POINTER
+# define VALGRIND_CFI_PROLOGUE
+# define VALGRIND_CFI_EPILOGUE
+#endif
+
+
/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
long) == 8. */
@@ -1039,7 +1240,7 @@
redzone, for the duration of the hidden call, to make it safe.
Probably the same problem afflicts the other redzone-style ABIs too
- (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the stack is
+ (ppc64-linux); but for those, the stack is
self describing (none of this CFI nonsense) so at least messing
with the stack pointer doesn't give a danger of non-unwindable
stack. */
@@ -1051,13 +1252,15 @@
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1070,14 +1273,16 @@
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
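For context, these macros are the engine behind valgrind.h's function
wrapping. A minimal usage sketch follows; the wrapped function foo is
hypothetical, while I_WRAP_SONAME_FNNAME_ZU, VALGRIND_GET_ORIG_FN and
CALL_FN_W_W are the real valgrind.h entry points:

    #include <stdio.h>
    #include "valgrind.h"

    /* Wrap int foo(int) from the main executable (soname NONE). */
    int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x)
    {
       int    result;
       OrigFn fn;
       VALGRIND_GET_ORIG_FN(fn);    /* recover the un-redirected address */
       printf("foo(%d) called under Valgrind\n", x);
       CALL_FN_W_W(result, fn, x);  /* expands to the asm block above */
       return result;
    }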
@@ -1091,15 +1296,17 @@
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1114,6 +1321,7 @@
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
@@ -1121,9 +1329,10 @@
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1139,6 +1348,7 @@
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
@@ -1147,9 +1357,10 @@
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1166,6 +1377,7 @@
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
@@ -1175,9 +1387,10 @@
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1195,6 +1408,7 @@
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
@@ -1203,11 +1417,12 @@
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
- "addq $128,%%rsp\n\t" \
VALGRIND_CALL_NOREDIR_RAX \
+ "addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1227,7 +1442,8 @@
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
- "subq $128,%%rsp\n\t" \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $136,%%rsp\n\t" \
"pushq 56(%%rax)\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
@@ -1238,10 +1454,11 @@
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $8, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
+ "addq $136,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
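The bump from 128 to 136 here, and again in the 9- and 11-argument
variants below, appears in exactly the cases that push an odd number of
argument words: the extra 8 bytes keep %rsp a multiple of 16 across the
call (136 + 1*8 = 144, versus 128 + 8 = 136), matching the amd64 ELF
ABI's 16-byte stack-alignment requirement. That reading is inferred from
the pattern in the code rather than stated anywhere in the patch.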
@@ -1262,6 +1479,7 @@
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"pushq 64(%%rax)\n\t" \
"pushq 56(%%rax)\n\t" \
@@ -1275,9 +1493,10 @@
VALGRIND_CALL_NOREDIR_RAX \
"addq $16, %%rsp\n" \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1299,7 +1518,8 @@
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
- "subq $128,%%rsp\n\t" \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $136,%%rsp\n\t" \
"pushq 72(%%rax)\n\t" \
"pushq 64(%%rax)\n\t" \
"pushq 56(%%rax)\n\t" \
@@ -1312,10 +1532,11 @@
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $24, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
+ "addq $136,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1338,6 +1559,7 @@
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"pushq 80(%%rax)\n\t" \
"pushq 72(%%rax)\n\t" \
@@ -1353,9 +1575,10 @@
VALGRIND_CALL_NOREDIR_RAX \
"addq $32, %%rsp\n" \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1379,7 +1602,8 @@
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
- "subq $128,%%rsp\n\t" \
+ VALGRIND_CFI_PROLOGUE \
+ "subq $136,%%rsp\n\t" \
"pushq 88(%%rax)\n\t" \
"pushq 80(%%rax)\n\t" \
"pushq 72(%%rax)\n\t" \
@@ -1394,10 +1618,11 @@
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
"addq $40, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
+ "addq $136,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
@@ -1422,6 +1647,7 @@
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
"subq $128,%%rsp\n\t" \
"pushq 96(%%rax)\n\t" \
"pushq 88(%%rax)\n\t" \
@@ -1439,14 +1665,15 @@
VALGRIND_CALL_NOREDIR_RAX \
"addq $48, %%rsp\n" \
"addq $128,%%rsp\n\t" \
+ VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
-#endif /* PLAT_amd64_linux */
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
/* ------------------------ ppc32-linux ------------------------ */
@@ -2439,54 +2666,28 @@
#endif /* PLAT_ppc64_linux */
-/* ------------------------ ppc32-aix5 ------------------------- */
+/* ------------------------- arm-linux ------------------------- */
-#if defined(PLAT_ppc32_aix5)
-
-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+#if defined(PLAT_arm_linux)
/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS \
- "lr", "ctr", "xer", \
- "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
- "r11", "r12", "r13"
+#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4","r14"
-/* Expand the stack frame, copying enough info that unwinding
- still works. Trashes r3. */
-
-#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
- "addi 1,1,-" #_n_fr "\n\t" \
- "lwz 3," #_n_fr "(1)\n\t" \
- "stw 3,0(1)\n\t"
-
-#define VG_CONTRACT_FRAME_BY(_n_fr) \
- "addi 1,1," #_n_fr "\n\t"
-
-/* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned
+/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
long) == 4. */
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+0]; \
+ volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2495,27 +2696,18 @@
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+1]; \
+ volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
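The arm-linux variants are much simpler than the amd64 ones: there is no
redzone to dodge and no CFI juggling. Following the AAPCS, the first four
arguments go in r0-r3 and any further arguments are pushed onto the stack
(see the 5-argument and later variants below); the target address is
staged in r4 because VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 branches via
r4, as its name suggests.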
@@ -2523,28 +2715,19 @@
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+2]; \
+ volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2553,30 +2736,21 @@
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+3]; \
+ volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2585,32 +2759,23 @@
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+4]; \
+ volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2619,34 +2784,27 @@
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+5]; \
+ volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #20] \n\t" \
+ "push {r0} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #4 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2655,36 +2813,29 @@
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+6]; \
+ volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "push {r0, r1} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #8 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2694,38 +2845,31 @@
arg7) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+7]; \
+ volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "push {r0, r1, r2} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #12 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2735,40 +2879,33 @@
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+8]; \
+ volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "push {r0, r1, r2, r3} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #16 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2778,47 +2915,35 @@
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+9]; \
+ volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(64) \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
- /* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(64) \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #20 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
@@ -2828,738 +2953,612 @@
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+10]; \
+ volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(64) \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,60(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
- /* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(64) \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #40] \n\t" \
+ "push {r0} \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #24 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+11]; \
+ volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(72) \
- /* arg11 */ \
- "lwz 3,44(11)\n\t" \
- "stw 3,64(1)\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,60(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
- /* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(72) \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #40] \n\t" \
+ "ldr r1, [%1, #44] \n\t" \
+ "push {r0, r1} \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #28 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory",__CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+12]; \
+ volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- _argvec[2+12] = (unsigned long)arg12; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "stw 2,-8(11)\n\t" /* save tocptr */ \
- "lwz 2,-4(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(72) \
- /* arg12 */ \
- "lwz 3,48(11)\n\t" \
- "stw 3,68(1)\n\t" \
- /* arg11 */ \
- "lwz 3,44(11)\n\t" \
- "stw 3,64(1)\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,60(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,56(1)\n\t" \
- /* args1-8 */ \
- "lwz 3, 4(11)\n\t" /* arg1->r3 */ \
- "lwz 4, 8(11)\n\t" /* arg2->r4 */ \
- "lwz 5, 12(11)\n\t" /* arg3->r5 */ \
- "lwz 6, 16(11)\n\t" /* arg4->r6 */ \
- "lwz 7, 20(11)\n\t" /* arg5->r7 */ \
- "lwz 8, 24(11)\n\t" /* arg6->r8 */ \
- "lwz 9, 28(11)\n\t" /* arg7->r9 */ \
- "lwz 10, 32(11)\n\t" /* arg8->r10 */ \
- "lwz 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "lwz 2,-8(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(72) \
- VG_CONTRACT_FRAME_BY(512) \
+ "ldr r0, [%1, #40] \n\t" \
+ "ldr r1, [%1, #44] \n\t" \
+ "ldr r2, [%1, #48] \n\t" \
+ "push {r0, r1, r2} \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #32 \n\t" \
+ "mov %0, r0" \
: /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
+ : /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
-#endif /* PLAT_ppc32_aix5 */
+#endif /* PLAT_arm_linux */
-/* ------------------------ ppc64-aix5 ------------------------- */
+/* ------------------------- s390x-linux ------------------------- */
-#if defined(PLAT_ppc64_aix5)
+#if defined(PLAT_s390x_linux)
-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+/* Similar workaround to the amd64 one (see above), but here we use
+ r11 as the frame pointer and save the old r11 in r7. r11 might be
+ used for argvec, so we copy argvec into r1, since r1 is clobbered
+ after the call anyway. */
+#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
+# define __FRAME_POINTER \
+ ,"d"(__builtin_dwarf_cfa())
+# define VALGRIND_CFI_PROLOGUE \
+ ".cfi_remember_state\n\t" \
+ "lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \
+ "lgr 7,11\n\t" \
+ "lgr 11,%2\n\t" \
+ ".cfi_def_cfa r11, 0\n\t"
+# define VALGRIND_CFI_EPILOGUE \
+ "lgr 11, 7\n\t" \
+ ".cfi_restore_state\n\t"
+#else
+# define __FRAME_POINTER
+# define VALGRIND_CFI_PROLOGUE \
+ "lgr 1,%1\n\t"
+# define VALGRIND_CFI_EPILOGUE
+#endif
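Note that, unlike the amd64 case, the fallback VALGRIND_CFI_PROLOGUE here
is not empty: the "lgr 1,%1" copy of the argvec pointer into r1 is needed
by every macro body below whether or not CFI annotations are available,
so it appears in both branches of the conditional.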
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS \
- "lr", "ctr", "xer", \
- "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
- "r11", "r12", "r13"
-/* Expand the stack frame, copying enough info that unwinding
- still works. Trashes r3. */
-#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr) \
- "addi 1,1,-" #_n_fr "\n\t" \
- "ld 3," #_n_fr "(1)\n\t" \
- "std 3,0(1)\n\t"
-#define VG_CONTRACT_FRAME_BY(_n_fr) \
- "addi 1,1," #_n_fr "\n\t"
+/* These regs are trashed by the hidden call. Note that we overwrite
+ r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the
+ function a proper return address. All others are ABI-defined call
+ clobbers. */
+#define __CALLER_SAVED_REGS "0","1","2","3","4","5","14", \
+ "f0","f1","f2","f3","f4","f5","f6","f7"
-/* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned
- long) == 8. */
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+0]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 1, 0(1)\n\t" /* target->r1 */ \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+1]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+/* The calling ABI passes the arguments in r2-r6; the rest go on the stack */
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+2]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_WW(lval, orig, arg1, arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+3]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+4]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+5]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-160\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,160\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+6]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-168\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,168\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
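The growing frame sizes (160, 168, 176, ...) track the stack-passed
parameters: r2-r6 carry the first five arguments, and the s390x ELF ABI
obliges the caller to allocate a 160-byte register save area at the
bottom of its frame, so each argument beyond the fifth is mvc'd into a
slot starting at offset 160 and adds another 8 bytes to the frame. The
160-byte figure is the standard s390x ABI value; the rest is read
directly from the macros.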
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+7]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-176\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,176\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+8]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7, arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-184\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,184\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+9]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(128) \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(128) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7, arg8, arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-192\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "mvc 184(8,15), 72(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,192\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+10]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(128) \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(128) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7, arg8, arg9, arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-200\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "mvc 184(8,15), 72(1)\n\t" \
+ "mvc 192(8,15), 80(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,200\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+11]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(144) \
- /* arg11 */ \
- "ld 3,88(11)\n\t" \
- "std 3,128(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(144) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7, arg8, arg9, arg10, arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-208\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "mvc 184(8,15), 72(1)\n\t" \
+ "mvc 192(8,15), 80(1)\n\t" \
+ "mvc 200(8,15), 88(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,208\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3+12]; \
- volatile unsigned long _res; \
- /* _argvec[0] holds current r2 across the call */ \
- _argvec[1] = (unsigned long)_orig.r2; \
- _argvec[2] = (unsigned long)_orig.nraddr; \
- _argvec[2+1] = (unsigned long)arg1; \
- _argvec[2+2] = (unsigned long)arg2; \
- _argvec[2+3] = (unsigned long)arg3; \
- _argvec[2+4] = (unsigned long)arg4; \
- _argvec[2+5] = (unsigned long)arg5; \
- _argvec[2+6] = (unsigned long)arg6; \
- _argvec[2+7] = (unsigned long)arg7; \
- _argvec[2+8] = (unsigned long)arg8; \
- _argvec[2+9] = (unsigned long)arg9; \
- _argvec[2+10] = (unsigned long)arg10; \
- _argvec[2+11] = (unsigned long)arg11; \
- _argvec[2+12] = (unsigned long)arg12; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- VG_EXPAND_FRAME_BY_trashes_r3(512) \
- "std 2,-16(11)\n\t" /* save tocptr */ \
- "ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
- VG_EXPAND_FRAME_BY_trashes_r3(144) \
- /* arg12 */ \
- "ld 3,96(11)\n\t" \
- "std 3,136(1)\n\t" \
- /* arg11 */ \
- "ld 3,88(11)\n\t" \
- "std 3,128(1)\n\t" \
- /* arg10 */ \
- "ld 3,80(11)\n\t" \
- "std 3,120(1)\n\t" \
- /* arg9 */ \
- "ld 3,72(11)\n\t" \
- "std 3,112(1)\n\t" \
- /* args1-8 */ \
- "ld 3, 8(11)\n\t" /* arg1->r3 */ \
- "ld 4, 16(11)\n\t" /* arg2->r4 */ \
- "ld 5, 24(11)\n\t" /* arg3->r5 */ \
- "ld 6, 32(11)\n\t" /* arg4->r6 */ \
- "ld 7, 40(11)\n\t" /* arg5->r7 */ \
- "ld 8, 48(11)\n\t" /* arg6->r8 */ \
- "ld 9, 56(11)\n\t" /* arg7->r9 */ \
- "ld 10, 64(11)\n\t" /* arg8->r10 */ \
- "ld 11, 0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr 11,%1\n\t" \
- "mr %0,3\n\t" \
- "ld 2,-16(11)\n\t" /* restore tocptr */ \
- VG_CONTRACT_FRAME_BY(144) \
- VG_CONTRACT_FRAME_BY(512) \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7, arg8, arg9, arg10, arg11, arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ _argvec[12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-216\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "mvc 184(8,15), 72(1)\n\t" \
+ "mvc 192(8,15), 80(1)\n\t" \
+ "mvc 200(8,15), 88(1)\n\t" \
+ "mvc 208(8,15), 96(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,216\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#endif /* PLAT_ppc64_aix5 */
+
+#endif /* PLAT_s390x_linux */
/* ------------------------------------------------------------------ */
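For orientation, a hedged sketch (not part of the merged header) of how the CALL_FN_W_* family above is used from a Valgrind function wrapper; the wrapped function add() and its location (an object with no soname) are illustrative:

    #include "valgrind.h"

    /* Hypothetical wrapper for: int add(int, int) in the main executable. */
    int I_WRAP_SONAME_FNNAME_ZU(NONE, add)(int a, int b)
    {
       OrigFn fn;
       int    result;
       VALGRIND_GET_ORIG_FN(fn);        /* fetch the nraddr of the real add() */
       CALL_FN_W_WW(result, fn, a, b);  /* word-sized result, two word args */
       return result;
    }
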
@@ -3605,9 +3604,14 @@
errors. */
VG_USERREQ__COUNT_ERRORS = 0x1201,
+ /* Allows a string (gdb monitor command) to be passed to the tool.
+ Used for interaction with vgdb/gdb */
+ VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202,
+
/* These are useful and can be interpreted by any tool that
tracks malloc() et al, by using vg_replace_malloc.c. */
VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
+ VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b,
VG_USERREQ__FREELIKE_BLOCK = 0x1302,
/* Memory pool support. */
VG_USERREQ__CREATE_MEMPOOL = 0x1303,
@@ -3620,30 +3624,43 @@
VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
/* Allow printfs to valgrind log. */
+ /* The first two pass the va_list argument by value, which
+ assumes it is the same size as or smaller than a UWord,
+ which generally isn't the case. Hence they are deprecated.
+ The second two pass the vargs by reference and so are
+ immune to this problem. */
+ /* both :: char* fmt, va_list vargs (DEPRECATED) */
VG_USERREQ__PRINTF = 0x1401,
VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
+ /* both :: char* fmt, va_list* vargs */
+ VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
/* Stack support. */
VG_USERREQ__STACK_REGISTER = 0x1501,
VG_USERREQ__STACK_DEREGISTER = 0x1502,
- VG_USERREQ__STACK_CHANGE = 0x1503
+ VG_USERREQ__STACK_CHANGE = 0x1503,
+
+ /* Wine support */
+ VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601,
+
+ /* Querying of debug info. */
+ VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701
} Vg_ClientRequest;
#if !defined(__GNUC__)
# define __extension__ /* */
#endif
+
/* Returns the number of Valgrinds this code is running under. That
is, 0 if running natively, 1 if running under Valgrind, 2 if
running under Valgrind which is running under another Valgrind,
etc. */
-#define RUNNING_ON_VALGRIND __extension__ \
- ({unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* if not */, \
- VG_USERREQ__RUNNING_ON_VALGRIND, \
- 0, 0, 0, 0, 0); \
- _qzz_res; \
- })
+#define RUNNING_ON_VALGRIND \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \
+ VG_USERREQ__RUNNING_ON_VALGRIND, \
+ 0, 0, 0, 0, 0) \
/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
@@ -3651,56 +3668,93 @@
since it provides a way to make sure valgrind will retranslate the
invalidated area. Returns no value. */
#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__DISCARD_TRANSLATIONS, \
- _qzz_addr, _qzz_len, 0, 0, 0); \
- }
+ _qzz_addr, _qzz_len, 0, 0, 0)
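A hedged usage sketch for the two expression-style requests above; iterations, code_start and code_len are illustrative names:

    /* Cheaply detect the tool and dial down work when under Valgrind. */
    if (RUNNING_ON_VALGRIND) {
       iterations /= 100;
    }

    /* After patching generated code in place, force retranslation. */
    VALGRIND_DISCARD_TRANSLATIONS(code_start, code_len);
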
/* These requests are for getting Valgrind itself to print something.
- Possibly with a backtrace. This is a really ugly hack. */
+ Possibly with a backtrace. This is a really ugly hack. The return value
+ is the number of characters printed, excluding the "**<pid>** " part at the
+ start and the backtrace (if present). */
-#if defined(NVALGRIND)
-
-# define VALGRIND_PRINTF(...)
-# define VALGRIND_PRINTF_BACKTRACE(...)
-
-#else /* NVALGRIND */
-
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
/* Modern GCC will optimize the static routine out if unused,
and unused attribute will shut down warnings about it. */
static int VALGRIND_PRINTF(const char *format, ...)
__attribute__((format(__printf__, 1, 2), __unused__));
+#endif
static int
+#if defined(_MSC_VER)
+__inline
+#endif
VALGRIND_PRINTF(const char *format, ...)
{
+#if defined(NVALGRIND)
+ return 0;
+#else /* NVALGRIND */
+#if defined(_MSC_VER)
+ uintptr_t _qzz_res;
+#else
unsigned long _qzz_res;
+#endif
va_list vargs;
va_start(vargs, format);
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF,
- (unsigned long)format, (unsigned long)vargs,
+#if defined(_MSC_VER)
+ _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+ VG_USERREQ__PRINTF_VALIST_BY_REF,
+ (uintptr_t)format,
+ (uintptr_t)&vargs,
0, 0, 0);
+#else
+ _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+ VG_USERREQ__PRINTF_VALIST_BY_REF,
+ (unsigned long)format,
+ (unsigned long)&vargs,
+ 0, 0, 0);
+#endif
va_end(vargs);
return (int)_qzz_res;
+#endif /* NVALGRIND */
}
+#if defined(__GNUC__) || defined(__INTEL_COMPILER)
static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
__attribute__((format(__printf__, 1, 2), __unused__));
+#endif
static int
+#if defined(_MSC_VER)
+__inline
+#endif
VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
{
+#if defined(NVALGRIND)
+ return 0;
+#else /* NVALGRIND */
+#if defined(_MSC_VER)
+ uintptr_t _qzz_res;
+#else
unsigned long _qzz_res;
+#endif
va_list vargs;
va_start(vargs, format);
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
- (unsigned long)format, (unsigned long)vargs,
+#if defined(_MSC_VER)
+ _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+ (uintptr_t)format,
+ (uintptr_t)&vargs,
0, 0, 0);
+#else
+ _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
+ VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+ (unsigned long)format,
+ (unsigned long)&vargs,
+ 0, 0, 0);
+#endif
va_end(vargs);
return (int)_qzz_res;
-}
-
#endif /* NVALGRIND */
+}
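A hedged usage sketch for the two printf-style requests defined above; n and s are illustrative:

    VALGRIND_PRINTF("checked %d buffers\n", n);
    VALGRIND_PRINTF_BACKTRACE("unexpected state %d, reached from:\n", s);
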
/* These requests allow control to move from the simulated CPU to the
@@ -3727,199 +3781,253 @@
with a lot in the past.
*/
#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL0, \
- _qyy_fn, \
- 0, 0, 0, 0); \
- _qyy_res; \
- })
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL0, \
+ _qyy_fn, \
+ 0, 0, 0, 0)
-#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL1, \
- _qyy_fn, \
- _qyy_arg1, 0, 0, 0); \
- _qyy_res; \
- })
+#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL1, \
+ _qyy_fn, \
+ _qyy_arg1, 0, 0, 0)
-#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL2, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, 0, 0); \
- _qyy_res; \
- })
+#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL2, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, 0, 0)
#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
- __extension__ \
- ({unsigned long _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL3, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, \
- _qyy_arg3, 0); \
- _qyy_res; \
- })
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL3, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, \
+ _qyy_arg3, 0)
/* Counts the number of errors that have been recorded by a tool. Nb:
the tool must record the errors with VG_(maybe_record_error)() or
VG_(unique_error)() for them to be counted. */
#define VALGRIND_COUNT_ERRORS \
- __extension__ \
- ({unsigned int _qyy_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ 0 /* default return */, \
VG_USERREQ__COUNT_ERRORS, \
- 0, 0, 0, 0, 0); \
- _qyy_res; \
- })
+ 0, 0, 0, 0, 0)
-/* Mark a block of memory as having been allocated by a malloc()-like
- function. `addr' is the start of the usable block (ie. after any
- redzone) `rzB' is redzone size if the allocator can apply redzones;
- use '0' if not. Adding redzones makes it more likely Valgrind will spot
- block overruns. `is_zeroed' indicates if the memory is zeroed, as it is
- for calloc(). Put it immediately after the point where a block is
- allocated.
+/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
+ when heap blocks are allocated in order to give accurate results. This
+ happens automatically for the standard allocator functions such as
+ malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
+ delete[], etc.
+
+ But if your program uses a custom allocator, this doesn't automatically
+ happen, and Valgrind will not do as well. For example, if you allocate
+ superblocks with mmap() and then allocate chunks of the superblocks, all
+ Valgrind's observations will be at the mmap() level and it won't know that
+ the chunks should be considered separate entities. In Memcheck's case,
+ that means you probably won't get heap block overrun detection (because
+ there won't be redzones marked as unaddressable) and you definitely won't
+ get any leak detection.
+
+ The following client requests allow a custom allocator to be annotated so
+ that it can be handled accurately by Valgrind.
+
+ VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
+ by a malloc()-like function. For Memcheck (an illustrative case), this
+ does two things:
+
+ - It records that the block has been allocated. This means any addresses
+ within the block mentioned in error messages will be
+ identified as belonging to the block. It also means that if the block
+ isn't freed it will be detected by the leak checker.
+
+ - It marks the block as being addressable and undefined (if 'is_zeroed' is
+ not set), or addressable and defined (if 'is_zeroed' is set). This
+ controls how accesses to the block by the program are handled.
- If you're using Memcheck: If you're allocating memory via superblocks,
- and then handing out small chunks of each superblock, if you don't have
- redzones on your small blocks, it's worth marking the superblock with
- VALGRIND_MAKE_MEM_NOACCESS when it's created, so that block overruns are
- detected. But if you can put redzones on, it's probably better to not do
- this, so that messages for small overruns are described in terms of the
- small block rather than the superblock (but if you have a big overrun
- that skips over a redzone, you could miss an error this way). See
- memcheck/tests/custom_alloc.c for an example.
+ 'addr' is the start of the usable block (ie. after any
+ redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
+ can apply redzones -- these are blocks of padding at the start and end of
+ each block. Adding redzones is recommended as it makes it much more likely
+ Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
+ zeroed (or filled with another predictable value), as is the case for
+ calloc().
+
+ VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
+ heap block -- that will be used by the client program -- is allocated.
+ It's best to put it at the outermost level of the allocator if possible;
+ for example, if you have a function my_alloc() which calls
+ internal_alloc(), and the client request is put inside internal_alloc(),
+ stack traces relating to the heap block will contain entries for both
+ my_alloc() and internal_alloc(), which is probably not what you want.
- WARNING: if your allocator uses malloc() or 'new' to allocate
- superblocks, rather than mmap() or brk(), this will not work properly --
- you'll likely get assertion failures during leak detection. This is
- because Valgrind doesn't like seeing overlapping heap blocks. Sorry.
+ For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
+ custom blocks from within a heap block, B, that has been allocated with
+ malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
+ -- the custom blocks will take precedence.
- Nb: block must be freed via a free()-like function specified
- with VALGRIND_FREELIKE_BLOCK or mismatch errors will occur. */
+ VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For
+ Memcheck, it does two things:
+
+ - It records that the block has been deallocated. This assumes that the
+ block was annotated as having been allocated via
+ VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
+
+ - It marks the block as being unaddressable.
+
+ VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
+ heap block is deallocated.
+
+ VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For
+ Memcheck, it does four things:
+
+ - It records that the size of a block has been changed. This assumes that
+ the block was annotated as having been allocated via
+ VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
+
+ - If the block shrunk, it marks the freed memory as being unaddressable.
+
+ - If the block grew, it marks the new area as undefined and defines a red
+ zone past the end of the new block.
+
+ - The V-bits of the overlap between the old and the new block are preserved.
+
+ VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block
+ and before deallocation of the old block.
+
+ In many cases, these three client requests will not be enough to get your
+ allocator working well with Memcheck. More specifically, if your allocator
+ writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
+ will be necessary to mark the memory as addressable just before the zeroing
+ occurs, otherwise you'll get a lot of invalid write errors. For example,
+ you'll need to do this if your allocator recycles freed blocks, but it
+ zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
+ Alternatively, if your allocator reuses freed blocks for allocator-internal
+ data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
+
+ Really, what's happening is a blurring of the lines between the client
+ program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
+ memory should be considered unaddressable to the client program, but the
+ allocator knows more than the rest of the client program and so may be able
+ to safely access it. Extra client requests are necessary for Valgrind to
+ understand the distinction between the allocator and the rest of the
+ program.
+
+ Ignored if addr == 0.
+*/
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MALLOCLIKE_BLOCK, \
- addr, sizeB, rzB, is_zeroed, 0); \
- }
+ addr, sizeB, rzB, is_zeroed, 0)
-/* Mark a block of memory as having been freed by a free()-like function.
- `rzB' is redzone size; it must match that given to
- VALGRIND_MALLOCLIKE_BLOCK. Memory not freed will be detected by the leak
- checker. Put it immediately after the point where the block is freed. */
+/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
+ Ignored if addr == 0.
+*/
+#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
+ VG_USERREQ__RESIZEINPLACE_BLOCK, \
+ addr, oldSizeB, newSizeB, rzB, 0)
+
+/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
+ Ignored if addr == 0.
+*/
#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__FREELIKE_BLOCK, \
- addr, rzB, 0, 0, 0); \
- }
+ addr, rzB, 0, 0, 0)
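A minimal sketch of the annotation protocol described above, assuming a hypothetical pool allocator; carve_block() and release_block() stand in for allocator internals:

    void* my_alloc(size_t n) {
       void* p = carve_block(n);   /* assumed internal routine */
       VALGRIND_MALLOCLIKE_BLOCK(p, n, /*rzB*/0, /*is_zeroed*/0);
       return p;
    }

    void my_free(void* p) {
       VALGRIND_FREELIKE_BLOCK(p, /*rzB*/0);
       release_block(p);           /* assumed internal routine */
    }
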
/* Create a memory pool. */
#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CREATE_MEMPOOL, \
- pool, rzB, is_zeroed, 0, 0); \
- }
+ pool, rzB, is_zeroed, 0, 0)
/* Destroy a memory pool. */
#define VALGRIND_DESTROY_MEMPOOL(pool) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__DESTROY_MEMPOOL, \
- pool, 0, 0, 0, 0); \
- }
+ pool, 0, 0, 0, 0)
/* Associate a piece of memory with a memory pool. */
#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MEMPOOL_ALLOC, \
- pool, addr, size, 0, 0); \
- }
+ pool, addr, size, 0, 0)
/* Disassociate a piece of memory from a memory pool. */
#define VALGRIND_MEMPOOL_FREE(pool, addr) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MEMPOOL_FREE, \
- pool, addr, 0, 0, 0); \
- }
+ pool, addr, 0, 0, 0)
/* Disassociate any pieces outside a particular range. */
#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MEMPOOL_TRIM, \
- pool, addr, size, 0, 0); \
- }
+ pool, addr, size, 0, 0)
/* Tell the tool that the pool anchored at poolA has moved to poolB. */
#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MOVE_MEMPOOL, \
- poolA, poolB, 0, 0, 0); \
- }
+ poolA, poolB, 0, 0, 0)
/* Resize and/or move a piece associated with a memory pool. */
#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MEMPOOL_CHANGE, \
- pool, addrA, addrB, size, 0); \
- }
+ pool, addrA, addrB, size, 0)
/* Return 1 if a mempool exists, else 0. */
#define VALGRIND_MEMPOOL_EXISTS(pool) \
- ({unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MEMPOOL_EXISTS, \
- pool, 0, 0, 0, 0); \
- _qzz_res; \
- })
+ pool, 0, 0, 0, 0)
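The mempool requests above compose into a lifecycle like this hedged sketch; pool, obj and size are illustrative:

    VALGRIND_CREATE_MEMPOOL(pool, /*rzB*/0, /*is_zeroed*/0);
    VALGRIND_MEMPOOL_ALLOC(pool, obj, size);  /* obj is now a tracked chunk */
    VALGRIND_MEMPOOL_FREE(pool, obj);         /* obj becomes unaddressable  */
    VALGRIND_DESTROY_MEMPOOL(pool);
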
/* Mark a piece of memory as being a stack. Returns a stack id. */
#define VALGRIND_STACK_REGISTER(start, end) \
- ({unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__STACK_REGISTER, \
- start, end, 0, 0, 0); \
- _qzz_res; \
- })
+ start, end, 0, 0, 0)
/* Unmark the piece of memory associated with a stack id as being a
stack. */
#define VALGRIND_STACK_DEREGISTER(id) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__STACK_DEREGISTER, \
- id, 0, 0, 0, 0); \
- }
+ id, 0, 0, 0, 0)
/* Change the start and end address of the stack id. */
#define VALGRIND_STACK_CHANGE(id, start, end) \
- {unsigned int _qzz_res; \
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__STACK_CHANGE, \
- id, start, end, 0, 0); \
- }
+ id, start, end, 0, 0)
+
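A hedged sketch of the stack requests, e.g. for a hand-allocated coroutine stack; STACK_SIZE is illustrative:

    char* stack = malloc(STACK_SIZE);
    unsigned id = VALGRIND_STACK_REGISTER(stack, stack + STACK_SIZE);
    /* ... switch to and run on the alternate stack ... */
    VALGRIND_STACK_DEREGISTER(id);
    free(stack);
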
+/* Load PDB debug info for Wine PE image_map. */
+#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
+ VG_USERREQ__LOAD_PDB_DEBUGINFO, \
+ fd, ptr, total_size, delta, 0)
+
+/* Map a code address to a source file name and line number. buf64
+ must point to a 64-byte buffer in the caller's address space. The
+ result will be dumped in there and is guaranteed to be zero
+ terminated. If no info is found, the first byte is set to zero. */
+#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \
+ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
+ VG_USERREQ__MAP_IP_TO_SRCLOC, \
+ addr, buf64, 0, 0, 0)
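A hedged usage sketch for the request just defined; some_code_addr is illustrative:

    char buf64[64];
    VALGRIND_MAP_IP_TO_SRCLOC((unsigned long)some_code_addr, buf64);
    /* buf64 now holds the location, or starts with '\0' if none was found. */
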
+#undef PLAT_x86_darwin
+#undef PLAT_amd64_darwin
+#undef PLAT_x86_win32
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
#undef PLAT_ppc64_linux
-#undef PLAT_ppc32_aix5
-#undef PLAT_ppc64_aix5
+#undef PLAT_arm_linux
+#undef PLAT_s390x_linux
#endif /* __VALGRIND_H */
diff --git a/src/token.h b/src/token.h
index a0afbc1..77333bc 100644
--- a/src/token.h
+++ b/src/token.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -167,8 +167,8 @@
\
/* Future reserved words (ECMA-262, section 7.6.1.2). */ \
T(FUTURE_RESERVED_WORD, NULL, 0) \
+ T(FUTURE_STRICT_RESERVED_WORD, NULL, 0) \
K(CONST, "const", 0) \
- K(NATIVE, "native", 0) \
\
/* Illegal token - not able to scan. */ \
T(ILLEGAL, "ILLEGAL", 0) \
diff --git a/src/type-info.cc b/src/type-info.cc
index 5f794bd..defb1ae 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "ast.h"
+#include "code-stubs.h"
#include "compiler.h"
#include "ic.h"
#include "macro-assembler.h"
@@ -61,7 +62,7 @@
TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
Handle<Context> global_context) {
global_context_ = global_context;
- PopulateMap(code);
+ BuildDictionary(code);
ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue);
}
@@ -74,26 +75,55 @@
}
-bool TypeFeedbackOracle::LoadIsMonomorphic(Property* expr) {
+bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
Handle<Object> map_or_code(GetInfo(expr->id()));
if (map_or_code->IsMap()) return true;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
return code->is_keyed_load_stub() &&
code->ic_state() == MONOMORPHIC &&
+ Code::ExtractTypeFromFlags(code->flags()) == NORMAL &&
code->FindFirstMap() != NULL;
}
return false;
}
-bool TypeFeedbackOracle::StoreIsMonomorphic(Expression* expr) {
+bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
+ Handle<Object> map_or_code(GetInfo(expr->id()));
+ if (map_or_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ Builtins* builtins = Isolate::Current()->builtins();
+ return code->is_keyed_load_stub() &&
+ *code != builtins->builtin(Builtins::kKeyedLoadIC_Generic) &&
+ code->ic_state() == MEGAMORPHIC;
+ }
+ return false;
+}
+
+
+bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) {
Handle<Object> map_or_code(GetInfo(expr->id()));
if (map_or_code->IsMap()) return true;
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
return code->is_keyed_store_stub() &&
- code->ic_state() == MONOMORPHIC;
+ code->ic_state() == MONOMORPHIC &&
+ Code::ExtractTypeFromFlags(code->flags()) == NORMAL;
+ }
+ return false;
+}
+
+
+bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
+ Handle<Object> map_or_code(GetInfo(expr->id()));
+ if (map_or_code->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(map_or_code);
+ Builtins* builtins = Isolate::Current()->builtins();
+ return code->is_keyed_store_stub() &&
+ *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) &&
+ *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic_Strict) &&
+ code->ic_state() == MEGAMORPHIC;
}
return false;
}
@@ -106,7 +136,7 @@
Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
- ASSERT(LoadIsMonomorphic(expr));
+ ASSERT(LoadIsMonomorphicNormal(expr));
Handle<Object> map_or_code(GetInfo(expr->id()));
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
@@ -119,7 +149,7 @@
Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
- ASSERT(StoreIsMonomorphic(expr));
+ ASSERT(StoreIsMonomorphicNormal(expr));
Handle<Object> map_or_code(GetInfo(expr->id()));
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
@@ -171,20 +201,6 @@
return check;
}
-ExternalArrayType TypeFeedbackOracle::GetKeyedLoadExternalArrayType(
- Property* expr) {
- Handle<Object> stub = GetInfo(expr->id());
- ASSERT(stub->IsCode());
- return Code::cast(*stub)->external_array_type();
-}
-
-ExternalArrayType TypeFeedbackOracle::GetKeyedStoreExternalArrayType(
- Expression* expr) {
- Handle<Object> stub = GetInfo(expr->id());
- ASSERT(stub->IsCode());
- return Code::cast(*stub)->external_array_type();
-}
-
Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
CheckType check) {
JSFunction* function = NULL;
@@ -224,8 +240,7 @@
switch (state) {
case CompareIC::UNINITIALIZED:
// Uninitialized means never executed.
- // TODO(fschneider): Introduce a separate value for never-executed ICs.
- return unknown;
+ return TypeInfo::Uninitialized();
case CompareIC::SMIS:
return TypeInfo::Smi();
case CompareIC::HEAP_NUMBERS:
@@ -286,8 +301,7 @@
switch (type) {
case BinaryOpIC::UNINITIALIZED:
// Uninitialized means never executed.
- // TODO(fschneider): Introduce a separate value for never-executed ICs
- return unknown;
+ return TypeInfo::Uninitialized();
case BinaryOpIC::SMI:
switch (result_type) {
case BinaryOpIC::UNINITIALIZED:
@@ -404,104 +418,130 @@
}
-void TypeFeedbackOracle::SetInfo(unsigned ast_id, Object* target) {
- ASSERT(dictionary_->FindEntry(ast_id) == NumberDictionary::kNotFound);
- MaybeObject* maybe_result = dictionary_->AtNumberPut(ast_id, target);
- USE(maybe_result);
-#ifdef DEBUG
- Object* result;
- // Dictionary has been allocated with sufficient size for all elements.
- ASSERT(maybe_result->ToObject(&result));
- ASSERT(*dictionary_ == result);
-#endif
+void TypeFeedbackOracle::CollectKeyedReceiverTypes(
+ unsigned ast_id,
+ ZoneMapList* types) {
+ Handle<Object> object = GetInfo(ast_id);
+ if (!object->IsCode()) return;
+ Handle<Code> code = Handle<Code>::cast(object);
+ if (code->kind() == Code::KEYED_LOAD_IC ||
+ code->kind() == Code::KEYED_STORE_IC) {
+ AssertNoAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(*code, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Object* object = info->target_object();
+ if (object->IsMap()) {
+ types->Add(Handle<Map>(Map::cast(object)));
+ }
+ }
+ }
}
-void TypeFeedbackOracle::PopulateMap(Handle<Code> code) {
- Isolate* isolate = Isolate::Current();
- HandleScope scope(isolate);
-
- const int kInitialCapacity = 16;
- List<int> code_positions(kInitialCapacity);
- List<unsigned> ast_ids(kInitialCapacity);
- CollectIds(*code, &code_positions, &ast_ids);
-
- ASSERT(dictionary_.is_null()); // Only initialize once.
- dictionary_ = isolate->factory()->NewNumberDictionary(
- code_positions.length());
-
- const int length = code_positions.length();
- ASSERT(ast_ids.length() == length);
- for (int i = 0; i < length; i++) {
- AssertNoAllocation no_allocation;
- RelocInfo info(code->instruction_start() + code_positions[i],
- RelocInfo::CODE_TARGET, 0);
- Code* target = Code::GetCodeFromTargetAddress(info.target_address());
- unsigned id = ast_ids[i];
- InlineCacheState state = target->ic_state();
- Code::Kind kind = target->kind();
-
- if (kind == Code::BINARY_OP_IC ||
- kind == Code::UNARY_OP_IC ||
- kind == Code::COMPARE_IC) {
- SetInfo(id, target);
- } else if (state == MONOMORPHIC) {
- if (kind == Code::KEYED_LOAD_IC ||
- kind == Code::KEYED_STORE_IC) {
- SetInfo(id, target);
- } else if (kind != Code::CALL_IC ||
- target->check_type() == RECEIVER_MAP_CHECK) {
- Map* map = target->FindFirstMap();
- if (map == NULL) {
- SetInfo(id, target);
- } else {
- SetInfo(id, map);
- }
- } else {
- ASSERT(target->kind() == Code::CALL_IC);
- CheckType check = target->check_type();
- ASSERT(check != RECEIVER_MAP_CHECK);
- SetInfo(id, Smi::FromInt(check));
- }
- } else if (state == MEGAMORPHIC) {
- SetInfo(id, target);
- }
- }
+// Things are a bit tricky here: The iterator for the RelocInfos and the infos
+// themselves are not GC-safe, so we first get all infos, then we create the
+// dictionary (possibly triggering GC), and finally we relocate the collected
+// infos before we process them.
+void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
+ AssertNoAllocation no_allocation;
+ ZoneList<RelocInfo> infos(16);
+ HandleScope scope;
+ GetRelocInfos(code, &infos);
+ CreateDictionary(code, &infos);
+ ProcessRelocInfos(&infos);
// Allocate handle in the parent scope.
dictionary_ = scope.CloseAndEscape(dictionary_);
}
-void TypeFeedbackOracle::CollectIds(Code* code,
- List<int>* code_positions,
- List<unsigned>* ast_ids) {
- AssertNoAllocation no_allocation;
+void TypeFeedbackOracle::GetRelocInfos(Handle<Code> code,
+ ZoneList<RelocInfo>* infos) {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
- for (RelocIterator it(code, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- ASSERT(RelocInfo::IsCodeTarget(info->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(info->target_address());
- if (target->is_inline_cache_stub()) {
- InlineCacheState state = target->ic_state();
- Code::Kind kind = target->kind();
- if (kind == Code::BINARY_OP_IC) {
- if (target->binary_op_type() ==
- BinaryOpIC::GENERIC) {
- continue;
- }
- } else if (kind == Code::COMPARE_IC) {
- if (target->compare_state() == CompareIC::GENERIC) continue;
- } else {
- if (state != MONOMORPHIC && state != MEGAMORPHIC) continue;
- }
- code_positions->Add(
- static_cast<int>(info->pc() - code->instruction_start()));
- ASSERT(ast_ids->length() == 0 ||
- (*ast_ids)[ast_ids->length()-1] !=
- static_cast<unsigned>(info->data()));
- ast_ids->Add(static_cast<unsigned>(info->data()));
- }
+ for (RelocIterator it(*code, mask); !it.done(); it.next()) {
+ infos->Add(*it.rinfo());
}
}
+
+void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
+ ZoneList<RelocInfo>* infos) {
+ DisableAssertNoAllocation allocation_allowed;
+ byte* old_start = code->instruction_start();
+ dictionary_ = FACTORY->NewNumberDictionary(infos->length());
+ byte* new_start = code->instruction_start();
+ RelocateRelocInfos(infos, old_start, new_start);
+}
+
+
+void TypeFeedbackOracle::RelocateRelocInfos(ZoneList<RelocInfo>* infos,
+ byte* old_start,
+ byte* new_start) {
+ for (int i = 0; i < infos->length(); i++) {
+ RelocInfo* info = &(*infos)[i];
+ info->set_pc(new_start + (info->pc() - old_start));
+ }
+}
+
+
+void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
+ for (int i = 0; i < infos->length(); i++) {
+ unsigned ast_id = static_cast<unsigned>((*infos)[i].data());
+ Code* target = Code::GetCodeFromTargetAddress((*infos)[i].target_address());
+ ProcessTarget(ast_id, target);
+ }
+}
+
+
+void TypeFeedbackOracle::ProcessTarget(unsigned ast_id, Code* target) {
+ switch (target->kind()) {
+ case Code::LOAD_IC:
+ case Code::STORE_IC:
+ case Code::CALL_IC:
+ case Code::KEYED_CALL_IC:
+ if (target->ic_state() == MONOMORPHIC) {
+ if (target->kind() == Code::CALL_IC &&
+ target->check_type() != RECEIVER_MAP_CHECK) {
+ SetInfo(ast_id, Smi::FromInt(target->check_type()));
+ } else {
+ Object* map = target->FindFirstMap();
+ SetInfo(ast_id, map == NULL ? static_cast<Object*>(target) : map);
+ }
+ } else if (target->ic_state() == MEGAMORPHIC) {
+ SetInfo(ast_id, target);
+ }
+ break;
+
+ case Code::KEYED_LOAD_IC:
+ case Code::KEYED_STORE_IC:
+ if (target->ic_state() == MONOMORPHIC ||
+ target->ic_state() == MEGAMORPHIC) {
+ SetInfo(ast_id, target);
+ }
+ break;
+
+ case Code::UNARY_OP_IC:
+ case Code::BINARY_OP_IC:
+ case Code::COMPARE_IC:
+ SetInfo(ast_id, target);
+ break;
+
+ default:
+ break;
+ }
+}
+
+
+void TypeFeedbackOracle::SetInfo(unsigned ast_id, Object* target) {
+ ASSERT(dictionary_->FindEntry(ast_id) == NumberDictionary::kNotFound);
+ MaybeObject* maybe_result = dictionary_->AtNumberPut(ast_id, target);
+ USE(maybe_result);
+#ifdef DEBUG
+ Object* result = NULL;
+ // Dictionary has been allocated with sufficient size for all elements.
+ ASSERT(maybe_result->ToObject(&result));
+ ASSERT(*dictionary_ == result);
+#endif
+}
+
} } // namespace v8::internal
diff --git a/src/type-info.h b/src/type-info.h
index 828e3c7..0a8c935 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -30,12 +30,13 @@
#include "allocation.h"
#include "globals.h"
-#include "zone.h"
#include "zone-inl.h"
namespace v8 {
namespace internal {
+const int kMaxKeyedPolymorphism = 4;
+
// Unknown
// | \____________
// | |
@@ -215,8 +216,10 @@
public:
TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
- bool LoadIsMonomorphic(Property* expr);
- bool StoreIsMonomorphic(Expression* expr);
+ bool LoadIsMonomorphicNormal(Property* expr);
+ bool LoadIsMegamorphicWithTypeInfo(Property* expr);
+ bool StoreIsMonomorphicNormal(Expression* expr);
+ bool StoreIsMegamorphicWithTypeInfo(Expression* expr);
bool CallIsMonomorphic(Call* expr);
Handle<Map> LoadMonomorphicReceiverType(Property* expr);
@@ -227,9 +230,8 @@
ZoneMapList* CallReceiverTypes(Call* expr,
Handle<String> name,
CallKind call_kind);
-
- ExternalArrayType GetKeyedLoadExternalArrayType(Property* expr);
- ExternalArrayType GetKeyedStoreExternalArrayType(Expression* expr);
+ void CollectKeyedReceiverTypes(unsigned ast_id,
+ ZoneMapList* types);
CheckType GetCallCheckType(Call* expr);
Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
@@ -251,11 +253,14 @@
void SetInfo(unsigned ast_id, Object* target);
- void PopulateMap(Handle<Code> code);
-
- void CollectIds(Code* code,
- List<int>* code_positions,
- List<unsigned>* ast_ids);
+ void BuildDictionary(Handle<Code> code);
+ void GetRelocInfos(Handle<Code> code, ZoneList<RelocInfo>* infos);
+ void CreateDictionary(Handle<Code> code, ZoneList<RelocInfo>* infos);
+ void RelocateRelocInfos(ZoneList<RelocInfo>* infos,
+ byte* old_start,
+ byte* new_start);
+ void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
+ void ProcessTarget(unsigned ast_id, Code* target);
// Returns an element from the backing store. Returns undefined if
// there is no information.
diff --git a/src/utils.cc b/src/utils.cc
index b466301..89ef4c6 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -26,211 +26,26 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdarg.h>
-
-#include "v8.h"
-
-#include "platform.h"
-
-#include "sys/stat.h"
+#include "../include/v8stdint.h"
+#include "checks.h"
+#include "utils.h"
namespace v8 {
namespace internal {
-void PrintF(const char* format, ...) {
- va_list arguments;
- va_start(arguments, format);
- OS::VPrint(format, arguments);
- va_end(arguments);
-}
-
-
-void PrintF(FILE* out, const char* format, ...) {
- va_list arguments;
- va_start(arguments, format);
- OS::VFPrint(out, format, arguments);
- va_end(arguments);
-}
-
-
-void Flush(FILE* out) {
- fflush(out);
-}
-
-
-char* ReadLine(const char* prompt) {
- char* result = NULL;
- char line_buf[256];
- int offset = 0;
- bool keep_going = true;
- fprintf(stdout, "%s", prompt);
- fflush(stdout);
- while (keep_going) {
- if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) {
- // fgets got an error. Just give up.
- if (result != NULL) {
- DeleteArray(result);
- }
- return NULL;
- }
- int len = StrLength(line_buf);
- if (len > 1 &&
- line_buf[len - 2] == '\\' &&
- line_buf[len - 1] == '\n') {
- // When we read a line that ends with a "\" we remove the escape and
- // append the remainder.
- line_buf[len - 2] = '\n';
- line_buf[len - 1] = 0;
- len -= 1;
- } else if ((len > 0) && (line_buf[len - 1] == '\n')) {
- // Since we read a new line we are done reading the line. This
- // will exit the loop after copying this buffer into the result.
- keep_going = false;
- }
- if (result == NULL) {
- // Allocate the initial result and make room for the terminating '\0'
- result = NewArray<char>(len + 1);
- } else {
- // Allocate a new result with enough room for the new addition.
- int new_len = offset + len + 1;
- char* new_result = NewArray<char>(new_len);
- // Copy the existing input into the new array and set the new
- // array as the result.
- memcpy(new_result, result, offset * kCharSize);
- DeleteArray(result);
- result = new_result;
- }
- // Copy the newly read line into the result.
- memcpy(result + offset, line_buf, len * kCharSize);
- offset += len;
- }
- ASSERT(result != NULL);
- result[offset] = '\0';
- return result;
-}
-
-
-char* ReadCharsFromFile(const char* filename,
- int* size,
- int extra_space,
- bool verbose) {
- FILE* file = OS::FOpen(filename, "rb");
- if (file == NULL || fseek(file, 0, SEEK_END) != 0) {
- if (verbose) {
- OS::PrintError("Cannot read from file %s.\n", filename);
- }
- return NULL;
- }
-
- // Get the size of the file and rewind it.
- *size = ftell(file);
- rewind(file);
-
- char* result = NewArray<char>(*size + extra_space);
- for (int i = 0; i < *size;) {
- int read = static_cast<int>(fread(&result[i], 1, *size - i, file));
- if (read <= 0) {
- fclose(file);
- DeleteArray(result);
- return NULL;
- }
- i += read;
- }
- fclose(file);
- return result;
-}
-
-
-byte* ReadBytes(const char* filename, int* size, bool verbose) {
- char* chars = ReadCharsFromFile(filename, size, 0, verbose);
- return reinterpret_cast<byte*>(chars);
-}
-
-
-Vector<const char> ReadFile(const char* filename,
- bool* exists,
- bool verbose) {
- int size;
- char* result = ReadCharsFromFile(filename, &size, 1, verbose);
- if (!result) {
- *exists = false;
- return Vector<const char>::empty();
- }
- result[size] = '\0';
- *exists = true;
- return Vector<const char>(result, size);
-}
-
-
-int WriteCharsToFile(const char* str, int size, FILE* f) {
- int total = 0;
- while (total < size) {
- int write = static_cast<int>(fwrite(str, 1, size - total, f));
- if (write == 0) {
- return total;
- }
- total += write;
- str += write;
- }
- return total;
-}
-
-
-int AppendChars(const char* filename,
- const char* str,
- int size,
- bool verbose) {
- FILE* f = OS::FOpen(filename, "ab");
- if (f == NULL) {
- if (verbose) {
- OS::PrintError("Cannot open file %s for writing.\n", filename);
- }
- return 0;
- }
- int written = WriteCharsToFile(str, size, f);
- fclose(f);
- return written;
-}
-
-
-int WriteChars(const char* filename,
- const char* str,
- int size,
- bool verbose) {
- FILE* f = OS::FOpen(filename, "wb");
- if (f == NULL) {
- if (verbose) {
- OS::PrintError("Cannot open file %s for writing.\n", filename);
- }
- return 0;
- }
- int written = WriteCharsToFile(str, size, f);
- fclose(f);
- return written;
-}
-
-
-int WriteBytes(const char* filename,
- const byte* bytes,
- int size,
- bool verbose) {
- const char* str = reinterpret_cast<const char*>(bytes);
- return WriteChars(filename, str, size, verbose);
-}
-
-
-StringBuilder::StringBuilder(int size) {
+SimpleStringBuilder::SimpleStringBuilder(int size) {
buffer_ = Vector<char>::New(size);
position_ = 0;
}
-void StringBuilder::AddString(const char* s) {
+void SimpleStringBuilder::AddString(const char* s) {
AddSubstring(s, StrLength(s));
}
-void StringBuilder::AddSubstring(const char* s, int n) {
+void SimpleStringBuilder::AddSubstring(const char* s, int n) {
ASSERT(!is_finalized() && position_ + n < buffer_.length());
ASSERT(static_cast<size_t>(n) <= strlen(s));
memcpy(&buffer_[position_], s, n * kCharSize);
@@ -238,33 +53,32 @@
}
-void StringBuilder::AddFormatted(const char* format, ...) {
- va_list arguments;
- va_start(arguments, format);
- AddFormattedList(format, arguments);
- va_end(arguments);
-}
-
-
-void StringBuilder::AddFormattedList(const char* format, va_list list) {
- ASSERT(!is_finalized() && position_ < buffer_.length());
- int n = OS::VSNPrintF(buffer_ + position_, format, list);
- if (n < 0 || n >= (buffer_.length() - position_)) {
- position_ = buffer_.length();
- } else {
- position_ += n;
- }
-}
-
-
-void StringBuilder::AddPadding(char c, int count) {
+void SimpleStringBuilder::AddPadding(char c, int count) {
for (int i = 0; i < count; i++) {
AddCharacter(c);
}
}
-char* StringBuilder::Finalize() {
+void SimpleStringBuilder::AddDecimalInteger(int32_t value) {
+ uint32_t number = static_cast<uint32_t>(value);
+ if (value < 0) {
+ AddCharacter('-');
+ number = static_cast<uint32_t>(-value);
+ }
+ int digits = 1;
+ for (uint32_t factor = 10; digits < 10; digits++, factor *= 10) {
+ if (factor > number) break;
+ }
+ position_ += digits;
+ for (int i = 1; i <= digits; i++) {
+ buffer_[position_ - i] = '0' + static_cast<char>(number % 10);
+ number /= 10;
+ }
+}
+
+
+char* SimpleStringBuilder::Finalize() {
ASSERT(!is_finalized() && position_ < buffer_.length());
buffer_[position_] = '\0';
// Make sure nobody managed to add a 0-character to the
@@ -275,97 +89,4 @@
return buffer_.start();
}
-
-MemoryMappedExternalResource::MemoryMappedExternalResource(const char* filename)
- : filename_(NULL),
- data_(NULL),
- length_(0),
- remove_file_on_cleanup_(false) {
- Init(filename);
-}
-
-
-MemoryMappedExternalResource::
- MemoryMappedExternalResource(const char* filename,
- bool remove_file_on_cleanup)
- : filename_(NULL),
- data_(NULL),
- length_(0),
- remove_file_on_cleanup_(remove_file_on_cleanup) {
- Init(filename);
-}
-
-
-MemoryMappedExternalResource::~MemoryMappedExternalResource() {
- // Release the resources if we had successfully acquired them:
- if (file_ != NULL) {
- delete file_;
- if (remove_file_on_cleanup_) {
- OS::Remove(filename_);
- }
- DeleteArray<char>(filename_);
- }
-}
-
-
-void MemoryMappedExternalResource::Init(const char* filename) {
- file_ = OS::MemoryMappedFile::open(filename);
- if (file_ != NULL) {
- filename_ = StrDup(filename);
- data_ = reinterpret_cast<char*>(file_->memory());
- length_ = file_->size();
- }
-}
-
-
-bool MemoryMappedExternalResource::EnsureIsAscii(bool abort_if_failed) const {
- bool is_ascii = true;
-
- int line_no = 1;
- const char* start_of_line = data_;
- const char* end = data_ + length_;
- for (const char* p = data_; p < end; p++) {
- char c = *p;
- if ((c & 0x80) != 0) {
- // Non-ascii detected:
- is_ascii = false;
-
- // Report the error and abort if appropriate:
- if (abort_if_failed) {
- int char_no = static_cast<int>(p - start_of_line) - 1;
-
- ASSERT(filename_ != NULL);
- PrintF("\n\n\n"
- "Abort: Non-Ascii character 0x%.2x in file %s line %d char %d",
- c, filename_, line_no, char_no);
-
- // Allow for some context up to kNumberOfLeadingContextChars chars
- // before the offending non-ascii char to help the user see where
- // the offending char is.
- const int kNumberOfLeadingContextChars = 10;
- const char* err_context = p - kNumberOfLeadingContextChars;
- if (err_context < data_) {
- err_context = data_;
- }
- // Compute the length of the error context and print it.
- int err_context_length = static_cast<int>(p - err_context);
- if (err_context_length != 0) {
- PrintF(" after \"%.*s\"", err_context_length, err_context);
- }
- PrintF(".\n\n\n");
- OS::Abort();
- }
-
- break; // Non-ascii detected. No need to continue scanning.
- }
- if (c == '\n') {
- start_of_line = p;
- line_no++;
- }
- }
-
- return is_ascii;
-}
-
-
} } // namespace v8::internal
diff --git a/src/utils.h b/src/utils.h
index da7a1d9..ecdf1c7 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -52,11 +52,9 @@
// X must be a power of 2. Returns the number of trailing zeros.
-template <typename T>
-static inline int WhichPowerOf2(T x) {
+static inline int WhichPowerOf2(uint32_t x) {
ASSERT(IsPowerOf2(x));
ASSERT(x != 0);
- if (x < 0) return 31;
int bits = 0;
#ifdef DEBUG
int original_x = x;
@@ -795,6 +793,98 @@
return BitCastHelper<Dest, Source>::cast(source);
}
+
+template<typename ElementType, int NumElements>
+class EmbeddedContainer {
+ public:
+ EmbeddedContainer() : elems_() { }
+
+ int length() { return NumElements; }
+ ElementType& operator[](int i) {
+ ASSERT(i < length());
+ return elems_[i];
+ }
+
+ private:
+ ElementType elems_[NumElements];
+};
+
+
+template<typename ElementType>
+class EmbeddedContainer<ElementType, 0> {
+ public:
+ int length() { return 0; }
+ ElementType& operator[](int i) {
+ UNREACHABLE();
+ static ElementType t = 0;
+ return t;
+ }
+};
+
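A hedged usage sketch for EmbeddedContainer; the element type and count are illustrative (the zero-element specialization below keeps EmbeddedContainer<T, 0> instantiable):

    EmbeddedContainer<int, 4> slots;
    for (int i = 0; i < slots.length(); i++) {
      slots[i] = i * i;  // operator[] ASSERT-checks the index in debug builds
    }
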
+
+// Helper class for building result strings in a character buffer. The
+// purpose of the class is to use safe operations that checks the
+// buffer bounds on all operations in debug mode.
+// This simple base class does not allow formatted output.
+class SimpleStringBuilder {
+ public:
+ // Create a string builder with a buffer of the given size. The
+ // buffer is allocated through NewArray<char> and must be
+ // deallocated by the caller of Finalize().
+ explicit SimpleStringBuilder(int size);
+
+ SimpleStringBuilder(char* buffer, int size)
+ : buffer_(buffer, size), position_(0) { }
+
+ ~SimpleStringBuilder() { if (!is_finalized()) Finalize(); }
+
+ int size() const { return buffer_.length(); }
+
+ // Get the current position in the builder.
+ int position() const {
+ ASSERT(!is_finalized());
+ return position_;
+ }
+
+ // Reset the position.
+ void Reset() { position_ = 0; }
+
+ // Add a single character to the builder. It is not allowed to add
+ // 0-characters; use the Finalize() method to terminate the string
+ // instead.
+ void AddCharacter(char c) {
+ ASSERT(c != '\0');
+ ASSERT(!is_finalized() && position_ < buffer_.length());
+ buffer_[position_++] = c;
+ }
+
+ // Add an entire string to the builder. Uses strlen() internally to
+ // compute the length of the input string.
+ void AddString(const char* s);
+
+ // Add the first 'n' characters of the given string 's' to the
+ // builder. The input string must have enough characters.
+ void AddSubstring(const char* s, int n);
+
+ // Add character padding to the builder. If count is non-positive,
+ // nothing is added to the builder.
+ void AddPadding(char c, int count);
+
+ // Add the decimal representation of the value.
+ void AddDecimalInteger(int value);
+
+ // Finalize the string by 0-terminating it and returning the buffer.
+ char* Finalize();
+
+ protected:
+ Vector<char> buffer_;
+ int position_;
+
+ bool is_finalized() const { return position_ < 0; }
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SimpleStringBuilder);
+};
+
} } // namespace v8::internal
#endif // V8_UTILS_H_
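
NOTE (illustrative sketch, not part of the patch): the hunk above splits
the old StringBuilder into a SimpleStringBuilder base class that owns the
bounded-buffer operations. A self-contained C++ mirror of that contract —
class and function names here are mine, not V8's:

    #include <cassert>
    #include <cstdio>

    // Append into a fixed buffer, assert bounds (as V8 does in debug
    // mode), and 0-terminate exactly once in Finalize().
    class BoundedBuilder {
     public:
      BoundedBuilder(char* buffer, int size)
          : buffer_(buffer), size_(size), position_(0) {}
      void AddCharacter(char c) {
        assert(c != '\0');          // '\0' is reserved for Finalize().
        assert(position_ < size_);  // Bounds check.
        buffer_[position_++] = c;
      }
      void AddString(const char* s) {
        while (*s != '\0') AddCharacter(*s++);
      }
      char* Finalize() {
        assert(position_ < size_);
        buffer_[position_] = '\0';
        position_ = -1;  // Negative position marks the builder finalized.
        return buffer_;
      }
     private:
      char* buffer_;
      int size_;
      int position_;
    };

    int main() {
      char buf[16];
      BoundedBuilder b(buf, sizeof(buf));
      b.AddString("pi=");
      b.AddCharacter('3');
      std::printf("%s\n", b.Finalize());  // Prints "pi=3".
    }
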
diff --git a/src/v8-counters.h b/src/v8-counters.h
index e3b16e9..2de8303 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -126,8 +126,9 @@
V8.GCCompactorCausedByWeakHandles) \
SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
- SC(map_slow_to_fast_elements, V8.MapSlowToFastElements) \
- SC(map_fast_to_slow_elements, V8.MapFastToSlowElements) \
+ SC(map_to_fast_elements, V8.MapToFastElements) \
+ SC(map_to_fast_double_elements, V8.MapToFastDoubleElements) \
+ SC(map_to_slow_elements, V8.MapToSlowElements) \
SC(map_to_external_array_elements, V8.MapToExternalArrayElements) \
/* How is the generic keyed-load stub used? */ \
SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
@@ -169,14 +170,10 @@
SC(named_load_inline_field, V8.NamedLoadInlineFast) \
SC(keyed_load_inline_generic, V8.KeyedLoadInlineGeneric) \
SC(keyed_load_inline_fast, V8.KeyedLoadInlineFast) \
- SC(named_load_full, V8.NamedLoadFull) \
- SC(keyed_load_full, V8.KeyedLoadFull) \
SC(keyed_store_inline_generic, V8.KeyedStoreInlineGeneric) \
SC(keyed_store_inline_fast, V8.KeyedStoreInlineFast) \
SC(named_store_inline_generic, V8.NamedStoreInlineGeneric) \
SC(named_store_inline_fast, V8.NamedStoreInlineFast) \
- SC(keyed_store_full, V8.KeyedStoreFull) \
- SC(named_store_full, V8.NamedStoreFull) \
SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
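
NOTE (illustrative sketch, not part of the patch): the SC(...) lines above
are an X-macro list, so each rename changes both the generated counter
field and its histogram caption in one place. A minimal standalone version
of the idiom (counter names here are invented):

    #include <cstdio>

    // One list, expanded twice with different definitions of SC.
    #define COUNTER_LIST(SC)                              \
      SC(map_to_fast_elements, "V8.MapToFastElements")    \
      SC(map_to_slow_elements, "V8.MapToSlowElements")

    #define DECLARE_COUNTER(name, caption) int name = 0;
    COUNTER_LIST(DECLARE_COUNTER)
    #undef DECLARE_COUNTER

    int main() {
      map_to_fast_elements++;
    #define PRINT_COUNTER(name, caption) \
      std::printf("%s = %d\n", caption, name);
      COUNTER_LIST(PRINT_COUNTER)
    #undef PRINT_COUNTER
    }
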
diff --git a/src/v8.cc b/src/v8.cc
index 0b562fc..36f835f 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -50,6 +50,9 @@
bool V8::has_fatal_error_ = false;
bool V8::use_crankshaft_ = true;
+static Mutex* entropy_mutex = OS::CreateMutex();
+static EntropySource entropy_source;
+
bool V8::Initialize(Deserializer* des) {
InitializeOncePerProcess();
@@ -100,42 +103,45 @@
}
-static uint32_t random_seed() {
- if (FLAG_random_seed == 0) {
- return random();
+static void seed_random(uint32_t* state) {
+ for (int i = 0; i < 2; ++i) {
+ if (FLAG_random_seed != 0) {
+ state[i] = FLAG_random_seed;
+ } else if (entropy_source != NULL) {
+ uint32_t val;
+ ScopedLock lock(entropy_mutex);
+ entropy_source(reinterpret_cast<unsigned char*>(&val), sizeof(uint32_t));
+ state[i] = val;
+ } else {
+ state[i] = random();
+ }
}
- return FLAG_random_seed;
}
-typedef struct {
- uint32_t hi;
- uint32_t lo;
-} random_state;
-
-
// Random number generator using George Marsaglia's MWC algorithm.
-static uint32_t random_base(random_state *state) {
- // Initialize seed using the system random(). If one of the seeds
- // should ever become zero again, or if random() returns zero, we
- // avoid getting stuck with zero bits in hi or lo by re-initializing
- // them on demand.
- if (state->hi == 0) state->hi = random_seed();
- if (state->lo == 0) state->lo = random_seed();
+static uint32_t random_base(uint32_t* state) {
+ // Initialize the seed from the flag, the entropy source, or random().
+ // No non-zero seed will ever become zero again.
+ if (state[0] == 0) seed_random(state);
- // Mix the bits.
- state->hi = 36969 * (state->hi & 0xFFFF) + (state->hi >> 16);
- state->lo = 18273 * (state->lo & 0xFFFF) + (state->lo >> 16);
- return (state->hi << 16) + (state->lo & 0xFFFF);
+ // Mix the bits. Never replaces state[i] with 0 if it is nonzero.
+ state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
+ state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
+
+ return (state[0] << 14) + (state[1] & 0x3FFFF);
+}
+
+
+void V8::SetEntropySource(EntropySource source) {
+ entropy_source = source;
}
// Used by JavaScript APIs
uint32_t V8::Random(Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
- // TODO(isolates): move lo and hi to isolate
- static random_state state = {0, 0};
- return random_base(&state);
+ return random_base(isolate->random_seed());
}
@@ -144,9 +150,7 @@
// leaks that could be used in an exploit.
uint32_t V8::RandomPrivate(Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
- // TODO(isolates): move lo and hi to isolate
- static random_state state = {0, 0};
- return random_base(&state);
+ return random_base(isolate->private_random_seed());
}
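
NOTE (illustrative sketch, not part of the patch): the rewrite above keeps
George Marsaglia's multiply-with-carry generator but moves the two
16-bit-lag streams into a uint32_t[2] owned by each isolate. A standalone
version of the patched mixing step (seeding elided; the constants are the
ones in the hunk):

    #include <cstdint>
    #include <cstdio>

    // Each stream keeps its value in the low 16 bits and its carry in
    // the high 16 bits, so a nonzero stream never collapses to zero.
    static uint32_t MwcStep(uint32_t state[2]) {
      state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
      state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
      return (state[0] << 14) + (state[1] & 0x3FFFF);
    }

    int main() {
      uint32_t state[2] = {1, 2};  // Any nonzero seeds.
      for (int i = 0; i < 4; i++) std::printf("%08x\n", MwcStep(state));
    }
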
diff --git a/src/v8.h b/src/v8.h
index 9d98521..e565ca5 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -35,13 +35,10 @@
#if defined(GOOGLE3)
// Google3 special flag handling.
#if defined(DEBUG) && defined(NDEBUG)
-// If both are defined in Google3, then we are building an optimized v8 with
-// assertions enabled.
+// V8 only uses DEBUG, and whenever it is set we are building a debug
+// version of V8. We do not use NDEBUG and simply undef it here for
+// consistency.
#undef NDEBUG
-#elif !defined(DEBUG) && !defined(NDEBUG)
-// If neither is defined in Google3, then we are building a debug v8. Mark it
-// as such.
-#define DEBUG
#endif
#endif // defined(GOOGLE3)
@@ -94,6 +91,9 @@
static void FatalProcessOutOfMemory(const char* location,
bool take_snapshot = false);
+ // Allows an entropy source to be provided for use in random number
+ // generation.
+ static void SetEntropySource(EntropySource source);
// Random number generation support. Not cryptographically safe.
static uint32_t Random(Isolate* isolate);
// We use random numbers internally in memory allocation and in the
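
NOTE (illustrative sketch, not part of the patch): SetEntropySource lets an
embedder feed the seeding path added in v8.cc. The typedef below is an
assumption inferred from the internal call site (it passes an unsigned
char* and a byte count); the exact signature in this release may differ,
and /dev/urandom is just one possible backing source:

    #include <cstddef>
    #include <cstdio>

    typedef bool (*EntropySource)(unsigned char* buffer, size_t length);

    // Fill 'buffer' with 'length' random bytes; report success.
    static bool FileEntropy(unsigned char* buffer, size_t length) {
      std::FILE* f = std::fopen("/dev/urandom", "rb");
      if (f == NULL) return false;
      size_t got = std::fread(buffer, 1, length, f);
      std::fclose(f);
      return got == length;
    }

    int main() {
      // An embedder would register the callback once at startup, e.g.
      // V8::SetEntropySource(FileEntropy), before creating isolates.
      unsigned char buf[4];
      EntropySource source = FileEntropy;
      return source(buf, sizeof(buf)) ? 0 : 1;
    }
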
diff --git a/src/v8conversions.cc b/src/v8conversions.cc
new file mode 100644
index 0000000..96056ec
--- /dev/null
+++ b/src/v8conversions.cc
@@ -0,0 +1,129 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+#include <limits.h>
+
+#include "v8.h"
+
+#include "conversions-inl.h"
+#include "v8conversions.h"
+#include "dtoa.h"
+#include "factory.h"
+#include "scanner-base.h"
+#include "strtod.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// C++-style iterator adaptor for StringInputBuffer
+// (unlike C++ iterators, the end-marker has a different type).
+class StringInputBufferIterator {
+ public:
+ class EndMarker {};
+
+ explicit StringInputBufferIterator(StringInputBuffer* buffer);
+
+ int operator*() const;
+ void operator++();
+ bool operator==(EndMarker const&) const { return end_; }
+ bool operator!=(EndMarker const&) const { return !end_; }
+
+ private:
+ StringInputBuffer* const buffer_;
+ int current_;
+ bool end_;
+};
+
+
+StringInputBufferIterator::StringInputBufferIterator(
+ StringInputBuffer* buffer) : buffer_(buffer) {
+ ++(*this);
+}
+
+int StringInputBufferIterator::operator*() const {
+ return current_;
+}
+
+
+void StringInputBufferIterator::operator++() {
+ end_ = !buffer_->has_more();
+ if (!end_) {
+ current_ = buffer_->GetNext();
+ }
+}
+} // End anonymous namespace.
+
+
+double StringToDouble(UnicodeCache* unicode_cache,
+ String* str, int flags, double empty_string_val) {
+ StringShape shape(str);
+ if (shape.IsSequentialAscii()) {
+ const char* begin = SeqAsciiString::cast(str)->GetChars();
+ const char* end = begin + str->length();
+ return InternalStringToDouble(unicode_cache, begin, end, flags,
+ empty_string_val);
+ } else if (shape.IsSequentialTwoByte()) {
+ const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
+ const uc16* end = begin + str->length();
+ return InternalStringToDouble(unicode_cache, begin, end, flags,
+ empty_string_val);
+ } else {
+ StringInputBuffer buffer(str);
+ return InternalStringToDouble(unicode_cache,
+ StringInputBufferIterator(&buffer),
+ StringInputBufferIterator::EndMarker(),
+ flags,
+ empty_string_val);
+ }
+}
+
+
+double StringToInt(UnicodeCache* unicode_cache,
+ String* str,
+ int radix) {
+ StringShape shape(str);
+ if (shape.IsSequentialAscii()) {
+ const char* begin = SeqAsciiString::cast(str)->GetChars();
+ const char* end = begin + str->length();
+ return InternalStringToInt(unicode_cache, begin, end, radix);
+ } else if (shape.IsSequentialTwoByte()) {
+ const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
+ const uc16* end = begin + str->length();
+ return InternalStringToInt(unicode_cache, begin, end, radix);
+ } else {
+ StringInputBuffer buffer(str);
+ return InternalStringToInt(unicode_cache,
+ StringInputBufferIterator(&buffer),
+ StringInputBufferIterator::EndMarker(),
+ radix);
+ }
+}
+
+} } // namespace v8::internal
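
NOTE (illustrative sketch, not part of the patch): the iterator adaptor in
this new file compares against a separate end-marker type instead of a
second iterator — the sentinel pattern C++ ranges later standardized. The
same idiom over a plain C string, with invented names:

    #include <cstdio>

    // Equality against EndMarker just asks the iterator whether it is
    // exhausted; no end position needs to be computed up front.
    class CStringIterator {
     public:
      struct EndMarker {};
      explicit CStringIterator(const char* p) : p_(p) {}
      int operator*() const { return *p_; }
      void operator++() { ++p_; }
      bool operator!=(EndMarker) const { return *p_ != '\0'; }
     private:
      const char* p_;
    };

    int main() {
      for (CStringIterator it("abc"); it != CStringIterator::EndMarker();
           ++it) {
        std::printf("%c\n", static_cast<char>(*it));
      }
    }
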
diff --git a/src/v8conversions.h b/src/v8conversions.h
new file mode 100644
index 0000000..1840e3a
--- /dev/null
+++ b/src/v8conversions.h
@@ -0,0 +1,60 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_V8CONVERSIONS_H_
+#define V8_V8CONVERSIONS_H_
+
+#include "conversions.h"
+
+namespace v8 {
+namespace internal {
+
+// Convert from Number object to C integer.
+static inline int32_t NumberToInt32(Object* number) {
+ if (number->IsSmi()) return Smi::cast(number)->value();
+ return DoubleToInt32(number->Number());
+}
+
+
+static inline uint32_t NumberToUint32(Object* number) {
+ if (number->IsSmi()) return Smi::cast(number)->value();
+ return DoubleToUint32(number->Number());
+}
+
+
+// Converts a string into a double value according to ECMA-262 9.3.1
+double StringToDouble(UnicodeCache* unicode_cache,
+ String* str,
+ int flags,
+ double empty_string_val = 0);
+
+// Converts a string into an integer.
+double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
+
+} } // namespace v8::internal
+
+#endif // V8_V8CONVERSIONS_H_
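
NOTE (illustrative sketch, not part of the patch): NumberToInt32 falls back
to DoubleToInt32, which follows ECMA-262 ToInt32: truncate, wrap modulo
2^32, reinterpret as signed; NaN and the infinities map to 0. A slow but
readable reference version (V8's real code uses faster bit manipulation):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    static int32_t ToInt32Reference(double d) {
      if (std::isnan(d) || std::isinf(d) || d == 0) return 0;
      const double kTwo32 = 4294967296.0;
      double m = std::fmod(std::trunc(d), kTwo32);
      if (m < 0) m += kTwo32;  // Wrap into [0, 2^32).
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }

    int main() {
      std::printf("%d\n", ToInt32Reference(4294967301.0));  // 5
      std::printf("%d\n", ToInt32Reference(-1.0));          // -1
      std::printf("%d\n", ToInt32Reference(2147483648.0));  // INT32_MIN
    }
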
diff --git a/src/v8globals.h b/src/v8globals.h
index a23ca19..f05a702 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -98,10 +98,6 @@
const int kProcessorCacheLineSize = 64;
// Constants relevant to double precision floating point numbers.
-
-// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
-// other bits set.
-const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
@@ -395,12 +391,11 @@
};
-// Logging and profiling.
-// A StateTag represents a possible state of the VM. When compiled with
-// ENABLE_VMSTATE_TRACKING, the logger maintains a stack of these.
-// Creating a VMState object enters a state by pushing on the stack, and
-// destroying a VMState object leaves a state by popping the current state
-// from the stack.
+// Logging and profiling. A StateTag represents a possible state of
+// the VM. The logger maintains a stack of these. Creating a VMState
+// object enters a state by pushing on the stack, and destroying a
+// VMState object leaves a state by popping the current state from the
+// stack.
#define STATE_TAG_LIST(V) \
V(JS) \
@@ -507,6 +502,16 @@
CALL_AS_FUNCTION
};
+
+static const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
+static const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
+static const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
+
+const uint64_t kHoleNanInt64 =
+ (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
+const uint64_t kLastNonNaNInt64 =
+ (static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32);
+
} } // namespace v8::internal
#endif // V8_V8GLOBALS_H_
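
NOTE (illustrative sketch, not part of the patch): the new constants give
the "hole" sentinel used by fast double arrays the bit pattern
0x7FFFFFFFFFFFFFFF — a quiet NaN whose integer value sits above
kLastNonNaNInt64 (the +Infinity pattern). A quick self-contained check:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      const uint64_t kHoleNanInt64 =
          (UINT64_C(0x7FFFFFFF) << 32) | UINT64_C(0xFFFFFFFF);
      const uint64_t kLastNonNaNInt64 = UINT64_C(0x7FF00000) << 32;
      double d;
      std::memcpy(&d, &kHoleNanInt64, sizeof(d));  // Reinterpret bits.
      std::printf("is NaN: %d\n", std::isnan(d) ? 1 : 0);          // 1
      std::printf("above last non-NaN: %d\n",
                  kHoleNanInt64 > kLastNonNaNInt64 ? 1 : 0);       // 1
    }
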
diff --git a/src/v8natives.js b/src/v8natives.js
index 700fe58..ff87804 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -56,7 +56,7 @@
%FunctionSetName(f, key);
%FunctionRemovePrototype(f);
%SetProperty(object, key, f, attributes);
- %SetES5Flag(f);
+ %SetNativeFlag(f);
}
%ToFastProperties(object);
}
@@ -106,7 +106,7 @@
// Truncate number.
return string | 0;
}
- if (IS_UNDEFINED(radix)) radix = 0;
+ radix = radix | 0;
} else {
radix = TO_INT32(radix);
if (!(radix == 0 || (2 <= radix && radix <= 36)))
@@ -132,10 +132,19 @@
function GlobalEval(x) {
if (!IS_STRING(x)) return x;
+ var receiver = this;
var global_receiver = %GlobalReceiver(global);
- var this_is_global_receiver = (this === global_receiver);
+
+ if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+ receiver = global_receiver;
+ }
+
+ var this_is_global_receiver = (receiver === global_receiver);
var global_is_detached = (global === global_receiver);
+ // For consistency with JSC we require the global object passed to
+ // eval to be the global object from which 'eval' originated. This
+ // is not mandated by the spec.
if (!this_is_global_receiver || global_is_detached) {
throw new $EvalError('The "this" object passed to eval must ' +
'be the global object from which eval originated');
@@ -144,7 +153,7 @@
var f = %CompileString(x);
if (!IS_FUNCTION(f)) return f;
- return %_CallFunction(this, f);
+ return %_CallFunction(receiver, f);
}
@@ -246,8 +255,9 @@
// Extensions for providing property getters and setters.
function ObjectDefineGetter(name, fun) {
- if (this == null && !IS_UNDETECTABLE(this)) {
- throw new $TypeError('Object.prototype.__defineGetter__: this is Null');
+ var receiver = this;
+ if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+ receiver = %GlobalReceiver(global);
}
if (!IS_FUNCTION(fun)) {
throw new $TypeError('Object.prototype.__defineGetter__: Expecting function');
@@ -256,21 +266,23 @@
desc.setGet(fun);
desc.setEnumerable(true);
desc.setConfigurable(true);
- DefineOwnProperty(ToObject(this), ToString(name), desc, false);
+ DefineOwnProperty(ToObject(receiver), ToString(name), desc, false);
}
function ObjectLookupGetter(name) {
- if (this == null && !IS_UNDETECTABLE(this)) {
- throw new $TypeError('Object.prototype.__lookupGetter__: this is Null');
+ var receiver = this;
+ if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+ receiver = %GlobalReceiver(global);
}
- return %LookupAccessor(ToObject(this), ToString(name), GETTER);
+ return %LookupAccessor(ToObject(receiver), ToString(name), GETTER);
}
function ObjectDefineSetter(name, fun) {
- if (this == null && !IS_UNDETECTABLE(this)) {
- throw new $TypeError('Object.prototype.__defineSetter__: this is Null');
+ var receiver = this;
+ if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+ receiver = %GlobalReceiver(global);
}
if (!IS_FUNCTION(fun)) {
throw new $TypeError(
@@ -280,21 +292,29 @@
desc.setSet(fun);
desc.setEnumerable(true);
desc.setConfigurable(true);
- DefineOwnProperty(ToObject(this), ToString(name), desc, false);
+ DefineOwnProperty(ToObject(receiver), ToString(name), desc, false);
}
function ObjectLookupSetter(name) {
- if (this == null && !IS_UNDETECTABLE(this)) {
- throw new $TypeError('Object.prototype.__lookupSetter__: this is Null');
+ var receiver = this;
+ if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+ receiver = %GlobalReceiver(global);
}
- return %LookupAccessor(ToObject(this), ToString(name), SETTER);
+ return %LookupAccessor(ToObject(receiver), ToString(name), SETTER);
}
function ObjectKeys(obj) {
if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
+ if (%IsJSProxy(obj)) {
+ var handler = %GetHandler(obj);
+ var keys = handler.keys;
+ if (IS_UNDEFINED(keys)) keys = DerivedKeysTrap;
+ var names = %_CallFunction(handler, keys);
+ return ToStringArray(names, "keys");
+ }
return %LocalKeys(obj);
}
@@ -302,14 +322,14 @@
// ES5 8.10.1.
function IsAccessorDescriptor(desc) {
if (IS_UNDEFINED(desc)) return false;
- return desc.hasGetter_ || desc.hasSetter_;
+ return desc.hasGetter() || desc.hasSetter();
}
// ES5 8.10.2.
function IsDataDescriptor(desc) {
if (IS_UNDEFINED(desc)) return false;
- return desc.hasValue_ || desc.hasWritable_;
+ return desc.hasValue() || desc.hasWritable();
}
@@ -323,6 +343,7 @@
return IsAccessorDescriptor(desc) && IsDataDescriptor(desc);
}
+
// ES5 8.10.4
function FromPropertyDescriptor(desc) {
if (IS_UNDEFINED(desc)) return desc;
@@ -340,6 +361,36 @@
configurable: desc.isConfigurable() };
}
+
+// Harmony Proxies
+function FromGenericPropertyDescriptor(desc) {
+ if (IS_UNDEFINED(desc)) return desc;
+ var obj = new $Object();
+
+ if (desc.hasValue()) {
+ %IgnoreAttributesAndSetProperty(obj, "value", desc.getValue(), NONE);
+ }
+ if (desc.hasWritable()) {
+ %IgnoreAttributesAndSetProperty(obj, "writable", desc.isWritable(), NONE);
+ }
+ if (desc.hasGetter()) {
+ %IgnoreAttributesAndSetProperty(obj, "get", desc.getGet(), NONE);
+ }
+ if (desc.hasSetter()) {
+ %IgnoreAttributesAndSetProperty(obj, "set", desc.getSet(), NONE);
+ }
+ if (desc.hasEnumerable()) {
+ %IgnoreAttributesAndSetProperty(obj, "enumerable",
+ desc.isEnumerable(), NONE);
+ }
+ if (desc.hasConfigurable()) {
+ %IgnoreAttributesAndSetProperty(obj, "configurable",
+ desc.isConfigurable(), NONE);
+ }
+ return obj;
+}
+
+
// ES5 8.10.5.
function ToPropertyDescriptor(obj) {
if (!IS_SPEC_OBJECT(obj)) {
@@ -386,6 +437,23 @@
}
+// For Harmony proxies.
+function ToCompletePropertyDescriptor(obj) {
+ var desc = ToPropertyDescriptor(obj);
+ if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) {
+ if (!desc.hasValue()) desc.setValue(void 0);
+ if (!desc.hasWritable()) desc.setWritable(false);
+ } else {
+ // Is accessor descriptor.
+ if (!desc.hasGetter()) desc.setGet(void 0);
+ if (!desc.hasSetter()) desc.setSet(void 0);
+ }
+ if (!desc.hasEnumerable()) desc.setEnumerable(false);
+ if (!desc.hasConfigurable()) desc.setConfigurable(false);
+ return desc;
+}
+
+
function PropertyDescriptor() {
// Initialize here so they are all in-object and have the same map.
// Default values from ES5 8.6.1.
@@ -533,25 +601,25 @@
}
-// ES5 section 8.12.2.
-function GetProperty(obj, p) {
- var prop = GetOwnProperty(obj);
- if (!IS_UNDEFINED(prop)) return prop;
- var proto = obj.__proto__;
- if (IS_NULL(proto)) return void 0;
- return GetProperty(proto, p);
-}
-
-
-// ES5 section 8.12.6
-function HasProperty(obj, p) {
- var desc = GetProperty(obj, p);
- return IS_UNDEFINED(desc) ? false : true;
-}
-
-
// ES5 section 8.12.1.
function GetOwnProperty(obj, p) {
+ if (%IsJSProxy(obj)) {
+ var handler = %GetHandler(obj);
+ var getOwnProperty = handler.getOwnPropertyDescriptor;
+ if (IS_UNDEFINED(getOwnProperty)) {
+ throw MakeTypeError("handler_trap_missing",
+ [handler, "getOwnPropertyDescriptor"]);
+ }
+ var descriptor = %_CallFunction(handler, p, getOwnProperty);
+ if (IS_UNDEFINED(descriptor)) return descriptor;
+ var desc = ToCompletePropertyDescriptor(descriptor);
+ if (!desc.isConfigurable()) {
+ throw MakeTypeError("proxy_prop_not_configurable",
+ [handler, "getOwnPropertyDescriptor", p, descriptor]);
+ }
+ return desc;
+ }
+
// GetOwnProperty returns an array indexed by the constants
// defined in macros.py.
// If p is not a property on obj undefined is returned.
@@ -564,8 +632,33 @@
}
+// Harmony proxies.
+function DefineProxyProperty(obj, p, attributes, should_throw) {
+ var handler = %GetHandler(obj);
+ var defineProperty = handler.defineProperty;
+ if (IS_UNDEFINED(defineProperty)) {
+ throw MakeTypeError("handler_trap_missing", [handler, "defineProperty"]);
+ }
+ var result = %_CallFunction(handler, p, attributes, defineProperty);
+ if (!ToBoolean(result)) {
+ if (should_throw) {
+ throw MakeTypeError("handler_returned_false",
+ [handler, "defineProperty"]);
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
+
+
// ES5 8.12.9.
function DefineOwnProperty(obj, p, desc, should_throw) {
+ if (%IsJSProxy(obj)) {
+ var attributes = FromGenericPropertyDescriptor(desc);
+ return DefineProxyProperty(obj, p, attributes, should_throw);
+ }
+
var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
// A false value here means that access checks failed.
if (current_or_access === false) return void 0;
@@ -577,7 +670,7 @@
// Step 3
if (IS_UNDEFINED(current) && !extensible) {
if (should_throw) {
- throw MakeTypeError("define_disallowed", ["defineProperty"]);
+ throw MakeTypeError("define_disallowed", [p]);
} else {
return;
}
@@ -607,7 +700,7 @@
(desc.hasEnumerable() &&
desc.isEnumerable() != current.isEnumerable())) {
if (should_throw) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ throw MakeTypeError("redefine_disallowed", [p]);
} else {
return;
}
@@ -617,7 +710,7 @@
// Step 9a
if (IsDataDescriptor(current) != IsDataDescriptor(desc)) {
if (should_throw) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ throw MakeTypeError("redefine_disallowed", [p]);
} else {
return;
}
@@ -626,7 +719,7 @@
if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
if (!current.isWritable() && desc.isWritable()) {
if (should_throw) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ throw MakeTypeError("redefine_disallowed", [p]);
} else {
return;
}
@@ -634,7 +727,7 @@
if (!current.isWritable() && desc.hasValue() &&
!SameValue(desc.getValue(), current.getValue())) {
if (should_throw) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ throw MakeTypeError("redefine_disallowed", [p]);
} else {
return;
}
@@ -644,14 +737,14 @@
if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())) {
if (should_throw) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ throw MakeTypeError("redefine_disallowed", [p]);
} else {
return;
}
}
if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
if (should_throw) {
- throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ throw MakeTypeError("redefine_disallowed", [p]);
} else {
return;
}
@@ -733,24 +826,57 @@
function ObjectGetPrototypeOf(obj) {
if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
- return obj.__proto__;
+ return %GetPrototype(obj);
}
// ES5 section 15.2.3.3
function ObjectGetOwnPropertyDescriptor(obj, p) {
if (!IS_SPEC_OBJECT(obj))
- throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyDescriptor"]);
+ throw MakeTypeError("obj_ctor_property_non_object",
+ ["getOwnPropertyDescriptor"]);
var desc = GetOwnProperty(obj, p);
return FromPropertyDescriptor(desc);
}
+// For Harmony proxies
+function ToStringArray(obj, trap) {
+ if (!IS_SPEC_OBJECT(obj)) {
+ throw MakeTypeError("proxy_non_object_prop_names", [obj, trap]);
+ }
+ var n = ToUint32(obj.length);
+ var array = new $Array(n);
+ var names = {};
+ for (var index = 0; index < n; index++) {
+ var s = ToString(obj[index]);
+ if (s in names) {
+ throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s]);
+ }
+ array[index] = s;
+ names[s] = 0;
+ }
+ return array;
+}
+
+
// ES5 section 15.2.3.4.
function ObjectGetOwnPropertyNames(obj) {
if (!IS_SPEC_OBJECT(obj))
throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
+ // Special handling for proxies.
+ if (%IsJSProxy(obj)) {
+ var handler = %GetHandler(obj);
+ var getOwnPropertyNames = handler.getOwnPropertyNames;
+ if (IS_UNDEFINED(getOwnPropertyNames)) {
+ throw MakeTypeError("handler_trap_missing",
+ [handler, "getOwnPropertyNames"]);
+ }
+ var names = %_CallFunction(handler, getOwnPropertyNames);
+ return ToStringArray(names, "getOwnPropertyNames");
+ }
+
// Find all the indexed properties.
// Get the local element names.
@@ -816,8 +942,37 @@
throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
}
var name = ToString(p);
- var desc = ToPropertyDescriptor(attributes);
- DefineOwnProperty(obj, name, desc, true);
+ if (%IsJSProxy(obj)) {
+ // Clone the attributes object for protection.
+ // TODO(rossberg): not spec'ed yet, so not sure if this should involve
+ // non-own properties as it does (or non-enumerable ones, as it doesn't?).
+ var attributesClone = {};
+ for (var a in attributes) {
+ attributesClone[a] = attributes[a];
+ }
+ DefineProxyProperty(obj, name, attributesClone, true);
+ // The following would implement the spec as in the current proposal,
+ // but after recent comments on es-discuss, is most likely obsolete.
+ /*
+ var descObj = FromGenericPropertyDescriptor(desc);
+ var names = ObjectGetOwnPropertyNames(attributes);
+ var standardNames =
+ {value: 0, writable: 0, get: 0, set: 0, enumerable: 0, configurable: 0};
+ for (var i = 0; i < names.length; i++) {
+ var N = names[i];
+ if (!(%HasLocalProperty(standardNames, N))) {
+ var attr = GetOwnProperty(attributes, N);
+ DefineOwnProperty(descObj, N, attr, true);
+ }
+ }
+ // This is really confusing the types, but it is what the proxies spec
+ // currently requires:
+ desc = descObj;
+ */
+ } else {
+ var desc = ToPropertyDescriptor(attributes);
+ DefineOwnProperty(obj, name, desc, true);
+ }
return obj;
}
@@ -845,19 +1000,41 @@
}
+// Harmony proxies.
+function ProxyFix(obj) {
+ var handler = %GetHandler(obj);
+ var fix = handler.fix;
+ if (IS_UNDEFINED(fix)) {
+ throw MakeTypeError("handler_trap_missing", [handler, "fix"]);
+ }
+ var props = %_CallFunction(handler, fix);
+ if (IS_UNDEFINED(props)) {
+ throw MakeTypeError("handler_returned_undefined", [handler, "fix"]);
+ }
+ %Fix(obj);
+ ObjectDefineProperties(obj, props);
+}
+
+
// ES5 section 15.2.3.8.
function ObjectSeal(obj) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["seal"]);
}
+ if (%IsJSProxy(obj)) {
+ ProxyFix(obj);
+ }
var names = ObjectGetOwnPropertyNames(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
var desc = GetOwnProperty(obj, name);
- if (desc.isConfigurable()) desc.setConfigurable(false);
- DefineOwnProperty(obj, name, desc, true);
+ if (desc.isConfigurable()) {
+ desc.setConfigurable(false);
+ DefineOwnProperty(obj, name, desc, true);
+ }
}
- return ObjectPreventExtension(obj);
+ %PreventExtensions(obj);
+ return obj;
}
@@ -866,15 +1043,21 @@
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["freeze"]);
}
+ if (%IsJSProxy(obj)) {
+ ProxyFix(obj);
+ }
var names = ObjectGetOwnPropertyNames(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
var desc = GetOwnProperty(obj, name);
- if (IsDataDescriptor(desc)) desc.setWritable(false);
- if (desc.isConfigurable()) desc.setConfigurable(false);
- DefineOwnProperty(obj, name, desc, true);
+ if (desc.isWritable() || desc.isConfigurable()) {
+ if (IsDataDescriptor(desc)) desc.setWritable(false);
+ desc.setConfigurable(false);
+ DefineOwnProperty(obj, name, desc, true);
+ }
}
- return ObjectPreventExtension(obj);
+ %PreventExtensions(obj);
+ return obj;
}
@@ -883,6 +1066,9 @@
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
}
+ if (%IsJSProxy(obj)) {
+ ProxyFix(obj);
+ }
%PreventExtensions(obj);
return obj;
}
@@ -893,6 +1079,9 @@
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["isSealed"]);
}
+ if (%IsJSProxy(obj)) {
+ return false;
+ }
var names = ObjectGetOwnPropertyNames(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
@@ -911,6 +1100,9 @@
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("obj_ctor_property_non_object", ["isFrozen"]);
}
+ if (%IsJSProxy(obj)) {
+ return false;
+ }
var names = ObjectGetOwnPropertyNames(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
@@ -928,7 +1120,10 @@
// ES5 section 15.2.3.13
function ObjectIsExtensible(obj) {
if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
+ throw MakeTypeError("obj_ctor_property_non_object", ["isExtensible"]);
+ }
+ if (%IsJSProxy(obj)) {
+ return true;
}
return %IsExtensible(obj);
}
@@ -1199,7 +1394,9 @@
}
}
- var name = %FunctionGetName(func);
+ var name = %FunctionNameShouldPrintAsAnonymous(func)
+ ? 'anonymous'
+ : %FunctionGetName(func);
return 'function ' + name + source;
}
@@ -1270,7 +1467,8 @@
// Set the correct length.
var length = (this.length - argc_bound) > 0 ? this.length - argc_bound : 0;
%FunctionSetLength(result, length);
-
+ %FunctionRemovePrototype(result);
+ %FunctionSetBound(result);
return result;
}
@@ -1293,7 +1491,7 @@
// The call to SetNewFunctionAttributes will ensure the prototype
// property of the resulting function is enumerable (ECMA262, 15.3.5.2).
var f = %CompileString(source)();
- %FunctionSetName(f, "anonymous");
+ %FunctionMarkNameShouldPrintAsAnonymous(f);
return %SetNewFunctionAttributes(f);
}
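
NOTE (illustrative sketch, not part of the patch): ToCompletePropertyDescriptor
above fills in ES5 defaults for fields a proxy trap left out: data
descriptors get value=undefined and writable=false, and enumerable and
configurable default to false. The same completion rule in C++, with
std::optional standing in for "absent" (accessor fields elided):

    #include <cstdio>
    #include <optional>

    struct Descriptor {
      std::optional<double> value;  // Stand-in for a JS value.
      std::optional<bool> writable, enumerable, configurable;
    };

    // Fill any absent field with its ES5 8.10 default.
    static void Complete(Descriptor* d) {
      if (!d->value) d->value = 0.0;  // Stand-in for 'undefined'.
      if (!d->writable) d->writable = false;
      if (!d->enumerable) d->enumerable = false;
      if (!d->configurable) d->configurable = false;
    }

    int main() {
      Descriptor d;
      d.value = 42.0;
      Complete(&d);
      std::printf("writable=%d enumerable=%d configurable=%d\n",
                  *d.writable ? 1 : 0, *d.enumerable ? 1 : 0,
                  *d.configurable ? 1 : 0);  // All 0.
    }
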
diff --git a/src/v8threads.cc b/src/v8threads.cc
index 26169b5..c9a8bb6 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -46,7 +46,7 @@
// current thread will be guaranteed to have the lock for a given isolate.
Locker::Locker(v8::Isolate* isolate)
: has_lock_(false),
- top_level_(false),
+ top_level_(true),
isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
if (isolate_ == NULL) {
isolate_ = i::Isolate::GetDefaultIsolateForLocking();
@@ -401,9 +401,10 @@
ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
- : Thread(isolate, "v8:CtxtSwitcher"),
+ : Thread("v8:CtxtSwitcher"),
keep_going_(true),
- sleep_ms_(every_n_ms) {
+ sleep_ms_(every_n_ms),
+ isolate_(isolate) {
}
@@ -411,7 +412,7 @@
// ContextSwitcher thread if needed.
void ContextSwitcher::StartPreemption(int every_n_ms) {
Isolate* isolate = Isolate::Current();
- ASSERT(Locker::IsLocked());
+ ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
if (isolate->context_switcher() == NULL) {
// If the ContextSwitcher thread is not running at the moment start it now.
isolate->set_context_switcher(new ContextSwitcher(isolate, every_n_ms));
@@ -428,7 +429,7 @@
// must cooperatively schedule amongst them from this point on.
void ContextSwitcher::StopPreemption() {
Isolate* isolate = Isolate::Current();
- ASSERT(Locker::IsLocked());
+ ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
if (isolate->context_switcher() != NULL) {
// The ContextSwitcher thread is running. We need to stop it and release
// its resources.
diff --git a/src/v8threads.h b/src/v8threads.h
index d8a923e..3ba823a 100644
--- a/src/v8threads.h
+++ b/src/v8threads.h
@@ -152,12 +152,15 @@
static void PreemptionReceived();
private:
- explicit ContextSwitcher(Isolate* isolate, int every_n_ms);
+ ContextSwitcher(Isolate* isolate, int every_n_ms);
+
+ Isolate* isolate() const { return isolate_; }
void Run();
bool keep_going_;
int sleep_ms_;
+ Isolate* isolate_;
};
} } // namespace v8::internal
diff --git a/src/v8utils.cc b/src/v8utils.cc
new file mode 100644
index 0000000..bf0e05d
--- /dev/null
+++ b/src/v8utils.cc
@@ -0,0 +1,360 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdarg.h>
+
+#include "v8.h"
+
+#include "platform.h"
+
+#include "sys/stat.h"
+
+namespace v8 {
+namespace internal {
+
+
+void PrintF(const char* format, ...) {
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VPrint(format, arguments);
+ va_end(arguments);
+}
+
+
+void PrintF(FILE* out, const char* format, ...) {
+ va_list arguments;
+ va_start(arguments, format);
+ OS::VFPrint(out, format, arguments);
+ va_end(arguments);
+}
+
+
+void Flush(FILE* out) {
+ fflush(out);
+}
+
+
+char* ReadLine(const char* prompt) {
+ char* result = NULL;
+ char line_buf[256];
+ int offset = 0;
+ bool keep_going = true;
+ fprintf(stdout, "%s", prompt);
+ fflush(stdout);
+ while (keep_going) {
+ if (fgets(line_buf, sizeof(line_buf), stdin) == NULL) {
+ // fgets got an error. Just give up.
+ if (result != NULL) {
+ DeleteArray(result);
+ }
+ return NULL;
+ }
+ int len = StrLength(line_buf);
+ if (len > 1 &&
+ line_buf[len - 2] == '\\' &&
+ line_buf[len - 1] == '\n') {
+ // When we read a line that ends with a "\" we remove the escape and
+ // append the remainder.
+ line_buf[len - 2] = '\n';
+ line_buf[len - 1] = 0;
+ len -= 1;
+ } else if ((len > 0) && (line_buf[len - 1] == '\n')) {
+ // Since we read a new line we are done reading the line. This
+ // will exit the loop after copying this buffer into the result.
+ keep_going = false;
+ }
+ if (result == NULL) {
+ // Allocate the initial result and make room for the terminating '\0'
+ result = NewArray<char>(len + 1);
+ } else {
+ // Allocate a new result with enough room for the new addition.
+ int new_len = offset + len + 1;
+ char* new_result = NewArray<char>(new_len);
+ // Copy the existing input into the new array and set the new
+ // array as the result.
+ memcpy(new_result, result, offset * kCharSize);
+ DeleteArray(result);
+ result = new_result;
+ }
+ // Copy the newly read line into the result.
+ memcpy(result + offset, line_buf, len * kCharSize);
+ offset += len;
+ }
+ ASSERT(result != NULL);
+ result[offset] = '\0';
+ return result;
+}
+
+
+char* ReadCharsFromFile(FILE* file,
+ int* size,
+ int extra_space,
+ bool verbose,
+ const char* filename) {
+ if (file == NULL || fseek(file, 0, SEEK_END) != 0) {
+ if (verbose) {
+ OS::PrintError("Cannot read from file %s.\n", filename);
+ }
+ return NULL;
+ }
+
+ // Get the size of the file and rewind it.
+ *size = ftell(file);
+ rewind(file);
+
+ char* result = NewArray<char>(*size + extra_space);
+ for (int i = 0; i < *size && feof(file) == 0;) {
+ int read = static_cast<int>(fread(&result[i], 1, *size - i, file));
+ if (read != (*size - i) && ferror(file) != 0) {
+ fclose(file);
+ DeleteArray(result);
+ return NULL;
+ }
+ i += read;
+ }
+ return result;
+}
+
+
+char* ReadCharsFromFile(const char* filename,
+ int* size,
+ int extra_space,
+ bool verbose) {
+ FILE* file = OS::FOpen(filename, "rb");
+ char* result = ReadCharsFromFile(file, size, extra_space, verbose, filename);
+ if (file != NULL) fclose(file);
+ return result;
+}
+
+
+byte* ReadBytes(const char* filename, int* size, bool verbose) {
+ char* chars = ReadCharsFromFile(filename, size, 0, verbose);
+ return reinterpret_cast<byte*>(chars);
+}
+
+
+static Vector<const char> SetVectorContents(char* chars,
+ int size,
+ bool* exists) {
+ if (!chars) {
+ *exists = false;
+ return Vector<const char>::empty();
+ }
+ chars[size] = '\0';
+ *exists = true;
+ return Vector<const char>(chars, size);
+}
+
+
+Vector<const char> ReadFile(const char* filename,
+ bool* exists,
+ bool verbose) {
+ int size;
+ char* result = ReadCharsFromFile(filename, &size, 1, verbose);
+ return SetVectorContents(result, size, exists);
+}
+
+
+Vector<const char> ReadFile(FILE* file,
+ bool* exists,
+ bool verbose) {
+ int size;
+ char* result = ReadCharsFromFile(file, &size, 1, verbose, "");
+ return SetVectorContents(result, size, exists);
+}
+
+
+int WriteCharsToFile(const char* str, int size, FILE* f) {
+ int total = 0;
+ while (total < size) {
+ int write = static_cast<int>(fwrite(str, 1, size - total, f));
+ if (write == 0) {
+ return total;
+ }
+ total += write;
+ str += write;
+ }
+ return total;
+}
+
+
+int AppendChars(const char* filename,
+ const char* str,
+ int size,
+ bool verbose) {
+ FILE* f = OS::FOpen(filename, "ab");
+ if (f == NULL) {
+ if (verbose) {
+ OS::PrintError("Cannot open file %s for writing.\n", filename);
+ }
+ return 0;
+ }
+ int written = WriteCharsToFile(str, size, f);
+ fclose(f);
+ return written;
+}
+
+
+int WriteChars(const char* filename,
+ const char* str,
+ int size,
+ bool verbose) {
+ FILE* f = OS::FOpen(filename, "wb");
+ if (f == NULL) {
+ if (verbose) {
+ OS::PrintError("Cannot open file %s for writing.\n", filename);
+ }
+ return 0;
+ }
+ int written = WriteCharsToFile(str, size, f);
+ fclose(f);
+ return written;
+}
+
+
+int WriteBytes(const char* filename,
+ const byte* bytes,
+ int size,
+ bool verbose) {
+ const char* str = reinterpret_cast<const char*>(bytes);
+ return WriteChars(filename, str, size, verbose);
+}
+
+
+
+void StringBuilder::AddFormatted(const char* format, ...) {
+ va_list arguments;
+ va_start(arguments, format);
+ AddFormattedList(format, arguments);
+ va_end(arguments);
+}
+
+
+void StringBuilder::AddFormattedList(const char* format, va_list list) {
+ ASSERT(!is_finalized() && position_ < buffer_.length());
+ int n = OS::VSNPrintF(buffer_ + position_, format, list);
+ if (n < 0 || n >= (buffer_.length() - position_)) {
+ position_ = buffer_.length();
+ } else {
+ position_ += n;
+ }
+}
+
+
+MemoryMappedExternalResource::MemoryMappedExternalResource(const char* filename)
+ : filename_(NULL),
+ data_(NULL),
+ length_(0),
+ remove_file_on_cleanup_(false) {
+ Init(filename);
+}
+
+
+MemoryMappedExternalResource::
+ MemoryMappedExternalResource(const char* filename,
+ bool remove_file_on_cleanup)
+ : filename_(NULL),
+ data_(NULL),
+ length_(0),
+ remove_file_on_cleanup_(remove_file_on_cleanup) {
+ Init(filename);
+}
+
+
+MemoryMappedExternalResource::~MemoryMappedExternalResource() {
+ // Release the resources if we had successfully acquired them:
+ if (file_ != NULL) {
+ delete file_;
+ if (remove_file_on_cleanup_) {
+ OS::Remove(filename_);
+ }
+ DeleteArray<char>(filename_);
+ }
+}
+
+
+void MemoryMappedExternalResource::Init(const char* filename) {
+ file_ = OS::MemoryMappedFile::open(filename);
+ if (file_ != NULL) {
+ filename_ = StrDup(filename);
+ data_ = reinterpret_cast<char*>(file_->memory());
+ length_ = file_->size();
+ }
+}
+
+
+bool MemoryMappedExternalResource::EnsureIsAscii(bool abort_if_failed) const {
+ bool is_ascii = true;
+
+ int line_no = 1;
+ const char* start_of_line = data_;
+ const char* end = data_ + length_;
+ for (const char* p = data_; p < end; p++) {
+ char c = *p;
+ if ((c & 0x80) != 0) {
+ // Non-ascii detected:
+ is_ascii = false;
+
+ // Report the error and abort if appropriate:
+ if (abort_if_failed) {
+ int char_no = static_cast<int>(p - start_of_line) - 1;
+
+ ASSERT(filename_ != NULL);
+ PrintF("\n\n\n"
+ "Abort: Non-Ascii character 0x%.2x in file %s line %d char %d",
+ c, filename_, line_no, char_no);
+
+ // Allow for some context up to kNumberOfLeadingContextChars chars
+ // before the offending non-ascii char to help the user see where
+ // the offending char is.
+ const int kNumberOfLeadingContextChars = 10;
+ const char* err_context = p - kNumberOfLeadingContextChars;
+ if (err_context < data_) {
+ err_context = data_;
+ }
+ // Compute the length of the error context and print it.
+ int err_context_length = static_cast<int>(p - err_context);
+ if (err_context_length != 0) {
+ PrintF(" after \"%.*s\"", err_context_length, err_context);
+ }
+ PrintF(".\n\n\n");
+ OS::Abort();
+ }
+
+ break; // Non-ascii detected. No need to continue scanning.
+ }
+ if (c == '\n') {
+ start_of_line = p;
+ line_no++;
+ }
+ }
+
+ return is_ascii;
+}
+
+
+} } // namespace v8::internal
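
NOTE (illustrative sketch, not part of the patch): WriteCharsToFile above
loops because fwrite may perform a short write; the same retry-until-done
shape, standalone (indexing with 'total' instead of advancing the pointer,
which is equivalent):

    #include <cstdio>

    // Write exactly 'size' bytes, retrying on short writes. Returns the
    // byte count actually written (== size unless the stream failed).
    static int WriteAll(const char* str, int size, std::FILE* f) {
      int total = 0;
      while (total < size) {
        int wrote = static_cast<int>(
            std::fwrite(str + total, 1, size - total, f));
        if (wrote == 0) break;  // Stream error: give up.
        total += wrote;
      }
      return total;
    }

    int main() {
      const char msg[] = "hello\n";
      return WriteAll(msg, sizeof(msg) - 1, stdout) == 6 ? 0 : 1;
    }
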
diff --git a/src/v8utils.h b/src/v8utils.h
index 93fc1fd..aada521 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -188,73 +188,11 @@
Vector<const char> ReadFile(const char* filename,
bool* exists,
bool verbose = true);
+Vector<const char> ReadFile(FILE* file,
+ bool* exists,
+ bool verbose = true);
-// Helper class for building result strings in a character buffer. The
-// purpose of the class is to use safe operations that checks the
-// buffer bounds on all operations in debug mode.
-class StringBuilder {
- public:
- // Create a string builder with a buffer of the given size. The
- // buffer is allocated through NewArray<char> and must be
- // deallocated by the caller of Finalize().
- explicit StringBuilder(int size);
-
- StringBuilder(char* buffer, int size)
- : buffer_(buffer, size), position_(0) { }
-
- ~StringBuilder() { if (!is_finalized()) Finalize(); }
-
- int size() const { return buffer_.length(); }
-
- // Get the current position in the builder.
- int position() const {
- ASSERT(!is_finalized());
- return position_;
- }
-
- // Reset the position.
- void Reset() { position_ = 0; }
-
- // Add a single character to the builder. It is not allowed to add
- // 0-characters; use the Finalize() method to terminate the string
- // instead.
- void AddCharacter(char c) {
- ASSERT(c != '\0');
- ASSERT(!is_finalized() && position_ < buffer_.length());
- buffer_[position_++] = c;
- }
-
- // Add an entire string to the builder. Uses strlen() internally to
- // compute the length of the input string.
- void AddString(const char* s);
-
- // Add the first 'n' characters of the given string 's' to the
- // builder. The input string must have enough characters.
- void AddSubstring(const char* s, int n);
-
- // Add formatted contents to the builder just like printf().
- void AddFormatted(const char* format, ...);
-
- // Add formatted contents like printf based on a va_list.
- void AddFormattedList(const char* format, va_list list);
-
- // Add character padding to the builder. If count is non-positive,
- // nothing is added to the builder.
- void AddPadding(char c, int count);
-
- // Finalize the string by 0-terminating it and returning the buffer.
- char* Finalize();
-
- private:
- Vector<char> buffer_;
- int position_;
-
- bool is_finalized() const { return position_ < 0; }
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
-};
-
// Copy from ASCII/16bit chars to ASCII/16bit chars.
template <typename sourcechar, typename sinkchar>
@@ -313,6 +251,19 @@
bool remove_file_on_cleanup_;
};
+class StringBuilder : public SimpleStringBuilder {
+ public:
+ explicit StringBuilder(int size) : SimpleStringBuilder(size) { }
+ StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { }
+
+ // Add formatted contents to the builder just like printf().
+ void AddFormatted(const char* format, ...);
+
+ // Add formatted contents like printf based on a va_list.
+ void AddFormattedList(const char* format, va_list list);
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
+};
} } // namespace v8::internal
diff --git a/src/variables.cc b/src/variables.cc
index 0502722..67150ea 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -57,32 +57,26 @@
}
-Slot* Variable::AsSlot() const {
- return rewrite_ == NULL ? NULL : rewrite_->AsSlot();
-}
+Slot* Variable::AsSlot() const { return rewrite_; }
bool Variable::IsStackAllocated() const {
- Slot* slot = AsSlot();
- return slot != NULL && slot->IsStackAllocated();
+ return rewrite_ != NULL && rewrite_->IsStackAllocated();
}
bool Variable::IsParameter() const {
- Slot* s = AsSlot();
- return s != NULL && s->type() == Slot::PARAMETER;
+ return rewrite_ != NULL && rewrite_->type() == Slot::PARAMETER;
}
bool Variable::IsStackLocal() const {
- Slot* s = AsSlot();
- return s != NULL && s->type() == Slot::LOCAL;
+ return rewrite_ != NULL && rewrite_->type() == Slot::LOCAL;
}
bool Variable::IsContextSlot() const {
- Slot* s = AsSlot();
- return s != NULL && s->type() == Slot::CONTEXT;
+ return rewrite_ != NULL && rewrite_->type() == Slot::CONTEXT;
}
diff --git a/src/variables.h b/src/variables.h
index b1ff0db..a9c06d1 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -81,7 +81,7 @@
// Printing support
static const char* Mode2String(Mode mode);
- // Type testing & conversion
+ // Type testing & conversion. Global variables are not slots.
Property* AsProperty() const;
Slot* AsSlot() const;
@@ -138,8 +138,8 @@
local_if_not_shadowed_ = local;
}
- Expression* rewrite() const { return rewrite_; }
- void set_rewrite(Expression* expr) { rewrite_ = expr; }
+ Slot* rewrite() const { return rewrite_; }
+ void set_rewrite(Slot* slot) { rewrite_ = slot; }
private:
Scope* scope_;
@@ -150,8 +150,7 @@
Variable* local_if_not_shadowed_;
// Code generation.
- // rewrite_ is usually a Slot or a Property, but may be any expression.
- Expression* rewrite_;
+ Slot* rewrite_;
// Valid as a LHS? (const and this are not valid LHS, for example)
bool is_valid_LHS_;
diff --git a/src/version.cc b/src/version.cc
index 34549fa..fcf6f88 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 3
-#define BUILD_NUMBER 10
-#define PATCH_LEVEL 39
+#define MINOR_VERSION 4
+#define BUILD_NUMBER 14
+#define PATCH_LEVEL 35
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/src/vm-state-inl.h b/src/vm-state-inl.h
index 1f363de..c647e56 100644
--- a/src/vm-state-inl.h
+++ b/src/vm-state-inl.h
@@ -39,7 +39,6 @@
// logger and partially threaded through the call stack. States are pushed by
// VMState construction and popped by destruction.
//
-#ifdef ENABLE_VMSTATE_TRACKING
inline const char* StateToString(StateTag state) {
switch (state) {
case JS:
@@ -61,32 +60,16 @@
VMState::VMState(Isolate* isolate, StateTag tag)
: isolate_(isolate), previous_tag_(isolate->current_vm_state()) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_state_changes) {
LOG(isolate, UncheckedStringEvent("Entering", StateToString(tag)));
LOG(isolate, UncheckedStringEvent("From", StateToString(previous_tag_)));
}
-#endif
isolate_->SetCurrentVMState(tag);
-
-#ifdef ENABLE_HEAP_PROTECTION
- if (FLAG_protect_heap) {
- if (tag == EXTERNAL) {
- // We are leaving V8.
- ASSERT(previous_tag_ != EXTERNAL);
- isolate_->heap()->Protect();
- } else if (previous_tag_ = EXTERNAL) {
- // We are entering V8.
- isolate_->heap()->Unprotect();
- }
- }
-#endif
}
VMState::~VMState() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_state_changes) {
LOG(isolate_,
UncheckedStringEvent("Leaving",
@@ -94,32 +77,10 @@
LOG(isolate_,
UncheckedStringEvent("To", StateToString(previous_tag_)));
}
-#endif // ENABLE_LOGGING_AND_PROFILING
-
-#ifdef ENABLE_HEAP_PROTECTION
- StateTag tag = isolate_->current_vm_state();
-#endif
isolate_->SetCurrentVMState(previous_tag_);
-
-#ifdef ENABLE_HEAP_PROTECTION
- if (FLAG_protect_heap) {
- if (tag == EXTERNAL) {
- // We are reentering V8.
- ASSERT(previous_tag_ != EXTERNAL);
- isolate_->heap()->Unprotect();
- } else if (previous_tag_ == EXTERNAL) {
- // We are leaving V8.
- isolate_->heap()->Protect();
- }
- }
-#endif // ENABLE_HEAP_PROTECTION
}
-#endif // ENABLE_VMSTATE_TRACKING
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
: isolate_(isolate), previous_callback_(isolate->external_callback()) {
@@ -130,8 +91,6 @@
isolate_->set_external_callback(previous_callback_);
}
-#endif // ENABLE_LOGGING_AND_PROFILING
-
} } // namespace v8::internal
diff --git a/src/vm-state.h b/src/vm-state.h
index 2062340..831e2d3 100644
--- a/src/vm-state.h
+++ b/src/vm-state.h
@@ -35,7 +35,6 @@
namespace internal {
class VMState BASE_EMBEDDED {
-#ifdef ENABLE_VMSTATE_TRACKING
public:
inline VMState(Isolate* isolate, StateTag tag);
inline ~VMState();
@@ -43,26 +42,16 @@
private:
Isolate* isolate_;
StateTag previous_tag_;
-
-#else
- public:
- VMState(Isolate* isolate, StateTag state) {}
-#endif
};
class ExternalCallbackScope BASE_EMBEDDED {
-#ifdef ENABLE_LOGGING_AND_PROFILING
public:
inline ExternalCallbackScope(Isolate* isolate, Address callback);
inline ~ExternalCallbackScope();
private:
Isolate* isolate_;
Address previous_callback_;
-#else
- public:
- ExternalCallbackScope(Isolate* isolate, Address callback) {}
-#endif
};
} } // namespace v8::internal
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 7769b03..c23eb16 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -125,7 +125,7 @@
return names[index];
}
- static Register toRegister(int code) {
+ static Register from_code(int code) {
Register r = { code };
return r;
}
@@ -640,6 +640,7 @@
void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
+ void push(Handle<Object> handle);
void pop(Register dst);
void pop(const Operand& dst);
@@ -1167,7 +1168,7 @@
// Call near relative 32-bit displacement, relative to next instruction.
void call(Label* L);
void call(Handle<Code> target,
- RelocInfo::Mode rmode,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId);
// Calls directly to the given address using a relative offset.
@@ -1349,7 +1350,9 @@
void Print();
// Check the code size generated from label to here.
- int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index fc4581c..0763989 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -362,8 +362,9 @@
__ JumpIfSmi(rax, &use_receiver);
// If the type of the result (stored in its map) is less than
- // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &exit);
// Throw away the result of the constructor invocation and use the
@@ -675,8 +676,8 @@
// Do not transform the receiver for natives.
// SharedFunctionInfo is already loaded into rbx.
- __ testb(FieldOperand(rbx, SharedFunctionInfo::kES5NativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kES5NativeBitWithinByte));
+ __ testb(FieldOperand(rbx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_zero, &shift_arguments);
// Compute the receiver in non-strict mode.
@@ -688,9 +689,8 @@
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(equal, &use_global_receiver);
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &shift_arguments);
__ bind(&convert_to_object);
@@ -847,9 +847,9 @@
__ j(not_equal, &push_receiver);
// Do not transform the receiver for natives.
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kES5NativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kES5NativeBitWithinByte));
- __ j(not_zero, &push_receiver);
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, &push_receiver);
// Compute the receiver in non-strict mode.
__ JumpIfSmi(rbx, &call_to_object, Label::kNear);
@@ -860,9 +860,8 @@
// If given receiver is already a JavaScript object then there's no
// reason for converting it.
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &push_receiver);
// Convert the receiver to an object.
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 7075e66..1a6efcb 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -125,18 +125,17 @@
__ movq(rcx, Operand(rsp, 1 * kPointerSize));
// Setup the object header.
- __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
+ __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
// Setup the fixed slots.
__ Set(rbx, 0); // Set to NULL.
__ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
- __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
- __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
+ __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
__ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
- // Copy the global object from the surrounding context.
+ // Copy the global object from the previous context.
__ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
@@ -152,7 +151,7 @@
// Need to collect. Call into runtime system.
__ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+ __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}
@@ -231,8 +230,11 @@
}
+// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result, true_result, not_string;
+ const Register map = rdx;
+
__ movq(rax, Operand(rsp, 1 * kPointerSize));
// undefined -> false
@@ -248,51 +250,47 @@
// Smis: 0 -> false, all other -> true
__ Cmp(rax, Smi::FromInt(0));
__ j(equal, &false_result);
- Condition is_smi = __ CheckSmi(rax);
- __ j(is_smi, &true_result);
+ __ JumpIfSmi(rax, &true_result);
- // 'null' => false.
+ // 'null' -> false.
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, &false_result, Label::kNear);
- // Get the map and type of the heap object.
- // We don't use CmpObjectType because we manipulate the type field.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
+ // Get the map of the heap object.
+ __ movq(map, FieldOperand(rax, HeapObject::kMapOffset));
- // Undetectable => false.
- __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
- __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
+ // Undetectable -> false.
+ __ testb(FieldOperand(map, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, &false_result, Label::kNear);
- // JavaScript object => true.
- __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
+ // JavaScript object -> true.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
__ j(above_equal, &true_result, Label::kNear);
- // String value => false iff empty.
- __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
+ // String value -> false iff empty.
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
- __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
- __ SmiTest(rdx);
+ __ cmpq(FieldOperand(rax, String::kLengthOffset), Immediate(0));
__ j(zero, &false_result, Label::kNear);
__ jmp(&true_result, Label::kNear);
__ bind(&not_string);
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &true_result, Label::kNear);
- // HeapNumber => false iff +0, -0, or NaN.
+ // HeapNumber -> false iff +0, -0, or NaN.
// These three cases set the zero flag when compared to zero using ucomisd.
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &true_result, Label::kNear);
__ xorps(xmm0, xmm0);
__ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
__ j(zero, &false_result, Label::kNear);
// Fall through to |true_result|.
- // Return 1/0 for true/false in rax.
+ // Return 1/0 for true/false in tos_.
__ bind(&true_result);
- __ Set(rax, 1);
+ __ Set(tos_, 1);
__ ret(1 * kPointerSize);
__ bind(&false_result);
- __ Set(rax, 0);
+ __ Set(tos_, 0);
__ ret(1 * kPointerSize);
}
@@ -406,12 +404,6 @@
}
-Handle<Code> GetUnaryOpStub(int key, UnaryOpIC::TypeInfo type_info) {
- UnaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
void UnaryOpStub::Generate(MacroAssembler* masm) {
switch (operand_type_) {
case UnaryOpIC::UNINITIALIZED:
@@ -432,12 +424,10 @@
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(rcx); // Save return address.
- __ push(rax);
- // Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ Push(Smi::FromInt(MinorKey()));
+
+ __ push(rax); // the operand
__ Push(Smi::FromInt(op_));
+ __ Push(Smi::FromInt(mode_));
__ Push(Smi::FromInt(operand_type_));
__ push(rcx); // Push return address.
@@ -445,10 +435,7 @@
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
- masm->isolate()),
- 4,
- 1);
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}
@@ -655,33 +642,17 @@
}
-const char* UnaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
+void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
switch (mode_) {
case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
}
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
- return name_;
-}
-
-
-Handle<Code> GetBinaryOpStub(int key,
- BinaryOpIC::TypeInfo type_info,
- BinaryOpIC::TypeInfo result_type_info) {
- BinaryOpStub stub(key, type_info, result_type_info);
- return stub.GetCode();
+ stream->Add("UnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ UnaryOpIC::GetName(operand_type_));
}
@@ -742,12 +713,7 @@
}
-const char* BinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
+void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
@@ -756,13 +722,10 @@
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
- return name_;
+ stream->Add("BinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(operands_type_));
}
@@ -1959,49 +1922,288 @@
}
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ // Stack layout:
+ // rsp[0] : return address
+ // rsp[8] : number of parameters (tagged)
+ // rsp[16] : receiver displacement
+ // rsp[24] : function
+ // Registers used over the whole function:
+ // rbx: the mapped parameter count (untagged)
+ // rax: the allocated object (tagged).
+
+ Factory* factory = masm->isolate()->factory();
+
+ __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
+ // rbx = parameter count (untagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+ __ movq(rcx, rbx);
+ __ jmp(&try_allocate, Label::kNear);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ SmiToInteger64(rcx,
+ Operand(rdx,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+ __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+ // rbx = parameter count (untagged)
+ // rcx = argument count (untagged)
+ // Compute the mapped parameter count = min(rbx, rcx) in rbx.
+ __ cmpq(rbx, rcx);
+ __ j(less_equal, &try_allocate, Label::kNear);
+ __ movq(rbx, rcx);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ Label no_parameter_map;
+ __ testq(rbx, rbx);
+ __ j(zero, &no_parameter_map, Label::kNear);
+ __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ bind(&no_parameter_map);
+
+ // 2. Backing store.
+ __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ addq(r8, Immediate(Heap::kArgumentsObjectSize));
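+ // r8 now holds the combined size of the parameter map (if any), the
+ // backing store, and the arguments object.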
+
+ // Do the allocation of all three objects in one go.
+ __ AllocateInNewSpace(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
+
+ // rax = address of new object(s) (tagged)
+ // rcx = argument count (untagged)
+ // Get the arguments boilerplate from the current (global) context into rdi.
+ Label has_mapped_parameters, copy;
+ __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+ __ testq(rbx, rbx);
+ __ j(not_zero, &has_mapped_parameters, Label::kNear);
+
+ const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
+ __ movq(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
+ __ jmp(&copy, Label::kNear);
+
+ const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
+ __ bind(&has_mapped_parameters);
+ __ movq(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
+ __ bind(&copy);
+
+ // rax = address of new object (tagged)
+ // rbx = mapped parameter count (untagged)
+ // rcx = argument count (untagged)
+ // rdi = address of boilerplate object (tagged)
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ movq(rdx, FieldOperand(rdi, i));
+ __ movq(FieldOperand(rax, i), rdx);
+ }
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ movq(rdx, Operand(rsp, 3 * kPointerSize));
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize),
+ rdx);
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ // Note: rcx is tagged from here on.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ Integer32ToSmi(rcx, rcx);
+ __ movq(FieldOperand(rax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ rcx);
+
+ // Setup the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, rdi will point there, otherwise to the
+ // backing store.
+ __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
+ __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+
+ // rax = address of new object (tagged)
+ // rbx = mapped parameter count (untagged)
+ // rcx = argument count (tagged)
+ // rdi = address of parameter map or backing store (tagged)
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ testq(rbx, rbx);
+ __ j(zero, &skip_parameter_map);
+
+ __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ // rbx contains the untagged mapped parameter count. Add 2 and tag to write.
+ __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ Integer64PlusConstantToSmi(r9, rbx, 2);
+ __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
+ __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
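+ // For example, with parameter_count == 3 and mapped_parameter_count == 2,
+ // the mapped slots get indices MIN_CONTEXT_SLOTS+2 and MIN_CONTEXT_SLOTS+1.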
+ Label parameters_loop, parameters_test;
+
+ // Load tagged parameter count into r9.
+ __ movq(r9, Operand(rsp, 1 * kPointerSize));
+ __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
+ __ addq(r8, Operand(rsp, 3 * kPointerSize));
+ __ subq(r8, r9);
+ __ Move(r11, factory->the_hole_value());
+ __ movq(rdx, rdi);
+ __ SmiToInteger64(kScratchRegister, r9);
+ __ lea(rdi, Operand(rdi, kScratchRegister,
+ times_pointer_size,
+ kParameterMapHeaderSize));
+ // r9 = loop variable (tagged)
+ // r8 = mapping index (tagged)
+ // r11 = the hole value
+ // rdx = address of parameter map (tagged)
+ // rdi = address of backing store (tagged)
+ __ jmp(&parameters_test, Label::kNear);
+
+ __ bind(&parameters_loop);
+ __ SmiSubConstant(r9, r9, Smi::FromInt(1));
+ __ SmiToInteger64(kScratchRegister, r9);
+ __ movq(FieldOperand(rdx, kScratchRegister,
+ times_pointer_size,
+ kParameterMapHeaderSize),
+ r8);
+ __ movq(FieldOperand(rdi, kScratchRegister,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ r11);
+ __ SmiAddConstant(r8, r8, Smi::FromInt(1));
+ __ bind(&parameters_test);
+ __ SmiTest(r9);
+ __ j(not_zero, &parameters_loop, Label::kNear);
+
+ __ bind(&skip_parameter_map);
+
+ // rcx = argument count (tagged)
+ // rdi = address of backing store (tagged)
+ // Copy arguments header and remaining slots (if there are any).
+ __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
+ factory->fixed_array_map());
+ __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+
+ Label arguments_loop, arguments_test;
+ __ movq(r8, rbx);
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ // Untag rcx and r8 for the loop below.
+ __ SmiToInteger64(rcx, rcx);
+ __ SmiToInteger64(r8, r8);
+ __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
+ __ subq(rdx, kScratchRegister);
+ __ jmp(&arguments_test, Label::kNear);
+
+ __ bind(&arguments_loop);
+ __ subq(rdx, Immediate(kPointerSize));
+ __ movq(r9, Operand(rdx, 0));
+ __ movq(FieldOperand(rdi, r8,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ r9);
+ __ addq(r8, Immediate(1));
+
+ __ bind(&arguments_test);
+ __ cmpq(r8, rcx);
+ __ j(less, &arguments_loop, Label::kNear);
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ // rcx = argument count (untagged)
+ __ bind(&runtime);
+ __ Integer32ToSmi(rcx, rcx);
+ __ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+ // rsp[0] : return address
+ // rsp[8] : number of parameters
+ // rsp[16] : receiver displacement
+ // rsp[24] : function
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ SmiToInteger64(rcx, rcx);
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+ __ movq(Operand(rsp, 2 * kPointerSize), rdx);
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// rsp[0] : return address
// rsp[8] : number of parameters
// rsp[16] : receiver displacement
// rsp[24] : function
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
-
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(rdx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
// Get the length from the frame.
- __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ SmiToInteger64(rcx, rcx);
__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
- __ SmiToInteger32(rcx,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- // Space on stack must already hold a smi.
- __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
- // Do not clobber the length index for the indexing operation since
- // it is used to compute the size for allocation later.
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
+ __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ SmiToInteger64(rcx, rcx);
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
__ movq(Operand(rsp, 2 * kPointerSize), rdx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ testl(rcx, rcx);
- __ j(zero, &add_arguments_object);
- __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ testq(rcx, rcx);
+ __ j(zero, &add_arguments_object, Label::kNear);
+ __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ addl(rcx, Immediate(GetArgumentsObjectSize()));
+ __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
@@ -2009,59 +2211,51 @@
// Get the arguments boilerplate from the current (global) context.
__ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
- __ movq(rdi, Operand(rdi,
- Context::SlotOffset(GetArgumentsBoilerplateIndex())));
+ const int offset =
+ Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
+ __ movq(rdi, Operand(rdi, offset));
// Copy the JS object part.
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
- __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
- __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
- __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
- __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
- __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
-
- if (type_ == NEW_NON_STRICT) {
- // Setup the callee in-object property.
- ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
- __ movq(FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize),
- kScratchRegister);
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rdi, i));
+ __ movq(FieldOperand(rax, i), rbx);
}
// Get the length (smi tagged) and set that as an in-object property too.
- ASSERT(Heap::kArgumentsLengthIndex == 0);
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ movq(rcx, Operand(rsp, 1 * kPointerSize));
__ movq(FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
+ Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
// If there are no actual arguments, we're done.
Label done;
- __ SmiTest(rcx);
+ __ testq(rcx, rcx);
__ j(zero, &done);
- // Get the parameters pointer from the stack and untag the length.
+ // Get the parameters pointer from the stack.
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, GetArgumentsObjectSize()));
+ __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
__ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+
+
__ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
- __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
+ // Untag the length for the loop below.
+ __ SmiToInteger64(rcx, rcx);
// Copy the fixed array slots.
Label loop;
__ bind(&loop);
- __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
+ __ movq(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
+ __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
__ addq(rdi, Immediate(kPointerSize));
__ subq(rdx, Immediate(kPointerSize));
- __ decl(rcx);
+ __ decq(rcx);
__ j(not_zero, &loop);
// Return and remove the on-stack parameters.
@@ -2070,7 +2264,7 @@
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
@@ -2335,6 +2529,7 @@
#else
// Already there in AMD64 calling convention.
ASSERT(arg1.is(rdi));
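+ // The ASSERT above compiles away in release builds; USE keeps arg1
+ // from triggering an unused-variable warning there.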
+ USE(arg1);
#endif
// Locate the code entry and call it.
@@ -2711,8 +2906,8 @@
factory->heap_number_map());
__ j(equal, &heap_number, Label::kNear);
if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ // Call runtime on identical objects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &not_identical, Label::kNear);
}
__ Set(rax, EQUAL);
@@ -2768,9 +2963,9 @@
// There is no test for undetectability in strict equality.
// If the first object is a JS object, we have done pointer comparison.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
Label first_non_object;
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(below, &first_non_object, Label::kNear);
// Return non-zero (eax (not rax) is not zero)
Label return_not_equal;
@@ -2783,7 +2978,7 @@
__ CmpInstanceType(rcx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -2879,9 +3074,9 @@
__ lea(rcx, Operand(rax, rdx, times_1, 0));
__ testb(rcx, Immediate(kSmiTagMask));
__ j(not_zero, &not_both_objects, Label::kNear);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
__ j(below, &not_both_objects, Label::kNear);
- __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(below, &not_both_objects, Label::kNear);
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
@@ -3004,6 +3199,7 @@
__ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
Handle<Code> adaptor =
Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
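+ // The adaptor trampoline expects the call kind in rcx.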
+ __ SetCallKind(rcx, CALL_AS_METHOD);
__ Jump(adaptor, RelocInfo::CODE_TARGET);
}
@@ -3240,9 +3436,7 @@
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, exit;
-#ifdef ENABLE_LOGGING_AND_PROFILING
Label not_outermost_js, not_outermost_js_2;
-#endif
{ // NOLINT. Scope block confuses linter.
MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
// Setup frame.
@@ -3287,7 +3481,6 @@
__ push(c_entry_fp_operand);
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate);
__ Load(rax, js_entry_sp);
@@ -3301,7 +3494,6 @@
__ bind(&not_outermost_js);
__ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
-#endif
// Call a faked try-block that does the invoke.
__ call(&invoke);
@@ -3345,7 +3537,6 @@
__ PopTryHandler();
__ bind(&exit);
-#ifdef ENABLE_LOGGING_AND_PROFILING
// Check if the current stack frame is marked as the outermost JS frame.
__ pop(rbx);
__ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
@@ -3353,7 +3544,6 @@
__ movq(kScratchRegister, js_entry_sp);
__ movq(Operand(kScratchRegister, 0), Immediate(0));
__ bind(&not_outermost_js_2);
-#endif
// Restore the top frame descriptor from the stack.
{ Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
@@ -3417,9 +3607,9 @@
__ JumpIfSmi(rax, &slow);
// Check that the left hand is a JS object. Leave its map in rax.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
__ j(below, &slow);
- __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
__ j(above, &slow);
// Get the prototype of the function.
@@ -3444,9 +3634,9 @@
// Check that the function prototype is a JS object.
__ JumpIfSmi(rbx, &slow);
- __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
__ j(below, &slow);
- __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
__ j(above, &slow);
// Register mapping:
@@ -3562,15 +3752,8 @@
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
+void CompareStub::PrintName(StringStream* stream) {
ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
const char* cc_name;
switch (cc_) {
case less: cc_name = "LT"; break;
@@ -3581,35 +3764,12 @@
case not_equal: cc_name = "NE"; break;
default: cc_name = "UnknownCondition"; break;
}
-
- const char* strict_name = "";
- if (strict_ && (cc_ == equal || cc_ == not_equal)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- const char* include_smi_compare_name = "";
- if (!include_smi_compare_) {
- include_smi_compare_name = "_NO_SMI";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s",
- cc_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name,
- include_smi_compare_name);
- return name_;
+ bool is_equality = cc_ == equal || cc_ == not_equal;
+ stream->Add("CompareStub_%s", cc_name);
+ if (strict_ && is_equality) stream->Add("_STRICT");
+ if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
+ if (!include_number_compare_) stream->Add("_NO_NUMBER");
+ if (!include_smi_compare_) stream->Add("_NO_SMI");
}
@@ -3815,15 +3975,12 @@
// Make sure that both arguments are strings if not known in advance.
if (flags_ == NO_STRING_ADD_FLAGS) {
- Condition is_smi;
- is_smi = masm->CheckSmi(rax);
- __ j(is_smi, &string_add_runtime);
+ __ JumpIfSmi(rax, &string_add_runtime);
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
__ j(above_equal, &string_add_runtime);
// First argument is a string, test second.
- is_smi = masm->CheckSmi(rdx);
- __ j(is_smi, &string_add_runtime);
+ __ JumpIfSmi(rdx, &string_add_runtime);
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
__ j(above_equal, &string_add_runtime);
} else {
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 2774403..4058118 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -59,34 +59,14 @@
};
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
class UnaryOpStub: public CodeStub {
public:
- UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
+ UnaryOpStub(Token::Value op,
+ UnaryOverwriteMode mode,
+ UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
- operand_type_(UnaryOpIC::UNINITIALIZED),
- name_(NULL) {
- }
-
- UnaryOpStub(
- int key,
- UnaryOpIC::TypeInfo operand_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- operand_type_(operand_type),
- name_(NULL) {
+ operand_type_(operand_type) {
}
private:
@@ -96,20 +76,7 @@
// Operand type information determined at runtime.
UnaryOpIC::TypeInfo operand_type_;
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("UnaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- UnaryOpIC::GetName(operand_type_));
- }
-#endif
+ virtual void PrintName(StringStream* stream);
class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
class OpBits: public BitField<Token::Value, 1, 7> {};
@@ -169,8 +136,7 @@
: op_(op),
mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED),
- name_(NULL) {
+ result_type_(BinaryOpIC::UNINITIALIZED) {
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -181,8 +147,7 @@
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
operands_type_(operands_type),
- result_type_(result_type),
- name_(NULL) { }
+ result_type_(result_type) { }
private:
enum SmiCodeGenerateHeapNumberResults {
@@ -197,20 +162,7 @@
BinaryOpIC::TypeInfo operands_type_;
BinaryOpIC::TypeInfo result_type_;
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("BinaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- BinaryOpIC::GetName(operands_type_));
- }
-#endif
+ virtual void PrintName(StringStream* stream);
// Minor key encoding in 15 bits RRRTTTOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
@@ -430,14 +382,6 @@
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("NumberToStringStub\n");
- }
-#endif
};
@@ -481,13 +425,6 @@
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
-
-#ifdef DEBUG
- void Print() {
- PrintF("StringDictionaryLookupStub\n");
- }
-#endif
-
Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index f8f2d6e..507bbd4 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -132,6 +132,7 @@
CodeDesc desc;
masm.GetCode(&desc);
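+ // Switch the buffer from writable to executable before handing out
+ // a pointer to the generated code.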
+ OS::ProtectCode(buffer, actual_size);
// Call the function from C++ through this pointer.
return FUNCTION_CAST<ModuloFunction>(buffer);
}
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 94c7850..a0648ce 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -58,9 +58,7 @@
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
-#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
-#endif
static bool RecordPositions(MacroAssembler* masm,
int pos,
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index e637ba1..ae5045f 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -67,7 +67,8 @@
// solution is to run valgrind with --smc-check=all, but this comes at a big
// performance cost. We can notify valgrind to invalidate its cache.
#ifdef VALGRIND_DISCARD_TRANSLATIONS
- VALGRIND_DISCARD_TRANSLATIONS(start, size);
+ unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
+ USE(res);
#endif
}
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index abac2b6..2a31f28 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -128,7 +128,9 @@
Address instruction_start = function->code()->instruction_start();
Address jump_table_address =
instruction_start + function->code()->safepoint_table_offset();
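+ // previous_pc is only read by assertions, so only track it in debug builds.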
+#ifdef DEBUG
Address previous_pc = instruction_start;
+#endif
SafepointTableDeoptimiztionEntryIterator deoptimizations(function->code());
Address entry_pc = NULL;
@@ -157,12 +159,16 @@
CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
RelocInfo::NONE);
+#ifdef DEBUG
previous_pc = call_end_address;
+#endif
} else {
// Not enough room for a long Call instruction. Write a short call
// instruction to a long jump placed elsewhere in the code.
+#ifdef DEBUG
Address short_call_end_address =
call_address + MacroAssembler::kShortCallInstructionLength;
+#endif
ASSERT(next_pc >= short_call_end_address);
// Write jump in jump-table.
@@ -177,7 +183,9 @@
CodePatcher call_patcher(call_address,
MacroAssembler::kShortCallInstructionLength);
call_patcher.masm()->call(jump_table_address);
+#ifdef DEBUG
previous_pc = short_call_end_address;
+#endif
}
// Continue with next deoptimization entry.
@@ -316,7 +324,7 @@
USE(height_in_bytes);
unsigned fixed_size = ComputeFixedSize(function_);
- unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
+ unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
@@ -340,6 +348,9 @@
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
+#ifdef DEBUG
+ output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -448,12 +459,15 @@
// The 'fixed' part of the frame consists of the incoming parameters and
// the part described by JavaScriptFrameConstants.
unsigned fixed_frame_size = ComputeFixedSize(function);
- unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
+ unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
+#ifdef DEBUG
+ output_frame->SetKind(Code::FUNCTION);
+#endif
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
@@ -584,7 +598,7 @@
output_frame->SetState(Smi::FromInt(state));
// Set the continuation for the topmost frame.
- if (is_topmost) {
+ if (is_topmost && bailout_type_ != DEBUGGER) {
Code* continuation = (bailout_type_ == EAGER)
? isolate_->builtins()->builtin(Builtins::kNotifyDeoptimized)
: isolate_->builtins()->builtin(Builtins::kNotifyLazyDeoptimized);
@@ -596,6 +610,26 @@
}
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers rbp and rsp are set to the correct values though.
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ input_->SetRegister(i, i * 4);
+ }
+ input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
+ }
+}
+
+
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
@@ -617,7 +651,7 @@
// We push all registers onto the stack, even though we do not need
// to restore all later.
for (int i = 0; i < kNumberOfRegisters; i++) {
- Register r = Register::toRegister(i);
+ Register r = Register::from_code(i);
__ push(r);
}
@@ -775,12 +809,12 @@
// Restore the registers from the stack.
for (int i = kNumberOfRegisters - 1; i >= 0 ; i--) {
- Register r = Register::toRegister(i);
+ Register r = Register::from_code(i);
// Do not restore rsp, simply pop the value into the next register
// and overwrite this afterwards.
if (r.is(rsp)) {
ASSERT(i > 0);
- r = Register::toRegister(i - 1);
+ r = Register::from_code(i - 1);
}
__ pop(r);
}
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 7bb2e61..14c95bc 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -1062,11 +1062,12 @@
AppendToBuffer("movq ");
current += PrintRightXMMOperand(current);
AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ } else if (opcode == 0x50) {
+ AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
} else {
const char* mnemonic = "?";
- if (opcode == 0x50) {
- mnemonic = "movmskpd";
- } else if (opcode == 0x54) {
+ if (opcode == 0x54) {
mnemonic = "andpd";
} else if (opcode == 0x56) {
mnemonic = "orpd";
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 57baf77..23c2bf8 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -78,16 +78,18 @@
}
void EmitPatchInfo() {
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- ASSERT(is_int8(delta_to_patch_site));
- __ testl(rax, Immediate(delta_to_patch_site));
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+ ASSERT(is_int8(delta_to_patch_site));
+ __ testl(rax, Immediate(delta_to_patch_site));
#ifdef DEBUG
- info_emitted_ = true;
+ info_emitted_ = true;
#endif
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
}
- bool is_bound() const { return patch_site_.is_bound(); }
-
private:
// jc will be patched with jz, jnc will become jnz.
void EmitJump(Condition cc, Label* target, Label::Distance near_jump) {
@@ -121,6 +123,7 @@
void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
+ scope_ = info->scope();
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -131,16 +134,16 @@
}
#endif
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). rcx is zero for method calls and non-zero for function
- // calls.
- if (info->is_strict_mode()) {
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). rcx is zero for method calls and non-zero for
+ // function calls.
+ if (info->is_strict_mode() || info->is_native()) {
Label ok;
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
// +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ movq(Operand(rsp, receiver_offset), kScratchRegister);
__ bind(&ok);
@@ -152,7 +155,7 @@
__ push(rdi); // Callee's JS Function.
{ Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = scope()->num_stack_slots();
+ int locals_count = info->scope()->num_stack_slots();
if (locals_count == 1) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
} else if (locals_count > 1) {
@@ -166,7 +169,7 @@
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
@@ -175,7 +178,7 @@
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in both rax and rsi. It replaces the context
@@ -183,7 +186,7 @@
__ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
// Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
+ int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
@@ -215,26 +218,21 @@
__ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// The receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
__ lea(rdx,
Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(rdx);
- __ Push(Smi::FromInt(scope()->num_parameters()));
+ __ Push(Smi::FromInt(num_parameters));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adaptor frame.
ArgumentsAccessStub stub(
is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT);
+ : ArgumentsAccessStub::NEW_NON_STRICT_SLOW);
__ CallStub(&stub);
- Variable* arguments_shadow = scope()->arguments_shadow();
- if (arguments_shadow != NULL) {
- // Store new arguments object in both "arguments" and ".arguments" slots.
- __ movq(rcx, rax);
- Move(arguments_shadow->AsSlot(), rcx, rbx, rdx);
- }
Move(arguments->AsSlot(), rax, rbx, rdx);
}
@@ -338,7 +336,7 @@
__ movq(rsp, rbp);
__ pop(rbp);
- int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
+ int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, rcx);
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -377,7 +375,7 @@
void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
codegen()->Move(result_register(), slot);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -410,7 +408,7 @@
if (true_label_ != fall_through_) __ jmp(true_label_);
} else {
__ LoadRoot(result_register(), index);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -455,7 +453,7 @@
} else {
// For simplicity we always test the accumulator register.
__ Move(result_register(), lit);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -491,7 +489,7 @@
__ Drop(count);
__ Move(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -566,13 +564,14 @@
}
-void FullCodeGenerator::DoTest(Label* if_true,
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
Label* if_false,
Label* fall_through) {
- ToBooleanStub stub;
+ ToBooleanStub stub(result_register());
__ push(result_register());
__ CallStub(&stub);
- __ testq(rax, rax);
+ __ testq(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
@@ -685,13 +684,16 @@
// We bypass the general EmitSlotSearch because we know more about
// this specific context.
- // The variable in the decl always resides in the current context.
+ // The variable in the decl always resides in the current function
+ // context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check if we have the correct context pointer.
- __ movq(rbx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
- __ cmpq(rbx, rsi);
- __ Check(equal, "Unexpected declaration in current context.");
+ // Check that we're not inside a with or catch context.
+ __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
+ __ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
+ __ Check(not_equal, "Declaration in with context.");
+ __ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
+ __ Check(not_equal, "Declaration in catch context.");
}
if (mode == Variable::CONST) {
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
@@ -739,7 +741,7 @@
// IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ EmitVariableLoad(prop->obj()->AsVariableProxy());
}
__ push(rax);
VisitForAccumulatorValue(function);
@@ -751,7 +753,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
}
}
}
@@ -824,7 +826,8 @@
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site, clause->CompareId());
+ __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ patch_site.EmitPatchInfo();
__ testq(rax, rax);
__ j(not_equal, &next_test);
@@ -878,7 +881,7 @@
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(rax, &convert);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &done_convert);
__ bind(&convert);
__ push(rax);
@@ -1068,7 +1071,7 @@
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var());
+ EmitVariableLoad(expr);
}
@@ -1089,8 +1092,7 @@
__ j(not_equal, slow);
}
// Load next context in chain.
- __ movq(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
@@ -1118,8 +1120,7 @@
__ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// Load next context in chain.
- __ movq(temp, ContextOperand(temp, Context::CLOSURE_INDEX));
- __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ movq(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
__ bind(&fast);
}
@@ -1132,7 +1133,7 @@
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- EmitCallIC(ic, mode, AstNode::kNoNumber);
+ __ call(ic, mode);
}
@@ -1151,8 +1152,7 @@
Immediate(0));
__ j(not_equal, slow);
}
- __ movq(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ movq(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
@@ -1213,7 +1213,7 @@
__ Move(rax, key_literal->handle());
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ jmp(done);
}
}
@@ -1222,21 +1222,24 @@
}
-void FullCodeGenerator::EmitVariableLoad(Variable* var) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
- Slot* slot = var->AsSlot();
- Property* property = var->AsProperty();
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+ // Record position before possible IC call.
+ SetSourcePosition(proxy->position());
+ Variable* var = proxy->var();
- if (var->is_global() && !var->is_this()) {
+ // Three cases: non-this global variables, lookup slots, and all other
+ // types of slots.
+ Slot* slot = var->AsSlot();
+ ASSERT((var->is_global() && !var->is_this()) == (slot == NULL));
+
+ if (slot == NULL) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ Move(rcx, var->name());
__ movq(rax, GlobalObjectOperand());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(rax);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -1255,7 +1258,7 @@
context()->Plug(rax);
- } else if (slot != NULL) {
+ } else {
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
? "Context slot"
: "Stack slot");
@@ -1273,34 +1276,6 @@
} else {
context()->Plug(slot);
}
-
- } else {
- Comment cmnt(masm_, "Rewritten parameter");
- ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->AsSlot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- MemOperand object_loc = EmitSlotSearch(object_slot, rax);
- __ movq(rdx, object_loc);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ Move(rax, key_literal->handle());
-
- // Do a keyed property load.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
- context()->Plug(rax);
}
}
@@ -1412,7 +1387,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ __ call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1533,7 +1508,7 @@
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
@@ -1559,29 +1534,13 @@
break;
case KEYED_PROPERTY: {
if (expr->is_compound()) {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
- __ push(slot_operand);
- __ Move(rax, property->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
__ movq(rdx, Operand(rsp, 0));
__ push(rax);
} else {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
- __ push(slot_operand);
- __ Push(property->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
}
break;
}
@@ -1593,7 +1552,7 @@
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ EmitVariableLoad(expr->target()->AsVariableProxy());
PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
@@ -1657,14 +1616,14 @@
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1686,7 +1645,8 @@
__ bind(&stub_call);
__ movq(rax, rcx);
BinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
__ bind(&smi_case);
@@ -1733,8 +1693,9 @@
OverwriteMode mode) {
__ pop(rdx);
BinaryOpStub stub(op, mode);
- // NULL signals no inlined smi code.
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
context()->Plug(rax);
}
@@ -1748,7 +1709,7 @@
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
@@ -1774,30 +1735,20 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
break;
}
case KEYED_PROPERTY: {
__ push(rax); // Preserve value.
- if (prop->is_synthetic()) {
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
- { AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
- }
- __ movq(rdx, rax);
- __ Move(rcx, prop->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ movq(rcx, rax);
- __ pop(rdx);
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ movq(rcx, rax);
+ __ pop(rdx);
__ pop(rax); // Restore value.
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
break;
}
}
@@ -1808,8 +1759,6 @@
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
- // Left-hand sides that rewrite to explicit property accesses do not reach
- // here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->AsSlot() != NULL);
@@ -1823,7 +1772,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1843,17 +1792,7 @@
__ j(not_equal, &skip);
__ movq(Operand(rbp, SlotOffset(slot)), rax);
break;
- case Slot::CONTEXT: {
- __ movq(rcx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
- __ movq(rdx, ContextOperand(rcx, slot->index()));
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &skip);
- __ movq(ContextOperand(rcx, slot->index()), rax);
- int offset = Context::SlotOffset(slot->index());
- __ movq(rdx, rax); // Preserve the stored value in eax.
- __ RecordWrite(rcx, offset, rdx, rbx);
- break;
- }
+ case Slot::CONTEXT:
case Slot::LOOKUP:
__ push(rax);
__ push(rsi);
@@ -1926,7 +1865,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1966,7 +1905,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2018,7 +1957,7 @@
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
- EmitCallIC(ic, mode, expr->id());
+ __ call(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2052,7 +1991,7 @@
Handle<Code> ic =
ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ movq(rcx, Operand(rsp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2092,7 +2031,7 @@
}
// Push the receiver of the enclosing function and do runtime call.
- __ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
+ __ push(Operand(rbp, (2 + info_->scope()->num_parameters()) * kPointerSize));
// Push the strict mode flag.
__ Push(Smi::FromInt(strict_mode_flag()));
@@ -2206,9 +2145,9 @@
__ bind(&done);
// Push function.
__ push(rax);
- // Push global receiver.
- __ movq(rbx, GlobalObjectOperand());
- __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing the hole to the call function stub.
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
__ bind(&call);
}
@@ -2229,7 +2168,7 @@
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed EmitCallIC.
+ // for a regular property use EmitKeyedCallWithIC.
if (prop->is_synthetic()) {
// Do not visit the object and key subexpressions (they are shared
// by all occurrences of the same rewritten parameter).
@@ -2247,7 +2186,7 @@
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
// Push result (function).
__ push(rax);
// Push Global receiver.
@@ -2373,9 +2312,9 @@
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, if_false);
__ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ cmpq(rbx, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, if_false);
- __ cmpq(rbx, Immediate(LAST_JS_OBJECT_TYPE));
+ __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
@@ -2396,7 +2335,7 @@
&if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
@@ -2634,7 +2573,7 @@
// parameter count in rax.
VisitForAccumulatorValue(args->at(0));
__ movq(rdx, rax);
- __ Move(rax, Smi::FromInt(scope()->num_parameters()));
+ __ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(rax);
@@ -2646,7 +2585,7 @@
Label exit;
// Get the number of formal parameters.
- __ Move(rax, Smi::FromInt(scope()->num_parameters()));
+ __ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
// Check if the calling frame is an arguments adaptor frame.
__ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
@@ -2675,16 +2614,18 @@
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax); // Map is now in rax.
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
+ // Map is now in rax.
__ j(below, &null);
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
- __ j(equal, &function);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ CmpInstanceType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &function);
// Check if the constructor in the map is a function.
__ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
@@ -2727,13 +2668,11 @@
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
}
-#endif
// Finally, we're expected to leave a value on the top of the stack.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
context()->Plug(rax);
@@ -3331,9 +3270,7 @@
__ j(not_equal, &bailout);
// Check that the array has fast elements.
- __ testb(FieldOperand(scratch, Map::kBitField2Offset),
- Immediate(1 << Map::kHasFastElements));
- __ j(zero, &bailout);
+ __ CheckFastElements(scratch, &bailout);
// Array has fast elements, so its length must be a smi.
// If the array has length zero, return the empty string.
@@ -3579,6 +3516,39 @@
}
+void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // Load the function into rax.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Prepare for the test.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Test for strict mode function.
+ __ movq(rdx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, if_true);
+
+ // Test for native function.
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, if_true);
+
+ // Not native or strict-mode function.
+ __ jmp(if_false);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ context()->Plug(if_true, if_false);
+}
+
+
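
// A standalone sketch (not V8 code) of the flag test emitted above: the
// strict-mode and native markers are single bits inside bytes of the
// SharedFunctionInfo, so one testb per flag decides the branch. The field
// layout and bit positions below are illustrative assumptions only.
#include <cstdint>
#include <cstdio>

struct FakeSharedFunctionInfo {
  uint8_t flags;  // assume bit 0 = strict mode, bit 1 = native
};

static const int kStrictModeBitWithinByte = 0;  // assumed position
static const int kNativeBitWithinByte = 1;      // assumed position

bool IsNativeOrStrictMode(const FakeSharedFunctionInfo& info) {
  // Mirrors the two testb / j(not_equal, if_true) pairs: either bit set
  // sends control to the true label.
  if (info.flags & (1 << kStrictModeBitWithinByte)) return true;
  if (info.flags & (1 << kNativeBitWithinByte)) return true;
  return false;
}

int main() {
  FakeSharedFunctionInfo sloppy = { 0 };
  FakeSharedFunctionInfo strict = { 1 << kStrictModeBitWithinByte };
  std::printf("%d %d\n", IsNativeOrStrictMode(sloppy),
              IsNativeOrStrictMode(strict));  // prints: 0 1
  return 0;
}
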
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -3609,7 +3579,7 @@
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic =
ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
- EmitCallIC(ic, mode, expr->id());
+ __ call(ic, mode, expr->id());
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
@@ -3712,8 +3682,7 @@
Comment cmt(masm_, "[ UnaryOperation (ADD)");
VisitForAccumulatorValue(expr->expression());
Label no_conversion;
- Condition is_smi = masm_->CheckSmi(result_register());
- __ j(is_smi, &no_conversion);
+ __ JumpIfSmi(result_register(), &no_conversion);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
@@ -3747,7 +3716,7 @@
// accumulator register rax.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(rax);
}
@@ -3764,7 +3733,7 @@
}
// Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->expression()->AsProperty();
@@ -3779,7 +3748,7 @@
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
@@ -3790,16 +3759,8 @@
__ push(rax); // Copy of receiver, needed for later store.
EmitNamedPropertyLoad(prop);
} else {
- if (prop->is_arguments_access()) {
- VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), rcx);
- __ push(slot_operand);
- __ Move(rax, prop->key()->AsLiteral()->handle());
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ movq(rdx, Operand(rsp, 0)); // Leave receiver on stack
__ push(rax); // Copy of key, needed for later store.
EmitKeyedPropertyLoad(prop);
@@ -3816,9 +3777,7 @@
// Call ToNumber only if operand is not a smi.
Label no_conversion;
- Condition is_smi;
- is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, &no_conversion, Label::kNear);
+ __ JumpIfSmi(rax, &no_conversion, Label::kNear);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
@@ -3878,7 +3837,8 @@
__ movq(rdx, rax);
__ Move(rax, Smi::FromInt(1));
}
- EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ patch_site.EmitPatchInfo();
__ bind(&done);
// Store the value returned in rax.
@@ -3911,7 +3871,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3928,7 +3888,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3955,7 +3915,7 @@
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(rax);
} else if (proxy != NULL &&
@@ -3978,30 +3938,18 @@
context()->Plug(rax);
} else {
// This expression cannot throw a reference error at the top level.
- context()->HandleExpression(expr);
+ VisitInCurrentContext(expr);
}
}
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
- // Check for the pattern: typeof <expression> == <string literal>.
- Literal* right_literal = right->AsLiteral();
- if (right_literal == NULL) return false;
- Handle<Object> right_literal_value = right_literal->handle();
- if (!right_literal_value->IsString()) return false;
- UnaryOperation* left_unary = left->AsUnaryOperation();
- if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
- Handle<String> check = Handle<String>::cast(right_literal_value);
-
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(left_unary->expression());
+ VisitForTypeofValue(expr);
}
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
@@ -4034,16 +3982,17 @@
Split(not_zero, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, FIRST_FUNCTION_CLASS_TYPE, rdx);
+ STATIC_ASSERT(LAST_CALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, rdx);
Split(above_equal, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(rax, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, if_true);
- __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdx);
+ __ CmpObjectType(rax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, rdx);
__ j(below, if_false);
- __ CmpInstanceType(rdx, FIRST_FUNCTION_CLASS_TYPE);
- __ j(above_equal, if_false);
+ __ CmpInstanceType(rdx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, if_false);
// Check for undetectable objects => false.
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
@@ -4051,8 +4000,18 @@
} else {
if (if_false != fall_through) __ jmp(if_false);
}
+}
- return true;
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ Split(equal, if_true, if_false, fall_through);
}
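
// A standalone sketch (not V8's AST) of the pattern match that feeds the
// two emitters above: TryLiteralCompare recognizes
// "typeof <expr> == <string literal>" and "<expr> == undefined" and routes
// them to EmitLiteralCompareTypeof / EmitLiteralCompareUndefined. The mini
// AST below is an assumption made for illustration; strict vs. non-strict
// equality handling is elided.
#include <cstdio>
#include <string>

struct Expr {
  enum Kind { TYPEOF, STRING_LITERAL, UNDEFINED_LITERAL, OTHER } kind;
  std::string literal;    // set when kind == STRING_LITERAL
  const Expr* operand;    // set when kind == TYPEOF
};

// Returns true when "left == right" matches a fast literal-compare form;
// on a typeof match, *typeof_check receives the string to compare against.
bool MatchLiteralCompare(const Expr& left, const Expr& right,
                         std::string* typeof_check) {
  if (left.kind == Expr::TYPEOF && right.kind == Expr::STRING_LITERAL) {
    *typeof_check = right.literal;
    return true;
  }
  return right.kind == Expr::UNDEFINED_LITERAL;
}

int main() {
  Expr x = { Expr::OTHER, "", nullptr };
  Expr t = { Expr::TYPEOF, "", &x };
  Expr s = { Expr::STRING_LITERAL, "function", nullptr };
  std::string check;
  std::printf("%d %s\n", MatchLiteralCompare(t, s, &check), check.c_str());
  return 0;
}
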
@@ -4071,14 +4030,12 @@
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
- if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
context()->Plug(if_true, if_false);
return;
}
+ Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
case Token::IN:
@@ -4103,11 +4060,8 @@
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = no_condition;
- bool strict = false;
switch (op) {
case Token::EQ_STRICT:
- strict = true;
- // Fall through.
case Token::EQ:
cc = equal;
__ pop(rdx);
@@ -4153,7 +4107,8 @@
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ testq(rax, rax);
@@ -4185,8 +4140,7 @@
__ j(equal, if_true);
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, if_true);
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
+ __ JumpIfSmi(rax, if_false);
// It can be an undetectable object.
__ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
@@ -4213,59 +4167,6 @@
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- RelocInfo::Mode mode,
- unsigned ast_id) {
- ASSERT(mode == RelocInfo::CODE_TARGET ||
- mode == RelocInfo::CODE_TARGET_CONTEXT);
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1);
- default:
- break;
- }
- __ call(ic, mode, ast_id);
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- JumpPatchSite* patch_site,
- unsigned ast_id) {
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1);
- default:
- break;
- }
- __ call(ic, RelocInfo::CODE_TARGET, ast_id);
- if (patch_site != NULL && patch_site->is_bound()) {
- patch_site->EmitPatchInfo();
- } else {
- __ nop(); // Signals no inlined code.
- }
-}
-
-
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT(IsAligned(frame_offset, kPointerSize));
__ movq(Operand(rbp, frame_offset), value);
@@ -4277,6 +4178,26 @@
}
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope()) {
+ // Contexts nested in the global context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ Push(Smi::FromInt(0));
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ push(ContextOperand(rsi, Context::CLOSURE_INDEX));
+ } else {
+ ASSERT(declaration_scope->is_function_scope());
+ __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+}
+
+
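
// A sketch of the three cases above as plain data flow: the closure pushed
// for context allocation depends on the kind of scope declaring the code.
// Enum and function names are illustrative, not V8 identifiers.
#include <cstdio>

enum ScopeKind { GLOBAL_SCOPE, EVAL_SCOPE, FUNCTION_SCOPE };
enum PushedArgument {
  SMI_ZERO_SENTINEL,        // runtime substitutes the canonical empty function
  CALLING_CONTEXT_CLOSURE,  // fetched from ContextOperand(rsi, CLOSURE_INDEX)
  CURRENT_FRAME_FUNCTION    // fetched from the frame's function slot
};

PushedArgument ClosureArgument(ScopeKind declaration_scope) {
  switch (declaration_scope) {
    case GLOBAL_SCOPE:   return SMI_ZERO_SENTINEL;
    case EVAL_SCOPE:     return CALLING_CONTEXT_CLOSURE;
    case FUNCTION_SCOPE: return CURRENT_FRAME_FUNCTION;
  }
  return CURRENT_FRAME_FUNCTION;  // unreachable; silences warnings
}

int main() {
  std::printf("%d %d %d\n",
              ClosureArgument(GLOBAL_SCOPE),
              ClosureArgument(EVAL_SCOPE),
              ClosureArgument(FUNCTION_SCOPE));  // prints: 0 1 2
  return 0;
}
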
// ----------------------------------------------------------------------------
// Non-local control flow support.
@@ -4285,11 +4206,11 @@
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Cook return address on top of stack (smi encoded Code* delta)
- __ movq(rdx, Operand(rsp, 0));
+ __ pop(rdx);
__ Move(rcx, masm_->CodeObject());
__ subq(rdx, rcx);
__ Integer32ToSmi(rdx, rdx);
- __ movq(Operand(rsp, 0), rdx);
+ __ push(rdx);
// Store result register while executing finally block.
__ push(result_register());
}
@@ -4298,16 +4219,13 @@
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
- // Restore result register from stack.
__ pop(result_register());
// Uncook return address.
- __ movq(rdx, Operand(rsp, 0));
+ __ pop(rdx);
__ SmiToInteger32(rdx, rdx);
__ Move(rcx, masm_->CodeObject());
__ addq(rdx, rcx);
- __ movq(Operand(rsp, 0), rdx);
- // And return.
- __ ret(0);
+ __ jmp(rdx);
}
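
// A standalone sketch (not V8 code) of the return-address "cooking" above:
// the address is stored as a smi-encoded offset from the code object's
// start so it survives code relocation by the GC, and ExitFinallyBlock
// reverses the transformation. The 32-bit smi shift matches x64's
// upper-half smi representation; addresses here are made up.
#include <cstdint>
#include <cstdio>

static const int kSmiShift = 32;  // x64 smis live in the upper 32 bits

uint64_t CookReturnAddress(uint64_t return_address, uint64_t code_start) {
  uint64_t delta = return_address - code_start;  // subq rdx, rcx
  return delta << kSmiShift;                     // Integer32ToSmi
}

uint64_t UncookReturnAddress(uint64_t cooked, uint64_t code_start) {
  uint64_t delta = cooked >> kSmiShift;          // SmiToInteger32
  return code_start + delta;                     // addq rdx, rcx
}

int main() {
  uint64_t code_start = 0x100000, return_address = 0x1002a8;
  uint64_t cooked = CookReturnAddress(return_address, code_start);
  std::printf("%#llx\n", (unsigned long long)
              UncookReturnAddress(cooked, code_start));  // 0x1002a8
  return 0;
}
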
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index cec8894..339d2c1 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -76,11 +76,11 @@
// Check that the receiver is a valid JS object.
__ movq(r1, FieldOperand(receiver, HeapObject::kMapOffset));
__ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
- __ cmpb(r0, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmpb(r0, Immediate(FIRST_SPEC_OBJECT_TYPE));
__ j(below, miss);
// If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
GenerateGlobalInstanceTypeCheck(masm, r0, miss);
@@ -225,110 +225,6 @@
}
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // Scratch registers:
- //
- // r0 - holds the untagged key on entry and holds the hash once computed.
- //
- // r1 - used to hold the capacity mask of the dictionary
- //
- // r2 - used for the index into the dictionary.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
-
- Label done;
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- __ movl(r1, r0);
- __ notl(r0);
- __ shll(r1, Immediate(15));
- __ addl(r0, r1);
- // hash = hash ^ (hash >> 12);
- __ movl(r1, r0);
- __ shrl(r1, Immediate(12));
- __ xorl(r0, r1);
- // hash = hash + (hash << 2);
- __ leal(r0, Operand(r0, r0, times_4, 0));
- // hash = hash ^ (hash >> 4);
- __ movl(r1, r0);
- __ shrl(r1, Immediate(4));
- __ xorl(r0, r1);
- // hash = hash * 2057;
- __ imull(r0, r0, Immediate(2057));
- // hash = hash ^ (hash >> 16);
- __ movl(r1, r0);
- __ shrl(r1, Immediate(16));
- __ xorl(r0, r1);
-
- // Compute capacity mask.
- __ SmiToInteger32(r1,
- FieldOperand(elements, NumberDictionary::kCapacityOffset));
- __ decl(r1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use r2 for index calculations and keep the hash intact in r0.
- __ movq(r2, r0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- __ addl(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
- }
- __ and_(r2, r1);
-
- // Scale the index by multiplying by the entry size.
- ASSERT(NumberDictionary::kEntrySize == 3);
- __ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
-
- // Check if the key matches.
- __ cmpq(key, FieldOperand(elements,
- r2,
- times_pointer_size,
- NumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
- __ j(equal, &done);
- } else {
- __ j(not_equal, miss);
- }
- }
-
- __ bind(&done);
- // Check that the value is a normal property.
- const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ASSERT_EQ(NORMAL, 0);
- __ Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
- Smi::FromInt(PropertyDetails::TypeField::mask()));
- __ j(not_zero, miss);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
- __ movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
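
// The unrolled assembly deleted above now lives in
// MacroAssembler::LoadFromNumberDictionary. Its hash, per the deleted
// comments, must stay in sync with ComputeIntegerHash in utils.h; a
// runnable C++ rendering of that hash and of the probe sequence
// "(hash + i + i * i) & mask" noted above:
#include <cstdint>
#include <cstdio>

uint32_t ComputeIntegerHash(uint32_t key) {
  uint32_t hash = key;
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);   // the leal with scale times_4 above
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}

int main() {
  const uint32_t capacity = 64;        // must be a power of two
  const uint32_t mask = capacity - 1;  // the decremented capacity above
  uint32_t hash = ComputeIntegerHash(42);
  for (int i = 0; i < 4; i++) {        // kProbes == 4 in the deleted loop
    std::printf("probe %d -> entry %u\n", i, (hash + i + i * i) & mask);
  }
  return 0;
}
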
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
@@ -508,11 +404,8 @@
GenerateKeyedLoadReceiverCheck(
masm, rdx, rcx, Map::kHasIndexedInterceptor, &slow);
- // Check the "has fast elements" bit in the receiver's map which is
- // now in rcx.
- __ testb(FieldOperand(rcx, Map::kBitField2Offset),
- Immediate(1 << Map::kHasFastElements));
- __ j(zero, &check_number_dictionary);
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(rcx, &check_number_dictionary);
GenerateFastArrayLoad(masm,
rdx,
@@ -538,7 +431,7 @@
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &slow);
- GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, r9, rdi, rax);
+ __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax);
__ ret(0);
__ bind(&slow);
@@ -730,9 +623,13 @@
__ CmpInstanceType(rbx, JS_ARRAY_TYPE);
__ j(equal, &array);
- // Check that the object is some kind of JS object.
- __ CmpInstanceType(rbx, FIRST_JS_OBJECT_TYPE);
+ // Check that the object is some kind of JSObject.
+ __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE);
__ j(below, &slow);
+ __ CmpInstanceType(rbx, JS_PROXY_TYPE);
+ __ j(equal, &slow);
+ __ CmpInstanceType(rbx, JS_FUNCTION_PROXY_TYPE);
+ __ j(equal, &slow);
// Object case: Check key against length in the elements array.
// rax: value
@@ -1098,7 +995,7 @@
__ j(not_equal, &slow_load);
__ SmiToInteger32(rbx, rcx);
// ebx: untagged index
- GenerateNumberDictionaryLoad(masm, &slow_load, rax, rcx, rbx, r9, rdi, rdi);
+ __ LoadFromNumberDictionary(&slow_load, rax, rcx, rbx, r9, rdi, rdi);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
__ jmp(&do_call);
@@ -1197,6 +1094,171 @@
}
+static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the elements
+ // map check later, we do not need to check for interceptors or
+ // whether it requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
+ __ j(below, slow_case);
+
+ // Check that the key is a positive smi.
+ Condition check = masm->CheckNonNegativeSmi(key);
+ __ j(NegateCondition(check), slow_case);
+
+ // Load the elements into scratch1 and check its map. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ __ movq(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments.
+ __ movq(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+ __ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
+ __ cmpq(key, scratch2);
+ __ j(greater_equal, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
+ __ SmiToInteger64(scratch3, key);
+ __ movq(scratch2, FieldOperand(scratch1,
+ scratch3,
+ times_pointer_size,
+ kHeaderSize));
+ __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
+ __ j(equal, unmapped_case);
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ __ movq(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
+ __ SmiToInteger64(scratch3, scratch2);
+ return FieldOperand(scratch1,
+ scratch3,
+ times_pointer_size,
+ Context::kHeaderSize);
+}
+
+
+static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in arguments backing store, which is referenced by the
+ // second element of the parameter_map. The parameter_map register
+ // must be loaded with the parameter map of the arguments object and is
+ // overwritten.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ movq(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+ __ movq(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmpq(key, scratch);
+ __ j(greater_equal, slow_case);
+ __ SmiToInteger64(scratch, key);
+ return FieldOperand(backing_store,
+ scratch,
+ times_pointer_size,
+ FixedArray::kHeaderSize);
+}
+
+
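
// A standalone sketch (simplified types, not V8 objects) of the layout the
// two lookups above walk: a non-strict arguments object keeps a "parameter
// map" whose per-index entries either name a context slot (mapped, read
// from the context) or hold the hole (unmapped, read from a backing-store
// FixedArray). In V8 the context and backing store are the first two
// parameter-map entries; they are separate fields here for clarity.
#include <cstdio>
#include <vector>

static const int kHole = -1;  // stands in for the-hole

struct ArgumentsObject {
  std::vector<int> context;        // context slots holding aliased arguments
  std::vector<int> parameter_map;  // [i] = context slot index, or kHole
  std::vector<int> backing_store;  // values of unmapped arguments
};

bool LoadArgument(const ArgumentsObject& args, size_t key, int* out) {
  if (key < args.parameter_map.size() && args.parameter_map[key] != kHole) {
    *out = args.context[args.parameter_map[key]];  // mapped case
    return true;
  }
  if (key < args.backing_store.size()) {
    *out = args.backing_store[key];                // unmapped case
    return true;
  }
  return false;                                    // slow case
}

int main() {
  ArgumentsObject args = { { 7, 8 }, { 0, kHole }, { 100, 200 } };
  int value = 0;
  LoadArgument(args, 0, &value); std::printf("%d\n", value);  // 7
  LoadArgument(args, 1, &value); std::printf("%d\n", value);  // 200
  return 0;
}
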
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(
+ masm, rdx, rax, rbx, rcx, rdi, &notin, &slow);
+ __ movq(rax, mapped_location);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in rbx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, rax, rbx, rcx, &slow);
+ __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &slow);
+ __ movq(rax, unmapped_location);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Operand mapped_location = GenerateMappedArgumentsLookup(
+ masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
+ __ movq(mapped_location, rax);
+ __ lea(r9, mapped_location);
+ __ movq(r8, rax);
+ __ RecordWrite(rbx, r9, r8);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in rbx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
+ __ movq(unmapped_location, rax);
+ __ lea(r9, unmapped_location);
+ __ movq(r8, rax);
+ __ RecordWrite(rbx, r9, r8);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+ int argc) {
+ // ----------- S t a t e -------------
+ // rcx : function name
+ // rsp[0] : return address
+ // rsp[8] : argument argc
+ // rsp[16] : argument argc - 1
+ // ...
+ // rsp[argc * 8] : argument 1
+ // rsp[(argc + 1) * 8] : argument 0 = receiver
+ // -----------------------------------
+ Label slow, notin;
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ Operand mapped_location = GenerateMappedArgumentsLookup(
+ masm, rdx, rcx, rbx, rax, r8, &notin, &slow);
+ __ movq(rdi, mapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow);
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in rbx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rax, &slow);
+ __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
+ __ j(equal, &slow);
+ __ movq(rdi, unmapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow);
+ __ bind(&slow);
+ GenerateMiss(masm, argc);
+}
+
+
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 9fa11fa..e505058 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -144,7 +144,7 @@
// when called as functions (without an explicit receiver
// object). rcx is zero for method calls and non-zero for function
// calls.
- if (info_->is_strict_mode()) {
+ if (info_->is_strict_mode() || info_->is_native()) {
Label ok;
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
@@ -197,7 +197,7 @@
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
// Context is returned in both rax and rsi. It replaces the context
@@ -275,11 +275,25 @@
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- code->Generate();
- __ jmp(code->exit());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+ LDeferredCode* code = deferred_[i];
+ __ bind(code->entry());
+ code->Generate();
+ __ jmp(code->exit());
+ }
+
+ // Pad code to ensure that the last piece of deferred code has
+ // room for lazy bailout.
+ while ((masm()->pc_offset() - LastSafepointEnd())
+ < Deoptimizer::patch_size()) {
+ int padding = masm()->pc_offset() - LastSafepointEnd();
+ if (padding > 9) {
+ __ nop(9);
+ } else {
+ __ nop(padding);
+ }
+ }
}
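
// A sketch of the padding loop just added: the gap between the last
// safepoint and the current pc must reach Deoptimizer::patch_size() so a
// lazy-bailout call can be patched over it later, and x64 nops are emitted
// in chunks of at most 9 bytes. Note the loop pads by the distance already
// emitted, so the gap roughly doubles per iteration until the threshold is
// crossed. Constants below are illustrative, not the real patch size.
#include <cstdio>

int main() {
  const int kPatchSize = 13;  // stand-in for Deoptimizer::patch_size()
  int pc_offset = 3;          // bytes emitted since LastSafepointEnd()
  while (pc_offset < kPatchSize) {
    int padding = pc_offset;  // mirrors pc_offset() - LastSafepointEnd()
    int nop_bytes = (padding > 9) ? 9 : padding;
    std::printf("emit %d-byte nop\n", nop_bytes);
    pc_offset += nop_bytes;   // nops of 3, 6, 9 bytes -> gap of 21 >= 13
  }
  return 0;
}
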
// Deferred code is the last part of the instruction sequence. Mark
@@ -692,7 +706,7 @@
void LCodeGen::RecordPosition(int position) {
- if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+ if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
}
@@ -1216,6 +1230,20 @@
}
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+
+ // Load map into |result|.
+ __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
+ // Load the map's "bit field 2" into |result|. We only need the first byte.
+ __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(result, Immediate(Map::kElementsKindMask));
+ __ shr(result, Immediate(Map::kElementsKindShift));
+}
+
+
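
// A standalone sketch of the mask-and-shift in DoElementsKind above: the
// elements kind is packed into a few bits of the map's "bit field 2" byte
// and recovered with an and followed by a shift. The shift amount and
// field width below are illustrative assumptions, not V8's actual values.
#include <cstdint>
#include <cstdio>

static const int kElementsKindShift = 3;  // assumed
static const uint8_t kElementsKindMask = 0x7 << kElementsKindShift;

uint8_t ElementsKindFromBitField2(uint8_t bit_field2) {
  // and_(result, Immediate(kElementsKindMask));
  // shr(result, Immediate(kElementsKindShift));
  return (uint8_t)((bit_field2 & kElementsKindMask) >> kElementsKindShift);
}

int main() {
  uint8_t bit_field2 = (4 << kElementsKindShift) | 0x5;  // kind 4, other bits
  std::printf("%u\n", ElementsKindFromBitField2(bit_field2));  // prints: 4
  return 0;
}
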
void LCodeGen::DoValueOf(LValueOf* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -1350,7 +1378,7 @@
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Representation r = instr->hydrogen()->representation();
+ Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0));
__ testl(reg, reg);
@@ -1363,7 +1391,7 @@
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
- HType type = instr->hydrogen()->type();
+ HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
EmitBranch(true_block, false_block, equal);
@@ -1400,7 +1428,7 @@
// The conversion stub doesn't cause garbage collections so it's
// safe to not record a safepoint after the call.
__ bind(&call_stub);
- ToBooleanStub stub;
+ ToBooleanStub stub(rax);
__ Pushad();
__ push(reg);
__ CallStub(&stub);
@@ -1412,44 +1440,17 @@
}
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+void LCodeGen::EmitGoto(int block) {
block = chunk_->LookupDestination(block);
int next_block = GetNextEmittedBlock(current_block_);
if (block != next_block) {
- // Perform stack overflow check if this goto needs it before jumping.
- if (deferred_stack_check != NULL) {
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, chunk_->GetAssemblyLabel(block));
- __ jmp(deferred_stack_check->entry());
- deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
- } else {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
+ __ jmp(chunk_->GetAssemblyLabel(block));
}
}
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- PushSafepointRegistersScope scope(this);
- CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
-}
-
-
void LCodeGen::DoGoto(LGoto* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- private:
- LGoto* instr_;
- };
-
- DeferredStackCheck* deferred = NULL;
- if (instr->include_stack_check()) {
- deferred = new DeferredStackCheck(this, instr);
- }
- EmitGoto(instr->block_id(), deferred);
+ EmitGoto(instr->block_id());
}
@@ -1497,32 +1498,6 @@
}
-void LCodeGen::DoCmpID(LCmpID* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- LOperand* result = instr->result();
-
- Label unordered;
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the unordered case, which produces a false value.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, &unordered, Label::kNear);
- } else {
- EmitCmpI(left, right);
- }
-
- Label done;
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
- __ j(cc, &done, Label::kNear);
-
- __ bind(&unordered);
- __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
@@ -1543,23 +1518,7 @@
}
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- Label different, done;
- __ cmpq(left, right);
- __ j(not_equal, &different, Label::kNear);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&different);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1570,75 +1529,16 @@
}
-void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- Label done;
- __ cmpq(left, right);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ j(not_equal, &done, Label::kNear);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
- __ cmpq(left, right);
+ __ cmpq(left, Immediate(instr->hydrogen()->right()));
EmitBranch(true_block, false_block, equal);
}
-void LCodeGen::DoIsNull(LIsNull* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- // If the expression is known to be a smi, then it's
- // definitely not null. Materialize false.
- // Consider adding other type and representation tests too.
- if (instr->hydrogen()->value()->type().IsSmi()) {
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- return;
- }
-
- __ CompareRoot(reg, Heap::kNullValueRootIndex);
- if (instr->is_strict()) {
- ASSERT(Heap::kTrueValueRootIndex >= 0);
- __ movl(result, Immediate(Heap::kTrueValueRootIndex));
- Label load;
- __ j(equal, &load, Label::kNear);
- __ Set(result, Heap::kFalseValueRootIndex);
- __ bind(&load);
- __ LoadRootIndexed(result, result, 0);
- } else {
- Label false_value, true_value, done;
- __ j(equal, &true_value, Label::kNear);
- __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
- __ j(equal, &true_value, Label::kNear);
- __ JumpIfSmi(reg, &false_value, Label::kNear);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = result;
- __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &true_value, Label::kNear);
- __ bind(&false_value);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
@@ -1694,32 +1594,13 @@
__ movzxbl(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ cmpb(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, is_not_object);
- __ cmpb(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
+ __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
return below_equal;
}
-void LCodeGen::DoIsObject(LIsObject* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label is_false, is_true, done;
-
- Condition true_cond = EmitIsObject(reg, &is_false, &is_true);
- __ j(true_cond, &is_true);
-
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
-
- __ bind(&is_true);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
@@ -1734,22 +1615,6 @@
}
-void LCodeGen::DoIsSmi(LIsSmi* instr) {
- LOperand* input_operand = instr->InputAt(0);
- Register result = ToRegister(instr->result());
- if (input_operand->IsRegister()) {
- Register input = ToRegister(input_operand);
- __ CheckSmiToIndicator(result, input);
- } else {
- Operand input = ToOperand(instr->InputAt(0));
- __ CheckSmiToIndicator(result, input);
- }
- // result is zero if input is a smi, and one otherwise.
- ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1);
- __ LoadRootIndexed(result, result, Heap::kTrueValueRootIndex);
-}
-
-
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1766,25 +1631,6 @@
}
-void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Label false_label, done;
- __ JumpIfSmi(input, &false_label);
- __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
- __ testb(FieldOperand(result, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &false_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(&false_label);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1800,7 +1646,7 @@
}
-static InstanceType TestType(HHasInstanceType* instr) {
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
@@ -1809,7 +1655,7 @@
}
-static Condition BranchCondition(HHasInstanceType* instr) {
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == to) return equal;
@@ -1820,25 +1666,6 @@
}
-void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ testl(input, Immediate(kSmiTagMask));
- Label done, is_false;
- __ j(zero, &is_false);
- __ CmpObjectType(input, TestType(instr->hydrogen()), result);
- __ j(NegateCondition(BranchCondition(instr->hydrogen())),
- &is_false, Label::kNear);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
@@ -1868,21 +1695,6 @@
}
-void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ testl(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- Label done;
- __ j(zero, &done, Label::kNear);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
@@ -1904,26 +1716,27 @@
Register input,
Register temp) {
__ JumpIfSmi(input, is_false);
- __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
__ j(below, is_false);
// Map is now in temp.
// Functions have class 'Function'.
- __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
+ __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ j(equal, is_true);
+ __ j(above_equal, is_true);
} else {
- __ j(equal, is_false);
+ __ j(above_equal, is_false);
}
// Check if the constructor in the map is a function.
__ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last type and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
@@ -1950,29 +1763,6 @@
}
-void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Register temp = ToRegister(instr->TempAt(0));
- Handle<String> class_name = instr->hydrogen()->class_name();
- Label done;
- Label is_true, is_false;
-
- EmitClassOfTest(&is_true, &is_false, class_name, input, temp);
-
- __ j(not_equal, &is_false);
-
- __ bind(&is_true);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done, Label::kNear);
-
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -2016,19 +1806,6 @@
}
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- InstanceofStub stub(InstanceofStub::kNoFlags);
- __ push(ToRegister(instr->InputAt(0)));
- __ push(ToRegister(instr->InputAt(1)));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ testq(rax, rax);
- EmitBranch(true_block, false_block, zero);
-}
-
-
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal: public LDeferredCode {
public:
@@ -2106,7 +1883,6 @@
__ push(ToRegister(instr->InputAt(0)));
__ Push(instr->function());
- Register temp = ToRegister(instr->TempAt(0));
static const int kAdditionalDelta = 10;
int delta =
masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
@@ -2160,25 +1936,6 @@
}
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- // The compare stub expects compare condition and the input operands
- // reversed for GT and LTE.
- Condition condition = TokenToCondition(op, false);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
- __ testq(rax, rax);
- EmitBranch(true_block, false_block, condition);
-}
-
-
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace) {
// Preserve the return value on the stack and rely on the runtime
@@ -2412,7 +2169,7 @@
Register input = ToRegister(instr->InputAt(0));
__ movq(result, FieldOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
- Label done;
+ Label done, ok, fail;
__ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(equal, &done, Label::kNear);
@@ -2422,11 +2179,19 @@
Register temp((result.is(rax)) ? rbx : rax);
__ push(temp);
__ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
- __ movzxbq(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ subq(temp, Immediate(FIRST_EXTERNAL_ARRAY_TYPE));
- __ cmpq(temp, Immediate(kExternalArrayTypeCount));
+ __ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
+ __ and_(temp, Immediate(Map::kElementsKindMask));
+ __ shr(temp, Immediate(Map::kElementsKindShift));
+ __ cmpl(temp, Immediate(JSObject::FAST_ELEMENTS));
+ __ j(equal, &ok, Label::kNear);
+ __ cmpl(temp, Immediate(JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ j(less, &fail, Label::kNear);
+ __ cmpl(temp, Immediate(JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ j(less_equal, &ok, Label::kNear);
+ __ bind(&fail);
+ __ Abort("Check for fast or external elements failed");
+ __ bind(&ok);
__ pop(temp);
- __ Check(below, "Check for fast elements failed.");
__ bind(&done);
}
}
@@ -2479,55 +2244,82 @@
}
-Operand LCodeGen::BuildExternalArrayOperand(LOperand* external_pointer,
- LOperand* key,
- ExternalArrayType array_type) {
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+ LLoadKeyedFastDoubleElement* instr) {
+ XMMRegister result(ToDoubleRegister(instr->result()));
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ sizeof(kHoleNanLower32);
+ Operand hole_check_operand = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ JSObject::FAST_DOUBLE_ELEMENTS,
+ offset);
+ __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ Operand double_load_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(), JSObject::FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ movsd(result, double_load_operand);
+}
+
+
+Operand LCodeGen::BuildFastArrayOperand(
+ LOperand* external_pointer,
+ LOperand* key,
+ JSObject::ElementsKind elements_kind,
+ uint32_t offset) {
Register external_pointer_reg = ToRegister(external_pointer);
- int shift_size = ExternalArrayTypeToShiftSize(array_type);
+ int shift_size = ElementsKindToShiftSize(elements_kind);
if (key->IsConstantOperand()) {
int constant_value = ToInteger32(LConstantOperand::cast(key));
if (constant_value & 0xF0000000) {
Abort("array index constant value too big");
}
- return Operand(external_pointer_reg, constant_value * (1 << shift_size));
+ return Operand(external_pointer_reg,
+ constant_value * (1 << shift_size) + offset);
} else {
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
- return Operand(external_pointer_reg, ToRegister(key), scale_factor, 0);
+ return Operand(external_pointer_reg, ToRegister(key),
+ scale_factor, offset);
}
}
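
// A sketch of the effective address BuildFastArrayOperand computes: for a
// constant key it folds everything into a displacement, otherwise it scales
// the key register. Constant keys with any of the top four bits set abort,
// exactly as in the 0xF0000000 check above. Values here are illustrative.
#include <cstdint>
#include <cstdio>

uint64_t FastArrayAddress(uint64_t base, int32_t constant_key,
                          int shift_size, uint32_t offset) {
  if (constant_key & 0xF0000000) {
    std::printf("abort: array index constant value too big\n");
    return 0;
  }
  return base + (uint64_t)constant_key * (1u << shift_size) + offset;
}

int main() {
  // 8-byte doubles give shift_size 3; 8 stands in for the header offset.
  uint64_t address = FastArrayAddress(0x1000, 5, 3, 8);
  std::printf("%#llx\n", (unsigned long long)address);  // 0x1030
  return 0;
}
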
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
- ExternalArrayType array_type = instr->array_type();
- Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
- instr->key(), array_type));
- if (array_type == kExternalFloatArray) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Operand operand(BuildFastArrayOperand(instr->external_pointer(),
+ instr->key(), elements_kind, 0));
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
__ cvtss2sd(result, result);
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
__ movsd(ToDoubleRegister(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
- switch (array_type) {
- case kExternalByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ movsxbq(result, operand);
break;
- case kExternalUnsignedByteArray:
- case kExternalPixelArray:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
__ movzxbq(result, operand);
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
__ movsxwq(result, operand);
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ movzxwq(result, operand);
break;
- case kExternalIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
__ movsxlq(result, operand);
break;
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ movl(result, operand);
__ testl(result, result);
// TODO(danno): we could be more clever here, perhaps having a special
@@ -2535,8 +2327,12 @@
// happens, and generate code that returns a double rather than int.
DeoptimizeIf(negative, instr->environment());
break;
- case kExternalFloatArray:
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -2611,12 +2407,27 @@
ASSERT(function.is(rdi)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(rax));
- // TODO(1412): This is not correct if the called function is a
- // strict mode function or a native.
- //
- // If the receiver is null or undefined, we have to pass the global object
- // as a receiver.
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
Label global_object, receiver_ok;
+
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ movq(kScratchRegister,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &receiver_ok, Label::kNear);
+
+ // Do not transform the receiver to object for builtins.
+ __ testb(FieldOperand(kScratchRegister,
+ SharedFunctionInfo::kNativeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+ __ j(not_equal, &receiver_ok, Label::kNear);
+
+ // Normal function. Replace undefined or null with global receiver.
__ CompareRoot(receiver, Heap::kNullValueRootIndex);
__ j(equal, &global_object, Label::kNear);
__ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
@@ -2625,7 +2436,7 @@
// The receiver should be a JS object.
Condition is_smi = __ CheckSmi(receiver);
DeoptimizeIf(is_smi, instr->environment());
- __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
DeoptimizeIf(below, instr->environment());
__ jmp(&receiver_ok, Label::kNear);
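
// A sketch (simplified value model, not V8 types) of the receiver fix-up
// the new code above performs before invoking an applied function: strict
// and native callees see the receiver unchanged, normal callees get the
// global receiver in place of null/undefined, and a smi receiver bails out
// via deoptimization just as the CheckSmi / DeoptimizeIf pair does.
#include <cstdio>

enum Value { NULL_VALUE, UNDEFINED_VALUE, SMI_VALUE, SPEC_OBJECT,
             GLOBAL_RECEIVER };

struct Callee { bool strict_mode; bool native; };

Value FixUpReceiver(Value receiver, const Callee& callee, bool* deopt) {
  *deopt = false;
  if (callee.strict_mode || callee.native) return receiver;  // unchanged
  if (receiver == NULL_VALUE || receiver == UNDEFINED_VALUE)
    return GLOBAL_RECEIVER;
  if (receiver == SMI_VALUE) *deopt = true;  // not a spec object
  return receiver;
}

int main() {
  Callee strict = { true, false }, normal = { false, false };
  bool deopt = false;
  std::printf("%d\n", FixUpReceiver(UNDEFINED_VALUE, strict, &deopt));  // 1
  std::printf("%d\n", FixUpReceiver(UNDEFINED_VALUE, normal, &deopt));  // 4
  return 0;
}
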
@@ -2681,6 +2492,12 @@
}
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
__ movq(result, rsi);
@@ -2691,8 +2508,7 @@
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ movq(result,
- Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ movq(result, FieldOperand(result, JSFunction::kContextOffset));
+ Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
@@ -3202,33 +3018,37 @@
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
LStoreKeyedSpecializedArrayElement* instr) {
- ExternalArrayType array_type = instr->array_type();
- Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
- instr->key(), array_type));
- if (array_type == kExternalFloatArray) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Operand operand(BuildFastArrayOperand(instr->external_pointer(),
+ instr->key(), elements_kind, 0));
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister value(ToDoubleRegister(instr->value()));
__ cvtsd2ss(value, value);
__ movss(operand, value);
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
__ movsd(operand, ToDoubleRegister(instr->value()));
} else {
Register value(ToRegister(instr->value()));
- switch (array_type) {
- case kExternalPixelArray:
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ movb(operand, value);
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ movw(operand, value);
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ movl(operand, value);
break;
- case kExternalFloatArray:
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3277,6 +3097,27 @@
}
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+ LStoreKeyedFastDoubleElement* instr) {
+ XMMRegister value = ToDoubleRegister(instr->value());
+ Label have_value;
+
+ __ ucomisd(value, value);
+ __ j(parity_odd, &have_value); // Not NaN; skip canonicalization.
+
+ __ Set(kScratchRegister, BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+ __ movq(value, kScratchRegister);
+
+ __ bind(&have_value);
+ Operand double_store_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(), JSObject::FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ movsd(double_store_operand, value);
+}
+
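
Why the canonicalization above: FixedDoubleArray reserves one NaN bit pattern
to mark holes, so any other NaN payload must be rewritten to the single
canonical non-hole NaN before the store. A self-contained sketch; the literal
bit pattern here is an assumption, the real value comes from
FixedDoubleArray::canonical_not_the_hole_nan_as_double().

    #include <stdint.h>
    #include <string.h>

    static uint64_t CanonicalizeForDoubleArrayStore(double value) {
      const uint64_t kCanonicalNonHoleNaN = 0x7FF8000000000000ULL;  // assumed
      if (value != value) return kCanonicalNonHoleNaN;  // only NaN != NaN
      uint64_t bits;
      memcpy(&bits, &value, sizeof(bits));  // bit-exact view of the double
      return bits;
    }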
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rcx));
@@ -4035,29 +3876,6 @@
}
-void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label true_label;
- Label false_label;
- Label done;
-
- Condition final_branch_condition = EmitTypeofIs(&true_label,
- &false_label,
- input,
- instr->type_literal());
- __ j(final_branch_condition, &true_label);
- __ bind(&false_label);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done, Label::kNear);
-
- __ bind(&true_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
@@ -4124,17 +3942,17 @@
} else if (type_name->Equals(heap()->function_symbol())) {
__ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, FIRST_FUNCTION_CLASS_TYPE, input);
+ __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
final_branch_condition = above_equal;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, true_label);
- __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, input);
+ __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
__ j(below, false_label);
- __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
- __ j(above_equal, false_label);
+ __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, false_label);
// Check for undetectable objects => false.
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
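
A sketch of the range checks above under the new instance-type layout: spec
objects are grouped so that callable ones (typeof "function") start at
FIRST_CALLABLE_SPEC_OBJECT_TYPE and non-callable ones (typeof "object") span
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE through LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
so each classification is one or two unsigned compares. Stand-alone model with
the bounds passed in as parameters:

    static bool TypeofIsObject(unsigned instance_type,
                               unsigned first_noncallable,
                               unsigned last_noncallable) {
      return instance_type >= first_noncallable &&
             instance_type <= last_noncallable;
    }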
@@ -4149,25 +3967,6 @@
}
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
- Register result = ToRegister(instr->result());
- Label true_label;
- Label done;
-
- EmitIsConstructCall(result);
- __ j(equal, &true_label, Label::kNear);
-
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done, Label::kNear);
-
- __ bind(&true_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -4248,15 +4047,48 @@
}
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- // Perform stack overflow check.
- Label done;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &done, Label::kNear);
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ {
+ PushSafepointRegistersScope scope(this);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RegisterLazyDeoptimization(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
+ }
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&done);
+ // The gap code includes restoring the safepoint registers.
+ int pc = masm()->pc_offset();
+ safepoints_.SetPcAfterGap(pc);
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ private:
+ LStackCheck* instr_;
+ };
+
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &done, Label::kNear);
+ StackCheckStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new DeferredStackCheck(this, instr);
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(below, deferred_stack_check->entry());
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ }
}
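
The split above, summarized: function-entry checks call StackCheckStub inline,
while back-edge checks jump to deferred code that calls Runtime::kStackGuard
with safepoint registers saved, keeping the loop fast path to one compare and
branch. A minimal model of the fast-path predicate:

    #include <stdint.h>

    // True when the slow path must be taken; corresponds to the 'below'
    // condition in the emitted CompareRoot(rsp, ...) / j(below, ...) pair.
    static bool StackCheckFails(uintptr_t rsp, uintptr_t stack_limit) {
      return rsp < stack_limit;
    }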
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 7c9f2a0..d7c72b5 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -94,7 +94,7 @@
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LGoto* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
@@ -215,9 +215,11 @@
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
- Operand BuildExternalArrayOperand(LOperand* external_pointer,
- LOperand* key,
- ExternalArrayType array_type);
+ Operand BuildFastArrayOperand(
+ LOperand* external_pointer,
+ LOperand* key,
+ JSObject::ElementsKind elements_kind,
+ uint32_t offset);
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -246,7 +248,7 @@
}
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+ void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 569fa3e..00b906d 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -78,13 +78,13 @@
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); it.HasNext(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Next());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
ASSERT(operand->HasFixedPolicy() ||
operand->IsUsedAtStart());
}
- for (TempIterator it(this); it.HasNext(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Next());
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
}
}
@@ -113,21 +113,18 @@
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- inputs_.PrintOperandsTo(stream);
+ for (int i = 0; i < inputs_.length(); i++) {
+ if (i > 0) stream->Add(" ");
+ inputs_[i]->PrintTo(stream);
+ }
}
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
- results_.PrintOperandsTo(stream);
-}
-
-
-template<typename T, int N>
-void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
- for (int i = 0; i < N; i++) {
+ for (int i = 0; i < results_.length(); i++) {
if (i > 0) stream->Add(" ");
- elems_[i]->PrintTo(stream);
+ results_[i]->PrintTo(stream);
}
}
@@ -270,12 +267,6 @@
}
-void LTypeofIs::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
-}
-
-
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
InputAt(0)->PrintTo(stream);
@@ -345,13 +336,6 @@
}
-void LClassOfTest::PrintDataTo(StringStream* stream) {
- stream->Add("= class_of_test(");
- InputAt(0)->PrintTo(stream);
- stream->Add(", \"%o\")", *hydrogen()->class_name());
-}
-
-
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
@@ -393,8 +377,7 @@
LLabel* label = LLabel::cast(first_instr);
if (last_instr->IsGoto()) {
LGoto* goto_instr = LGoto::cast(last_instr);
- if (!goto_instr->include_stack_check() &&
- label->IsRedundant() &&
+ if (label->IsRedundant() &&
!label->is_loop_header()) {
bool can_eliminate = true;
for (int i = first + 1; i < last && can_eliminate; ++i) {
@@ -445,6 +428,15 @@
}
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
@@ -713,7 +705,9 @@
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- instr->set_environment(CreateEnvironment(hydrogen_env));
+ int argument_index_accumulator = 0;
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator));
return instr;
}
@@ -803,6 +797,11 @@
}
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+ return AssignEnvironment(new LDeoptimize);
+}
+
+
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
return AssignEnvironment(new LDeoptimize);
}
@@ -843,11 +842,11 @@
}
ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->OperandAt(0)->representation().IsInteger32());
- ASSERT(instr->OperandAt(1)->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_value = instr->OperandAt(1);
+ HValue* right_value = instr->right();
LOperand* right = NULL;
int constant_value = 0;
if (right_value->IsConstant()) {
@@ -984,28 +983,20 @@
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsTest() && !instr->IsGoto()) {
- ASSERT(instr->IsControl());
- HTest* test = HTest::cast(current);
- instr->set_hydrogen_value(test->value());
- HBasicBlock* first = test->FirstSuccessor();
- HBasicBlock* second = test->SecondSuccessor();
- ASSERT(first != NULL && second != NULL);
- instr->SetBranchTargets(first->block_id(), second->block_id());
- } else {
- instr->set_hydrogen_value(current);
- }
-
+ instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
}
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+ HEnvironment* hydrogen_env,
+ int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@@ -1015,7 +1006,6 @@
argument_count_,
value_count,
outer);
- int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1024,7 +1014,7 @@
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument(argument_index++);
+ op = new LArgument((*argument_index_accumulator)++);
} else {
op = UseAny(value);
}
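
Why a single accumulator is threaded through the recursion above: environments
for inlined functions nest, but pushed arguments occupy one flat area, so
LArgument indices must keep counting across outer and inner frames instead of
restarting at zero per frame. Stand-alone model with invented types:

    struct Frame {
      Frame* outer;
      int pushed_args;
      int first_arg_index;
    };

    static void NumberArguments(Frame* frame, int* next_index) {
      if (frame == 0) return;
      NumberArguments(frame->outer, next_index);  // outer frame numbered first
      frame->first_arg_index = *next_index;
      *next_index += frame->pushed_args;          // continue, do not restart
    }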
@@ -1036,104 +1026,21 @@
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- LGoto* result = new LGoto(instr->FirstSuccessor()->block_id(),
- instr->include_stack_check());
- return (instr->include_stack_check())
- ? AssignPointerMap(result)
- : result;
+ return new LGoto(instr->FirstSuccessor()->block_id());
}
-LInstruction* LChunkBuilder::DoTest(HTest* instr) {
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
- if (!v->EmitAtUses()) {
- return new LBranch(UseRegisterAtStart(v));
- } else if (v->IsClassOfTest()) {
- HClassOfTest* compare = HClassOfTest::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister());
- } else if (v->IsCompare()) {
- HCompare* compare = HCompare::cast(v);
- Token::Value op = compare->token();
- HValue* left = compare->left();
- HValue* right = compare->right();
- Representation r = compare->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseOrConstantAtStart(right));
- } else if (r.IsDouble()) {
- ASSERT(left->representation().IsDouble());
- ASSERT(right->representation().IsDouble());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else {
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- bool reversed = op == Token::GT || op == Token::LTE;
- LOperand* left_operand = UseFixed(left, reversed ? rax : rdx);
- LOperand* right_operand = UseFixed(right, reversed ? rdx : rax);
- LCmpTAndBranch* result = new LCmpTAndBranch(left_operand, right_operand);
- return MarkAsCall(result, instr);
- }
- } else if (v->IsIsSmi()) {
- HIsSmi* compare = HIsSmi::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsSmiAndBranch(Use(compare->value()));
- } else if (v->IsIsUndetectable()) {
- HIsUndetectable* compare = HIsUndetectable::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
- TempRegister());
- } else if (v->IsHasInstanceType()) {
- HHasInstanceType* compare = HHasInstanceType::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsHasCachedArrayIndex()) {
- HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsIsNull()) {
- HIsNull* compare = HIsNull::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- // We only need a temp register for non-strict compare.
- LOperand* temp = compare->is_strict() ? NULL : TempRegister();
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()), temp);
- } else if (v->IsIsObject()) {
- HIsObject* compare = HIsObject::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsCompareJSObjectEq()) {
- HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
- return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsCompareSymbolEq()) {
- HCompareSymbolEq* compare = HCompareSymbolEq::cast(v);
- return new LCmpSymbolEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsInstanceOf()) {
- HInstanceOf* instance_of = HInstanceOf::cast(v);
- LInstanceOfAndBranch* result =
- new LInstanceOfAndBranch(UseFixed(instance_of->left(), rax),
- UseFixed(instance_of->right(), rdx));
- return MarkAsCall(result, instr);
- } else if (v->IsTypeofIs()) {
- HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
- } else if (v->IsIsConstructCall()) {
- return new LIsConstructCallAndBranch(TempRegister());
- } else if (v->IsConstant()) {
+ if (v->EmitAtUses()) {
+ ASSERT(v->IsConstant());
+ ASSERT(!v->representation().IsDouble());
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
- } else {
- Abort("Undefined compare before branch");
- return NULL;
}
+ return new LBranch(UseRegisterAtStart(v));
}
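
The deleted dispatch is no longer needed because fused compare-and-branch
forms are now built by the dedicated Do*AndBranch handlers; the only value
still emitted at its use here is a constant, and a constant condition folds
the branch into a plain goto. Stand-alone model:

    static int BranchTarget(bool constant_condition,
                            int true_block_id,
                            int false_block_id) {
      return constant_condition ? true_block_id : false_block_id;
    }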
@@ -1165,7 +1072,7 @@
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new LInstanceOfKnownGlobal(UseFixed(instr->value(), rax),
+ new LInstanceOfKnownGlobal(UseFixed(instr->left(), rax),
FixedTemp(rdi));
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1191,6 +1098,11 @@
}
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
}
@@ -1481,88 +1393,83 @@
}
-LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
Token::Value op = instr->token();
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ bool reversed = (op == Token::GT || op == Token::LTE);
+ LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
+ LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
+ LCmpT* result = new LCmpT(left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+ HCompareIDAndBranch* instr) {
Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else if (r.IsDouble()) {
+ return new LCmpIDAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
- LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
- LCmpT* result = new LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
+ return new LCmpIDAndBranch(left, right);
}
}
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
- HCompareJSObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
- return DefineAsRegister(result);
+ return new LCmpObjectEqAndBranch(left, right);
}
-LInstruction* LChunkBuilder::DoCompareSymbolEq(
- HCompareSymbolEq* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LCmpSymbolEq* result = new LCmpSymbolEq(left, right);
- return DefineAsRegister(result);
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+ HCompareConstantEqAndBranch* instr) {
+ return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsNull(value));
+ LOperand* temp = instr->is_strict() ? NULL : TempRegister();
+ return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
}
-LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
-
- return DefineAsRegister(new LIsObject(value));
+ return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseAtStart(instr->value());
-
- return DefineAsRegister(new LIsSmi(value));
+ return new LIsSmiAndBranch(Use(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsUndetectable(value));
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+ TempRegister());
}
-LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LHasInstanceType(value));
+ return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
}
@@ -1575,17 +1482,17 @@
}
-LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
- HHasCachedArrayIndex* instr) {
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
- return DefineAsRegister(new LHasCachedArrayIndex(value));
+ return new LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
- Abort("Unimplemented: %s", "DoClassOfTest");
- return NULL;
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
+ return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ TempRegister());
}
@@ -1608,6 +1515,12 @@
}
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+ LOperand* object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LElementsKind(object));
+}
+
+
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new LValueOf(object);
@@ -1634,6 +1547,11 @@
}
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
// All HForceRepresentation instructions should be eliminated in the
// representation change phase of Hydrogen.
@@ -1655,8 +1573,8 @@
LOperand* value = UseRegister(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
if (needs_check) {
- LOperand* xmm_temp = instr->CanTruncateToInt32() ? NULL
- : FixedTemp(xmm1);
+ bool truncating = instr->CanTruncateToInt32();
+ LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
LTaggedToI* res = new LTaggedToI(value, xmm_temp);
return AssignEnvironment(DefineSameAsFirst(res));
} else {
@@ -1757,6 +1675,32 @@
}
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ return AssignEnvironment(DefineAsRegister(new LDoubleToI(reg)));
+ } else if (input_rep.IsInteger32()) {
+ // Canonicalization should already have removed the hydrogen instruction in
+ // this case, since it is a noop.
+ UNREACHABLE();
+ return NULL;
+ } else {
+ ASSERT(input_rep.IsTagged());
+ LOperand* reg = UseRegister(value);
+ // Register allocator doesn't (yet) support allocation of double
+ // temps. Reserve xmm1 explicitly.
+ LOperand* xmm_temp =
+ CpuFeatures::IsSupported(SSE3)
+ ? NULL
+ : FixedTemp(xmm1);
+ return AssignEnvironment(
+ DefineSameAsFirst(new LTaggedToI(reg, xmm_temp)));
+ }
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
return new LReturn(UseFixed(instr->value(), rax));
}
@@ -1891,15 +1835,29 @@
}
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+ HLoadKeyedFastDoubleElement* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedFastDoubleElement* result =
+ new LLoadKeyedFastDoubleElement(elements, key);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
- ExternalArrayType array_type = instr->array_type();
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
Representation representation(instr->representation());
ASSERT(
- (representation.IsInteger32() && (array_type != kExternalFloatArray &&
- array_type != kExternalDoubleArray)) ||
- (representation.IsDouble() && (array_type == kExternalFloatArray ||
- array_type == kExternalDoubleArray)));
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
@@ -1908,7 +1866,7 @@
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
- return (array_type == kExternalUnsignedIntArray) ?
+ return (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
AssignEnvironment(load_instr) : load_instr;
}
@@ -1941,21 +1899,38 @@
}
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+ HStoreKeyedFastDoubleElement* instr) {
+ ASSERT(instr->value()->representation().IsDouble());
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* val = UseTempRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+ return new LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
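
Note the UseTempRegister(instr->value()) above: the codegen for this
instruction may overwrite the value register with the canonical NaN before the
store, so the allocator must not assume the input value survives the
instruction.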
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
Representation representation(instr->value()->representation());
- ExternalArrayType array_type = instr->array_type();
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
ASSERT(
- (representation.IsInteger32() && (array_type != kExternalFloatArray &&
- array_type != kExternalDoubleArray)) ||
- (representation.IsDouble() && (array_type == kExternalFloatArray ||
- array_type == kExternalDoubleArray)));
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
- bool val_is_temp_register = array_type == kExternalPixelArray ||
- array_type == kExternalFloatArray;
+ bool val_is_temp_register =
+ elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS;
LOperand* val = val_is_temp_register
? UseTempRegister(instr->value())
: UseRegister(instr->value());
@@ -2126,13 +2101,14 @@
}
-LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
- return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
- return DefineAsRegister(new LIsConstructCall);
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new LIsConstructCallAndBranch(TempRegister());
}
@@ -2168,7 +2144,12 @@
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- return MarkAsCall(new LStackCheck, instr);
+ if (instr->is_function_entry()) {
+ return MarkAsCall(new LStackCheck, instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ return AssignEnvironment(AssignPointerMap(new LStackCheck));
+ }
}
@@ -2177,7 +2158,6 @@
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->function(),
- HEnvironment::LITHIUM,
undefined,
instr->call_kind());
current_block_->UpdateEnvironment(inner);
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 1493836..18a036f 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -32,6 +32,7 @@
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
+#include "utils.h"
namespace v8 {
namespace internal {
@@ -76,17 +77,12 @@
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
- V(ClassOfTest) \
V(ClassOfTestAndBranch) \
- V(CmpID) \
+ V(CmpConstantEqAndBranch) \
V(CmpIDAndBranch) \
- V(CmpJSObjectEq) \
- V(CmpJSObjectEqAndBranch) \
+ V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
- V(CmpSymbolEq) \
- V(CmpSymbolEqAndBranch) \
V(CmpT) \
- V(CmpTAndBranch) \
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
@@ -95,6 +91,7 @@
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(ElementsKind) \
V(ExternalArrayLength) \
V(FixedArrayLength) \
V(FunctionLiteral) \
@@ -102,26 +99,18 @@
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
- V(HasCachedArrayIndex) \
V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
- V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCall) \
V(IsConstructCallAndBranch) \
- V(IsNull) \
V(IsNullAndBranch) \
- V(IsObject) \
V(IsObjectAndBranch) \
- V(IsSmi) \
V(IsSmiAndBranch) \
- V(IsUndetectable) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
@@ -132,6 +121,7 @@
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
+ V(LoadKeyedFastDoubleElement) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadKeyedSpecializedArrayElement) \
@@ -158,6 +148,7 @@
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
+ V(StoreKeyedFastDoubleElement) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreKeyedSpecializedArrayElement) \
@@ -169,10 +160,10 @@
V(StringLength) \
V(SubI) \
V(TaggedToI) \
+ V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(Typeof) \
- V(TypeofIs) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
@@ -232,7 +223,6 @@
virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
- virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
@@ -287,37 +277,6 @@
};
-template<typename ElementType, int NumElements>
-class OperandContainer {
- public:
- OperandContainer() {
- for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
- }
- int length() { return NumElements; }
- ElementType& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
- }
- void PrintOperandsTo(StringStream* stream);
-
- private:
- ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class OperandContainer<ElementType, 0> {
- public:
- int length() { return 0; }
- void PrintOperandsTo(StringStream* stream) { }
- ElementType& operator[](int i) {
- UNREACHABLE();
- static ElementType t = 0;
- return t;
- }
-};
-
-
// R = number of result operands (0 or 1).
// I = number of input operands.
// T = number of temporary operands.
@@ -340,9 +299,9 @@
virtual void PrintOutputOperandTo(StringStream* stream);
protected:
- OperandContainer<LOperand*, R> results_;
- OperandContainer<LOperand*, I> inputs_;
- OperandContainer<LOperand*, T> temps_;
+ EmbeddedContainer<LOperand*, R> results_;
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
};
@@ -402,19 +361,16 @@
class LGoto: public LTemplateInstruction<0, 0, 0> {
public:
- LGoto(int block_id, bool include_stack_check = false)
- : block_id_(block_id), include_stack_check_(include_stack_check) { }
+ explicit LGoto(int block_id) : block_id_(block_id) { }
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int block_id() const { return block_id_; }
- bool include_stack_check() const { return include_stack_check_; }
private:
int block_id_;
- bool include_stack_check_;
};
@@ -490,16 +446,15 @@
public:
virtual bool IsControl() const { return true; }
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
- void SetBranchTargets(int true_block_id, int false_block_id) {
- true_block_id_ = true_block_id;
- false_block_id_ = false_block_id;
- }
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+ int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+ int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
private:
- int true_block_id_;
- int false_block_id_;
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
};
@@ -598,23 +553,6 @@
};
-class LCmpID: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpID(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
- }
-};
-
-
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -623,7 +561,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@@ -648,61 +586,27 @@
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+ "cmp-object-eq-and-branch")
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
public:
- LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ explicit LCmpConstantEqAndBranch(LOperand* left) {
inputs_[0] = left;
- inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
- "cmp-jsobject-eq-and-branch")
-};
-
-
-class LCmpSymbolEq: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpSymbolEq(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEq, "cmp-symbol-eq")
-};
-
-
-class LCmpSymbolEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpSymbolEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEqAndBranch, "cmp-symbol-eq-and-branch")
-};
-
-
-class LIsNull: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsNull(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
- bool is_strict() const { return hydrogen()->is_strict(); }
+ DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+ "cmp-constant-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
};
@@ -714,7 +618,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
+ DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
bool is_strict() const { return hydrogen()->is_strict(); }
@@ -722,16 +626,6 @@
};
-class LIsObject: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsObject(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-};
-
-
class LIsObjectAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsObjectAndBranch(LOperand* value) {
@@ -739,22 +633,12 @@
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LIsSmi: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
- DECLARE_HYDROGEN_ACCESSOR(IsSmi)
-};
-
-
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@@ -762,22 +646,12 @@
}
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsUndetectable(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
-};
-
-
class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
@@ -787,22 +661,12 @@
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-};
-
-
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -811,7 +675,7 @@
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -828,17 +692,6 @@
};
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
-};
-
-
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -847,19 +700,7 @@
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
- public:
- LClassOfTest(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -874,7 +715,7 @@
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -888,21 +729,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LCmpTAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpTAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Token::Value op() const { return hydrogen()->token(); }
};
@@ -933,17 +760,6 @@
};
-class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
- public:
- LInstanceOfAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-};
-
-
class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
@@ -1059,7 +875,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Value)
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -1119,6 +935,17 @@
};
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LElementsKind(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
+};
+
+
class LValueOf: public LTemplateInstruction<1, 1, 0> {
public:
explicit LValueOf(LOperand* value) {
@@ -1309,6 +1136,22 @@
};
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+ "load-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
@@ -1323,8 +1166,8 @@
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
};
@@ -1441,6 +1284,11 @@
};
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+};
+
+
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
@@ -1634,7 +1482,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1649,7 +1497,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1755,6 +1603,28 @@
};
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedFastDoubleElement(LOperand* elements,
+ LOperand* key,
+ LOperand* val) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+ "store-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+};
+
+
class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
@@ -1772,8 +1642,8 @@
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
};
@@ -2013,21 +1883,6 @@
};
-class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeofIs(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
@@ -2035,7 +1890,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
@@ -2043,13 +1898,6 @@
};
-class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
- DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
-};
-
-
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
@@ -2058,6 +1906,7 @@
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
"is-construct-call-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsConstructCallAndBranch)
};
@@ -2101,6 +1950,12 @@
class LStackCheck: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
};
@@ -2298,7 +2153,8 @@
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+ int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 2d28579..2b15553 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -385,6 +385,9 @@
Heap::kFixedArrayMapRootIndex);
j(equal, &ok, Label::kNear);
CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
+ Heap::kFixedDoubleArrayMapRootIndex);
+ j(equal, &ok, Label::kNear);
+ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedCOWArrayMapRootIndex);
j(equal, &ok, Label::kNear);
Abort("JSObject with fast elements map has slow elements");
@@ -2558,6 +2561,16 @@
}
+void MacroAssembler::CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Immediate(Map::kMaximumBitField2FastElementValue));
+ j(above, fail, distance);
+}
+
+
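
The single compare above works because of the encoding the STATIC_ASSERT pins
down: JSObject::FAST_ELEMENTS is 0 and the elements kind lives in the map's
bit_field2, so every fast-elements map has a bit_field2 value no greater than
Map::kMaximumBitField2FastElementValue. Stand-alone model:

    #include <stdint.h>

    // 'above' (unsigned greater) jumps to the fail label in the real code.
    static bool HasFastElements(uint8_t bit_field2, uint8_t max_fast_value) {
      return bit_field2 <= max_fast_value;
    }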
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
@@ -3194,6 +3207,109 @@
}
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register r0,
+ Register r1,
+ Register r2,
+ Register result) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // Scratch registers:
+ //
+ // r0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // r1 - used to hold the capacity mask of the dictionary.
+ //
+ // r2 - used for the index into the dictionary.
+ //
+ // result - holds the result on exit if the load succeeded.
+ // Allowed to be the same as 'key' or 'elements'.
+ // Unchanged on bailout so 'key' or 'elements' can be used
+ // in further computation.
+
+ Label done;
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ movl(r1, r0);
+ notl(r0);
+ shll(r1, Immediate(15));
+ addl(r0, r1);
+ // hash = hash ^ (hash >> 12);
+ movl(r1, r0);
+ shrl(r1, Immediate(12));
+ xorl(r0, r1);
+ // hash = hash + (hash << 2);
+ leal(r0, Operand(r0, r0, times_4, 0));
+ // hash = hash ^ (hash >> 4);
+ movl(r1, r0);
+ shrl(r1, Immediate(4));
+ xorl(r0, r1);
+ // hash = hash * 2057;
+ imull(r0, r0, Immediate(2057));
+ // hash = hash ^ (hash >> 16);
+ movl(r1, r0);
+ shrl(r1, Immediate(16));
+ xorl(r0, r1);
+
+ // Compute capacity mask.
+ SmiToInteger32(r1,
+ FieldOperand(elements, NumberDictionary::kCapacityOffset));
+ decl(r1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ const int kProbes = 4;
+ for (int i = 0; i < kProbes; i++) {
+ // Use r2 for index calculations and keep the hash intact in r0.
+ movq(r2, r0);
+ // Compute the masked index: (hash + GetProbeOffset(i)) & mask.
+ if (i > 0) {
+ addl(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
+ }
+ and_(r2, r1);
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NumberDictionary::kEntrySize == 3);
+ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
+
+ // Check if the key matches.
+ cmpq(key, FieldOperand(elements,
+ r2,
+ times_pointer_size,
+ NumberDictionary::kElementsStartOffset));
+ if (i != (kProbes - 1)) {
+ j(equal, &done);
+ } else {
+ j(not_equal, miss);
+ }
+ }
+
+ bind(&done);
+ // Check that the value is a normal property.
+ const int kDetailsOffset =
+ NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ ASSERT_EQ(NORMAL, 0);
+ Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
+ Smi::FromInt(PropertyDetails::TypeField::mask()));
+ j(not_zero, miss);
+
+ // Get the value at the masked, scaled index.
+ const int kValueOffset =
+ NumberDictionary::kElementsStartOffset + kPointerSize;
+ movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+}
+
+
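
The shift/xor sequence above must stay in sync with ComputeIntegerHash in
utils.h; a self-contained C++ version of the same hash, with each step labeled
with its instruction group:

    #include <stdint.h>

    static uint32_t ComputeIntegerHash(uint32_t key) {
      uint32_t hash = key;
      hash = ~hash + (hash << 15);  // notl / shll(15) / addl
      hash = hash ^ (hash >> 12);   // movl / shrl(12) / xorl
      hash = hash + (hash << 2);    // leal(r0, Operand(r0, r0, times_4, 0))
      hash = hash ^ (hash >> 4);    // movl / shrl(4) / xorl
      hash = hash * 2057;           // imull(2057)
      hash = hash ^ (hash >> 16);   // movl / shrl(16) / xorl
      return hash;
    }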
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
@@ -3607,15 +3723,10 @@
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
- movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
- // The context may be an intermediate context, not a function context.
- movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
} else {
// Slot is in the current function context. Move it into the
// destination register in case we store into it (the write barrier
@@ -3623,14 +3734,14 @@
movq(dst, rsi);
}
- // We should not have found a 'with' context by walking the context chain
- // (i.e., the static scope chain and runtime context chain do not agree).
- // A variable occurring in such a scope should have slot type LOOKUP and
- // not CONTEXT.
+ // We should not have found a with context by walking the context
+ // chain (i.e., the static scope chain and runtime context chain do
+ // not agree). A variable occurring in such a scope should have
+ // slot type LOOKUP and not CONTEXT.
if (emit_debug_code()) {
- cmpq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- Check(equal, "Yo dawg, I heard you liked function contexts "
- "so I put function contexts in all your contexts");
+ CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
+ Heap::kWithContextMapRootIndex);
+ Check(not_equal, "Variable resolved to with context.");
}
}
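
The rewrite above reflects the new context layout: each context links directly
to its lexical parent through PREVIOUS_INDEX, so the walk no longer detours
through the closure (CLOSURE_INDEX plus JSFunction::kContextOffset) or
re-reads FCONTEXT_INDEX at the end. Stand-alone model:

    struct Context {
      Context* previous;  // slot PREVIOUS_INDEX in the real layout
    };

    static Context* WalkContextChain(Context* current, int chain_length) {
      for (int i = 0; i < chain_length; i++) current = current->previous;
      return current;
    }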
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 16f6d8d..47ce01b 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -754,6 +754,12 @@
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
// Check if the map of an object is equal to a specified map and
// branch to label if not. Skip the smi check if not required
// (object is known to be a heap object)
@@ -840,6 +846,15 @@
Label* miss);
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register r0,
+ Register r1,
+ Register r2,
+ Register result);
+
+
// ---------------------------------------------------------------------------
// Allocation support
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 2ea17f0..395466e 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -661,7 +661,6 @@
}
__ movq(rbx, ExternalReference::re_word_character_map());
ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- ExternalReference word_map = ExternalReference::re_word_character_map();
__ testb(Operand(rbx, current_character(), times_1, 0),
current_character());
BranchOrBacktrack(zero, on_no_match);
@@ -676,7 +675,6 @@
}
__ movq(rbx, ExternalReference::re_word_character_map());
ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- ExternalReference word_map = ExternalReference::re_word_character_map();
__ testb(Operand(rbx, current_character(), times_1, 0),
current_character());
BranchOrBacktrack(not_zero, on_no_match);
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index 02b510f..7102225 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -28,6 +28,12 @@
#ifndef V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
#define V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
+#include "x64/assembler-x64.h"
+#include "x64/assembler-x64-inl.h"
+#include "macro-assembler.h"
+#include "code.h"
+#include "x64/macro-assembler-x64.h"
+
namespace v8 {
namespace internal {
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index dae4a55..b8e5f22 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -105,7 +105,7 @@
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
__ j(below, miss_label);
// Load properties array.
@@ -1089,9 +1089,8 @@
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
+ CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
__ Move(rax, Handle<Object>(value));
@@ -2530,18 +2529,18 @@
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreFastElement(
- Map* receiver_map) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
+ Code* stub;
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
MaybeObject* maybe_stub =
- KeyedStoreFastElementStub(is_js_array).TryGetCode();
- Code* stub;
+ KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(rdx,
Handle<Map>(receiver_map),
@@ -2990,14 +2989,15 @@
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFastElement(Map* receiver_map) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- MaybeObject* maybe_stub = KeyedLoadFastElementStub().TryGetCode();
Code* stub;
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(rdx,
Handle<Map>(receiver_map),
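The CompileStoreElement/CompileLoadElement changes above replace the fast-element-only stubs with stubs parameterized by the receiver map's elements kind. A minimal sketch of the new dispatch shape; the stub names returned here are illustrative labels, not the generated code's identifiers:

  enum ElementsKind {
    FAST_ELEMENTS,
    FAST_DOUBLE_ELEMENTS,
    DICTIONARY_ELEMENTS,
    EXTERNAL_INT_ELEMENTS
  };

  // One specialized stub per elements kind, chosen from the map when the
  // IC is compiled, instead of a single stub that only handled fast
  // elements.
  const char* SelectKeyedLoadStub(ElementsKind kind) {
    switch (kind) {
      case FAST_ELEMENTS:         return "KeyedLoadFastElement";
      case FAST_DOUBLE_ELEMENTS:  return "KeyedLoadFastDoubleElement";
      case DICTIONARY_ELEMENTS:   return "KeyedLoadDictionaryElement";
      case EXTERNAL_INT_ELEMENTS: return "KeyedLoadExternalArray";
    }
    return "KeyedLoadGeneric";
  }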
@@ -3176,60 +3176,58 @@
}
-MaybeObject* ExternalArrayLoadStubCompiler::CompileLoad(
- JSObject*receiver, ExternalArrayType array_type) {
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- MaybeObject* maybe_stub =
- KeyedLoadExternalArrayStub(array_type).TryGetCode();
- Code* stub;
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(rdx,
- Handle<Map>(receiver->map()),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Label slow, miss_force_generic;
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
- // Return the generated code.
- return GetCode();
-}
+ __ JumpIfNotSmi(rax, &miss_force_generic);
+ __ SmiToInteger32(rbx, rax);
+ __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-MaybeObject* ExternalArrayStoreStubCompiler::CompileStore(
- JSObject* receiver, ExternalArrayType array_type) {
+ // Check whether the elements array is a number dictionary.
+ // rdx: receiver
+ // rax: key
+ // rbx: key as untagged int32
+ // rcx: elements
+ __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax);
+ __ ret(0);
+
+ __ bind(&slow);
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
+ // -- rax : key
// -- rdx : receiver
- // -- rsp[0] : return address
+ // -- rsp[0] : return address
// -----------------------------------
- MaybeObject* maybe_stub =
- KeyedStoreExternalArrayStub(array_type).TryGetCode();
- Code* stub;
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(rdx,
- Handle<Map>(receiver->map()),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ jmp(slow_ic, RelocInfo::CODE_TARGET);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- return GetCode();
+ __ bind(&miss_force_generic);
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ jmp(miss_ic, RelocInfo::CODE_TARGET);
}
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm,
- ExternalArrayType array_type) {
+ JSObject::ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
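The LoadFromNumberDictionary call in the new GenerateLoadDictionaryElement above emits an open-addressed hash lookup. A standalone model of that probing loop; the hash mixing constants and the probe count are assumptions in the style V8 uses for number dictionaries:

  #include <cstdint>
  #include <vector>

  struct Entry { uint32_t key; int value; bool used; };

  // Integer hash of the untagged key (exact constants are an assumption).
  uint32_t IntegerHash(uint32_t key) {
    key = ~key + (key << 15);
    key ^= key >> 12;
    key += key << 2;
    key ^= key >> 4;
    key *= 2057;
    key ^= key >> 16;
    return key;
  }

  // Open-addressed lookup over a power-of-two table with triangular probe
  // offsets (1, 3, 6, 10, ...). The generated code unrolls a small fixed
  // number of probes and jumps to the slow/miss label on exhaustion.
  bool Lookup(const std::vector<Entry>& table, uint32_t key, int* value) {
    const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
    uint32_t index = IntegerHash(key) & mask;
    for (uint32_t n = 1; n <= 4; n++) {  // probe count is an assumption
      const Entry& e = table[index];
      if (e.used && e.key == key) { *value = e.value; return true; }
      index = (index + n) & mask;
    }
    return false;  // corresponds to falling through to the slow label
  }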
@@ -3256,30 +3254,30 @@
// rbx: elements array
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
- case kExternalPixelArray:
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
__ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
- case kExternalIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
__ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
break;
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ movl(rcx, Operand(rbx, rcx, times_4, 0));
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
__ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
break;
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
__ movsd(xmm0, Operand(rbx, rcx, times_8, 0));
break;
default:
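The renamed switch above pins down, per external elements kind, the element width and whether the load sign- or zero-extends. The same mapping as plain C++; kind names are abbreviated here, and the two floating kinds land in an SSE register in the generated code:

  #include <cstdint>

  enum Kind { I8, U8, PIXEL, I16, U16, I32, U32, F32, F64 };

  // Returns the element as a double, mirroring the eventual JS number; the
  // asm equivalents are movsxbq/movzxbq/movsxwq/movzxwq/movsxlq/movl for
  // the integer kinds and cvtss2sd/movsd for the floating kinds.
  double LoadExternalElement(const void* base, int index, Kind kind) {
    switch (kind) {
      case I8:    return static_cast<const int8_t*>(base)[index];
      case U8:
      case PIXEL: return static_cast<const uint8_t*>(base)[index];
      case I16:   return static_cast<const int16_t*>(base)[index];
      case U16:   return static_cast<const uint16_t*>(base)[index];
      case I32:   return static_cast<const int32_t*>(base)[index];
      case U32:   return static_cast<const uint32_t*>(base)[index];
      case F32:   return static_cast<const float*>(base)[index];
      case F64:   return static_cast<const double*>(base)[index];
    }
    return 0;
  }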
@@ -3295,7 +3293,7 @@
// xmm0: value as double.
ASSERT(kSmiValueSize == 32);
- if (array_type == kExternalUnsignedIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) {
// For the UnsignedInt array type, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
@@ -3319,8 +3317,8 @@
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
__ ret(0);
- } else if (array_type == kExternalFloatArray ||
- array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(rcx, rbx, &slow);
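The unsigned-int branch above exists because, even with a 32-bit smi payload (the ASSERT earlier in this hunk), a 32-bit unsigned element can exceed the largest positive smi and must be boxed in a HeapNumber. The predicate being tested is simply:

  #include <cstdint>

  // True when a value from an unsigned-int external array is representable
  // as a smi, i.e. fits in a signed 32-bit payload.
  bool FitsInSmi(uint32_t value) {
    return value <= 0x7FFFFFFFu;  // top bit must be clear
  }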
@@ -3363,7 +3361,7 @@
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
- ExternalArrayType array_type) {
+ JSObject::ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -3393,7 +3391,7 @@
// rbx: elements array
// rdi: untagged key
Label check_heap_number;
- if (array_type == kExternalPixelArray) {
+ if (elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) {
// Float to pixel conversion is only implemented in the runtime for now.
__ JumpIfNotSmi(rax, &slow);
} else {
@@ -3403,8 +3401,8 @@
__ SmiToInteger32(rdx, rax);
__ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
// rbx: base pointer of external storage
- switch (array_type) {
- case kExternalPixelArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
{ // Clamp the value to [0..255].
Label done;
__ testl(rdx, Immediate(0xFFFFFF00));
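The pixel case clamps the incoming smi to [0..255], and the testl against 0xFFFFFF00 is a one-instruction range check: a value already in range has no bits set above the low byte. The semantics as standalone C++:

  #include <cstdint>

  // In-range values pass through; out-of-range values clamp to 0 when
  // negative and to 255 otherwise, matching the generated fixup code.
  uint8_t ClampToPixel(int32_t value) {
    if ((value & 0xFFFFFF00) == 0) return static_cast<uint8_t>(value);
    return value < 0 ? 0 : 255;
  }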
@@ -3415,36 +3413,39 @@
}
__ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ movw(Operand(rbx, rdi, times_2, 0), rdx);
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ movl(Operand(rbx, rdi, times_4, 0), rdx);
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
// Need to perform int-to-float conversion.
__ cvtlsi2ss(xmm0, rdx);
__ movss(Operand(rbx, rdi, times_4, 0), xmm0);
break;
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
// Need to perform int-to-float conversion.
__ cvtlsi2sd(xmm0, rdx);
__ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
break;
- default:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
__ ret(0);
// TODO(danno): handle heap number -> pixel array conversion
- if (array_type != kExternalPixelArray) {
+ if (elements_kind != JSObject::EXTERNAL_PIXEL_ELEMENTS) {
__ bind(&check_heap_number);
// rax: value
// rcx: key (a smi)
@@ -3463,11 +3464,11 @@
// rdi: untagged index
// rbx: base pointer of external storage
// top of FPU stack: value
- if (array_type == kExternalFloatArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
__ cvtsd2ss(xmm0, xmm0);
__ movss(Operand(rbx, rdi, times_4, 0), xmm0);
__ ret(0);
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
__ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
__ ret(0);
} else {
@@ -3480,26 +3481,31 @@
// rdx: value (converted to an untagged integer)
// rdi: untagged index
// rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ cvttsd2si(rdx, xmm0);
__ movb(Operand(rbx, rdi, times_1, 0), rdx);
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ cvttsd2si(rdx, xmm0);
__ movw(Operand(rbx, rdi, times_2, 0), rdx);
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray: {
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
// Convert to int64, so that NaN and infinities become
// 0x8000000000000000, which is zero mod 2^32.
__ cvttsd2siq(rdx, xmm0);
__ movl(Operand(rbx, rdi, times_4, 0), rdx);
break;
- }
- default:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
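The int/unsigned-int store path above converts through int64 precisely so NaN and infinities need no special-casing: cvttsd2siq yields 0x8000000000000000 for any unrepresentable input, and the low 32 bits of that are zero. Modeled in C++, where the explicit guard stands in for the hardware's "integer indefinite" result, since the bare cast would be undefined behavior:

  #include <cmath>
  #include <cstdint>

  uint32_t DoubleToUint32ViaInt64(double value) {
    int64_t wide;
    if (std::isnan(value) || value >= 9223372036854775808.0 ||
        value < -9223372036854775808.0) {
      wide = INT64_MIN;  // what cvttsd2siq yields for NaN/out-of-range
    } else {
      wide = static_cast<int64_t>(value);
    }
    return static_cast<uint32_t>(wide);  // low 32 bits: 0 for NaN/Inf
  }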
@@ -3577,6 +3583,57 @@
}
+void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss_force_generic, slow_allocate_heapnumber;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rax, &miss_force_generic);
+
+ // Get the elements array.
+ __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ AssertFastElements(rcx);
+
+ // Check that the key is within bounds.
+ __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ j(above_equal, &miss_force_generic);
+
+ // Check for the hole
+ __ SmiToInteger32(kScratchRegister, rax);
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ cmpl(FieldOperand(rcx, kScratchRegister, times_8, offset),
+ Immediate(kHoleNanUpper32));
+ __ j(equal, &miss_force_generic);
+
+ // Always allocate a heap number for the result.
+ __ movsd(xmm0, FieldOperand(rcx, kScratchRegister, times_8,
+ FixedDoubleArray::kHeaderSize));
+ __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
+ // Set the value.
+ __ movq(rax, rcx);
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+ __ ret(0);
+
+ __ bind(&slow_allocate_heapnumber);
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ jmp(slow_ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&miss_force_generic);
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
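GenerateLoadFastDoubleElement above spots holes without loading the full 64-bit element: in a fast double array the hole is a NaN with a distinguished upper word, so the cmpl at offset kHeaderSize + sizeof(kHoleNanLower32) compares only the upper 32 bits. A sketch, with the exact bit pattern as an assumption:

  #include <cstdint>
  #include <cstring>

  const uint32_t kHoleNanUpper32Bits = 0x7FF7FFFF;  // value is an assumption

  // The hole is one specific NaN; since the store stub canonicalizes every
  // user-visible NaN to a different pattern, the upper word alone
  // identifies it.
  bool IsTheHole(double element) {
    uint64_t bits;
    std::memcpy(&bits, &element, sizeof bits);
    return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32Bits;
  }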
@@ -3627,6 +3684,90 @@
}
+void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
+ MacroAssembler* masm,
+ bool is_js_array) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss_force_generic, smi_value, is_nan, maybe_nan;
+ Label have_double_value, not_nan;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller not to be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rcx, &miss_force_generic);
+
+ // Get the elements array.
+ __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ AssertFastElements(rdi);
+
+ // Check that the key is within bounds.
+ if (is_js_array) {
+ __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+ } else {
+ __ SmiCompare(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
+ }
+ __ j(above_equal, &miss_force_generic);
+
+ // Handle smi values specially
+ __ JumpIfSmi(rax, &smi_value, Label::kNear);
+
+ __ CheckMap(rax,
+ masm->isolate()->factory()->heap_number_map(),
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ // Double value, canonicalize NaN.
+ uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+ __ cmpl(FieldOperand(rax, offset),
+ Immediate(kNaNOrInfinityLowerBoundUpper32));
+ __ j(greater_equal, &maybe_nan, Label::kNear);
+
+ __ bind(&not_nan);
+ __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ bind(&have_double_value);
+ __ SmiToInteger32(rcx, rcx);
+ __ movsd(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize),
+ xmm0);
+ __ ret(0);
+
+ __ bind(&maybe_nan);
+ // Could be NaN or Infinity. If the fraction is not zero, it's a NaN;
+ // otherwise it's an Infinity, and the non-NaN code path applies.
+ __ j(greater, &is_nan, Label::kNear);
+ __ cmpl(FieldOperand(rax, HeapNumber::kValueOffset), Immediate(0));
+ __ j(zero, &not_nan);
+ __ bind(&is_nan);
+ // Convert all NaNs to the same canonical NaN value when they are stored in
+ // the double array.
+ __ Set(kScratchRegister, BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+ __ movq(xmm0, kScratchRegister);
+ __ jmp(&have_double_value, Label::kNear);
+
+ __ bind(&smi_value);
+ // Value is a smi. Convert to a double and store.
+ __ SmiToInteger32(rax, rax);
+ __ push(rax);
+ __ fild_s(Operand(rsp, 0));
+ __ pop(rax);
+ __ SmiToInteger32(rcx, rcx);
+ __ fstp_d(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize));
+ __ ret(0);
+
+ // Handle store cache miss, replacing the ic with the generic stub.
+ __ bind(&miss_force_generic);
+ Handle<Code> ic_force_generic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+}
+
+
#undef __
} } // namespace v8::internal
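Conversely, the store stub just above canonicalizes every NaN before it reaches the array, so the hole pattern can never be written by user code: the maybe_nan test inspects the upper word (at or above the NaN/Infinity bound), and a nonzero fraction distinguishes NaN from Infinity. A model of the decision, with the canonical pattern as an assumption:

  #include <cmath>
  #include <cstdint>
  #include <cstring>

  const uint64_t kCanonicalNan = 0x7FF8000000000000ull;  // assumed quiet NaN

  // Every NaN stored into a fast double array becomes one canonical bit
  // pattern; infinities and ordinary doubles pass through untouched.
  double CanonicalizeForDoubleArray(double value) {
    if (std::isnan(value)) {
      double canonical;
      std::memcpy(&canonical, &kCanonicalNan, sizeof canonical);
      return canonical;
    }
    return value;
  }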
diff --git a/src/zone.h b/src/zone.h
index a5e437f..abb53ad 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -74,6 +74,8 @@
inline void adjust_segment_bytes_allocated(int delta);
+ inline Isolate* isolate() { return isolate_; }
+
static unsigned allocation_size_;
private:
@@ -164,7 +166,7 @@
class ZoneListAllocationPolicy {
public:
// Allocate 'size' bytes of memory in the zone.
- INLINE(static void* New(int size));
+ static void* New(int size);
// De-allocation attempts are silently ignored.
static void Delete(void* p) { }