Update V8 to r3431 as required by WebKit r51976.
Change-Id: I567392c3f8c0a0d5201a4249611ac4ccf468cd5b
diff --git a/src/SConscript b/src/SConscript
index 85fd724..3b0df17 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -159,6 +159,7 @@
"""),
'simulator:arm': ['arm/simulator-arm.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
+ 'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
'os:android': ['platform-linux.cc', 'platform-posix.cc'],
'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
@@ -187,6 +188,9 @@
'os:freebsd': [
'd8-posix.cc'
],
+ 'os:openbsd': [
+ 'd8-posix.cc'
+ ],
'os:win32': [
'd8-windows.cc'
],
@@ -264,7 +268,6 @@
else:
snapshot_cc = Command('snapshot.cc', [], [])
snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
- libraries_obj = context.ConfigureObject(env, libraries_empty_src, CPPPATH=['.'])
else:
snapshot_obj = empty_snapshot_obj
library_objs = [non_snapshot_files, libraries_obj, snapshot_obj]
diff --git a/src/accessors.cc b/src/accessors.cc
index 82ae702..56cf135 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -315,7 +315,11 @@
HandleScope scope;
Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
InitScriptLineEnds(script);
- return script->line_ends();
+ ASSERT(script->line_ends()->IsFixedArray());
+ Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
+ Handle<FixedArray> copy = Factory::CopyFixedArray(line_ends);
+ Handle<JSArray> js_array = Factory::NewJSArrayWithElements(copy);
+ return *js_array;
}
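
Note: the accessor now returns a defensive copy rather than the script's internal
FixedArray, so JavaScript callers cannot mutate the cached line-ends data. A
minimal sketch of the idiom, with MakeDefensiveCopy a hypothetical helper (not
part of the patch):

    static Object* MakeDefensiveCopy(Handle<FixedArray> internal) {
      // Copy first, then wrap: the JSArray aliases the copy, never the cache.
      Handle<FixedArray> copy = Factory::CopyFixedArray(internal);
      return *Factory::NewJSArrayWithElements(copy);
    }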
@@ -345,29 +349,38 @@
//
-// Accessors::ScriptGetEvalFromFunction
+// Accessors::ScriptGetEvalFromScript
//
-Object* Accessors::ScriptGetEvalFromFunction(Object* object, void*) {
+Object* Accessors::ScriptGetEvalFromScript(Object* object, void*) {
Object* script = JSValue::cast(object)->value();
- return Script::cast(script)->eval_from_function();
+ if (!Script::cast(script)->eval_from_shared()->IsUndefined()) {
+ Handle<SharedFunctionInfo> eval_from_shared(
+ SharedFunctionInfo::cast(Script::cast(script)->eval_from_shared()));
+
+ if (eval_from_shared->script()->IsScript()) {
+ Handle<Script> eval_from_script(Script::cast(eval_from_shared->script()));
+ return *GetScriptWrapper(eval_from_script);
+ }
+ }
+ return Heap::undefined_value();
}
-const AccessorDescriptor Accessors::ScriptEvalFromFunction = {
- ScriptGetEvalFromFunction,
+const AccessorDescriptor Accessors::ScriptEvalFromScript = {
+ ScriptGetEvalFromScript,
IllegalSetter,
0
};
//
-// Accessors::ScriptGetEvalFromPosition
+// Accessors::ScriptGetEvalFromScriptPosition
//
-Object* Accessors::ScriptGetEvalFromPosition(Object* object, void*) {
+Object* Accessors::ScriptGetEvalFromScriptPosition(Object* object, void*) {
HandleScope scope;
Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
@@ -379,14 +392,42 @@
// Get the function from where eval was called and find the source position
// from the instruction offset.
- Handle<Code> code(JSFunction::cast(script->eval_from_function())->code());
+ Handle<Code> code(SharedFunctionInfo::cast(
+ script->eval_from_shared())->code());
return Smi::FromInt(code->SourcePosition(code->instruction_start() +
script->eval_from_instructions_offset()->value()));
}
-const AccessorDescriptor Accessors::ScriptEvalFromPosition = {
- ScriptGetEvalFromPosition,
+const AccessorDescriptor Accessors::ScriptEvalFromScriptPosition = {
+ ScriptGetEvalFromScriptPosition,
+ IllegalSetter,
+ 0
+};
+
+
+//
+// Accessors::ScriptGetEvalFromFunctionName
+//
+
+
+Object* Accessors::ScriptGetEvalFromFunctionName(Object* object, void*) {
+ Object* script = JSValue::cast(object)->value();
+ Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(
+ Script::cast(script)->eval_from_shared()));
+
+
+ // Find the name of the function calling eval.
+ if (!shared->name()->IsUndefined()) {
+ return shared->name();
+ } else {
+ return shared->inferred_name();
+ }
+}
+
+
+const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
+ ScriptGetEvalFromFunctionName,
IllegalSetter,
0
};
diff --git a/src/accessors.h b/src/accessors.h
index 51d322e..7a840a1 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -51,8 +51,9 @@
V(ScriptCompilationType) \
V(ScriptLineEnds) \
V(ScriptContextData) \
- V(ScriptEvalFromFunction) \
- V(ScriptEvalFromPosition) \
+ V(ScriptEvalFromScript) \
+ V(ScriptEvalFromScriptPosition) \
+ V(ScriptEvalFromFunctionName) \
V(ObjectPrototype)
// Accessors contains all predefined proxy accessors.
@@ -95,8 +96,9 @@
static Object* ScriptGetCompilationType(Object* object, void*);
static Object* ScriptGetLineEnds(Object* object, void*);
static Object* ScriptGetContextData(Object* object, void*);
- static Object* ScriptGetEvalFromFunction(Object* object, void*);
- static Object* ScriptGetEvalFromPosition(Object* object, void*);
+ static Object* ScriptGetEvalFromScript(Object* object, void*);
+ static Object* ScriptGetEvalFromScriptPosition(Object* object, void*);
+ static Object* ScriptGetEvalFromFunctionName(Object* object, void*);
static Object* ObjectGetPrototype(Object* receiver, void*);
static Object* ObjectSetPrototype(JSObject* receiver, Object* value, void*);
diff --git a/src/allocation.cc b/src/allocation.cc
index 41724b6..678f4fd 100644
--- a/src/allocation.cc
+++ b/src/allocation.cc
@@ -80,7 +80,7 @@
char* StrDup(const char* str) {
- int length = strlen(str);
+ int length = StrLength(str);
char* result = NewArray<char>(length + 1);
memcpy(result, str, length * kCharSize);
result[length] = '\0';
@@ -88,8 +88,8 @@
}
-char* StrNDup(const char* str, size_t n) {
- size_t length = strlen(str);
+char* StrNDup(const char* str, int n) {
+ int length = StrLength(str);
if (n < length) length = n;
char* result = NewArray<char>(length + 1);
memcpy(result, str, length * kCharSize);
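
Note: the size_t-to-int change matches V8's internal convention of int string
lengths (see StrLength above). A hypothetical usage sketch; DeleteArray is the
counterpart of NewArray<char>:

    char* s = StrNDup("hello world", 5);  // copies at most 5 chars, adds NUL
    // s now holds "hello"
    DeleteArray(s);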
diff --git a/src/allocation.h b/src/allocation.h
index 586c4fd..70a3a03 100644
--- a/src/allocation.h
+++ b/src/allocation.h
@@ -124,7 +124,7 @@
// and StrNDup uses new and calls the FatalProcessOutOfMemory handler
// if allocation fails.
char* StrDup(const char* str);
-char* StrNDup(const char* str, size_t n);
+char* StrNDup(const char* str, int n);
// Allocation policy for allocating in the C free store using malloc
diff --git a/src/api.cc b/src/api.cc
index b457aad..93807a7 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -37,6 +37,7 @@
#include "platform.h"
#include "serialize.h"
#include "snapshot.h"
+#include "utils.h"
#include "v8threads.h"
#include "version.h"
@@ -125,6 +126,48 @@
// When V8 cannot allocate memory, FatalProcessOutOfMemory is called.
// The default fatal error handler is called and execution is stopped.
void i::V8::FatalProcessOutOfMemory(const char* location) {
+ i::HeapStats heap_stats;
+ int start_marker;
+ heap_stats.start_marker = &start_marker;
+ int new_space_size;
+ heap_stats.new_space_size = &new_space_size;
+ int new_space_capacity;
+ heap_stats.new_space_capacity = &new_space_capacity;
+ int old_pointer_space_size;
+ heap_stats.old_pointer_space_size = &old_pointer_space_size;
+ int old_pointer_space_capacity;
+ heap_stats.old_pointer_space_capacity = &old_pointer_space_capacity;
+ int old_data_space_size;
+ heap_stats.old_data_space_size = &old_data_space_size;
+ int old_data_space_capacity;
+ heap_stats.old_data_space_capacity = &old_data_space_capacity;
+ int code_space_size;
+ heap_stats.code_space_size = &code_space_size;
+ int code_space_capacity;
+ heap_stats.code_space_capacity = &code_space_capacity;
+ int map_space_size;
+ heap_stats.map_space_size = &map_space_size;
+ int map_space_capacity;
+ heap_stats.map_space_capacity = &map_space_capacity;
+ int cell_space_size;
+ heap_stats.cell_space_size = &cell_space_size;
+ int cell_space_capacity;
+ heap_stats.cell_space_capacity = &cell_space_capacity;
+ int lo_space_size;
+ heap_stats.lo_space_size = &lo_space_size;
+ int global_handle_count;
+ heap_stats.global_handle_count = &global_handle_count;
+ int weak_global_handle_count;
+ heap_stats.weak_global_handle_count = &weak_global_handle_count;
+ int pending_global_handle_count;
+ heap_stats.pending_global_handle_count = &pending_global_handle_count;
+ int near_death_global_handle_count;
+ heap_stats.near_death_global_handle_count = &near_death_global_handle_count;
+ int destroyed_global_handle_count;
+ heap_stats.destroyed_global_handle_count = &destroyed_global_handle_count;
+ int end_marker;
+ heap_stats.end_marker = &end_marker;
+ i::Heap::RecordStats(&heap_stats);
i::V8::SetFatalError();
FatalErrorCallback callback = GetFatalErrorHandler();
{
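
Note: the new out-of-memory path records heap statistics through pointers into
the current stack frame, bracketed by start/end markers, so the numbers are
preserved in a crash dump taken from the fatal-error handler. A trimmed sketch
of the pattern (only one field wired up here; the patch wires all of them):

    int new_space_size = 0;
    i::HeapStats heap_stats;
    heap_stats.new_space_size = &new_space_size;  // stats land on our stack
    // ... assign the remaining HeapStats fields the same way ...
    i::Heap::RecordStats(&heap_stats);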
@@ -450,7 +493,7 @@
}
-void Context::SetData(v8::Handle<Value> data) {
+void Context::SetData(v8::Handle<String> data) {
if (IsDeadCheck("v8::Context::SetData()")) return;
ENTER_V8;
{
@@ -1174,7 +1217,7 @@
}
-void Script::SetData(v8::Handle<Value> data) {
+void Script::SetData(v8::Handle<String> data) {
ON_BAILOUT("v8::Script::SetData()", return);
LOG_API("Script::SetData");
{
@@ -1191,19 +1234,26 @@
v8::TryCatch::TryCatch()
- : next_(i::Top::try_catch_handler()),
+ : next_(i::Top::try_catch_handler_address()),
exception_(i::Heap::the_hole_value()),
message_(i::Smi::FromInt(0)),
is_verbose_(false),
can_continue_(true),
capture_message_(true),
- js_handler_(NULL) {
+ rethrow_(false) {
i::Top::RegisterTryCatchHandler(this);
}
v8::TryCatch::~TryCatch() {
- i::Top::UnregisterTryCatchHandler(this);
+ if (rethrow_) {
+ v8::HandleScope scope;
+ v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception());
+ i::Top::UnregisterTryCatchHandler(this);
+ v8::ThrowException(exc);
+ } else {
+ i::Top::UnregisterTryCatchHandler(this);
+ }
}
@@ -1217,6 +1267,13 @@
}
+v8::Handle<v8::Value> v8::TryCatch::ReThrow() {
+ if (!HasCaught()) return v8::Local<v8::Value>();
+ rethrow_ = true;
+ return v8::Undefined();
+}
+
+
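
Note: ReThrow defers the actual throw to the TryCatch destructor, after the
handler has been unregistered, so the exception propagates to the next
enclosing handler. A hypothetical usage sketch (script and LogException are
placeholders, not from the patch):

    v8::TryCatch try_catch;
    v8::Local<v8::Value> result = script->Run();
    if (try_catch.HasCaught()) {
      LogException(try_catch.Exception());  // observe the exception locally...
      try_catch.ReThrow();                  // ...then rethrow on destruction
    }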
v8::Local<Value> v8::TryCatch::Exception() const {
if (HasCaught()) {
// Check for out of memory exception.
@@ -2032,11 +2089,11 @@
Local<String> str = Utils::ToLocal(class_name);
const char* postfix = "]";
- size_t prefix_len = strlen(prefix);
- size_t str_len = str->Length();
- size_t postfix_len = strlen(postfix);
+ int prefix_len = i::StrLength(prefix);
+ int str_len = str->Length();
+ int postfix_len = i::StrLength(postfix);
- size_t buf_len = prefix_len + str_len + postfix_len;
+ int buf_len = prefix_len + str_len + postfix_len;
char* buf = i::NewArray<char>(buf_len);
// Write prefix.
@@ -2621,11 +2678,8 @@
if (i::V8::IsRunning()) return true;
ENTER_V8;
HandleScope scope;
- if (i::Snapshot::Initialize()) {
- return true;
- } else {
- return i::V8::Initialize(NULL);
- }
+ if (i::Snapshot::Initialize()) return true;
+ return i::V8::Initialize(NULL);
}
@@ -2653,10 +2707,8 @@
void v8::V8::LowMemoryNotification() {
-#if defined(ANDROID)
if (!i::V8::IsRunning()) return;
i::Heap::CollectAllGarbage(true);
-#endif
}
@@ -2952,7 +3004,7 @@
LOG_API("String::New(char)");
if (length == 0) return Empty();
ENTER_V8;
- if (length == -1) length = strlen(data);
+ if (length == -1) length = i::StrLength(data);
i::Handle<i::String> result =
i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
return Utils::ToLocal(result);
@@ -2975,7 +3027,7 @@
EnsureInitialized("v8::String::NewUndetectable()");
LOG_API("String::NewUndetectable(char)");
ENTER_V8;
- if (length == -1) length = strlen(data);
+ if (length == -1) length = i::StrLength(data);
i::Handle<i::String> result =
i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
result->MarkAsUndetectable();
@@ -3043,7 +3095,8 @@
v8::String::ExternalStringResource* resource =
reinterpret_cast<v8::String::ExternalStringResource*>(parameter);
if (resource != NULL) {
- const size_t total_size = resource->length() * sizeof(*resource->data());
+ const int total_size =
+ static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Decrement(total_size);
// The object will continue to live in the JavaScript heap until the
@@ -3073,7 +3126,8 @@
v8::String::ExternalAsciiStringResource* resource =
reinterpret_cast<v8::String::ExternalAsciiStringResource*>(parameter);
if (resource != NULL) {
- const size_t total_size = resource->length() * sizeof(*resource->data());
+ const int total_size =
+ static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Decrement(total_size);
// The object will continue to live in the JavaScript heap until the
@@ -3095,7 +3149,8 @@
EnsureInitialized("v8::String::NewExternal()");
LOG_API("String::NewExternal");
ENTER_V8;
- const size_t total_size = resource->length() * sizeof(*resource->data());
+ const int total_size =
+ static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Increment(total_size);
i::Handle<i::String> result = NewExternalStringHandle(resource);
i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
@@ -3130,7 +3185,8 @@
EnsureInitialized("v8::String::NewExternal()");
LOG_API("String::NewExternal");
ENTER_V8;
- const size_t total_size = resource->length() * sizeof(*resource->data());
+ const int total_size =
+ static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Increment(total_size);
i::Handle<i::String> result = NewExternalAsciiStringHandle(resource);
i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
@@ -3185,6 +3241,10 @@
Local<v8::Value> v8::Date::New(double time) {
EnsureInitialized("v8::Date::New()");
LOG_API("Date::New");
+ if (isnan(time)) {
+ // Introduce only the canonical NaN value into the VM, to avoid signaling NaNs.
+ time = i::OS::nan_value();
+ }
ENTER_V8;
EXCEPTION_PREAMBLE();
i::Handle<i::Object> obj =
@@ -3248,7 +3308,7 @@
EnsureInitialized("v8::String::NewSymbol()");
LOG_API("String::NewSymbol(char)");
ENTER_V8;
- if (length == -1) length = strlen(data);
+ if (length == -1) length = i::StrLength(data);
i::Handle<i::String> result =
i::Factory::LookupSymbol(i::Vector<const char>(data, length));
return Utils::ToLocal(result);
@@ -3257,6 +3317,10 @@
Local<Number> v8::Number::New(double value) {
EnsureInitialized("v8::Number::New()");
+ if (isnan(value)) {
+ // Introduce only the canonical NaN value into the VM, to avoid signaling NaNs.
+ value = i::OS::nan_value();
+ }
ENTER_V8;
i::Handle<i::Object> result = i::Factory::NewNumber(value);
return Utils::NumberToLocal(result);
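
Note: both Date::New and Number::New now canonicalize NaN on entry, since
signaling NaN bit patterns can trap on some platforms. A minimal sketch,
assuming only standard <limits>:

    #include <limits>
    double snan = std::numeric_limits<double>::signaling_NaN();
    // The VM stores i::OS::nan_value(), the canonical quiet NaN:
    v8::Local<v8::Number> n = v8::Number::New(snan);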
@@ -3712,6 +3776,14 @@
}
+void Debug::SetDebugMessageDispatchHandler(
+ DebugMessageDispatchHandler handler) {
+ EnsureInitialized("v8::Debug::SetDebugMessageDispatchHandler");
+ ENTER_V8;
+ i::Debugger::SetDebugMessageDispatchHandler(handler);
+}
+
+
Local<Value> Debug::Call(v8::Handle<v8::Function> fun,
v8::Handle<v8::Value> data) {
if (!i::V8::IsRunning()) return Local<Value>();
diff --git a/src/api.h b/src/api.h
index 1221f35..a28e1f0 100644
--- a/src/api.h
+++ b/src/api.h
@@ -125,6 +125,15 @@
}
+class ApiFunction {
+ public:
+ explicit ApiFunction(v8::internal::Address addr) : addr_(addr) { }
+ v8::internal::Address address() { return addr_; }
+ private:
+ v8::internal::Address addr_;
+};
+
+
v8::Arguments::Arguments(v8::Local<v8::Value> data,
v8::Local<v8::Object> holder,
v8::Local<v8::Function> callee,
diff --git a/src/arguments.h b/src/arguments.h
index d2f1bfc..3fed223 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -77,9 +77,9 @@
// can.
class CustomArguments : public Relocatable {
public:
- inline CustomArguments(Object *data,
- JSObject *self,
- JSObject *holder) {
+ inline CustomArguments(Object* data,
+ JSObject* self,
+ JSObject* holder) {
values_[3] = self;
values_[2] = holder;
values_[1] = Smi::FromInt(0);
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 48cc090..5f47cb7 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -85,7 +85,7 @@
}
-Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
}
@@ -245,6 +245,12 @@
}
+void Assembler::set_target_at(Address constant_pool_entry,
+ Address target) {
+ Memory::Address_at(constant_pool_entry) = target;
+}
+
+
void Assembler::set_target_address_at(Address pc, Address target) {
Memory::Address_at(target_address_address_at(pc)) = target;
// Intuitively, we would think it is necessary to flush the instruction cache
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index bc3b8e6..d924728 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -42,6 +42,34 @@
namespace v8 {
namespace internal {
+// Safe default is no features.
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::enabled_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+
+void CpuFeatures::Probe() {
+ // If the compiler is allowed to use vfp then we can use vfp too in our
+ // code generation.
+#if !defined(__arm__)
+ // For the simulator=arm build, always use VFP since the arm simulator has
+ // VFP support.
+ supported_ |= 1u << VFP3;
+#else
+ if (Serializer::enabled()) {
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ return; // No features if we might serialize.
+ }
+
+ if (OS::ArmCpuHasFeature(VFP3)) {
+ // This implementation also sets the VFP flags if
+ // runtime detection of VFP returns true.
+ supported_ |= 1u << VFP3;
+ found_by_runtime_probing_ |= 1u << VFP3;
+ }
+#endif
+}
+
+
// -----------------------------------------------------------------------------
// Implementation of Register and CRegister
@@ -84,6 +112,57 @@
CRegister cr14 = { 14 };
CRegister cr15 = { 15 };
+// Support for the VFP registers s0 to s31 (d0 to d15).
+// Note that "sN:sM" is the same as "dN/2".
+Register s0 = { 0 };
+Register s1 = { 1 };
+Register s2 = { 2 };
+Register s3 = { 3 };
+Register s4 = { 4 };
+Register s5 = { 5 };
+Register s6 = { 6 };
+Register s7 = { 7 };
+Register s8 = { 8 };
+Register s9 = { 9 };
+Register s10 = { 10 };
+Register s11 = { 11 };
+Register s12 = { 12 };
+Register s13 = { 13 };
+Register s14 = { 14 };
+Register s15 = { 15 };
+Register s16 = { 16 };
+Register s17 = { 17 };
+Register s18 = { 18 };
+Register s19 = { 19 };
+Register s20 = { 20 };
+Register s21 = { 21 };
+Register s22 = { 22 };
+Register s23 = { 23 };
+Register s24 = { 24 };
+Register s25 = { 25 };
+Register s26 = { 26 };
+Register s27 = { 27 };
+Register s28 = { 28 };
+Register s29 = { 29 };
+Register s30 = { 30 };
+Register s31 = { 31 };
+
+Register d0 = { 0 };
+Register d1 = { 1 };
+Register d2 = { 2 };
+Register d3 = { 3 };
+Register d4 = { 4 };
+Register d5 = { 5 };
+Register d6 = { 6 };
+Register d7 = { 7 };
+Register d8 = { 8 };
+Register d9 = { 9 };
+Register d10 = { 10 };
+Register d11 = { 11 };
+Register d12 = { 12 };
+Register d13 = { 13 };
+Register d14 = { 14 };
+Register d15 = { 15 };
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -203,10 +282,14 @@
B4 = 1 << 4,
B5 = 1 << 5,
+ B6 = 1 << 6,
B7 = 1 << 7,
B8 = 1 << 8,
+ B9 = 1 << 9,
B12 = 1 << 12,
B16 = 1 << 16,
+ B18 = 1 << 18,
+ B19 = 1 << 19,
B20 = 1 << 20,
B21 = 1 << 21,
B22 = 1 << 22,
@@ -523,6 +606,11 @@
// encoded.
static bool MustUseIp(RelocInfo::Mode rmode) {
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
return Serializer::enabled();
} else if (rmode == RelocInfo::NONE) {
return false;
@@ -1282,6 +1370,187 @@
}
+// Support for VFP.
+void Assembler::fmdrr(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dm = <Rt,Rt2>.
+ // Instruction details available in ARM DDI 0406A, A8-646.
+ // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
+ // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!src1.is(pc) && !src2.is(pc));
+ emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
+ src1.code()*B12 | 0xB*B8 | B4 | dst.code());
+}
+
+
+void Assembler::fmrrd(const Register dst1,
+ const Register dst2,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
+ // <Rt,Rt2> = Dm.
+ // Instruction details available in ARM DDI 0406A, A8-646.
+ // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
+ // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!dst1.is(pc) && !dst2.is(pc));
+ emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
+ dst1.code()*B12 | 0xB*B8 | B4 | src.code());
+}
+
+
+void Assembler::fmsr(const Register dst,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
+ // Sn = Rt.
+ // Instruction details available in ARM DDI 0406A, A8-642.
+ // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
+ // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!src.is(pc));
+ emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
+ src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
+}
+
+
+void Assembler::fmrs(const Register dst,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
+ // Rt = Sn.
+ // Instruction details available in ARM DDI 0406A, A8-642.
+ // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
+ // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!dst.is(pc));
+ emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
+ dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
+}
+
+
+void Assembler::fsitod(const Register dst,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
+ // Dd = Sm (integer in Sm converted to IEEE 64-bit double in Dd).
+ // Instruction details available in ARM DDI 0406A, A8-576.
+ // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
+ dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
+ (0x1 & src.code())*B5 | (src.code() >> 1));
+}
+
+
+void Assembler::ftosid(const Register dst,
+ const Register src,
+ const SBit s,
+ const Condition cond) {
+ // Sd = Dm (IEEE 64-bit double in Dm converted to 32-bit integer in Sd).
+ // Instruction details available in ARM DDI 0406A, A8-576.
+ // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 |
+ 0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
+ 0x5*B9 | B8 | B7 | B6 | src.code());
+}
+
+
+void Assembler::faddd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = faddd(Dn, Dm) double precision floating point addition.
+ // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-536.
+ // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::fsubd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = fsubd(Dn, Dm) double precision floating point subtraction.
+ // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-784.
+ // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+}
+
+
+void Assembler::fmuld(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = fmuld(Dn, Dm) double precision floating point multiplication.
+ // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-784.
+ // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::fdivd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // Dd = fdivd(Dn, Dm) double precision floating point division.
+ // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-584.
+ // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::fcmp(const Register src1,
+ const Register src2,
+ const SBit s,
+ const Condition cond) {
+ // vcmp(Dd, Dm) double precision floating point comparison.
+ // Instruction details available in ARM DDI 0406A, A8-570.
+ // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
+ src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+}
+
+
+void Assembler::vmrs(Register dst, Condition cond) {
+ // Instruction details available in ARM DDI 0406A, A8-652.
+ // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
+ // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0xF*B20 | B16 |
+ dst.code()*B12 | 0xA*B8 | B4);
+}
+
+
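
Note: as a quick sanity check of the encodings above (not part of the patch),
fmsr(s1, r0) with the default al condition assembles as:

    al            = 0xE0000000
    0xE * B24     = 0x0E000000
    (1 >> 1)*B16  = 0x00000000   // Vn = dst.code() >> 1
    0 * B12       = 0x00000000   // Rt = src.code()
    0xA * B8      = 0x00000A00
    (1 & 1) * B7  = 0x00000080   // N bit = low bit of dst.code()
    B4            = 0x00000010
    emit          = 0xEE000A90   // the ARM "vmov s1, r0" encoding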
// Pseudo instructions
void Assembler::lea(Register dst,
const MemOperand& x,
@@ -1311,6 +1580,18 @@
}
+bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
+ uint32_t dummy1;
+ uint32_t dummy2;
+ return fits_shifter(imm32, &dummy1, &dummy2, NULL);
+}
+
+
+void Assembler::BlockConstPoolFor(int instructions) {
+ BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+
// Debugging
void Assembler::RecordJSReturn() {
WriteRecordedPositions();
@@ -1429,10 +1710,15 @@
}
if (rinfo.rmode() != RelocInfo::NONE) {
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
- !Serializer::enabled() &&
- !FLAG_debug_code) {
- return;
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ if (!Serializer::enabled() && !FLAG_debug_code) {
+ return;
+ }
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
reloc_info_writer.Write(&rinfo);
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index d1df08c..86bc18a 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -41,6 +41,7 @@
#define V8_ARM_ASSEMBLER_ARM_H_
#include <stdio.h>
#include "assembler.h"
+#include "serialize.h"
namespace v8 {
namespace internal {
@@ -102,6 +103,57 @@
extern Register lr;
extern Register pc;
+// Support for VFP registers s0 to s31 (d0 to d15).
+// Note that "sN:sM" is the same as "dN/2".
+extern Register s0;
+extern Register s1;
+extern Register s2;
+extern Register s3;
+extern Register s4;
+extern Register s5;
+extern Register s6;
+extern Register s7;
+extern Register s8;
+extern Register s9;
+extern Register s10;
+extern Register s11;
+extern Register s12;
+extern Register s13;
+extern Register s14;
+extern Register s15;
+extern Register s16;
+extern Register s17;
+extern Register s18;
+extern Register s19;
+extern Register s20;
+extern Register s21;
+extern Register s22;
+extern Register s23;
+extern Register s24;
+extern Register s25;
+extern Register s26;
+extern Register s27;
+extern Register s28;
+extern Register s29;
+extern Register s30;
+extern Register s31;
+
+extern Register d0;
+extern Register d1;
+extern Register d2;
+extern Register d3;
+extern Register d4;
+extern Register d5;
+extern Register d6;
+extern Register d7;
+extern Register d8;
+extern Register d9;
+extern Register d10;
+extern Register d11;
+extern Register d12;
+extern Register d13;
+extern Register d14;
+extern Register d15;
// Coprocessor register
struct CRegister {
@@ -372,6 +424,51 @@
friend class Assembler;
};
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+class CpuFeatures : public AllStatic {
+ public:
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
+
+ // Check whether a feature is supported by the target CPU.
+ static bool IsSupported(CpuFeature f) {
+ if (f == VFP3 && !FLAG_enable_vfp3) return false;
+ return (supported_ & (1u << f)) != 0;
+ }
+
+ // Check whether a feature is currently enabled.
+ static bool IsEnabled(CpuFeature f) {
+ return (enabled_ & (1u << f)) != 0;
+ }
+
+ // Enable a specified feature within a scope.
+ class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+ explicit Scope(CpuFeature f) {
+ ASSERT(CpuFeatures::IsSupported(f));
+ ASSERT(!Serializer::enabled() ||
+ (found_by_runtime_probing_ & (1u << f)) == 0);
+ old_enabled_ = CpuFeatures::enabled_;
+ CpuFeatures::enabled_ |= 1u << f;
+ }
+ ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
+ private:
+ unsigned old_enabled_;
+#else
+ public:
+ explicit Scope(CpuFeature f) {}
+#endif
+ };
+
+ private:
+ static unsigned supported_;
+ static unsigned enabled_;
+ static unsigned found_by_runtime_probing_;
+};
+
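
Note: a minimal sketch of the intended call pattern (hypothetical call site):
probe once at VM startup, then enable a probed feature for the duration of a
code-generation scope:

    CpuFeatures::Probe();
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);  // satisfies IsEnabled() asserts
      // ... emit VFP instructions such as fmsr/faddd here ...
    }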
typedef int32_t Instr;
@@ -437,6 +534,23 @@
INLINE(static Address target_address_at(Address pc));
INLINE(static void set_target_address_at(Address pc, Address target));
+ // This sets the branch destination (which is in the constant pool on ARM).
+ // This is for calls and branches within generated code.
+ inline static void set_target_at(Address constant_pool_entry, Address target);
+
+ // This sets the branch destination (which is in the constant pool on ARM).
+ // This is for calls and branches to runtime code.
+ inline static void set_external_target_at(Address constant_pool_entry,
+ Address target) {
+ set_target_at(constant_pool_entry, target);
+ }
+
+ // Here we are patching the address in the constant pool, not the actual call
+ // instruction. The address in the constant pool is the same size as a
+ // pointer.
+ static const int kCallTargetSize = kPointerSize;
+ static const int kExternalTargetSize = kPointerSize;
+
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
@@ -452,6 +566,7 @@
// register.
static const int kPcLoadDelta = 8;
+ static const int kJSReturnSequenceLength = 4;
// ---------------------------------------------------------------------------
// Code generation
@@ -638,6 +753,66 @@
void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
LFlag l = Short); // v5 and above
+ // Support for VFP.
+ // All these APIs support S0 to S31 and D0 to D15.
+ // Currently these APIs do not support extended D registers (D16 to D31),
+ // though some simple modifications would allow them to.
+
+ void fmdrr(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fmrrd(const Register dst1,
+ const Register dst2,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fmsr(const Register dst,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fmrs(const Register dst,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fsitod(const Register dst,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void ftosid(const Register dst,
+ const Register src,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+
+ void faddd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fsubd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fmuld(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fdivd(const Register dst,
+ const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void fcmp(const Register src1,
+ const Register src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void vmrs(const Register dst,
+ const Condition cond = al);
+
// Pseudo instructions
void nop() { mov(r0, Operand(r0)); }
@@ -665,6 +840,13 @@
return (pc_offset() - l->pos()) / kInstrSize;
}
+ // Check whether an immediate fits an addressing mode 1 instruction.
+ bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
+
+ // Postpone the generation of the constant pool for the specified number of
+ // instructions.
+ void BlockConstPoolFor(int instructions);
+
// Debugging
// Mark address of the ExitJSFrame code.
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index d7afb37..5389a3c 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -284,7 +284,7 @@
// Both registers are preserved by this code so no need to differentiate between
// construct call and normal call.
static void ArrayNativeCode(MacroAssembler* masm,
- Label *call_generic_code) {
+ Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more;
// Check for array construction with zero arguments or one.
@@ -949,6 +949,8 @@
const int kGlobalIndex =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+ __ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
@@ -1027,44 +1029,24 @@
__ push(r0);
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS);
- Label no_preemption, retry_preemption;
- __ bind(&retry_preemption);
- ExternalReference stack_guard_limit_address =
- ExternalReference::address_of_stack_guard_limit();
- __ mov(r2, Operand(stack_guard_limit_address));
- __ ldr(r2, MemOperand(r2));
- __ cmp(sp, r2);
- __ b(hi, &no_preemption);
-
- // We have encountered a preemption or stack overflow already before we push
- // the array contents. Save r0 which is the Smi-tagged length of the array.
- __ push(r0);
-
- // Runtime routines expect at least one argument, so give it a Smi.
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ push(r0);
- __ CallRuntime(Runtime::kStackGuard, 1);
-
- // Since we returned, it wasn't a stack overflow. Restore r0 and try again.
- __ pop(r0);
- __ b(&retry_preemption);
-
- __ bind(&no_preemption);
-
- // Eagerly check for stack-overflow before starting to push the arguments.
- // r0: number of arguments.
- // r2: stack limit.
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
Label okay;
+ __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+ // Make r2 the space we have left. The stack might already have overflowed
+ // here, which will cause r2 to become negative.
__ sub(r2, sp, r2);
-
+ // Check if the arguments will overflow the stack.
__ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ b(hi, &okay);
+ __ b(gt, &okay); // Signed comparison.
// Out of stack space.
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ push(r1);
__ push(r0);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_JS);
+ // End of stack check.
// Push current limit and index.
__ bind(&okay);
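
Note: the switch from an unsigned 'hi' branch to a signed 'gt' branch matters
because the subtraction above may underflow. A sketch of the arithmetic, with
sp and limit as plain integers for illustration (untagged argc assumed):

    int32_t space = static_cast<int32_t>(sp - limit);  // negative if the stack
                                                       // has already overflowed
    int32_t needed = argc << kPointerSizeLog2;
    bool okay = space > needed;  // signed: a negative 'space' fails here; an
                                 // unsigned 'hi' test could wrongly pass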
@@ -1107,6 +1089,8 @@
const int kGlobalOffset =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+ __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
diff --git a/src/arm/codegen-arm-inl.h b/src/arm/codegen-arm-inl.h
index 9ff02cb..749f32d 100644
--- a/src/arm/codegen-arm-inl.h
+++ b/src/arm/codegen-arm-inl.h
@@ -35,18 +35,15 @@
#define __ ACCESS_MASM(masm_)
void CodeGenerator::LoadConditionAndSpill(Expression* expression,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_control) {
- LoadCondition(expression, typeof_state, true_target, false_target,
- force_control);
+ LoadCondition(expression, true_target, false_target, force_control);
}
-void CodeGenerator::LoadAndSpill(Expression* expression,
- TypeofState typeof_state) {
- Load(expression, typeof_state);
+void CodeGenerator::LoadAndSpill(Expression* expression) {
+ Load(expression);
}
@@ -60,8 +57,8 @@
}
-void Reference::GetValueAndSpill(TypeofState typeof_state) {
- GetValue(typeof_state);
+void Reference::GetValueAndSpill() {
+ GetValue();
}
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 47f0e96..7c0b0c6 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "debug.h"
#include "parser.h"
#include "register-allocator-inl.h"
@@ -92,7 +93,6 @@
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
- typeof_state_(NOT_INSIDE_TYPEOF),
true_target_(NULL),
false_target_(NULL),
previous_(NULL) {
@@ -101,11 +101,9 @@
CodeGenState::CodeGenState(CodeGenerator* owner,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target)
: owner_(owner),
- typeof_state_(typeof_state),
true_target_(true_target),
false_target_(false_target),
previous_(owner->state()) {
@@ -144,6 +142,9 @@
// cp: callee's context
void CodeGenerator::GenCode(FunctionLiteral* fun) {
+ // Record the position for debugging purposes.
+ CodeForFunctionPosition(fun);
+
ZoneList<Statement*>* body = fun->body();
// Initialize state.
@@ -322,18 +323,32 @@
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
+ // Calculate the exact length of the return sequence and make sure that
+ // the constant pool is not emitted inside the return sequence.
+ int32_t sp_delta = (scope_->num_parameters() + 1) * kPointerSize;
+ int return_sequence_length = Assembler::kJSReturnSequenceLength;
+ if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
+ // Additional mov instruction generated.
+ return_sequence_length++;
+ }
+ masm_->BlockConstPoolFor(return_sequence_length);
+
// Tear down the frame which will restore the caller's frame pointer and
// the link register.
frame_->Exit();
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
- masm_->add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
+ masm_->add(sp, sp, Operand(sp_delta));
masm_->Jump(lr);
// Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(kJSReturnSequenceLength,
+ // expected by the debugger. The add instruction above is an addressing
+ // mode 1 instruction where there are restrictions on which immediate values
+ // can be encoded in the instruction and which immediate values require
+ // an additional instruction for moving the immediate to a temporary
+ // register.
+ ASSERT_EQ(return_sequence_length,
masm_->InstructionsGeneratedSince(&check_exit_codesize));
}
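
Note: the debugger patches the JS return site in place, so the emitted sequence
must have an exact, known length; BlockConstPoolFor guarantees the assembler
will not spill a constant pool into it. A condensed restatement of the
accounting above:

    int len = Assembler::kJSReturnSequenceLength;            // add sp + jump lr
    if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
      len++;  // sp_delta needs an extra mov into a scratch register
    }
    masm_->BlockConstPoolFor(len);  // no pool entries for the next len instrs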
@@ -442,14 +457,13 @@
// register was set, has_cc() is true and cc_reg_ contains the condition to
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_cc) {
ASSERT(!has_cc());
int original_height = frame_->height();
- { CodeGenState new_state(this, typeof_state, true_target, false_target);
+ { CodeGenState new_state(this, true_target, false_target);
Visit(x);
// If we hit a stack overflow, we may not have actually visited
@@ -479,13 +493,13 @@
}
-void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
JumpTarget true_target;
JumpTarget false_target;
- LoadCondition(x, typeof_state, &true_target, &false_target, false);
+ LoadCondition(expr, &true_target, &false_target, false);
if (has_cc()) {
// Convert cc_reg_ into a boolean value.
@@ -552,24 +566,27 @@
}
-// TODO(1241834): Get rid of this function in favor of just using Load, now
-// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
-// variables w/o reference errors elsewhere.
-void CodeGenerator::LoadTypeofExpression(Expression* x) {
+void CodeGenerator::LoadTypeofExpression(Expression* expr) {
+ // Special handling of identifiers as subexpressions of typeof.
VirtualFrame::SpilledScope spilled_scope;
- Variable* variable = x->AsVariableProxy()->AsVariable();
+ Variable* variable = expr->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // NOTE: This is somewhat nasty. We force the compiler to load
- // the variable as if through '<global>.<variable>' to make sure we
- // do not get reference errors.
+ // For a global variable we build the property reference
+ // <global>.<variable> and perform a (regular non-contextual) property
+ // load to make sure we do not get reference errors.
Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
Literal key(variable->name());
- // TODO(1241834): Fetch the position from the variable instead of using
- // no position.
Property property(&global, &key, RelocInfo::kNoPosition);
- LoadAndSpill(&property);
+ Reference ref(this, &property);
+ ref.GetValueAndSpill();
+ } else if (variable != NULL && variable->slot() != NULL) {
+ // For a variable that rewrites to a slot, we signal it is the immediate
+ // subexpression of a typeof.
+ LoadFromSlot(variable->slot(), INSIDE_TYPEOF);
+ frame_->SpillAll();
} else {
- LoadAndSpill(x, INSIDE_TYPEOF);
+ // Anything else can be handled normally.
+ LoadAndSpill(expr);
}
}
@@ -1066,27 +1083,6 @@
}
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) {}
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#if defined(DEBUG)
- void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
-#endif // defined(DEBUG)
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
int position) {
@@ -1122,22 +1118,20 @@
void CodeGenerator::CheckStack() {
VirtualFrame::SpilledScope spilled_scope;
- if (FLAG_check_stack) {
- Comment cmnt(masm_, "[ check stack");
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- // Put the lr setup instruction in the delay slot. kInstrSize is added to
- // the implicit 8 byte offset that always applies to operations with pc and
- // gives a return address 12 bytes down.
- masm_->add(lr, pc, Operand(Assembler::kInstrSize));
- masm_->cmp(sp, Operand(ip));
- StackCheckStub stub;
- // Call the stub if lower.
- masm_->mov(pc,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
- }
+ Comment cmnt(masm_, "[ check stack");
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ // Put the lr setup instruction in the delay slot. kInstrSize is added to
+ // the implicit 8 byte offset that always applies to operations with pc and
+ // gives a return address 12 bytes down.
+ masm_->add(lr, pc, Operand(Assembler::kInstrSize));
+ masm_->cmp(sp, Operand(ip));
+ StackCheckStub stub;
+ // Call the stub if lower.
+ masm_->mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
}
@@ -1299,8 +1293,7 @@
JumpTarget then;
JumpTarget else_;
// if (cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &then, &else_, true);
+ LoadConditionAndSpill(node->condition(), &then, &else_, true);
if (frame_ != NULL) {
Branch(false, &else_);
}
@@ -1323,8 +1316,7 @@
ASSERT(!has_else_stm);
JumpTarget then;
// if (cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &then, &exit, true);
+ LoadConditionAndSpill(node->condition(), &then, &exit, true);
if (frame_ != NULL) {
Branch(false, &exit);
}
@@ -1339,8 +1331,7 @@
ASSERT(!has_then_stm);
JumpTarget else_;
// if (!cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &exit, &else_, true);
+ LoadConditionAndSpill(node->condition(), &exit, &else_, true);
if (frame_ != NULL) {
Branch(true, &exit);
}
@@ -1354,8 +1345,7 @@
Comment cmnt(masm_, "[ If");
ASSERT(!has_then_stm && !has_else_stm);
// if (cond)
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &exit, &exit, false);
+ LoadConditionAndSpill(node->condition(), &exit, &exit, false);
if (frame_ != NULL) {
if (has_cc()) {
cc_reg_ = al;
@@ -1570,7 +1560,7 @@
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
- // Compile the test.
+ // Compile the test.
switch (info) {
case ALWAYS_TRUE:
// If control can fall off the end of the body, jump back to the
@@ -1593,8 +1583,9 @@
node->continue_target()->Bind();
}
if (has_valid_frame()) {
- LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
- &body, node->break_target(), true);
+ Comment cmnt(masm_, "[ DoWhileCondition");
+ CodeForDoWhileConditionPosition(node);
+ LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
// An invalid frame here indicates that control did not
// fall out of the test expression.
@@ -1633,8 +1624,7 @@
if (info == DONT_KNOW) {
JumpTarget body;
- LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
- &body, node->break_target(), true);
+ LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
// A NULL frame indicates that control did not fall out of the
// test expression.
@@ -1693,8 +1683,7 @@
// If the test is always true, there is no need to compile it.
if (info == DONT_KNOW) {
JumpTarget body;
- LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
- &body, node->break_target(), true);
+ LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
Branch(false, node->break_target());
}
@@ -1780,25 +1769,81 @@
primitive.Bind();
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0));
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
jsobject.Bind();
// Get the set of properties (as a FixedArray or Map).
- frame_->EmitPush(r0); // duplicate the object being enumerated
- frame_->EmitPush(r0);
+ // r0: value to be iterated over
+ frame_->EmitPush(r0); // Push the object being iterated over.
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ JumpTarget call_runtime;
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+ JumpTarget check_prototype;
+ JumpTarget use_cache;
+ __ mov(r1, Operand(r0));
+ loop.Bind();
+ // Check that there are no elements.
+ __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+ __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ cmp(r2, r4);
+ call_runtime.Branch(ne);
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in r3 for the subsequent
+ // prototype load.
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
+ __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
+ __ cmp(r2, ip);
+ call_runtime.Branch(eq);
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors. This is the case if the next enumeration index
+ // field does not contain a smi.
+ __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
+ __ tst(r2, Operand(kSmiTagMask));
+ call_runtime.Branch(eq);
+ // For all objects but the receiver, check that the cache is empty.
+ // r4: empty fixed array root.
+ __ cmp(r1, r0);
+ check_prototype.Branch(eq);
+ __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ cmp(r2, r4);
+ call_runtime.Branch(ne);
+ check_prototype.Bind();
+ // Load the prototype from the map and loop if non-null.
+ __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r1, ip);
+ loop.Branch(ne);
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ use_cache.Jump();
+
+ call_runtime.Bind();
+ // Call the runtime to get the property names for the object.
+ frame_->EmitPush(r0); // push the object (slot 4) for the runtime call
frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
- // If we got a Map, we can do a fast modification check.
- // Otherwise, we got a FixedArray, and we have to do a slow check.
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ // r0: map or fixed array (result from call to
+ // Runtime::kGetPropertyNamesFast)
__ mov(r2, Operand(r0));
__ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r1, ip);
fixed_array.Branch(ne);
+ use_cache.Bind();
// Get enum cache
+ // r0: map (either the result from a call to
+ // Runtime::kGetPropertyNamesFast or has been fetched directly from
+ // the object)
__ mov(r1, Operand(r0));
__ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
__ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
@@ -1863,9 +1908,7 @@
__ ldr(r0, frame_->ElementAt(4)); // push enumerable
frame_->EmitPush(r0);
frame_->EmitPush(r3); // push entry
- Result arg_count_reg(r0);
- __ mov(r0, Operand(1));
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, &arg_count_reg, 2);
+ frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
__ mov(r3, Operand(r0));
// If the property has been removed while iterating, we just skip it.
@@ -2272,7 +2315,8 @@
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(node, script_, this);
// Check for stack-overflow exception.
if (HasStackOverflow()) {
ASSERT(frame_->height() == original_height);
@@ -2303,20 +2347,19 @@
Comment cmnt(masm_, "[ Conditional");
JumpTarget then;
JumpTarget else_;
- LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
- &then, &else_, true);
+ LoadConditionAndSpill(node->condition(), &then, &else_, true);
if (has_valid_frame()) {
Branch(false, &else_);
}
if (has_valid_frame() || then.is_linked()) {
then.Bind();
- LoadAndSpill(node->then_expression(), typeof_state());
+ LoadAndSpill(node->then_expression());
}
if (else_.is_linked()) {
JumpTarget exit;
if (has_valid_frame()) exit.Jump();
else_.Bind();
- LoadAndSpill(node->else_expression(), typeof_state());
+ LoadAndSpill(node->else_expression());
if (exit.is_linked()) exit.Bind();
}
ASSERT(frame_->height() == original_height + 1);
@@ -2383,10 +2426,6 @@
frame_->EmitPush(r0);
} else {
- // Note: We would like to keep the assert below, but it fires because of
- // some nasty code in LoadTypeofExpression() which should be removed...
- // ASSERT(!slot->var()->is_dynamic());
-
// Special handling for locals allocated in registers.
__ ldr(r0, SlotOperand(slot, r2));
frame_->EmitPush(r0);
@@ -2481,7 +2520,7 @@
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Slot");
- LoadFromSlot(node, typeof_state());
+ LoadFromSlot(node, NOT_INSIDE_TYPEOF);
ASSERT(frame_->height() == original_height + 1);
}
@@ -2500,7 +2539,7 @@
} else {
ASSERT(var->is_global());
Reference ref(this, node);
- ref.GetValueAndSpill(typeof_state());
+ ref.GetValueAndSpill();
}
ASSERT(frame_->height() == original_height + 1);
}
@@ -2836,7 +2875,7 @@
} else {
// +=, *= and similar binary assignments.
// Get the old value of the lhs.
- target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+ target.GetValueAndSpill();
Literal* literal = node->value()->AsLiteral();
bool overwrite =
(node->value()->AsBinaryOperation() != NULL &&
@@ -2901,7 +2940,7 @@
Comment cmnt(masm_, "[ Property");
{ Reference property(this, node);
- property.GetValueAndSpill(typeof_state());
+ property.GetValueAndSpill();
}
ASSERT(frame_->height() == original_height + 1);
}
@@ -3071,7 +3110,7 @@
// Load the function to call from the property through a reference.
Reference ref(this, property);
- ref.GetValueAndSpill(NOT_INSIDE_TYPEOF); // receiver
+ ref.GetValueAndSpill(); // receiver
// Pass receiver to called function.
if (property->is_synthetic()) {
@@ -3301,7 +3340,79 @@
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
+ Comment cmnt(masm_, "[ GenerateFastCharCodeAt");
+
+ LoadAndSpill(args->at(0));
+ LoadAndSpill(args->at(1));
+ frame_->EmitPop(r0); // Index.
+ frame_->EmitPop(r1); // String.
+
+ Label slow, end, not_a_flat_string, ascii_string, try_again_with_new_string;
+
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &slow); // The 'string' was a Smi.
+
+ ASSERT(kSmiTag == 0);
+ __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
+ __ b(ne, &slow); // The index was negative or not a Smi.
+
+ __ bind(&try_again_with_new_string);
+ __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &slow);
+
+ // Now r2 has the string type.
+ __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
+ // Now r3 has the length of the string. Compare with the index.
+ __ cmp(r3, Operand(r0, LSR, kSmiTagSize));
+ __ b(le, &slow);
+
+ // Here we know the index is in range. Check that string is sequential.
+ ASSERT_EQ(0, kSeqStringTag);
+ __ tst(r2, Operand(kStringRepresentationMask));
+ __ b(ne, ¬_a_flat_string);
+
+ // Check whether it is an ASCII string.
+ ASSERT_EQ(0, kTwoByteStringTag);
+ __ tst(r2, Operand(kStringEncodingMask));
+ __ b(ne, &ascii_string);
+
+ // 2-byte string. We can add without shifting since the Smi tag size is the
+ // log2 of the number of bytes in a two-byte character.
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiShiftSize);
+ __ add(r1, r1, Operand(r0));
+ __ ldrh(r0, FieldMemOperand(r1, SeqTwoByteString::kHeaderSize));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ jmp(&end);
+
+ __ bind(&ascii_string);
+ __ add(r1, r1, Operand(r0, LSR, kSmiTagSize));
+ __ ldrb(r0, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ jmp(&end);
+
+ __ bind(¬_a_flat_string);
+ __ and_(r2, r2, Operand(kStringRepresentationMask));
+ __ cmp(r2, Operand(kConsStringTag));
+ __ b(ne, &slow);
+
+ // ConsString.
+ // Check that the right-hand side is the empty string (i.e. if this is really a
+ // flat string in a cons string). If that is not the case we would rather go
+ // to the runtime system now, to flatten the string.
+ __ ldr(r2, FieldMemOperand(r1, ConsString::kSecondOffset));
+ __ LoadRoot(r3, Heap::kEmptyStringRootIndex);
+ __ cmp(r2, Operand(r3));
+ __ b(ne, &slow);
+
+ // Get the first of the two strings.
+ __ ldr(r1, FieldMemOperand(r1, ConsString::kFirstOffset));
+ __ jmp(&try_again_with_new_string);
+
+ __ bind(&slow);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+
+ __ bind(&end);
frame_->EmitPush(r0);
}
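
Note: the combined mask test above folds two checks into one instruction. With
kSmiTag == 0 and a one-bit tag, a value is a non-negative Smi exactly when
neither the tag bit nor the sign bit is set. An equivalent C++ sketch:

    static inline bool IsNonNegativeSmi(uint32_t value) {
      const uint32_t kSmiTagMask = 1;  // low bit: 0 for Smi, 1 for heap object
      return (value & (kSmiTagMask | 0x80000000u)) == 0;
    }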
@@ -3325,6 +3436,51 @@
}
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r1);
+ __ tst(r1, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(r1, ip);
+ true_target()->Branch(eq);
+
+ Register map_reg = r2;
+ __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
+ __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r1, Operand(1 << Map::kIsUndetectable));
+ false_target()->Branch(eq);
+
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ false_target()->Branch(lt);
+ __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+ cc_reg_ = le;
+}
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (%_ClassOf(arg) === 'Function')
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0);
+ __ tst(r0, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+ Register map_reg = r2;
+ __ CompareObjectType(r0, map_reg, r1, JS_FUNCTION_TYPE);
+ cc_reg_ = eq;
+}
+
+
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 0);
@@ -3403,6 +3559,17 @@
}
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ frame_->CallRuntime(Runtime::kStringAdd, 2);
+ frame_->EmitPush(r0);
+}
+
+
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
@@ -3476,7 +3643,6 @@
if (op == Token::NOT) {
LoadConditionAndSpill(node->expression(),
- NOT_INSIDE_TYPEOF,
false_target(),
true_target(),
true);
@@ -3490,9 +3656,7 @@
if (property != NULL) {
LoadAndSpill(property->obj());
LoadAndSpill(property->key());
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
} else if (variable != NULL) {
Slot* slot = variable->slot();
@@ -3500,9 +3664,7 @@
LoadGlobal();
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// lookup the context holding the named variable
@@ -3514,9 +3676,7 @@
frame_->EmitPush(r0);
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
} else {
// Default: Result of deleting non-global, not dynamically
@@ -3566,9 +3726,7 @@
smi_label.Branch(eq);
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);
continue_label.Jump();
smi_label.Bind();
@@ -3590,9 +3748,7 @@
__ tst(r0, Operand(kSmiTagMask));
continue_label.Branch(eq);
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
continue_label.Bind();
break;
}
@@ -3637,7 +3793,7 @@
ASSERT(frame_->height() == original_height + 1);
return;
}
- target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+ target.GetValueAndSpill();
frame_->EmitPop(r0);
JumpTarget slow;
@@ -3677,9 +3833,7 @@
{
// Convert the operand to a number.
frame_->EmitPush(r0);
- Result arg_count(r0);
- __ mov(r0, Operand(0));
- frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+ frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
}
if (is_postfix) {
// Postfix: store to result (on the stack).
@@ -3731,7 +3885,6 @@
if (op == Token::AND) {
JumpTarget is_true;
LoadConditionAndSpill(node->left(),
- NOT_INSIDE_TYPEOF,
&is_true,
false_target(),
false);
@@ -3767,7 +3920,6 @@
}
is_true.Bind();
LoadConditionAndSpill(node->right(),
- NOT_INSIDE_TYPEOF,
true_target(),
false_target(),
false);
@@ -3779,7 +3931,6 @@
} else if (op == Token::OR) {
JumpTarget is_false;
LoadConditionAndSpill(node->left(),
- NOT_INSIDE_TYPEOF,
true_target(),
&is_false,
false);
@@ -3815,7 +3966,6 @@
}
is_false.Bind();
LoadConditionAndSpill(node->right(),
- NOT_INSIDE_TYPEOF,
true_target(),
false_target(),
false);
@@ -4000,28 +4150,35 @@
} else if (check->Equals(Heap::function_symbol())) {
__ tst(r1, Operand(kSmiTagMask));
false_target()->Branch(eq);
- __ CompareObjectType(r1, r1, r1, JS_FUNCTION_TYPE);
+ Register map_reg = r2;
+ __ CompareObjectType(r1, map_reg, r1, JS_FUNCTION_TYPE);
+ true_target()->Branch(eq);
+ // Regular expressions are callable so typeof == 'function'.
+ __ CompareInstanceType(map_reg, r1, JS_REGEXP_TYPE);
cc_reg_ = eq;
} else if (check->Equals(Heap::object_symbol())) {
__ tst(r1, Operand(kSmiTagMask));
false_target()->Branch(eq);
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(r1, ip);
true_target()->Branch(eq);
+ Register map_reg = r2;
+ __ CompareObjectType(r1, map_reg, r1, JS_REGEXP_TYPE);
+ false_target()->Branch(eq);
+
// It can be an undetectable object.
- __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
__ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
__ cmp(r1, Operand(1 << Map::kIsUndetectable));
false_target()->Branch(eq);
- __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
false_target()->Branch(lt);
- __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
+ __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
cc_reg_ = le;
} else {
@@ -4062,9 +4219,7 @@
case Token::IN: {
LoadAndSpill(left);
LoadAndSpill(right);
- Result arg_count(r0);
- __ mov(r0, Operand(1)); // not counting receiver
- frame_->InvokeBuiltin(Builtins::IN, CALL_JS, &arg_count, 2);
+ frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
frame_->EmitPush(r0);
break;
}
@@ -4114,7 +4269,7 @@
}
-void Reference::GetValue(TypeofState typeof_state) {
+void Reference::GetValue() {
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
ASSERT(!cgen_->has_cc());
@@ -4129,16 +4284,11 @@
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
- cgen_->LoadFromSlot(slot, typeof_state);
+ cgen_->LoadFromSlot(slot, NOT_INSIDE_TYPEOF);
break;
}
case NAMED: {
- // TODO(1241834): Make sure that this it is safe to ignore the
- // distinction between expressions in a typeof and not in a typeof. If
- // there is a chance that reference errors can be thrown below, we
- // must distinguish between the two kinds of loads (typeof expression
- // loads must not throw a reference error).
VirtualFrame* frame = cgen_->frame();
Comment cmnt(masm, "[ Load from named Property");
Handle<String> name(GetName());
@@ -4157,9 +4307,6 @@
}
case KEYED: {
- // TODO(1241834): Make sure that this it is safe to ignore the
- // distinction between expressions in a typeof and not in a typeof.
-
// TODO(181): Implement inlined version of array indexing once
// loop nesting is properly tracked on ARM.
VirtualFrame* frame = cgen_->frame();
@@ -4495,7 +4642,7 @@
// See comment for class.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) {
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
// the_int_ has the answer which is a signed int32 but not a Smi.
// We test for the special value that has a different exponent. This test
@@ -4568,6 +4715,22 @@
if (cc != eq) {
__ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ b(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but (undefined <= undefined)
+ // == false! See ECMAScript 11.8.5.
+ if (cc == le || cc == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, Operand(r2));
+ __ b(ne, &return_equal);
+ if (cc == le) {
+ __ mov(r0, Operand(GREATER)); // undefined <= undefined should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // undefined >= undefined should fail.
+ }
+ __ mov(pc, Operand(lr)); // Return.
+ }
}
}
__ bind(&return_equal);
@@ -4645,9 +4808,17 @@
// Rhs is a smi, lhs is a number.
__ push(lr);
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
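+ // Judging from the stub path below, the helper arguments appear to be (source Smi, destination hi word, destination lo word).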
+ __ IntegerToDoubleConversionWithVFP3(r1, r3, r2);
+ } else {
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ }
+
+
// r3 and r2 are rhs as double.
__ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
__ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
@@ -4675,9 +4846,16 @@
__ push(lr);
__ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r1, r0);
+ } else {
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ }
+
__ pop(lr);
// Fall through to both_loaded_as_doubles.
}
@@ -4880,9 +5058,23 @@
// fall through if neither is a NaN. Also binds rhs_not_nan.
EmitNanCheck(masm, &rhs_not_nan, cc_);
- // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
- // answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // ARMv7 VFP3 instructions to implement double precision comparison.
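+ // fmdrr (vmov Dm, Rt, Rt2) packs a core register pair (low word first) into a double register.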
+ __ fmdrr(d6, r0, r1);
+ __ fmdrr(d7, r2, r3);
+
+ __ fcmp(d6, d7);
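+ // vmrs with the pc as destination copies the FPSCR condition flags into the APSR.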
+ __ vmrs(pc);
+ __ mov(r0, Operand(0), LeaveCC, eq);
+ __ mov(r0, Operand(1), LeaveCC, lt);
+ __ mvn(r0, Operand(0), LeaveCC, gt);
+ __ mov(pc, Operand(lr));
+ } else {
+ // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
+ // answer. Never falls through.
+ EmitTwoNonNanDoubleComparison(masm, cc_);
+ }
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
@@ -4935,7 +5127,6 @@
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ mov(r0, Operand(arg_count));
__ InvokeBuiltin(native, CALL_JS);
__ cmp(r0, Operand(0));
__ pop(pc);
@@ -4982,24 +5173,74 @@
// Since both are Smis there is no heap number to overwrite, so allocate.
// The new heap number is in r5. r6 and r7 are scratch.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
- // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ push(lr);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+ } else {
+ // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ __ push(lr);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
__ jmp(&do_the_call); // Tail call. No return.
// We jump to here if something goes wrong (one param is not a number of any
// sort or new-space allocation fails).
__ bind(&slow);
+
+ // Push arguments to the stack
__ push(r1);
__ push(r0);
- __ mov(r0, Operand(1)); // Set number of arguments.
+
+ if (Token::ADD == operation) {
+ // Test for string arguments before calling runtime.
+ // r1 : first argument
+ // r0 : second argument
+ // sp[0] : second argument
+ // sp[1] : first argument
+
+ Label not_strings, not_string1, string1;
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &not_string1);
+ __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &not_string1);
+
+ // First argument is a string, test second.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &string1);
+ __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &string1);
+
+ // First and second argument are strings.
+ __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &not_strings);
+ __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
+
+ __ bind(&not_strings);
+ }
+
__ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
// We branch here if at least one of r0 and r1 is not a Smi.
@@ -5027,12 +5268,20 @@
// We can't overwrite a Smi so get address of new heap number into r5.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(r7, Operand(r0));
- ConvertToDoubleStub stub3(r3, r2, r7, r6);
- __ push(lr);
- __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
+
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ } else {
+ // Write Smi from r0 to r3 and r2 in double format.
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub3(r3, r2, r7, r6);
+ __ push(lr);
+ __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
__ bind(&finished_loading_r0);
// Move r1 to a double in r0-r1.
@@ -5052,12 +5301,19 @@
// We can't overwrite a Smi so get address of new heap number into r5.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
- // Write Smi from r1 to r1 and r0 in double format.
- __ mov(r7, Operand(r1));
- ConvertToDoubleStub stub4(r1, r0, r7, r6);
- __ push(lr);
- __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+ } else {
+ // Write Smi from r1 to r1 and r0 in double format.
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub4(r1, r0, r7, r6);
+ __ push(lr);
+ __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
__ bind(&finished_loading_r1);
__ bind(&do_the_call);
@@ -5066,6 +5322,38 @@
// r2: Right value (least significant part of mantissa).
// r3: Right value (sign, exponent, top of mantissa).
// r5: Address of heap number for result.
+
+ if (CpuFeatures::IsSupported(VFP3) &&
+ ((Token::MUL == operation) ||
+ (Token::DIV == operation) ||
+ (Token::ADD == operation) ||
+ (Token::SUB == operation))) {
+ CpuFeatures::Scope scope(VFP3);
+ // ARMv7 VFP3 instructions to implement
+ // double precision, add, subtract, multiply, divide.
+ __ fmdrr(d6, r0, r1);
+ __ fmdrr(d7, r2, r3);
+
+ if (Token::MUL == operation) {
+ __ fmuld(d5, d6, d7);
+ } else if (Token::DIV == operation) {
+ __ fdivd(d5, d6, d7);
+ } else if (Token::ADD == operation) {
+ __ faddd(d5, d6, d7);
+ } else if (Token::SUB == operation) {
+ __ fsubd(d5, d6, d7);
+ } else {
+ UNREACHABLE();
+ }
+
+ __ fmrrd(r0, r1, d5);
+
+ __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
+ __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
+ __ mov(r0, Operand(r5));
+ __ mov(pc, lr);
+ return;
+ }
__ push(lr); // For later.
__ push(r5); // Address of heap number that is answer.
__ AlignStack(0);
@@ -5134,38 +5422,49 @@
__ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
// Dest already has a Smi zero.
__ b(lt, &done);
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
- // We now have the exponent in dest. Subtract from 30 to get
- // how much to shift down.
- __ rsb(dest, dest, Operand(30));
-
+ if (!CpuFeatures::IsSupported(VFP3)) {
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
+ // We now have the exponent in dest. Subtract from 30 to get
+ // how much to shift down.
+ __ rsb(dest, dest, Operand(30));
+ }
__ bind(&right_exponent);
- // Get the top bits of the mantissa.
- __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
- // Put back the implicit 1.
- __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
- // Shift up the mantissa bits to take up the space the exponent used to take.
- // We just orred in the implicit bit so that took care of one and we want to
- // leave the sign bit 0 so we subtract 2 bits from the shift distance.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
- // Put sign in zero flag.
- __ tst(scratch, Operand(HeapNumber::kSignMask));
- // Get the second half of the double. For some exponents we don't actually
- // need this because the bits get shifted out again, but it's probably slower
- // to test than just to do it.
- __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Shift down 22 bits to get the last 10 bits.
- __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
- // Move down according to the exponent.
- __ mov(dest, Operand(scratch, LSR, dest));
- // Fix sign if sign bit was set.
- __ rsb(dest, dest, Operand(0), LeaveCC, ne);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // ARMv7 VFP3 instructions implementing double precision to integer
+ // conversion using round to zero.
+ __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ __ fmdrr(d7, scratch2, scratch);
+ __ ftosid(s15, d7);
+ __ fmrs(dest, s15);
+ } else {
+ // Get the top bits of the mantissa.
+ __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
+ // Put back the implicit 1.
+ __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just orred in the implicit bit so that took care of one and
+ // we want to leave the sign bit 0 so we subtract 2 bits from the shift
+ // distance.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
+ // Put sign in zero flag.
+ __ tst(scratch, Operand(HeapNumber::kSignMask));
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 22 bits to get the last 10 bits.
+ __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
+ // Move down according to the exponent.
+ __ mov(dest, Operand(scratch, LSR, dest));
+ // Fix sign if sign bit was set.
+ __ rsb(dest, dest, Operand(0), LeaveCC, ne);
+ }
__ bind(&done);
}
-
// For bitwise ops where the inputs are not both Smis we here try to determine
// whether both inputs are either Smis or at least heap numbers that can be
// represented by a 32 bit signed value. We truncate towards zero as required
@@ -5182,7 +5481,7 @@
__ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
- GetInt32(masm, r1, r3, r4, r5, &slow);
+ GetInt32(masm, r1, r3, r5, r4, &slow);
__ jmp(&done_checking_r1);
__ bind(&r1_is_smi);
__ mov(r3, Operand(r1, ASR, 1));
@@ -5192,7 +5491,7 @@
__ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
- GetInt32(masm, r0, r2, r4, r5, &slow);
+ GetInt32(masm, r0, r2, r5, r4, &slow);
__ jmp(&done_checking_r0);
__ bind(&r0_is_smi);
__ mov(r2, Operand(r0, ASR, 1));
@@ -5277,7 +5576,6 @@
__ bind(&slow);
__ push(r1); // restore stack
__ push(r0);
- __ mov(r0, Operand(1)); // 1 argument (not counting receiver).
switch (op_) {
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
@@ -5659,7 +5957,6 @@
// Enter runtime system.
__ bind(&slow);
__ push(r0);
- __ mov(r0, Operand(0)); // Set number of arguments.
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
__ bind(¬_smi);
@@ -5797,7 +6094,7 @@
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- StackFrame::Type frame_type,
+ ExitFrame::Mode mode,
bool do_gc,
bool always_allocate) {
// r0: result parameter for PerformGC, if any
@@ -5857,7 +6154,7 @@
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(frame_type);
+ __ LeaveExitFrame(mode);
// check if we should retry or throw exception
Label retry;
@@ -5903,12 +6200,12 @@
// this by performing a garbage collection and retrying the
// builtin once.
- StackFrame::Type frame_type = is_debug_break
- ? StackFrame::EXIT_DEBUG
- : StackFrame::EXIT;
+ ExitFrame::Mode mode = is_debug_break
+ ? ExitFrame::MODE_DEBUG
+ : ExitFrame::MODE_NORMAL;
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(frame_type);
+ __ EnterExitFrame(mode);
// r4: number of arguments (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
@@ -5923,7 +6220,7 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
false,
false);
@@ -5932,7 +6229,7 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
false);
@@ -5943,7 +6240,7 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
true);
@@ -6135,7 +6432,6 @@
// Slow-case. Tail call builtin.
__ bind(&slow);
- __ mov(r0, Operand(1)); // Arg count without receiver.
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
}
@@ -6178,7 +6474,7 @@
__ b(eq, &adaptor);
// Check index against formal parameters count limit passed in
- // through register eax. Use unsigned comparison to get negative
+ // through register r0. Use unsigned comparison to get negative
// check for free.
__ cmp(r1, r0);
__ b(cs, &slow);
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index e079950..ba7f936 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -77,12 +77,12 @@
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is left in place with its value above it.
- void GetValue(TypeofState typeof_state);
+ void GetValue();
// Generate code to push the value of a reference on top of the expression
// stack and then spill the stack frame. This function is used temporarily
// while the code generator is being transformed.
- inline void GetValueAndSpill(TypeofState typeof_state);
+ inline void GetValueAndSpill();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
@@ -112,10 +112,8 @@
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
- // state. The new state has its own typeof state and pair of branch
- // labels.
+ // state. The new state has its own pair of branch labels.
CodeGenState(CodeGenerator* owner,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target);
@@ -123,13 +121,11 @@
// previous state.
~CodeGenState();
- TypeofState typeof_state() const { return typeof_state_; }
JumpTarget* true_target() const { return true_target_; }
JumpTarget* false_target() const { return false_target_; }
private:
CodeGenerator* owner_;
- TypeofState typeof_state_;
JumpTarget* true_target_;
JumpTarget* false_target_;
CodeGenState* previous_;
@@ -169,8 +165,8 @@
// Accessors
MacroAssembler* masm() { return masm_; }
-
VirtualFrame* frame() const { return frame_; }
+ Handle<Script> script() { return script_; }
bool has_valid_frame() const { return frame_ != NULL; }
@@ -191,10 +187,6 @@
static const int kUnknownIntValue = -1;
- // Number of instructions used for the JS return sequence. The constant is
- // used by the debugger to patch the JS return sequence.
- static const int kJSReturnSequenceLength = 4;
-
private:
// Construction/Destruction
CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
@@ -210,7 +202,6 @@
// State
bool has_cc() const { return cc_reg_ != al; }
- TypeofState typeof_state() const { return state_->typeof_state(); }
JumpTarget* true_target() const { return state_->true_target(); }
JumpTarget* false_target() const { return state_->false_target(); }
@@ -259,25 +250,22 @@
}
void LoadCondition(Expression* x,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_cc);
- void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ void Load(Expression* expr);
void LoadGlobal();
void LoadGlobalReceiver(Register scratch);
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
- inline void LoadAndSpill(Expression* expression,
- TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ inline void LoadAndSpill(Expression* expression);
// Call LoadCondition and then spill the virtual frame unless control flow
// cannot reach the end of the expression (ie, by emitting only
// unconditional jumps to the control targets).
inline void LoadConditionAndSpill(Expression* expression,
- TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_control);
@@ -331,7 +319,6 @@
InlineRuntimeLUT* old_entry);
static Handle<Code> ComputeLazyCompile(int argc);
- Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
@@ -347,6 +334,8 @@
void GenerateIsSmi(ZoneList<Expression*>* args);
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
+ void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsFunction(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -377,6 +366,9 @@
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
+ // Fast support for StringAdd.
+ void GenerateStringAdd(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -391,6 +383,7 @@
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Statement* node);
+ void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
@@ -433,6 +426,27 @@
};
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop)
+ : argc_(argc), in_loop_(in_loop) {}
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+
+#if defined(DEBUG)
+ void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
+#endif // defined(DEBUG)
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() { return argc_; }
+ InLoopFlag InLoop() { return in_loop_; }
+};
+
+
class GenericBinaryOpStub : public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
diff --git a/src/arm/constants-arm.cc b/src/arm/constants-arm.cc
index 964bfe1..89ff7c0 100644
--- a/src/arm/constants-arm.cc
+++ b/src/arm/constants-arm.cc
@@ -67,6 +67,26 @@
}
+// Support for VFP registers s0 to s31 (d0 to d15).
+// Note that "sN:sM" is the same as "dN/2"
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* VFPRegisters::names_[kNumVFPRegisters] = {
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+ "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+ "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"
+};
+
+
+const char* VFPRegisters::Name(int reg) {
+ ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
+ return names_[reg];
+}
+
+
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumRegisters; i++) {
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 6bd0d00..9432207 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -43,24 +43,27 @@
# define USE_THUMB_INTERWORK 1
#endif
-#if defined(__ARM_ARCH_5T__) || \
- defined(__ARM_ARCH_5TE__) || \
- defined(__ARM_ARCH_6__) || \
- defined(__ARM_ARCH_7A__) || \
+#if defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7R__) || \
defined(__ARM_ARCH_7__)
-# define CAN_USE_ARMV5_INSTRUCTIONS 1
-# define CAN_USE_THUMB_INSTRUCTIONS 1
+# define CAN_USE_ARMV7_INSTRUCTIONS 1
#endif
-#if defined(__ARM_ARCH_6__) || \
- defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7__)
+#if defined(__ARM_ARCH_6__) || \
+ defined(__ARM_ARCH_6J__) || \
+ defined(__ARM_ARCH_6K__) || \
+ defined(__ARM_ARCH_6Z__) || \
+ defined(__ARM_ARCH_6ZK__) || \
+ defined(__ARM_ARCH_6T2__) || \
+ defined(CAN_USE_ARMV7_INSTRUCTIONS)
# define CAN_USE_ARMV6_INSTRUCTIONS 1
#endif
-#if defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7__)
-# define CAN_USE_ARMV7_INSTRUCTIONS 1
+#if defined(__ARM_ARCH_5T__) || \
+ defined(__ARM_ARCH_5TE__) || \
+ defined(CAN_USE_ARMV6_INSTRUCTIONS)
+# define CAN_USE_ARMV5_INSTRUCTIONS 1
+# define CAN_USE_THUMB_INSTRUCTIONS 1
#endif
// Simulator should support ARM5 instructions.
@@ -75,6 +78,9 @@
// Number of registers in normal ARM mode.
static const int kNumRegisters = 16;
+// VFP support.
+static const int kNumVFPRegisters = 48;
+
// PC is register 15.
static const int kPCRegister = 15;
static const int kNoRegister = -1;
@@ -231,6 +237,16 @@
inline int RnField() const { return Bits(19, 16); }
inline int RdField() const { return Bits(15, 12); }
+ // Support for VFP.
+ // Vn(19-16) | Vd(15-12) | Vm(3-0)
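+ // The single-bit N(7), M(5) and D(22) fields extend Vn, Vm and Vd to 5-bit single-precision register numbers.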
+ inline int VnField() const { return Bits(19, 16); }
+ inline int VmField() const { return Bits(3, 0); }
+ inline int VdField() const { return Bits(15, 12); }
+ inline int NField() const { return Bit(7); }
+ inline int MField() const { return Bit(5); }
+ inline int DField() const { return Bit(22); }
+ inline int RtField() const { return Bits(15, 12); }
+
// Fields used in Data processing instructions
inline Opcode OpcodeField() const {
return static_cast<Opcode>(Bits(24, 21));
@@ -307,7 +323,7 @@
struct RegisterAlias {
int reg;
- const char *name;
+ const char* name;
};
private:
@@ -315,6 +331,15 @@
static const RegisterAlias aliases_[];
};
+// Helper functions for converting between VFP register numbers and names.
+class VFPRegisters {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ private:
+ static const char* names_[kNumVFPRegisters];
+};
} } // namespace assembler::arm
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index cafefce..a5a358b 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -33,12 +33,13 @@
#include "v8.h"
#include "cpu.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
void CPU::Setup() {
- // Nothing to do.
+ CpuFeatures::Probe();
}
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index ef33653..fc9808d 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -61,7 +61,7 @@
// Restore the JS frame exit code.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
rinfo()->PatchCode(original_rinfo()->pc(),
- CodeGenerator::kJSReturnSequenceLength);
+ Assembler::kJSReturnSequenceLength);
}
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 6431483..2f9e78f 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -97,6 +97,10 @@
// Printing of common values.
void PrintRegister(int reg);
+ void PrintSRegister(int reg);
+ void PrintDRegister(int reg);
+ int FormatVFPRegister(Instr* instr, const char* format);
+ int FormatVFPinstruction(Instr* instr, const char* format);
void PrintCondition(Instr* instr);
void PrintShiftRm(Instr* instr);
void PrintShiftImm(Instr* instr);
@@ -121,6 +125,10 @@
void DecodeType6(Instr* instr);
void DecodeType7(Instr* instr);
void DecodeUnconditional(Instr* instr);
+ // For VFP support.
+ void DecodeTypeVFP(Instr* instr);
+ void DecodeType6CoprocessorIns(Instr* instr);
+
const disasm::NameConverter& converter_;
v8::internal::Vector<char> out_buffer_;
@@ -171,6 +179,16 @@
Print(converter_.NameOfCPURegister(reg));
}
+// Print the VFP S register name according to the active name converter.
+void Decoder::PrintSRegister(int reg) {
+ Print(assembler::arm::VFPRegisters::Name(reg));
+}
+
+// Print the VFP D register name according to the active name converter.
+void Decoder::PrintDRegister(int reg) {
+ Print(assembler::arm::VFPRegisters::Name(reg + 32));
+}
+
// These shift names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
@@ -290,6 +308,10 @@
int reg = instr->RmField();
PrintRegister(reg);
return 2;
+ } else if (format[1] == 't') { // 'rt: Rt register
+ int reg = instr->RtField();
+ PrintRegister(reg);
+ return 2;
} else if (format[1] == 'l') {
// 'rlist: register list for load and store multiple instructions
ASSERT(STRING_STARTS_WITH(format, "rlist"));
@@ -315,6 +337,39 @@
}
+// Handle all VFP register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatVFPRegister(Instr* instr, const char* format) {
+ ASSERT((format[0] == 'S') || (format[0] == 'D'));
+
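+ // For an 'S' register the 4-bit V field is doubled and the extension bit (N, M or D) supplies the low bit of the register number.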
+ if (format[1] == 'n') {
+ int reg = instr->VnField();
+ if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NField()));
+ if (format[0] == 'D') PrintDRegister(reg);
+ return 2;
+ } else if (format[1] == 'm') {
+ int reg = instr->VmField();
+ if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MField()));
+ if (format[0] == 'D') PrintDRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') {
+ int reg = instr->VdField();
+ if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DField()));
+ if (format[0] == 'D') PrintDRegister(reg);
+ return 2;
+ }
+
+ UNREACHABLE();
+ return -1;
+}
+
+
+int Decoder::FormatVFPinstruction(Instr* instr, const char* format) {
+ Print(format);
+ return 0;
+}
+
+
// FormatOption takes a formatting string and interprets it based on
// the current instructions. The format string points to the first
// character of the option string (the option escape has already been
@@ -459,6 +514,13 @@
}
return 1;
}
+ case 'v': {
+ return FormatVFPinstruction(instr, format);
+ }
+ case 'S':
+ case 'D': {
+ return FormatVFPRegister(instr, format);
+ }
case 'w': { // 'w: W field of load and store instructions
if (instr->HasW()) {
Print("!");
@@ -761,8 +823,7 @@
void Decoder::DecodeType6(Instr* instr) {
- // Coprocessor instructions currently not supported.
- Unknown(instr);
+ DecodeType6CoprocessorIns(instr);
}
@@ -770,12 +831,10 @@
if (instr->Bit(24) == 1) {
Format(instr, "swi'cond 'swi");
} else {
- // Coprocessor instructions currently not supported.
- Unknown(instr);
+ DecodeTypeVFP(instr);
}
}
-
void Decoder::DecodeUnconditional(Instr* instr) {
if (instr->Bits(7, 4) == 0xB && instr->Bits(27, 25) == 0 && instr->HasL()) {
Format(instr, "'memop'h'pu 'rd, ");
@@ -837,6 +896,136 @@
}
+// void Decoder::DecodeTypeVFP(Instr* instr)
+// Implements the following VFP instructions:
+// fmsr: Sn = Rt
+// fmrs: Rt = Sn
+// fsitod: Dd = Sm
+// ftosid: Sd = Dm
+// Dd = faddd(Dn, Dm)
+// Dd = fsubd(Dn, Dm)
+// Dd = fmuld(Dn, Dm)
+// Dd = fdivd(Dn, Dm)
+// vcmp(Dd, Dm)
+// VMRS
+void Decoder::DecodeTypeVFP(Instr* instr) {
+ ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0));
+
+ if (instr->Bit(23) == 1) {
+ if ((instr->Bits(21, 19) == 0x7) &&
+ (instr->Bits(18, 16) == 0x5) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
+ } else if ((instr->Bits(21, 19) == 0x7) &&
+ (instr->Bits(18, 16) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(7) == 1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
+ } else if ((instr->Bit(21) == 0x0) &&
+ (instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
+ } else if ((instr->Bits(21, 20) == 0x3) &&
+ (instr->Bits(19, 16) == 0x4) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0x1) &&
+ (instr->Bit(4) == 0x0)) {
+ Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
+ } else if ((instr->Bits(23, 20) == 0xF) &&
+ (instr->Bits(19, 16) == 0x1) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(7, 5) == 0x0) &&
+ (instr->Bit(4) == 0x1) &&
+ (instr->Bits(3, 0) == 0x0)) {
+ if (instr->Bits(15, 12) == 0xF)
+ Format(instr, "vmrs'cond APSR, FPSCR");
+ else
+ Unknown(instr); // Not used by V8.
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if (instr->Bit(21) == 1) {
+ if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
+ } else if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
+ } else if ((instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else {
+ if ((instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(6, 5) == 0x0) &&
+ (instr->Bit(4) == 1) &&
+ (instr->Bits(3, 0) == 0x0)) {
+ Format(instr, "vmov'cond 'Sn, 'rt");
+ } else if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(6, 5) == 0x0) &&
+ (instr->Bit(4) == 1) &&
+ (instr->Bits(3, 0) == 0x0)) {
+ Format(instr, "vmov'cond 'rt, 'Sn");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ }
+}
+
+
+// Decode Type 6 coprocessor instructions.
+// Dm = fmdrr(Rt, Rt2)
+// <Rt, Rt2> = fmrrd(Dm)
+void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
+ ASSERT((instr->TypeField() == 6));
+
+ if (instr->Bit(23) == 1) {
+ Unknown(instr); // Not used by V8.
+ } else if (instr->Bit(22) == 1) {
+ if ((instr->Bits(27, 24) == 0xC) &&
+ (instr->Bit(22) == 1) &&
+ (instr->Bits(11, 8) == 0xB) &&
+ (instr->Bits(7, 6) == 0x0) &&
+ (instr->Bit(4) == 1)) {
+ if (instr->Bit(20) == 0) {
+ Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
+ } else if (instr->Bit(20) == 1) {
+ Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
+ }
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if (instr->Bit(21) == 1) {
+ Unknown(instr); // Not used by V8.
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+}
+
+
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte* instr_ptr) {
Instr* instr = Instr::At(instr_ptr);
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
index 97feae5..45cab55 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/fast-codegen-arm.cc
@@ -28,6 +28,8 @@
#include "v8.h"
#include "codegen-inl.h"
+#include "compiler.h"
+#include "debug.h"
#include "fast-codegen.h"
#include "parser.h"
@@ -52,38 +54,94 @@
// frames-arm.h for its layout.
void FastCodeGenerator::Generate(FunctionLiteral* fun) {
function_ = fun;
- // ARM does NOT call SetFunctionPosition.
+ SetFunctionPosition(fun);
+ int locals_count = fun->scope()->num_stack_slots();
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ if (locals_count > 0) {
+ // Load undefined value here, so the value is ready for the loop below.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ }
// Adjust fp to point to caller's fp.
__ add(fp, sp, Operand(2 * kPointerSize));
{ Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = fun->scope()->num_stack_slots();
- if (locals_count > 0) {
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- }
- if (FLAG_check_stack) {
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- }
for (int i = 0; i < locals_count; i++) {
__ push(ip);
}
}
- if (FLAG_check_stack) {
- // Put the lr setup instruction in the delay slot. The kInstrSize is
- // added to the implicit 8 byte offset that always applies to operations
- // with pc and gives a return address 12 bytes down.
- Comment cmnt(masm_, "[ Stack check");
- __ add(lr, pc, Operand(Assembler::kInstrSize));
- __ cmp(sp, Operand(r2));
- StackCheckStub stub;
- __ mov(pc,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ if (fun->scope()->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is in r1.
+ __ push(r1);
+ __ CallRuntime(Runtime::kNewContext, 1);
+ function_in_register = false;
+ // Context is returned in both r0 and cp. It replaces the context
+ // passed to us. It's saved in the stack and kept live in cp.
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = fun->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = fun->scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ ldr(r0, MemOperand(fp, parameter_offset));
+ // Store it in the context
+ __ str(r0, MemOperand(cp, Context::SlotOffset(slot->index())));
+ }
+ }
+ }
+
+ Variable* arguments = fun->scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register) {
+ // Load this again, if it's used by the local context below.
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ mov(r3, r1);
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
+ fun->num_parameters() * kPointerSize));
+ __ mov(r1, Operand(Smi::FromInt(fun->num_parameters())));
+ __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ // Duplicate the value; move-to-slot operation might clobber registers.
+ __ mov(r3, r0);
+ Move(arguments->slot(), r0, r1, r2);
+ Slot* dot_arguments_slot =
+ fun->scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, r3, r1, r2);
+ }
+
+ // Check the stack for overflow or break request.
+ // Put the lr setup instruction in the delay slot. The kInstrSize is
+ // added to the implicit 8 byte offset that always applies to operations
+ // with pc and gives a return address 12 bytes down.
+ { Comment cmnt(masm_, "[ Stack check");
+ __ LoadRoot(r2, Heap::kStackLimitRootIndex);
+ __ add(lr, pc, Operand(Assembler::kInstrSize));
+ __ cmp(sp, Operand(r2));
+ StackCheckStub stub;
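+ // The mov into pc is conditional (lo), so the stub is entered only when sp is below the limit; lr already points past this sequence.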
+ __ mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
}
{ Comment cmnt(masm_, "[ Declarations");
@@ -95,14 +153,26 @@
}
{ Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
VisitStatements(fun->body());
+ ASSERT(loop_depth() == 0);
}
{ Comment cmnt(masm_, "[ return <undefined>;");
// Emit a 'return undefined' in case control fell off the end of the
// body.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- SetReturnPosition(fun);
+ }
+ EmitReturnSequence(function_->end_position());
+}
+
+
+void FastCodeGenerator::EmitReturnSequence(int position) {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ } else {
+ __ bind(&return_label_);
if (FLAG_trace) {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in r0.
@@ -110,12 +180,332 @@
__ CallRuntime(Runtime::kTraceExit, 1);
}
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+
+ // Calculate the exact length of the return sequence and make sure that
+ // the constant pool is not emitted inside of the return sequence.
+ int num_parameters = function_->scope()->num_parameters();
+ int32_t sp_delta = (num_parameters + 1) * kPointerSize;
+ int return_sequence_length = Assembler::kJSReturnSequenceLength;
+ if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
+ // Additional mov instruction generated.
+ return_sequence_length++;
+ }
+ masm_->BlockConstPoolFor(return_sequence_length);
+
+ CodeGenerator::RecordPositions(masm_, position);
__ RecordJSReturn();
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
- int num_parameters = function_->scope()->num_parameters();
- __ add(sp, sp, Operand((num_parameters + 1) * kPointerSize));
+ __ add(sp, sp, Operand(sp_delta));
__ Jump(lr);
+
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger. The add instruction above is an addressing
+ // mode 1 instruction where there are restrictions on which immediate values
+ // can be encoded in the instruction and which immediate values require
+ // use of an additional instruction for moving the immediate to a temporary
+ // register.
+ ASSERT_EQ(return_sequence_length,
+ masm_->InstructionsGeneratedSince(&check_exit_codesize));
+ }
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context, Register source) {
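+ // For the hybrid contexts the pushed value survives on only one branch: kValueTest keeps it when the test succeeds, kTestValue when it fails.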
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ push(source);
+ break;
+ case Expression::kTest:
+ TestAndBranch(source, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ push(source);
+ TestAndBranch(source, true_label_, &discard);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ push(source);
+ TestAndBranch(source, &discard, false_label_);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(true_label_);
+ }
+ }
+}
+
+
+template <>
+MemOperand FastCodeGenerator::CreateSlotOperand<MemOperand>(
+ Slot* source,
+ Register scratch) {
+ switch (source->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return MemOperand(fp, SlotOffset(source));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(source->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, source->index());
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ // Fall-through.
+ default:
+ UNREACHABLE();
+ return MemOperand(r0, 0); // Dead code to make the compiler happy.
+ }
+}
+
+
+void FastCodeGenerator::Move(Register dst, Slot* source) {
+ // Use dst as scratch.
+ MemOperand location = CreateSlotOperand<MemOperand>(source, dst);
+ __ ldr(dst, location);
+}
+
+
+
+void FastCodeGenerator::Move(Expression::Context context,
+ Slot* source,
+ Register scratch) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue: // Fall through.
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest: // Fall through.
+ case Expression::kTestValue:
+ Move(scratch, source);
+ Move(context, scratch);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context, Literal* expr) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue: // Fall through.
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest: // Fall through.
+ case Expression::kTestValue:
+ __ mov(ip, Operand(expr->handle()));
+ Move(context, ip);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Slot* dst,
+ Register src,
+ Register scratch1,
+ Register scratch2) {
+ switch (dst->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ __ str(src, MemOperand(fp, SlotOffset(dst)));
+ break;
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(dst->var()->scope());
+ __ LoadContext(scratch1, context_chain_length);
+ int index = Context::SlotOffset(dst->index());
+ __ mov(scratch2, Operand(index));
+ __ str(src, MemOperand(scratch1, index));
+ __ RecordWrite(scratch1, scratch2, src);
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+
+void FastCodeGenerator::DropAndMove(Expression::Context context,
+ Register source,
+ int drop_count) {
+ ASSERT(drop_count > 0);
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ __ add(sp, sp, Operand(drop_count * kPointerSize));
+ break;
+ case Expression::kValue:
+ if (drop_count > 1) {
+ __ add(sp, sp, Operand((drop_count - 1) * kPointerSize));
+ }
+ __ str(source, MemOperand(sp));
+ break;
+ case Expression::kTest:
+ ASSERT(!source.is(sp));
+ __ add(sp, sp, Operand(drop_count * kPointerSize));
+ TestAndBranch(source, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (drop_count > 1) {
+ __ add(sp, sp, Operand((drop_count - 1) * kPointerSize));
+ }
+ __ str(source, MemOperand(sp));
+ TestAndBranch(source, true_label_, &discard);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (drop_count > 1) {
+ __ add(sp, sp, Operand((drop_count - 1) * kPointerSize));
+ }
+ __ str(source, MemOperand(sp));
+ TestAndBranch(source, &discard, false_label_);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(true_label_);
+ break;
+ }
+ }
+}
+
+
+void FastCodeGenerator::TestAndBranch(Register source,
+ Label* true_label,
+ Label* false_label) {
+ ASSERT_NE(NULL, true_label);
+ ASSERT_NE(NULL, false_label);
+ // Call the runtime to find the boolean value of the source and then
+ // translate it into control flow to the pair of labels.
+ __ push(source);
+ __ CallRuntime(Runtime::kToBool, 1);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(eq, true_label);
+ __ jmp(false_label);
+}
+
+
+void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+ Comment cmnt(masm_, "[ Declaration");
+ Variable* var = decl->proxy()->var();
+ ASSERT(var != NULL); // Must have been resolved.
+ Slot* slot = var->slot();
+ Property* prop = var->AsProperty();
+
+ if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::PARAMETER: // Fall through.
+ case Slot::LOCAL:
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(ip);
+ __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+ }
+ break;
+
+ case Slot::CONTEXT:
+ // The variable in the decl always resides in the current context.
+ ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ if (FLAG_debug_code) {
+ // Check if we have the correct context pointer.
+ __ ldr(r1,
+ CodeGenerator::ContextOperand(cp, Context::FCONTEXT_INDEX));
+ __ cmp(r1, cp);
+ __ Check(eq, "Unexpected declaration in current context.");
+ }
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
+ // No write barrier since the_hole_value is in old space.
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(r0);
+ __ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
+ int offset = Context::SlotOffset(slot->index());
+ __ mov(r2, Operand(offset));
+ // We know that we have written a function, which is not a smi.
+ __ RecordWrite(cp, r2, r0);
+ }
+ break;
+
+ case Slot::LOOKUP: {
+ __ mov(r2, Operand(var->name()));
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(decl->mode() == Variable::VAR ||
+ decl->mode() == Variable::CONST);
+ PropertyAttributes attr =
+ (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+ __ mov(r1, Operand(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+ } else if (decl->fun() != NULL) {
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
+ Visit(decl->fun()); // Initial value for function decl.
+ } else {
+ __ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
+ __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+
+ } else if (prop != NULL) {
+ if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+ // We are declaring a function or constant that rewrites to a
+ // property. Use (keyed) IC to set the initial value.
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->key()->context());
+ Visit(prop->key());
+
+ if (decl->fun() != NULL) {
+ ASSERT_EQ(Expression::kValue, decl->fun()->context());
+ Visit(decl->fun());
+ __ pop(r0);
+ } else {
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ }
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // Value in r0 is ignored (declarations are statements). Receiver
+ // and key on stack are discarded.
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ }
}
}
@@ -131,50 +521,18 @@
}
-void FastCodeGenerator::VisitBlock(Block* stmt) {
- Comment cmnt(masm_, "[ Block");
- SetStatementPosition(stmt);
- VisitStatements(stmt->statements());
-}
-
-
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Comment cmnt(masm_, "[ ExpressionStatement");
- SetStatementPosition(stmt);
- Visit(stmt->expression());
-}
-
-
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
- SetStatementPosition(stmt);
Expression* expr = stmt->expression();
- Visit(expr);
-
- // Complete the statement based on the location of the subexpression.
- Location source = expr->location();
- ASSERT(!source.is_nowhere());
- if (source.is_temporary()) {
- __ pop(r0);
- } else {
- ASSERT(source.is_constant());
- ASSERT(expr->AsLiteral() != NULL);
+ // Complete the statement based on the type of the subexpression.
+ if (expr->AsLiteral() != NULL) {
__ mov(r0, Operand(expr->AsLiteral()->handle()));
+ } else {
+ ASSERT_EQ(Expression::kValue, expr->context());
+ Visit(expr);
+ __ pop(r0);
}
-
- if (FLAG_trace) {
- // Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
- __ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
-
- __ RecordJSReturn();
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- int num_parameters = function_->scope()->num_parameters();
- __ add(sp, sp, Operand((num_parameters + 1) * kPointerSize));
- __ Jump(lr);
+ EmitReturnSequence(stmt->statement_pos());
}
@@ -182,7 +540,8 @@
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(expr, script_, this);
if (HasStackOverflow()) return;
ASSERT(boilerplate->IsBoilerplate());
@@ -191,12 +550,7 @@
__ mov(r0, Operand(boilerplate));
__ stm(db_w, sp, cp.bit() | r0.bit());
__ CallRuntime(Runtime::kNewClosure, 2);
-
- if (expr->location().is_temporary()) {
- __ push(r0);
- } else {
- ASSERT(expr->location().is_nowhere());
- }
+ Move(expr->context(), r0);
}
@@ -204,6 +558,7 @@
Comment cmnt(masm_, "[ VariableProxy");
Expression* rewrite = expr->var()->rewrite();
if (rewrite == NULL) {
+ ASSERT(expr->var()->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object on the stack.
@@ -212,30 +567,69 @@
__ mov(r2, Operand(expr->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- if (expr->location().is_temporary()) {
- // Replace the global object with the result.
- __ str(r0, MemOperand(sp));
- } else {
- ASSERT(expr->location().is_nowhere());
- __ pop();
- }
-
- } else {
- Comment cmnt(masm_, "Stack slot");
+ DropAndMove(expr->context(), r0);
+ } else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
- ASSERT(slot != NULL);
- if (expr->location().is_temporary()) {
- __ ldr(ip, MemOperand(fp, SlotOffset(slot)));
- __ push(ip);
- } else {
- ASSERT(expr->location().is_nowhere());
+ if (FLAG_debug_code) {
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER: {
+ Comment cmnt(masm_, "Stack slot");
+ break;
+ }
+ case Slot::CONTEXT: {
+ Comment cmnt(masm_, "Context slot");
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+ Move(expr->context(), slot, r0);
+ } else {
+ // A variable has been rewritten into an explicit access to
+ // an object property.
+ Property* property = rewrite->AsProperty();
+ ASSERT_NOT_NULL(property);
+
+ // Currently the only parameter expressions that can occur are
+ // of the form "slot[literal]".
+
+ // Check that the object is in a slot.
+ Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(object_var);
+ Slot* object_slot = object_var->slot();
+ ASSERT_NOT_NULL(object_slot);
+
+ // Load the object.
+ Move(r2, object_slot);
+
+ // Check that the key is a smi.
+ Literal* key_literal = property->key()->AsLiteral();
+ ASSERT_NOT_NULL(key_literal);
+ ASSERT(key_literal->handle()->IsSmi());
+
+ // Load the key.
+ __ mov(r1, Operand(key_literal->handle()));
+
+ // Push both as arguments to the IC.
+ __ stm(db_w, sp, r2.bit() | r1.bit());
+
+ // Do a KEYED property load.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // Drop key and object left on the stack by IC, and push the result.
+ DropAndMove(expr->context(), r0, 2);
}
}
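The asserts in the rewritten-property branch above amount to a shape check on the AST. Stated as a standalone predicate (a sketch using the same accessors as the diff, so it is only meaningful inside v8::internal):

// Sketch: the parser only rewrites parameters captured by the arguments
// object into accesses of the shape slot[literal-smi]; anything else
// would fail the asserts above.
static bool IsSlotIndexedByLiteralSmi(Property* property) {
  VariableProxy* proxy = property->obj()->AsVariableProxy();
  if (proxy == NULL) return false;
  Variable* object_var = proxy->AsVariable();
  if (object_var == NULL || object_var->slot() == NULL) return false;
  Literal* key = property->key()->AsLiteral();
  return key != NULL && key->handle()->IsSmi();
}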
void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExp Literal");
+ Comment cmnt(masm_, "[ RegExpLiteral");
Label done;
// Registers will be used as follows:
// r4 = JS function, literals array
@@ -257,10 +651,132 @@
__ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
__ bind(&done);
- if (expr->location().is_temporary()) {
- __ push(r0);
+ Move(expr->context(), r0);
+}
+
+
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+ Label boilerplate_exists;
+ __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ // r2 = literal array (0).
+ __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ ldr(r0, FieldMemOperand(r2, literal_offset));
+ // Check whether we need to materialize the object literal boilerplate.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, Operand(ip));
+ __ b(ne, &boilerplate_exists);
+ // Create boilerplate if it does not exist.
+ // r1 = literal index (1).
+ __ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
+ // r0 = constant properties (2).
+ __ mov(r0, Operand(expr->constant_properties()));
+ __ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ bind(&boilerplate_exists);
+ // r0 contains boilerplate.
+ // Clone boilerplate.
+ __ push(r0);
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
} else {
- ASSERT(expr->location().is_nowhere());
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+ }
+
+ // If result_saved == true: The result is saved on top of the
+ // stack and in r0.
+ // If result_saved == false: The result is not on the stack, only in r0.
+ bool result_saved = false;
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(r0); // Save result on stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL: // Fall through.
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->handle()->IsSymbol()) {
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ pop(r0);
+ __ mov(r2, Operand(key->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // StoreIC leaves the receiver on the stack.
+ __ ldr(r0, MemOperand(sp)); // Restore result into r0.
+ break;
+ }
+ // Fall through.
+
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ push(r0);
+ Visit(key);
+ ASSERT_EQ(Expression::kValue, key->context());
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ CallRuntime(Runtime::kSetProperty, 3);
+ __ ldr(r0, MemOperand(sp)); // Restore result into r0.
+ break;
+
+ case ObjectLiteral::Property::GETTER: // Fall through.
+ case ObjectLiteral::Property::SETTER:
+ __ push(r0);
+ Visit(key);
+ ASSERT_EQ(Expression::kValue, key->context());
+ __ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
+ Smi::FromInt(1) :
+ Smi::FromInt(0)));
+ __ push(r1);
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ __ ldr(r0, MemOperand(sp)); // Restore result into r0
+ break;
+ }
+ }
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ if (result_saved) __ pop();
+ break;
+ case Expression::kValue:
+ if (!result_saved) __ push(r0);
+ break;
+ case Expression::kTest:
+ if (result_saved) __ pop(r0);
+ TestAndBranch(r0, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (!result_saved) __ push(r0);
+ TestAndBranch(r0, true_label_, &discard);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (!result_saved) __ push(r0);
+ TestAndBranch(r0, &discard, false_label_);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(true_label_);
+ break;
+ }
}
}
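The five-way switch above reappears in almost every visitor in this patch; the protocol is easiest to see as a gloss on the expression-context enum. Enumerator names are taken from the diff; the comments are our reading of the switch arms, not authoritative documentation:

// How each context is handled once a result sits in r0:
enum ExpressionContext {
  kUninitialized,  // Never reaches the code generator.
  kEffect,         // Value unused: discard it.
  kValue,          // Leave the value on top of the stack.
  kTest,           // Branch to true_label_/false_label_; value discarded.
  kValueTest,      // Like kTest, but keep the value on the true path.
  kTestValue       // Like kTest, but keep the value on the false path.
};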
@@ -314,7 +830,7 @@
result_saved = true;
}
Visit(subexpr);
- ASSERT(subexpr->location().is_temporary());
+ ASSERT_EQ(Expression::kValue, subexpr->context());
// Store the subexpression value in the array's elements.
__ pop(r0); // Subexpression value.
@@ -329,211 +845,864 @@
__ RecordWrite(r1, r2, r0);
}
- Location destination = expr->location();
- if (destination.is_nowhere() && result_saved) {
- __ pop();
- } else if (destination.is_temporary() && !result_saved) {
- __ push(r0);
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ if (result_saved) __ pop();
+ break;
+ case Expression::kValue:
+ if (!result_saved) __ push(r0);
+ break;
+ case Expression::kTest:
+ if (result_saved) __ pop(r0);
+ TestAndBranch(r0, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (!result_saved) __ push(r0);
+ TestAndBranch(r0, true_label_, &discard);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (!result_saved) __ push(r0);
+ TestAndBranch(r0, &discard, false_label_);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(true_label_);
+ break;
+ }
}
}
-void FastCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
- Expression* rhs = expr->value();
- Visit(rhs);
-
- // Left-hand side can only be a global or a (parameter or local) slot.
+void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
-
- // Complete the assignment based on the location of the right-hand-side
- // value and the desired location of the assignment value.
- Location destination = expr->location();
- Location source = rhs->location();
- ASSERT(!destination.is_constant());
- ASSERT(!source.is_nowhere());
-
if (var->is_global()) {
- // Assignment to a global variable, use inline caching. Right-hand-side
- // value is passed in r0, variable name in r2, and the global object on
- // the stack.
- if (source.is_temporary()) {
- __ pop(r0);
- } else {
- ASSERT(source.is_constant());
- ASSERT(rhs->AsLiteral() != NULL);
- __ mov(r0, Operand(rhs->AsLiteral()->handle()));
- }
+ // Assignment to a global variable. Use inline caching for the
+ // assignment. Right-hand-side value is passed in r0, variable name in
+ // r2, and the global object on the stack.
+ __ pop(r0);
__ mov(r2, Operand(var->name()));
__ ldr(ip, CodeGenerator::GlobalObject());
__ push(ip);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Overwrite the global object on the stack with the result if needed.
- if (destination.is_temporary()) {
- __ str(r0, MemOperand(sp));
- } else {
- ASSERT(destination.is_nowhere());
- __ pop();
- }
+ DropAndMove(expr->context(), r0);
- } else {
- if (source.is_temporary()) {
- if (destination.is_temporary()) {
- // Case 'temp1 <- (var = temp0)'. Preserve right-hand-side
- // temporary on the stack.
- __ ldr(ip, MemOperand(sp));
- } else {
- ASSERT(destination.is_nowhere());
- // Case 'var = temp'. Discard right-hand-side temporary.
- __ pop(ip);
+ } else if (var->slot()) {
+ Slot* slot = var->slot();
+ ASSERT_NOT_NULL(slot); // Variables rewritten as properties not handled.
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER: {
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Perform assignment and discard value.
+ __ pop(r0);
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ break;
+ case Expression::kValue:
+ // Perform assignment and preserve value.
+ __ ldr(r0, MemOperand(sp));
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ break;
+ case Expression::kTest:
+ // Perform assignment and test (and discard) value.
+ __ pop(r0);
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ TestAndBranch(r0, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ ldr(r0, MemOperand(sp));
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ TestAndBranch(r0, true_label_, &discard);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ ldr(r0, MemOperand(sp));
+ __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ TestAndBranch(r0, &discard, false_label_);
+ __ bind(&discard);
+ __ pop();
+ __ jmp(true_label_);
+ break;
+ }
+ }
+ break;
}
- __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
- } else {
- ASSERT(source.is_constant());
- ASSERT(rhs->AsLiteral() != NULL);
- // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
- // discarded result. Always perform the assignment.
- __ mov(ip, Operand(rhs->AsLiteral()->handle()));
- __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
- if (destination.is_temporary()) {
- // Case 'temp <- (var = constant)'. Save result.
- __ push(ip);
+
+ case Slot::CONTEXT: {
+ int chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ if (chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ __ ldr(r0, CodeGenerator::ContextOperand(cp, Context::CLOSURE_INDEX));
+ // Load the function context (which is the incoming, outer context).
+ __ ldr(r0, FieldMemOperand(r0, JSFunction::kContextOffset));
+ for (int i = 1; i < chain_length; i++) {
+ __ ldr(r0,
+ CodeGenerator::ContextOperand(r0, Context::CLOSURE_INDEX));
+ __ ldr(r0, FieldMemOperand(r0, JSFunction::kContextOffset));
+ }
+ } else { // Slot is in the current context. Generate optimized code.
+ __ mov(r0, cp);
+ }
+ // The context may be an intermediate context, not a function context.
+ __ ldr(r0, CodeGenerator::ContextOperand(r0, Context::FCONTEXT_INDEX));
+ __ pop(r1);
+ __ str(r1, CodeGenerator::ContextOperand(r0, slot->index()));
+
+ // RecordWrite may destroy all its register arguments.
+ if (expr->context() == Expression::kValue) {
+ __ push(r1);
+ } else if (expr->context() != Expression::kEffect) {
+ __ mov(r3, r1);
+ }
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+
+ // Update the write barrier for the array store with r0 as the scratch
+ // register. Skip the write barrier if the value written (r1) is a smi.
+ // The smi test is part of RecordWrite on other platforms, not on ARM.
+ Label exit;
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &exit);
+
+ __ mov(r2, Operand(offset));
+ __ RecordWrite(r0, r2, r1);
+ __ bind(&exit);
+ if (expr->context() != Expression::kEffect &&
+ expr->context() != Expression::kValue) {
+ Move(expr->context(), r3);
+ }
+ break;
}
+
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ break;
}
}
}
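The Slot::CONTEXT arm above walks the static context chain by hand; the same walk is factored into MacroAssembler::LoadContext in the macro-assembler hunk further down. As plain C++ over hypothetical heap accessors (closure(), context(), fcontext() are stand-ins for the generated loads, not real V8 API):

// Follow the closure's context link chain_length times, then step from a
// possibly intermediate context to the enclosing function context.
Context* ResolveContext(Context* current, int chain_length) {
  for (int i = 0; i < chain_length; i++) {
    current = current->closure()->context();
  }
  return current->fcontext();
}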
+void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value.
+ __ push(ip);
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ __ pop(r0);
+ __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(r0); // Result of assignment, saved even if not needed.
+ __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value.
+ __ push(ip);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(r0);
+ }
+
+ DropAndMove(expr->context(), r0);
+}
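The quadratic behavior the comments above guard against comes from growing a fast-properties backing store one field at a time. A toy model, deliberately not V8-specific, of why bracketing the block with ToSlowProperties/ToFastProperties pays off:

#include <vector>

// If every property add copies the backing store to grow it by one slot,
// n adds cost O(n^2) copied words; a dictionary-backed ("slow") object
// makes each add O(1) amortized, so the whole block is added slow and
// converted back to fast once at the end.
struct FastObject {
  std::vector<int> fields;
  void AddProperty(int value) {
    std::vector<int> grown(fields.begin(), fields.end());  // O(n) copy.
    grown.push_back(value);
    fields.swap(grown);
  }
};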
+
+
+void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ // Receiver is under the key and value.
+ __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
+ __ push(ip);
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ __ pop(r0);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(r0); // Result of assignment, saved even if not needed.
+ // Receiver is under the key and value.
+ __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
+ __ push(ip);
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(r0);
+ }
+
+ // Receiver and key are still on stack.
+ __ add(sp, sp, Operand(2 * kPointerSize));
+ Move(expr->context(), r0);
+}
+
+
+void FastCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+ uint32_t dummy;
+
+ // Record the source position for the property load.
+ SetSourcePosition(expr->position());
+
+ // Evaluate receiver.
+ Visit(expr->obj());
+
+ if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
+ !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
+ // Do a NAMED property load.
+ // The IC expects the property name in r2 and the receiver on the stack.
+ __ mov(r2, Operand(key->AsLiteral()->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ } else {
+ // Do a KEYED property load.
+ Visit(expr->key());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Drop key and receiver left on the stack by IC.
+ __ pop();
+ }
+ DropAndMove(expr->context(), r0);
+}
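The condition selecting the named load above is dense enough to be worth isolating. As a predicate (a sketch using the diff's own AST calls, meaningful only inside v8::internal):

// A property load is NAMED when the key is a symbol literal that does
// not parse as an array index; every other key takes the KEYED path.
static bool UseNamedLoad(Expression* key) {
  uint32_t dummy;
  Literal* literal = key->AsLiteral();
  return literal != NULL &&
         literal->handle()->IsSymbol() &&
         !String::cast(*literal->handle())->AsArrayIndex(&dummy);
}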
+
+void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ // Call the IC initialization code.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ Call(ic, reloc_info);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), r0);
+}
+
+
+void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ }
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NOT_IN_LOOP);
+ __ CallStub(&stub);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), r0);
+}
+
+
void FastCodeGenerator::VisitCall(Call* expr) {
Comment cmnt(masm_, "[ Call");
Expression* fun = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
Variable* var = fun->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL && !var->is_this() && var->is_global());
- ASSERT(!var->is_possibly_eval());
- __ mov(r1, Operand(var->name()));
- // Push global object as receiver.
+ if (var != NULL && var->is_possibly_eval()) {
+ // Call to the identifier 'eval'.
+ UNREACHABLE();
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Call to a global variable.
+ __ mov(r1, Operand(var->name()));
+ // Push global object as receiver for the call IC lookup.
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ stm(db_w, sp, r1.bit() | r0.bit());
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // Call to a lookup slot.
+ UNREACHABLE();
+ } else if (fun->AsProperty() != NULL) {
+ // Call to an object property.
+ Property* prop = fun->AsProperty();
+ Literal* key = prop->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsSymbol()) {
+ // Call to a named property, use call IC.
+ __ mov(r0, Operand(key->handle()));
+ __ push(r0);
+ Visit(prop->obj());
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
+ } else {
+ // Call to a keyed property, use keyed load IC followed by function
+ // call.
+ Visit(prop->obj());
+ Visit(prop->key());
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Load receiver object into r1.
+ if (prop->is_synthetic()) {
+ __ ldr(r1, CodeGenerator::GlobalObject());
+ } else {
+ __ ldr(r1, MemOperand(sp, kPointerSize));
+ }
+ // Overwrite (object, key) with (function, receiver).
+ __ str(r0, MemOperand(sp, kPointerSize));
+ __ str(r1, MemOperand(sp));
+ EmitCallWithStub(expr);
+ }
+ } else {
+ // Call to some other expression. If the expression is an anonymous
+ // function literal not called in a loop, mark it as one that should
+ // also use the fast code generator.
+ FunctionLiteral* lit = fun->AsFunctionLiteral();
+ if (lit != NULL &&
+ lit->name()->Equals(Heap::empty_string()) &&
+ loop_depth() == 0) {
+ lit->set_try_fast_codegen(true);
+ }
+ Visit(fun);
+ // Load global receiver object.
+ __ ldr(r1, CodeGenerator::GlobalObject());
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ __ push(r1);
+ // Emit function call.
+ EmitCallWithStub(expr);
+ }
+}
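The final arm of the dispatch above contains the one heuristic in this function. Pulled out as a helper (a hypothetical refactoring; the calls themselves appear verbatim in the diff):

// An anonymous function literal invoked outside any loop is marked so
// that its own body is also compiled with the fast code generator.
static bool ShouldTryFastCodegen(FunctionLiteral* lit, int loop_depth) {
  return lit != NULL &&
         lit->name()->Equals(Heap::empty_string()) &&
         loop_depth == 0;
}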
+
+
+void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+ // Push function on the stack.
+ Visit(expr->expression());
+ ASSERT_EQ(Expression::kValue, expr->expression()->context());
+
+ // Push global object (receiver).
__ ldr(r0, CodeGenerator::GlobalObject());
- __ stm(db_w, sp, r1.bit() | r0.bit());
+ __ push(r0);
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Visit(args->at(i));
- ASSERT(!args->at(i)->location().is_nowhere());
- if (args->at(i)->location().is_constant()) {
- ASSERT(args->at(i)->AsLiteral() != NULL);
- __ mov(r0, Operand(args->at(i)->AsLiteral()->handle()));
- __ push(r0);
- }
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ // In a value context the argument is already on the stack,
+ // so there is nothing to do here.
}
- // Record source position for debugger
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
- NOT_IN_LOOP);
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- if (expr->location().is_temporary()) {
- __ str(r0, MemOperand(sp));
- } else {
- ASSERT(expr->location().is_nowhere());
- __ pop();
- }
+
+ // Load function, arg_count into r1 and r0.
+ __ mov(r0, Operand(arg_count));
+ // Function is on the stack at sp[(arg_count + 1) * kPointerSize].
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+
+ Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+ // Replace function on TOS with result in r0, or pop it.
+ DropAndMove(expr->context(), r0);
}
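The MemOperand arithmetic in VisitCallNew assumes a fixed stack picture: arguments on top, the receiver one slot below them, and the function below that. A small compile-time check of the offset, assuming the 32-bit pointer size of this port:

#include <cstddef>

// Stack before JSConstructCall, per the code above:
//   sp + 0 .. sp + (arg_count - 1) * kPointerSize   arguments
//   sp + arg_count * kPointerSize                   global object (receiver)
//   sp + (arg_count + 1) * kPointerSize             function
constexpr size_t kPointerSizeArm = 4;  // assumption: 32-bit ARM
constexpr size_t FunctionSlotOffset(int arg_count) {
  return (arg_count + 1) * kPointerSizeArm;
}
static_assert(FunctionSlotOffset(2) == 12,
              "with two arguments the function sits at sp + 12");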
void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
- Runtime::Function* function = expr->function();
- ASSERT(function != NULL);
+ if (expr->is_jsruntime()) {
+ // Prepare for calling JS runtime function.
+ __ mov(r1, Operand(expr->name()));
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
+ __ stm(db_w, sp, r1.bit() | r0.bit());
+ }
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Visit(args->at(i));
- ASSERT(!args->at(i)->location().is_nowhere());
- if (args->at(i)->location().is_constant()) {
- ASSERT(args->at(i)->AsLiteral() != NULL);
- __ mov(r0, Operand(args->at(i)->AsLiteral()->handle()));
- __ push(r0);
- } else {
- ASSERT(args->at(i)->location().is_temporary());
- // If location is temporary, it is already on the stack,
- // so nothing to do here.
- }
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
}
- __ CallRuntime(function, arg_count);
- if (expr->location().is_temporary()) {
- __ push(r0);
+ if (expr->is_jsruntime()) {
+ // Call the JS runtime function.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), r0);
} else {
- ASSERT(expr->location().is_nowhere());
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ Move(expr->context(), r0);
+ }
+}
+
+
+void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ Visit(expr->expression());
+ ASSERT_EQ(Expression::kEffect, expr->expression()->context());
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ push(ip);
+ break;
+ case Expression::kTestValue:
+ // The value (undefined) is falsy, but kTestValue needs it on the
+ // false path.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ push(ip);
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest:
+ __ jmp(false_label_);
+ break;
+ }
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ ASSERT_EQ(Expression::kTest, expr->expression()->context());
+
+ Label push_true;
+ Label push_false;
+ Label done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ true_label_ = &push_false;
+ false_label_ = &push_true;
+ Visit(expr->expression());
+ __ bind(&push_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ push(ip);
+ __ jmp(&done);
+ __ bind(&push_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ push(ip);
+ __ bind(&done);
+ break;
+
+ case Expression::kEffect:
+ true_label_ = &done;
+ false_label_ = &done;
+ Visit(expr->expression());
+ __ bind(&done);
+ break;
+
+ case Expression::kTest:
+ true_label_ = saved_false;
+ false_label_ = saved_true;
+ Visit(expr->expression());
+ break;
+
+ case Expression::kValueTest:
+ true_label_ = saved_false;
+ false_label_ = &push_true;
+ Visit(expr->expression());
+ __ bind(&push_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ push(ip);
+ __ jmp(saved_true);
+ break;
+
+ case Expression::kTestValue:
+ true_label_ = &push_false;
+ false_label_ = saved_true;
+ Visit(expr->expression());
+ __ bind(&push_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ push(ip);
+ __ jmp(saved_false);
+ break;
+ }
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ ASSERT_EQ(Expression::kValue, expr->expression()->context());
+
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != NULL &&
+ !proxy->var()->is_this() &&
+ proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ ldr(r0, CodeGenerator::GlobalObject());
+ __ push(r0);
+ __ mov(r2, Operand(proxy->name()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ str(r0, MemOperand(sp));
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ mov(r0, Operand(proxy->name()));
+ __ stm(db_w, sp, cp.bit() | r0.bit());
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ push(r0);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ Visit(expr->expression());
+ }
+
+ __ CallRuntime(Runtime::kTypeof, 1);
+ Move(expr->context(), r0);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ Comment cmnt(masm_, "[ CountOperation");
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ ASSERT(proxy->AsVariable() != NULL);
+ ASSERT(proxy->AsVariable()->is_global());
+
+ Visit(proxy);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
+
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kValue: // Fall through
+ case Expression::kTest: // Fall through
+ case Expression::kTestValue: // Fall through
+ case Expression::kValueTest:
+ // Duplicate the result on the stack.
+ __ push(r0);
+ break;
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ }
+ // Call runtime for +1/-1.
+ __ push(r0);
+ __ mov(ip, Operand(Smi::FromInt(1)));
+ __ push(ip);
+ if (expr->op() == Token::INC) {
+ __ CallRuntime(Runtime::kNumberAdd, 2);
+ } else {
+ __ CallRuntime(Runtime::kNumberSub, 2);
+ }
+ // Call Store IC.
+ __ mov(r2, Operand(proxy->AsVariable()->name()));
+ __ ldr(ip, CodeGenerator::GlobalObject());
+ __ push(ip);
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // Clean up the stack after the store IC.
+ __ add(sp, sp, Operand(kPointerSize));
+
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect: // Fall through
+ case Expression::kValue:
+ // Do nothing. The result is on the stack in a value context
+ // and has been discarded in an effect context.
+ break;
+ case Expression::kTest:
+ __ pop(r0);
+ TestAndBranch(r0, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ ldr(r0, MemOperand(sp));
+ TestAndBranch(r0, true_label_, &discard);
+ __ bind(&discard);
+ __ add(sp, sp, Operand(kPointerSize));
+ __ b(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ ldr(r0, MemOperand(sp));
+ TestAndBranch(r0, &discard, false_label_);
+ __ bind(&discard);
+ __ add(sp, sp, Operand(kPointerSize));
+ __ b(true_label_);
+ break;
+ }
}
}
void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- // Compile a short-circuited boolean or operation in a non-test
- // context.
- ASSERT(expr->op() == Token::OR);
- // Compile (e0 || e1) as if it were
- // (let (temp = e0) temp ? temp : e1).
+ Comment cmnt(masm_, "[ BinaryOperation");
+ switch (expr->op()) {
+ case Token::COMMA:
+ ASSERT_EQ(Expression::kEffect, expr->left()->context());
+ ASSERT_EQ(expr->context(), expr->right()->context());
+ Visit(expr->left());
+ Visit(expr->right());
+ break;
- Label done;
- Location destination = expr->location();
- ASSERT(!destination.is_constant());
+ case Token::OR:
+ case Token::AND:
+ EmitLogicalOperation(expr);
+ break;
- Expression* left = expr->left();
- Location left_source = left->location();
- ASSERT(!left_source.is_nowhere());
+ case Token::ADD:
+ case Token::SUB:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ ASSERT_EQ(Expression::kValue, expr->left()->context());
+ ASSERT_EQ(Expression::kValue, expr->right()->context());
- Expression* right = expr->right();
- Location right_source = right->location();
- ASSERT(!right_source.is_nowhere());
+ Visit(expr->left());
+ Visit(expr->right());
+ __ pop(r0);
+ __ pop(r1);
+ GenericBinaryOpStub stub(expr->op(),
+ NO_OVERWRITE);
+ __ CallStub(&stub);
+ Move(expr->context(), r0);
- Visit(left);
- // Call the runtime to find the boolean value of the left-hand
- // subexpression. Duplicate the value if it may be needed as the final
- // result.
- if (left_source.is_temporary()) {
- if (destination.is_temporary()) {
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
+ break;
}
- } else {
- ASSERT(left->AsLiteral() != NULL);
- __ mov(r0, Operand(left->AsLiteral()->handle()));
- __ push(r0);
- if (destination.is_temporary()) __ push(r0);
+ default:
+ UNREACHABLE();
}
- // The left-hand value is in on top of the stack. It is duplicated on the
- // stack iff the destination location is temporary.
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, &done);
-
- // Discard the left-hand value if present on the stack.
- if (destination.is_temporary()) __ pop();
- Visit(right);
-
- // Save or discard the right-hand value as needed.
- if (destination.is_temporary() && right_source.is_constant()) {
- ASSERT(right->AsLiteral() != NULL);
- __ mov(ip, Operand(right->AsLiteral()->handle()));
- __ push(ip);
- } else if (destination.is_nowhere() && right_source.is_temporary()) {
- __ pop();
- }
-
- __ bind(&done);
}
+
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ ASSERT_EQ(Expression::kValue, expr->left()->context());
+ ASSERT_EQ(Expression::kValue, expr->right()->context());
+ Visit(expr->left());
+ Visit(expr->right());
+
+ // Convert current context to test context: Pre-test code.
+ Label push_true;
+ Label push_false;
+ Label done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ true_label_ = &push_true;
+ false_label_ = &push_false;
+ break;
+
+ case Expression::kEffect:
+ true_label_ = &done;
+ false_label_ = &done;
+ break;
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ true_label_ = &push_true;
+ break;
+
+ case Expression::kTestValue:
+ false_label_ = &push_false;
+ break;
+ }
+ // Convert current context to test context: End pre-test code.
+
+ switch (expr->op()) {
+ case Token::IN: {
+ __ InvokeBuiltin(Builtins::IN, CALL_JS);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
+ __ b(eq, true_label_);
+ __ jmp(false_label_);
+ break;
+ }
+
+ case Token::INSTANCEOF: {
+ InstanceofStub stub;
+ __ CallStub(&stub);
+ __ tst(r0, r0);
+ __ b(eq, true_label_); // The stub returns 0 for true.
+ __ jmp(false_label_);
+ break;
+ }
+
+ default: {
+ Condition cc = eq;
+ bool strict = false;
+ switch (expr->op()) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through
+ case Token::EQ:
+ cc = eq;
+ __ pop(r0);
+ __ pop(r1);
+ break;
+ case Token::LT:
+ cc = lt;
+ __ pop(r0);
+ __ pop(r1);
+ break;
+ case Token::GT:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = lt;
+ __ pop(r1);
+ __ pop(r0);
+ break;
+ case Token::LTE:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = ge;
+ __ pop(r1);
+ __ pop(r0);
+ break;
+ case Token::GTE:
+ cc = ge;
+ __ pop(r0);
+ __ pop(r1);
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+
+ // The comparison stub expects the smi vs. smi case to be handled
+ // before it is called.
+ Label slow_case;
+ __ orr(r2, r0, Operand(r1));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &slow_case);
+ __ cmp(r1, r0);
+ __ b(cc, true_label_);
+ __ jmp(false_label_);
+
+ __ bind(&slow_case);
+ CompareStub stub(cc, strict);
+ __ CallStub(&stub);
+ __ tst(r0, r0);
+ __ b(cc, true_label_);
+ __ jmp(false_label_);
+ }
+ }
+
+ // Convert current context to test context: Post-test code.
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ __ bind(&push_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ push(ip);
+ __ jmp(&done);
+ __ bind(&push_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ push(ip);
+ __ bind(&done);
+ break;
+
+ case Expression::kEffect:
+ __ bind(&done);
+ break;
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ __ bind(&push_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ push(ip);
+ __ jmp(saved_true);
+ break;
+
+ case Expression::kTestValue:
+ __ bind(&push_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ push(ip);
+ __ jmp(saved_false);
+ break;
+ }
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ // Convert current context to test context: End post-test code.
+}
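The smi fast path in the default arm above leans on the pointer-tagging scheme. A self-contained restatement of the orr/tst pair, assuming the 32-bit tagging of this port (smi tag value 0 in the low bit):

#include <cstdint>

// Both operands are smis iff the OR of the two tagged words still has a
// clear tag bit; this is exactly what the generated orr/tst tests.
constexpr uint32_t kSmiTagMaskBit = 1;  // assumption: 1-bit smi tag
inline bool BothSmi(uint32_t left, uint32_t right) {
  return ((left | right) & kSmiTagMaskBit) == 0;
}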
+
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ Move(expr->context(), r0);
+}
+
+
+#undef __
+
+
} } // namespace v8::internal
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index 6fde4b7..b0fa13a 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -54,23 +54,24 @@
if (fp == 0) return NONE;
// Compute frame type and stack pointer.
Address sp = fp + ExitFrameConstants::kSPDisplacement;
- Type type;
- if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
- type = EXIT_DEBUG;
+ const int offset = ExitFrameConstants::kCodeOffset;
+ Object* code = Memory::Object_at(fp + offset);
+ bool is_debug_exit = code->IsSmi();
+ if (is_debug_exit) {
sp -= kNumJSCallerSaved * kPointerSize;
- } else {
- type = EXIT;
}
// Fill in the state.
state->sp = sp;
state->fp = fp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
- return type;
+ return EXIT;
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
- // Do nothing
+ v->VisitPointer(&code_slot());
+ // The arguments are traversed as part of the expression stack of
+ // the calling frame.
}
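The frame-type change above folds EXIT_DEBUG into EXIT by overloading the code slot: EnterExitFrame (see the macro-assembler hunk below) stores Smi::FromInt(0) in debug mode and the code object otherwise. The detection, stated on its own (v8::internal types assumed):

// IsSmi() doubles as the debug-mode flag for exit frames; a sketch of
// the test performed in GetStateForFramePointer above.
static bool IsDebugExitFrame(Object* code_slot_value) {
  return code_slot_value->IsSmi();
}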
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 0874c09..4924c1a 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -100,7 +100,7 @@
static const int kSPDisplacement = -1 * kPointerSize;
// The debug marker is just above the frame pointer.
- static const int kDebugMarkOffset = -1 * kPointerSize;
+ static const int kCodeOffset = -1 * kPointerSize;
static const int kSavedRegistersOffset = 0 * kPointerSize;
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index ba83645..c56f414 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -107,12 +107,17 @@
static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(t1, FieldMemOperand(r2, String::kLengthOffset));
- __ mov(t1, Operand(t1, LSR, String::kHashShift));
+ __ ldr(t1, FieldMemOperand(r2, String::kHashFieldOffset));
if (i > 0) {
- __ add(t1, t1, Operand(StringDictionary::GetProbeOffset(i)));
+ // Add the probe offset (i + i * i) left shifted so the hash does not
+ // need to be right shifted in a separate instruction. The value
+ // hash + i + i * i is right shifted by the 'and' instruction below.
+ ASSERT(StringDictionary::GetProbeOffset(i) <
+ 1 << (32 - String::kHashFieldOffset));
+ __ add(t1, t1, Operand(
+ StringDictionary::GetProbeOffset(i) << String::kHashShift));
}
- __ and_(t1, t1, Operand(r3));
+ __ and_(t1, r3, Operand(t1, LSR, String::kHashShift));
// Scale the index by multiplying by the element size.
ASSERT(StringDictionary::kEntrySize == 3);
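The loop above generates the string-dictionary probe sequence; in scalar form it is masked quadratic probing, with GetProbeOffset(i) == i + i * i as the first comment states:

#include <cstdint>

// Probe sequence of the generated code above; the capacity is a power of
// two, so the mask is capacity - 1.
inline uint32_t ProbeIndex(uint32_t hash, uint32_t i, uint32_t mask) {
  return (hash + i + i * i) & mask;
}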
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 45c6540..aa6570c 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -155,6 +155,15 @@
}
+void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
+ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ cmp(sp, Operand(ip));
+ b(lo, on_stack_overflow);
+}
+
+
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
// Empty the const pool.
CheckConstPool(true, true);
@@ -274,9 +283,7 @@
}
-void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
- ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
-
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
// Compute the argv pointer and keep it in a callee-saved register.
// r0 is argc.
add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
@@ -298,8 +305,11 @@
stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
mov(fp, Operand(sp)); // setup new frame pointer
- // Push debug marker.
- mov(ip, Operand(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+ if (mode == ExitFrame::MODE_DEBUG) {
+ mov(ip, Operand(Smi::FromInt(0)));
+ } else {
+ mov(ip, Operand(CodeObject()));
+ }
push(ip);
// Save the frame pointer and the context in top.
@@ -316,7 +326,7 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
// location. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// Use sp as base to push.
CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
}
@@ -348,14 +358,14 @@
}
-void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// This code intentionally clobbers r2 and r3.
const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
- const int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+ const int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
add(r3, fp, Operand(kOffset));
CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
}
@@ -784,15 +794,13 @@
mov(scratch1, Operand(new_space_allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
ldr(result, MemOperand(scratch1));
- } else {
-#ifdef DEBUG
+ } else if (FLAG_debug_code) {
// Assert that result actually contains top on entry. scratch2 is used
// immediately below so this use of scratch2 does not cause difference with
// respect to register content between debug and release mode.
ldr(scratch2, MemOperand(scratch1));
cmp(result, scratch2);
Check(eq, "Unexpected allocation top");
-#endif
}
// Calculate new top and bail out if new space is exhausted. Use result
@@ -805,7 +813,11 @@
cmp(result, Operand(scratch2));
b(hi, gc_required);
- // Update allocation top. result temporarily holds the new top,
+ // Update allocation top. result temporarily holds the new top.
+ if (FLAG_debug_code) {
+ tst(result, Operand(kObjectAlignmentMask));
+ Check(eq, "Unaligned allocation in new space");
+ }
str(result, MemOperand(scratch1));
// Tag and adjust back to start of new object.
@@ -834,15 +846,13 @@
mov(scratch1, Operand(new_space_allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
ldr(result, MemOperand(scratch1));
- } else {
-#ifdef DEBUG
+ } else if (FLAG_debug_code) {
// Assert that result actually contains top on entry. scratch2 is used
// immediately below, so this use of scratch2 does not make register
// contents differ between debug and release mode.
ldr(scratch2, MemOperand(scratch1));
cmp(result, scratch2);
Check(eq, "Unexpected allocation top");
-#endif
}
// Calculate new top and bail out if new space is exhausted. Use result
@@ -856,7 +866,11 @@
cmp(result, Operand(scratch2));
b(hi, gc_required);
- // Update allocation top. result temporarily holds the new top,
+ // Update allocation top. result temporarily holds the new top.
+ if (FLAG_debug_code) {
+ tst(result, Operand(kObjectAlignmentMask));
+ Check(eq, "Unaligned allocation in new space");
+ }
str(result, MemOperand(scratch1));
// Adjust back to start of new object.
@@ -975,6 +989,17 @@
}
+void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
+ Register outHighReg,
+ Register outLowReg) {
+ // ARMv7 VFP3 instructions to implement integer to double conversion.
+ mov(r7, Operand(inReg, ASR, kSmiTagSize));
+ fmsr(s15, r7);
+ fsitod(d7, s15);
+ fmrrd(outLowReg, outHighReg, d7);
+}
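The mov/fmsr/fsitod/fmrrd sequence above converts a tagged smi to a double entirely in registers. The arithmetic it performs, as plain C++ (assuming this port's 1-bit smi tag):

#include <cstdint>

// Untag the smi with an arithmetic shift right by kSmiTagSize (1), then
// widen the integer to a double -- the same value the VFP sequence
// leaves in the destination register pair.
inline double SmiToDouble(int32_t tagged_smi) {
  return static_cast<double>(tagged_smi >> 1);
}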
+
+
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// All parameters are on the stack. r0 has the return value after call.
@@ -1141,6 +1166,9 @@
RecordComment(msg);
}
#endif
+ // Disable stub call restrictions to always allow calls to abort.
+ set_allow_stub_calls(true);
+
mov(r0, Operand(p0));
push(r0);
mov(r0, Operand(Smi::FromInt(p1 - p0)));
@@ -1150,6 +1178,26 @@
}
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ // Load the function context (which is the incoming, outer context).
+ ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ for (int i = 1; i < context_chain_length; i++) {
+ ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ }
+ // The context may be an intermediate context, not a function context.
+ ldr(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ } else { // Slot is in the current function context.
+ // The context may be an intermediate context, not a function context.
+ ldr(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ }
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index e37bb5e..0974329 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -79,6 +79,11 @@
void RecordWrite(Register object, Register offset, Register scratch);
// ---------------------------------------------------------------------------
+ // Stack limit support
+
+ void StackLimitCheck(Label* on_stack_limit_hit);
+
+ // ---------------------------------------------------------------------------
// Activation frames
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
@@ -87,18 +92,20 @@
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
- // Enter specific kind of exit frame; either EXIT or
- // EXIT_DEBUG. Expects the number of arguments in register r0 and
+ // Enter specific kind of exit frame; either normal or debug mode.
+ // Expects the number of arguments in register r0 and
// the builtin function to call in register r1. Exits with argc in
// r4, argv in r6, and the builtin function to call in r5.
- void EnterExitFrame(StackFrame::Type type);
+ void EnterExitFrame(ExitFrame::Mode mode);
// Leave the current exit frame. Expects the return value in r0.
- void LeaveExitFrame(StackFrame::Type type);
+ void LeaveExitFrame(ExitFrame::Mode mode);
// Align the stack by optionally pushing a Smi zero.
void AlignStack(int offset);
+ void LoadContext(Register dst, int context_chain_length);
+
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -240,6 +247,11 @@
// occurred.
void IllegalOperation(int num_arguments);
+ // Uses VFP instructions to Convert a Smi to a double.
+ void IntegerToDoubleConversionWithVFP3(Register inReg,
+ Register outHighReg,
+ Register outLowReg);
+
// ---------------------------------------------------------------------------
// Runtime calls
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 2e75a61..24b6a9c 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -29,6 +29,7 @@
#include "unicode.h"
#include "log.h"
#include "ast.h"
+#include "code-stubs.h"
#include "regexp-stack.h"
#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
@@ -587,9 +588,9 @@
Label stack_limit_hit;
Label stack_ok;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ mov(r0, Operand(stack_guard_limit));
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ sub(r0, sp, r0, SetCC);
// Handle it if the stack pointer is already below the stack limit.
@@ -1089,9 +1090,9 @@
void RegExpMacroAssemblerARM::CheckPreemption() {
// Check for preemption.
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ mov(r0, Operand(stack_guard_limit));
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ cmp(sp, r0);
SafeCall(&check_preempt_label_, ls);
@@ -1099,14 +1100,12 @@
void RegExpMacroAssemblerARM::CheckStackLimit() {
- if (FLAG_check_stack) {
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit();
- __ mov(r0, Operand(stack_limit));
- __ ldr(r0, MemOperand(r0));
- __ cmp(backtrack_stackpointer(), Operand(r0));
- SafeCall(&stack_overflow_label_, ls);
- }
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit();
+ __ mov(r0, Operand(stack_limit));
+ __ ldr(r0, MemOperand(r0));
+ __ cmp(backtrack_stackpointer(), Operand(r0));
+ SafeCall(&stack_overflow_label_, ls);
}
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 0711ac1..f70bc05 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -260,6 +260,21 @@
};
+// Enter C code from generated RegExp code in a way that allows
+// the C code to fix the return address in case of a GC.
+// Currently only needed on ARM.
+class RegExpCEntryStub: public CodeStub {
+ public:
+ RegExpCEntryStub() {}
+ virtual ~RegExpCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return RegExpCEntry; }
+ int MinorKey() { return 0; }
+ const char* GetName() { return "RegExpCEntryStub"; }
+};
+
#endif // V8_NATIVE_REGEXP
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 22bec82..9dc417b 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -342,6 +342,11 @@
PrintF("Z flag: %d; ", sim_->z_flag_);
PrintF("C flag: %d; ", sim_->c_flag_);
PrintF("V flag: %d\n", sim_->v_flag_);
+ PrintF("INVALID OP flag: %d; ", sim_->inv_op_vfp_flag_);
+ PrintF("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_);
+ PrintF("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_);
+ PrintF("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_);
+ PrintF("INEXACT flag: %d; ", sim_->inexact_vfp_flag_);
} else if (strcmp(cmd, "unstop") == 0) {
intptr_t stop_pc = sim_->get_pc() - Instr::kInstrSize;
Instr* stop_instr = reinterpret_cast<Instr*>(stop_pc);
@@ -429,6 +434,24 @@
c_flag_ = false;
v_flag_ = false;
+ // Initializing VFP registers.
+ // All registers are initialized to zero to start with, even though
+ // s_registers_ and d_registers_ alias the same physical registers on
+ // the target.
+ for (int i = 0; i < num_s_registers; i++) {
+ vfp_register[i] = 0;
+ }
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = false;
+ v_flag_FPSCR_ = false;
+
+ inv_op_vfp_flag_ = false;
+ div_zero_vfp_flag_ = false;
+ overflow_vfp_flag_ = false;
+ underflow_vfp_flag_ = false;
+ inexact_vfp_flag_ = false;
+
// The sp is initialized to point to the bottom (high address) of the
// allocated stack area. To be safe in potential stack underflows we leave
// some buffer below.
@@ -545,6 +568,99 @@
}
+// Getting from and setting into VFP registers.
+void Simulator::set_s_register(int sreg, unsigned int value) {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+ vfp_register[sreg] = value;
+}
+
+
+unsigned int Simulator::get_s_register(int sreg) const {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+ return vfp_register[sreg];
+}
+
+
+void Simulator::set_s_register_from_float(int sreg, const float flt) {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+ // Read the bits from the single precision floating point value
+ // into the unsigned integer element of vfp_register[] given by index=sreg.
+ char buffer[sizeof(vfp_register[0])];
+ memcpy(buffer, &flt, sizeof(vfp_register[0]));
+ memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
+}
+
+
+void Simulator::set_s_register_from_sinteger(int sreg, const int sint) {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+ // Read the bits from the integer value into the unsigned integer element of
+ // vfp_register[] given by index=sreg.
+ char buffer[sizeof(vfp_register[0])];
+ memcpy(buffer, &sint, sizeof(vfp_register[0]));
+ memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
+}
+
+
+void Simulator::set_d_register_from_double(int dreg, const double& dbl) {
+ ASSERT((dreg >= 0) && (dreg < num_d_registers));
+ // Read the bits from the double precision floating point value into the two
+ // consecutive unsigned integer elements of vfp_register[] given by index
+ // 2*dreg and 2*dreg+1.
+ char buffer[2 * sizeof(vfp_register[0])];
+ memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
+#ifndef BIG_ENDIAN_FLOATING_POINT
+ memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
+#else
+ memcpy(&vfp_register[dreg * 2], &buffer[4], sizeof(vfp_register[0]));
+ memcpy(&vfp_register[dreg * 2 + 1], &buffer[0], sizeof(vfp_register[0]));
+#endif
+}
+
+
+float Simulator::get_float_from_s_register(int sreg) {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+
+ float sm_val = 0.0;
+ // Read the bits from the unsigned integer vfp_register[] array
+ // into the single precision floating point value and return it.
+ char buffer[sizeof(vfp_register[0])];
+ memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
+ memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
+ return sm_val;
+}
+
+
+int Simulator::get_sinteger_from_s_register(int sreg) {
+ ASSERT((sreg >= 0) && (sreg < num_s_registers));
+
+ int sm_val = 0;
+ // Read the bits from the unsigned integer vfp_register[] array
+ // into the integer value and return it.
+ char buffer[sizeof(vfp_register[0])];
+ memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
+ memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
+ return sm_val;
+}
+
+
+double Simulator::get_double_from_d_register(int dreg) {
+ ASSERT((dreg >= 0) && (dreg < num_d_registers));
+
+ double dm_val = 0.0;
+ // Read the bits from the unsigned integer vfp_register[] array
+ // into the double precision floating point value and return it.
+ char buffer[2 * sizeof(vfp_register[0])];
+#ifdef BIG_ENDIAN_FLOATING_POINT
+ memcpy(&buffer[0], &vfp_register[2 * dreg + 1], sizeof(vfp_register[0]));
+ memcpy(&buffer[4], &vfp_register[2 * dreg], sizeof(vfp_register[0]));
+#else
+ memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
+#endif
+ memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
+ return dm_val;
+}
+
+
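All of the register accessors above use the same memcpy type-punning idiom. Isolated as a standalone sketch (standard C++, independent of the simulator):

#include <cstdint>
#include <cstring>

// Reassemble an IEEE double from two 32-bit register words via memcpy,
// which is well-defined where a reinterpret_cast between unrelated
// types would not be. Word order shown is the little-endian case.
inline double BitsToDouble(uint32_t low_word, uint32_t high_word) {
  uint32_t words[2] = { low_word, high_word };
  double result;
  std::memcpy(&result, words, sizeof(result));
  return result;
}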
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
void Simulator::GetFpArgs(double* x, double* y) {
@@ -772,6 +888,37 @@
}
+// Support for VFP comparisons.
+void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
+ // All non-NaN cases.
+ if (val1 == val2) {
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = true;
+ c_flag_FPSCR_ = true;
+ v_flag_FPSCR_ = false;
+ } else if (val1 < val2) {
+ n_flag_FPSCR_ = true;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = false;
+ v_flag_FPSCR_ = false;
+ } else {
+ // Case when (val1 > val2).
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = true;
+ v_flag_FPSCR_ = false;
+ }
+}
+
+
+void Simulator::Copy_FPSCR_to_APSR() {
+ n_flag_ = n_flag_FPSCR_;
+ z_flag_ = z_flag_FPSCR_;
+ c_flag_ = c_flag_FPSCR_;
+ v_flag_ = v_flag_FPSCR_;
+}
+
+
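Compute_FPSCR_Flags above encodes the usual VFP compare mapping for non-NaN inputs. The same mapping as a value-returning sketch (matching the function's stated precondition that NaNs never reach it):

// N/Z/C/V after a VFP compare of non-NaN doubles, as set above:
// equal -> Z,C; less -> N; greater -> C.
struct VfpFlags { bool n, z, c, v; };
inline VfpFlags VfpCompare(double val1, double val2) {
  if (val1 == val2) return { false, true, true, false };
  if (val1 < val2) return { true, false, false, false };
  return { false, false, true, false };  // val1 > val2
}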
// Addressing Mode 1 - Data-processing operands:
// Get the value based on the shifter_operand with register.
int32_t Simulator::GetShiftRm(Instr* instr, bool* carry_out) {
@@ -1154,7 +1301,7 @@
}
}
} else {
- UNIMPLEMENTED(); // not used by V8
+ UNIMPLEMENTED(); // Not used by V8.
}
} else {
// extra load/store instructions
@@ -1664,16 +1811,15 @@
void Simulator::DecodeType6(Instr* instr) {
- UNIMPLEMENTED();
+ DecodeType6CoprocessorIns(instr);
}
void Simulator::DecodeType7(Instr* instr) {
if (instr->Bit(24) == 1) {
- // Format(instr, "swi 'swi");
SoftwareInterrupt(instr);
} else {
- UNIMPLEMENTED();
+ DecodeTypeVFP(instr);
}
}
@@ -1745,6 +1891,177 @@
}
+// void Simulator::DecodeTypeVFP(Instr* instr)
+// The following ARMv7 VFPv3 instructions are currently supported.
+// fmsr: Sn = Rt
+// fmrs: Rt = Sn
+// fsitod: Dd = Sm
+// ftosid: Sd = Dm
+// Dd = faddd(Dn, Dm)
+// Dd = fsubd(Dn, Dm)
+// Dd = fmuld(Dn, Dm)
+// Dd = fdivd(Dn, Dm)
+// vcmp(Dd, Dm)
+// VMRS
+void Simulator::DecodeTypeVFP(Instr* instr) {
+  ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0));
+
+ int rt = instr->RtField();
+ int vm = instr->VmField();
+ int vn = instr->VnField();
+ int vd = instr->VdField();
+
+ if (instr->Bit(23) == 1) {
+ if ((instr->Bits(21, 19) == 0x7) &&
+ (instr->Bits(18, 16) == 0x5) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
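+      // ftosid: convert the double in Dm to a signed integer in Sd.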
+ double dm_val = get_double_from_d_register(vm);
+ int32_t int_value = static_cast<int32_t>(dm_val);
+      set_s_register_from_sinteger(((vd << 1) | instr->DField()), int_value);
+ } else if ((instr->Bits(21, 19) == 0x7) &&
+ (instr->Bits(18, 16) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(7) == 1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
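+      // fsitod: convert the signed integer in Sm to a double in Dd.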
+      int32_t int_value = get_sinteger_from_s_register(((vm << 1) |
+                                                        instr->MField()));
+ double dbl_value = static_cast<double>(int_value);
+ set_d_register_from_double(vd, dbl_value);
+ } else if ((instr->Bit(21) == 0x0) &&
+ (instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
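+      // fdivd: Dd = Dn / Dm.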
+ double dn_value = get_double_from_d_register(vn);
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = dn_value / dm_value;
+ set_d_register_from_double(vd, dd_value);
+ } else if ((instr->Bits(21, 20) == 0x3) &&
+ (instr->Bits(19, 16) == 0x4) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0x1) &&
+ (instr->Bit(4) == 0x0)) {
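+      // vcmp: compare Dd with Dm and set the FPSCR flags.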
+ double dd_value = get_double_from_d_register(vd);
+ double dm_value = get_double_from_d_register(vm);
+ Compute_FPSCR_Flags(dd_value, dm_value);
+ } else if ((instr->Bits(23, 20) == 0xF) &&
+ (instr->Bits(19, 16) == 0x1) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(7, 5) == 0x0) &&
+ (instr->Bit(4) == 0x1) &&
+ (instr->Bits(3, 0) == 0x0)) {
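+      // vmrs: with Rt == 15 this is fmstat, which copies the FPSCR
+      // condition flags into the APSR.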
+      if (instr->Bits(15, 12) == 0xF) {
+        Copy_FPSCR_to_APSR();
+      } else {
+        UNIMPLEMENTED();  // Not used by V8.
+      }
+ } else {
+ UNIMPLEMENTED(); // Not used by V8.
+ }
+ } else if (instr->Bit(21) == 1) {
+ if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
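+      // faddd: Dd = Dn + Dm.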
+ double dn_value = get_double_from_d_register(vn);
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = dn_value + dm_value;
+ set_d_register_from_double(vd, dd_value);
+ } else if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 1) &&
+ (instr->Bit(4) == 0)) {
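+      // fsubd: Dd = Dn - Dm.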
+ double dn_value = get_double_from_d_register(vn);
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = dn_value - dm_value;
+ set_d_register_from_double(vd, dd_value);
+ } else if ((instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(8) == 0x1) &&
+ (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
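+      // fmuld: Dd = Dn * Dm.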
+ double dn_value = get_double_from_d_register(vn);
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = dn_value * dm_value;
+ set_d_register_from_double(vd, dd_value);
+ } else {
+ UNIMPLEMENTED(); // Not used by V8.
+ }
+ } else {
+ if ((instr->Bit(20) == 0x0) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(6, 5) == 0x0) &&
+ (instr->Bit(4) == 1) &&
+ (instr->Bits(3, 0) == 0x0)) {
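+      // fmsr: Sn = Rt.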
+ int32_t rs_val = get_register(rt);
+      set_s_register_from_sinteger(((vn << 1) | instr->NField()), rs_val);
+ } else if ((instr->Bit(20) == 0x1) &&
+ (instr->Bits(11, 8) == 0xA) &&
+ (instr->Bits(6, 5) == 0x0) &&
+ (instr->Bit(4) == 1) &&
+ (instr->Bits(3, 0) == 0x0)) {
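+      // fmrs: Rt = Sn.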
+      int32_t int_value = get_sinteger_from_s_register(((vn << 1) |
+                                                        instr->NField()));
+ set_register(rt, int_value);
+ } else {
+ UNIMPLEMENTED(); // Not used by V8.
+ }
+ }
+}
+
+
+// void Simulator::DecodeType6CoprocessorIns(Instr* instr)
+// Decode Type 6 coprocessor instructions.
+// Dm = fmdrr(Rt, Rt2)
+// <Rt, Rt2> = fmrrd(Dm)
+void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
+  ASSERT(instr->TypeField() == 6);
+
+ int rt = instr->RtField();
+ int rn = instr->RnField();
+ int vm = instr->VmField();
+
+ if (instr->Bit(23) == 1) {
+ UNIMPLEMENTED();
+ } else if (instr->Bit(22) == 1) {
+ if ((instr->Bits(27, 24) == 0xC) &&
+ (instr->Bit(22) == 1) &&
+ (instr->Bits(11, 8) == 0xB) &&
+ (instr->Bits(7, 6) == 0x0) &&
+ (instr->Bit(4) == 1)) {
+ if (instr->Bit(20) == 0) {
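+        // fmdrr: load Dm from Rt (low word) and Rn (high word).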
+ int32_t rs_val = get_register(rt);
+ int32_t rn_val = get_register(rn);
+
+        set_s_register_from_sinteger(2 * vm, rs_val);
+        set_s_register_from_sinteger((2 * vm + 1), rn_val);
+
+ } else if (instr->Bit(20) == 1) {
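+        // fmrrd: store Dm's low word in Rt and its high word in Rn.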
+        int32_t rt_int_value = get_sinteger_from_s_register(2 * vm);
+        int32_t rn_int_value = get_sinteger_from_s_register(2 * vm + 1);
+
+ set_register(rt, rt_int_value);
+ set_register(rn, rn_int_value);
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ } else if (instr->Bit(21) == 1) {
+ UNIMPLEMENTED();
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+
// Executes the current instruction.
void Simulator::InstructionDecode(Instr* instr) {
pc_modified_ = false;
@@ -1802,7 +2119,6 @@
}
-//
void Simulator::Execute() {
// Get the PC to simulate. Cannot use the accessor here as we need the
// raw PC value and not the one used as input to arithmetic instructions.
@@ -1924,6 +2240,25 @@
return result;
}
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ int new_sp = get_register(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ set_register(sp, new_sp);
+ return new_sp;
+}
+
+
+uintptr_t Simulator::PopAddress() {
+ int current_sp = get_register(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ set_register(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+
} } // namespace assembler::arm
#endif // !defined(__arm__)
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index ff6bbf4..3a4bb31 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -52,6 +52,12 @@
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return c_limit;
}
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch() { }
};
@@ -60,6 +66,10 @@
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
entry(p0, p1, p2, p3, p4, p5, p6)
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
+
+
#else // defined(__arm__)
// When running with the simulator transition into simulated execution at this
@@ -73,6 +83,11 @@
assembler::arm::Simulator::current()->Call( \
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+  ((try_catch_address) == NULL ? \
+   NULL : *(reinterpret_cast<TryCatch**>(try_catch_address)))
+
+
#include "constants-arm.h"
@@ -82,7 +97,6 @@
class Simulator {
public:
friend class Debugger;
-
enum Register {
no_reg = -1,
r0 = 0, r1, r2, r3, r4, r5, r6, r7,
@@ -90,7 +104,15 @@
num_registers,
sp = 13,
lr = 14,
- pc = 15
+ pc = 15,
+ s0 = 0, s1, s2, s3, s4, s5, s6, s7,
+ s8, s9, s10, s11, s12, s13, s14, s15,
+ s16, s17, s18, s19, s20, s21, s22, s23,
+ s24, s25, s26, s27, s28, s29, s30, s31,
+ num_s_registers = 32,
+ d0 = 0, d1, d2, d3, d4, d5, d6, d7,
+ d8, d9, d10, d11, d12, d13, d14, d15,
+ num_d_registers = 16
};
Simulator();
@@ -106,6 +128,16 @@
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
+ // Support for VFP.
+ void set_s_register(int reg, unsigned int value);
+ unsigned int get_s_register(int reg) const;
+ void set_d_register_from_double(int dreg, const double& dbl);
+ double get_double_from_d_register(int dreg);
+  void set_s_register_from_float(int sreg, const float flt);
+ float get_float_from_s_register(int sreg);
+ void set_s_register_from_sinteger(int reg, const int value);
+ int get_sinteger_from_s_register(int reg);
+
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
int32_t get_pc() const;
@@ -124,6 +156,12 @@
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
@@ -154,6 +192,10 @@
int32_t right,
bool addition);
+ // Support for VFP.
+ void Compute_FPSCR_Flags(double val1, double val2);
+ void Copy_FPSCR_to_APSR();
+
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instr* instr, bool* carry_out);
int32_t GetImm(Instr* instr, bool* carry_out);
@@ -185,6 +227,10 @@
void DecodeType7(Instr* instr);
void DecodeUnconditional(Instr* instr);
+ // Support for VFP.
+ void DecodeTypeVFP(Instr* instr);
+ void DecodeType6CoprocessorIns(Instr* instr);
+
// Executes one instruction.
void InstructionDecode(Instr* instr);
@@ -198,20 +244,34 @@
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
- // architecture state
+ // Architecture state.
int32_t registers_[16];
bool n_flag_;
bool z_flag_;
bool c_flag_;
bool v_flag_;
- // simulator support
+ // VFP architecture state.
+ unsigned int vfp_register[num_s_registers];
+ bool n_flag_FPSCR_;
+ bool z_flag_FPSCR_;
+ bool c_flag_FPSCR_;
+ bool v_flag_FPSCR_;
+
+ // VFP FP exception flags architecture state.
+ bool inv_op_vfp_flag_;
+ bool div_zero_vfp_flag_;
+ bool overflow_vfp_flag_;
+ bool underflow_vfp_flag_;
+ bool inexact_vfp_flag_;
+
+ // Simulator support.
char* stack_;
bool pc_modified_;
int icount_;
static bool initialized_;
- // registered breakpoints
+ // Registered breakpoints.
Instr* break_pc_;
instr_t break_instr_;
};
@@ -229,6 +289,15 @@
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return assembler::arm::Simulator::current()->StackLimit();
}
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ assembler::arm::Simulator* sim = assembler::arm::Simulator::current();
+ return sim->PushAddress(try_catch_address);
+ }
+
+ static inline void UnregisterCTryCatch() {
+ assembler::arm::Simulator::current()->PopAddress();
+ }
};
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 8282655..efccaf4 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -105,7 +105,7 @@
__ b(eq, &miss);
// Get the map of the receiver and compute the hash.
- __ ldr(scratch, FieldMemOperand(name, String::kLengthOffset));
+ __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
__ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, Operand(ip));
__ eor(scratch, scratch, Operand(flags));
@@ -229,10 +229,7 @@
miss, &check_wrapper);
// Load length directly from the string.
- __ and_(scratch1, scratch1, Operand(kStringSizeMask));
- __ add(scratch1, scratch1, Operand(String::kHashShift));
__ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
- __ mov(r0, Operand(r0, LSR, scratch1));
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ Ret();
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 97d164e..132c8ae 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -146,29 +146,27 @@
// Initialize stack slots with 'undefined' value.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
}
- if (FLAG_check_stack) {
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- }
+ __ LoadRoot(r2, Heap::kStackLimitRootIndex);
for (int i = 0; i < count; i++) {
__ push(ip);
}
- if (FLAG_check_stack) {
- // Put the lr setup instruction in the delay slot. The kInstrSize is added
- // to the implicit 8 byte offset that always applies to operations with pc
- // and gives a return address 12 bytes down.
- masm()->add(lr, pc, Operand(Assembler::kInstrSize));
- masm()->cmp(sp, Operand(r2));
- StackCheckStub stub;
- // Call the stub if lower.
- masm()->mov(pc,
- Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
- RelocInfo::CODE_TARGET),
- LeaveCC,
- lo);
- }
+ // Check the stack for overflow or a break request.
+ // Put the lr setup instruction in the delay slot. The kInstrSize is added
+ // to the implicit 8 byte offset that always applies to operations with pc
+ // and gives a return address 12 bytes down.
+ masm()->add(lr, pc, Operand(Assembler::kInstrSize));
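+  // (Reading pc on ARM yields the current instruction's address plus 8, so
+  // adding kInstrSize makes lr point just past the cmp and mov below.)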
+ masm()->cmp(sp, Operand(r2));
+ StackCheckStub stub;
+ // Call the stub if lower.
+ masm()->mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
}
+
void VirtualFrame::SaveContextRegister() {
UNIMPLEMENTED();
}
@@ -245,11 +243,8 @@
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
- Result* arg_count_register,
int arg_count) {
- ASSERT(arg_count_register->reg().is(r0));
PrepareForCall(arg_count, arg_count);
- arg_count_register->Unuse();
__ InvokeBuiltin(id, flags);
}
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index 457478d..d523000 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -305,7 +305,6 @@
// removes from) the stack.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flag,
- Result* arg_count_register,
int arg_count);
// Call into an IC stub given the number of arguments it removes
diff --git a/src/array.js b/src/array.js
index 94d74a5..20d884e 100644
--- a/src/array.js
+++ b/src/array.js
@@ -77,7 +77,8 @@
var key = keys[i];
if (key != last_key) {
var e = array[key];
- builder.add(convert(e));
+ if (typeof(e) !== 'string') e = convert(e);
+ builder.add(e);
last_key = key;
}
}
@@ -114,17 +115,36 @@
if (length == 1) {
var e = array[0];
if (!IS_UNDEFINED(e) || (0 in array)) {
+ if (typeof(e) === 'string') return e;
return convert(e);
}
}
var builder = new StringBuilder();
- for (var i = 0; i < length; i++) {
- var e = array[i];
- if (i != 0) builder.add(separator);
- if (!IS_UNDEFINED(e) || (i in array)) {
- builder.add(convert(e));
+ // We pull the empty separator check outside the loop for speed!
+ if (separator.length == 0) {
+ for (var i = 0; i < length; i++) {
+ var e = array[i];
+ if (!IS_UNDEFINED(e) || (i in array)) {
+ if (typeof(e) !== 'string') e = convert(e);
+ if (e.length > 0) {
+ var elements = builder.elements;
+ elements[elements.length] = e;
+ }
+ }
+ }
+ } else {
+ for (var i = 0; i < length; i++) {
+ var e = array[i];
+ if (i != 0) builder.add(separator);
+ if (!IS_UNDEFINED(e) || (i in array)) {
+ if (typeof(e) !== 'string') e = convert(e);
+ if (e.length > 0) {
+ var elements = builder.elements;
+ elements[elements.length] = e;
+ }
+ }
}
}
return builder.generate();
@@ -136,12 +156,14 @@
function ConvertToString(e) {
+ if (typeof(e) === 'string') return e;
if (e == null) return '';
else return ToString(e);
}
function ConvertToLocaleString(e) {
+ if (typeof(e) === 'string') return e;
if (e == null) return '';
else {
// e_obj's toLocaleString might be overwritten, check if it is a function.
@@ -149,7 +171,7 @@
// See issue 877615.
var e_obj = ToObject(e);
if (IS_FUNCTION(e_obj.toLocaleString))
- return e_obj.toLocaleString();
+ return ToString(e_obj.toLocaleString());
else
return ToString(e);
}
diff --git a/src/assembler.cc b/src/assembler.cc
index 34595f8..9c9ddcd 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -174,14 +174,14 @@
void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
- *--pos_ = data_delta << kPositionTypeTagBits | tag;
+ *--pos_ = static_cast<byte>(data_delta << kPositionTypeTagBits | tag);
}
void RelocInfoWriter::WriteExtraTag(int extra_tag, int top_tag) {
- *--pos_ = top_tag << (kTagBits + kExtraTagBits) |
- extra_tag << kTagBits |
- kDefaultTag;
+ *--pos_ = static_cast<int>(top_tag << (kTagBits + kExtraTagBits) |
+ extra_tag << kTagBits |
+ kDefaultTag);
}
@@ -196,7 +196,7 @@
void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
WriteExtraTag(kDataJumpTag, top_tag);
for (int i = 0; i < kIntptrSize; i++) {
- *--pos_ = data_delta;
+ *--pos_ = static_cast<byte>(data_delta);
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
data_delta = data_delta >> kBitsPerByte;
}
@@ -211,7 +211,7 @@
ASSERT(rinfo->pc() - last_pc_ >= 0);
ASSERT(RelocInfo::NUMBER_OF_MODES < kMaxRelocModes);
// Use unsigned delta-encoding for pc.
- uint32_t pc_delta = rinfo->pc() - last_pc_;
+ uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
RelocInfo::Mode rmode = rinfo->rmode();
// The two most common modes are given small tags, and usually fit in a byte.
@@ -522,6 +522,10 @@
: address_(Redirect(Builtins::c_function_address(id))) {}
+ExternalReference::ExternalReference(ApiFunction* fun)
+ : address_(Redirect(fun->address())) {}
+
+
ExternalReference::ExternalReference(Builtins::Name name)
: address_(Builtins::builtin_address(name)) {}
@@ -579,11 +583,16 @@
}
-ExternalReference ExternalReference::address_of_stack_guard_limit() {
+ExternalReference ExternalReference::address_of_stack_limit() {
return ExternalReference(StackGuard::address_of_jslimit());
}
+ExternalReference ExternalReference::address_of_real_stack_limit() {
+ return ExternalReference(StackGuard::address_of_real_jslimit());
+}
+
+
ExternalReference ExternalReference::address_of_regexp_stack_limit() {
return ExternalReference(RegExpStack::limit_address());
}
@@ -608,6 +617,27 @@
return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
}
+
+ExternalReference ExternalReference::handle_scope_extensions_address() {
+ return ExternalReference(HandleScope::current_extensions_address());
+}
+
+
+ExternalReference ExternalReference::handle_scope_next_address() {
+ return ExternalReference(HandleScope::current_next_address());
+}
+
+
+ExternalReference ExternalReference::handle_scope_limit_address() {
+ return ExternalReference(HandleScope::current_limit_address());
+}
+
+
+ExternalReference ExternalReference::scheduled_exception_address() {
+ return ExternalReference(Top::scheduled_exception_address());
+}
+
+
#ifdef V8_NATIVE_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state() {
diff --git a/src/assembler.h b/src/assembler.h
index 21a66dd..aecd4cd 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -373,6 +373,8 @@
public:
explicit ExternalReference(Builtins::CFunctionId id);
+ explicit ExternalReference(ApiFunction* ptr);
+
explicit ExternalReference(Builtins::Name name);
explicit ExternalReference(Runtime::FunctionId id);
@@ -406,7 +408,10 @@
static ExternalReference roots_address();
// Static variable StackGuard::address_of_jslimit()
- static ExternalReference address_of_stack_guard_limit();
+ static ExternalReference address_of_stack_limit();
+
+ // Static variable StackGuard::address_of_real_jslimit()
+ static ExternalReference address_of_real_stack_limit();
// Static variable RegExpStack::limit_address()
static ExternalReference address_of_regexp_stack_limit();
@@ -422,6 +427,12 @@
static ExternalReference double_fp_operation(Token::Value operation);
static ExternalReference compare_doubles();
+ static ExternalReference handle_scope_extensions_address();
+ static ExternalReference handle_scope_next_address();
+ static ExternalReference handle_scope_limit_address();
+
+ static ExternalReference scheduled_exception_address();
+
Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -460,12 +471,16 @@
static void* Redirect(void* address, bool fp_return = false) {
if (redirector_ == NULL) return address;
- return (*redirector_)(address, fp_return);
+ void* answer = (*redirector_)(address, fp_return);
+ return answer;
}
static void* Redirect(Address address_arg, bool fp_return = false) {
void* address = reinterpret_cast<void*>(address_arg);
- return redirector_ == NULL ? address : (*redirector_)(address, fp_return);
+ void* answer = (redirector_ == NULL) ?
+ address :
+ (*redirector_)(address, fp_return);
+ return answer;
}
void* address_;
diff --git a/src/ast.cc b/src/ast.cc
index f6864b8..90b5ed6 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "ast.h"
+#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
@@ -138,6 +139,13 @@
}
+bool ObjectLiteral::Property::IsCompileTimeValue() {
+ return kind_ == CONSTANT ||
+ (kind_ == MATERIALIZED_LITERAL &&
+ CompileTimeValue::IsCompileTimeValue(value_));
+}
+
+
bool ObjectLiteral::IsValidJSON() {
int length = properties()->length();
for (int i = 0; i < length; i++) {
diff --git a/src/ast.h b/src/ast.h
index 42154f6..c27d558 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -28,7 +28,6 @@
#ifndef V8_AST_H_
#define V8_AST_H_
-#include "location.h"
#include "execution.h"
#include "factory.h"
#include "jsregexp.h"
@@ -162,7 +161,25 @@
class Expression: public AstNode {
public:
- Expression() : location_(Location::Temporary()) {}
+ enum Context {
+ // Not assigned a context yet, or else will not be visited during
+ // code generation.
+ kUninitialized,
+ // Evaluated for its side effects.
+ kEffect,
+ // Evaluated for its value (and side effects).
+ kValue,
+ // Evaluated for control flow (and side effects).
+ kTest,
+ // Evaluated for control flow and side effects. Value is also
+ // needed if true.
+ kValueTest,
+ // Evaluated for control flow and side effects. Value is also
+ // needed if false.
+ kTestValue
+ };
+
+ Expression() : context_(kUninitialized) {}
virtual Expression* AsExpression() { return this; }
@@ -177,12 +194,12 @@
// Static type information for this expression.
SmiAnalysis* type() { return &type_; }
- Location location() { return location_; }
- void set_location(Location loc) { location_ = loc; }
+ Context context() { return context_; }
+ void set_context(Context context) { context_ = context; }
private:
SmiAnalysis type_;
- Location location_;
+ Context context_;
};
@@ -305,7 +322,7 @@
class DoWhileStatement: public IterationStatement {
public:
explicit DoWhileStatement(ZoneStringList* labels)
- : IterationStatement(labels), cond_(NULL) {
+ : IterationStatement(labels), cond_(NULL), condition_position_(-1) {
}
void Initialize(Expression* cond, Statement* body) {
@@ -317,8 +334,14 @@
Expression* cond() const { return cond_; }
+ // Position where condition expression starts. We need it to make
+ // the loop's condition a breakable location.
+ int condition_position() { return condition_position_; }
+ void set_condition_position(int pos) { condition_position_ = pos; }
+
private:
Expression* cond_;
+ int condition_position_;
};
@@ -747,6 +770,8 @@
Expression* value() { return value_; }
Kind kind() { return kind_; }
+ bool IsCompileTimeValue();
+
private:
Literal* key_;
Expression* value_;
@@ -933,11 +958,7 @@
// variable name in the context object on the heap,
// with lookup starting at the current context. index()
// is invalid.
- LOOKUP,
-
- // A property in the global object. var()->name() is
- // the property name.
- GLOBAL
+ LOOKUP
};
Slot(Variable* var, Type type, int index)
@@ -1059,6 +1080,7 @@
Handle<String> name() const { return name_; }
Runtime::Function* function() const { return function_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
+ bool is_jsruntime() const { return function_ == NULL; }
private:
Handle<String> name_;
@@ -1261,7 +1283,6 @@
ZoneList<Statement*>* body,
int materialized_literal_count,
int expected_property_count,
- bool has_only_this_property_assignments,
bool has_only_simple_this_property_assignments,
Handle<FixedArray> this_property_assignments,
int num_parameters,
@@ -1273,7 +1294,6 @@
body_(body),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
- has_only_this_property_assignments_(has_only_this_property_assignments),
has_only_simple_this_property_assignments_(
has_only_simple_this_property_assignments),
this_property_assignments_(this_property_assignments),
@@ -1283,7 +1303,8 @@
is_expression_(is_expression),
loop_nesting_(0),
function_token_position_(RelocInfo::kNoPosition),
- inferred_name_(Heap::empty_string()) {
+ inferred_name_(Heap::empty_string()),
+ try_fast_codegen_(false) {
#ifdef DEBUG
already_compiled_ = false;
#endif
@@ -1305,9 +1326,6 @@
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
- bool has_only_this_property_assignments() {
- return has_only_this_property_assignments_;
- }
bool has_only_simple_this_property_assignments() {
return has_only_simple_this_property_assignments_;
}
@@ -1326,6 +1344,9 @@
inferred_name_ = inferred_name;
}
+ bool try_fast_codegen() { return try_fast_codegen_; }
+ void set_try_fast_codegen(bool flag) { try_fast_codegen_ = flag; }
+
#ifdef DEBUG
void mark_as_compiled() {
ASSERT(!already_compiled_);
@@ -1339,7 +1360,6 @@
ZoneList<Statement*>* body_;
int materialized_literal_count_;
int expected_property_count_;
- bool has_only_this_property_assignments_;
bool has_only_simple_this_property_assignments_;
Handle<FixedArray> this_property_assignments_;
int num_parameters_;
@@ -1349,6 +1369,7 @@
int loop_nesting_;
int function_token_position_;
Handle<String> inferred_name_;
+ bool try_fast_codegen_;
#ifdef DEBUG
bool already_compiled_;
#endif
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 43aa1a3..deda96f 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -36,6 +36,7 @@
#include "global-handles.h"
#include "macro-assembler.h"
#include "natives.h"
+#include "snapshot.h"
namespace v8 {
namespace internal {
@@ -92,14 +93,39 @@
static SourceCodeCache natives_cache(Script::TYPE_NATIVE);
static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
+// This is for delete, not delete[].
+static List<char*>* delete_these_non_arrays_on_tear_down = NULL;
+
+
+NativesExternalStringResource::NativesExternalStringResource(const char* source)
+ : data_(source), length_(StrLength(source)) {
+ if (delete_these_non_arrays_on_tear_down == NULL) {
+ delete_these_non_arrays_on_tear_down = new List<char*>(2);
+ }
+ // The resources are small objects and we only make a fixed number of
+ // them, but let's clean them up on exit for neatness.
+ delete_these_non_arrays_on_tear_down->
+ Add(reinterpret_cast<char*>(this));
+}
Handle<String> Bootstrapper::NativesSourceLookup(int index) {
ASSERT(0 <= index && index < Natives::GetBuiltinsCount());
if (Heap::natives_source_cache()->get(index)->IsUndefined()) {
- Handle<String> source_code =
- Factory::NewStringFromAscii(Natives::GetScriptSource(index));
- Heap::natives_source_cache()->set(index, *source_code);
+ if (!Snapshot::IsEnabled() || FLAG_new_snapshot) {
+ // We can use external strings for the natives.
+ NativesExternalStringResource* resource =
+ new NativesExternalStringResource(
+ Natives::GetScriptSource(index).start());
+ Handle<String> source_code =
+ Factory::NewExternalStringFromAscii(resource);
+ Heap::natives_source_cache()->set(index, *source_code);
+ } else {
+ // Old snapshot code can't cope with external strings at all.
+ Handle<String> source_code =
+ Factory::NewStringFromAscii(Natives::GetScriptSource(index));
+ Heap::natives_source_cache()->set(index, *source_code);
+ }
}
Handle<Object> cached_source(Heap::natives_source_cache()->get(index));
return Handle<String>::cast(cached_source);
@@ -125,6 +151,16 @@
void Bootstrapper::TearDown() {
+ if (delete_these_non_arrays_on_tear_down != NULL) {
+ int len = delete_these_non_arrays_on_tear_down->length();
+ ASSERT(len < 20); // Don't use this mechanism for unbounded allocations.
+ for (int i = 0; i < len; i++) {
+ delete delete_these_non_arrays_on_tear_down->at(i);
+ }
+ delete delete_these_non_arrays_on_tear_down;
+ delete_these_non_arrays_on_tear_down = NULL;
+ }
+
natives_cache.Initialize(false); // Yes, symmetrical
extensions_cache.Initialize(false);
}
@@ -316,8 +352,11 @@
void Bootstrapper::Iterate(ObjectVisitor* v) {
natives_cache.Iterate(v);
+ v->Synchronize("NativesCache");
extensions_cache.Iterate(v);
+ v->Synchronize("Extensions");
PendingFixups::Iterate(v);
+ v->Synchronize("PendingFixups");
}
@@ -1072,21 +1111,29 @@
Factory::LookupAsciiSymbol("context_data"),
proxy_context_data,
common_attributes);
- Handle<Proxy> proxy_eval_from_function =
- Factory::NewProxy(&Accessors::ScriptEvalFromFunction);
+ Handle<Proxy> proxy_eval_from_script =
+ Factory::NewProxy(&Accessors::ScriptEvalFromScript);
script_descriptors =
Factory::CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("eval_from_function"),
- proxy_eval_from_function,
+ Factory::LookupAsciiSymbol("eval_from_script"),
+ proxy_eval_from_script,
common_attributes);
- Handle<Proxy> proxy_eval_from_position =
- Factory::NewProxy(&Accessors::ScriptEvalFromPosition);
+ Handle<Proxy> proxy_eval_from_script_position =
+ Factory::NewProxy(&Accessors::ScriptEvalFromScriptPosition);
script_descriptors =
Factory::CopyAppendProxyDescriptor(
script_descriptors,
- Factory::LookupAsciiSymbol("eval_from_position"),
- proxy_eval_from_position,
+ Factory::LookupAsciiSymbol("eval_from_script_position"),
+ proxy_eval_from_script_position,
+ common_attributes);
+ Handle<Proxy> proxy_eval_from_function_name =
+ Factory::NewProxy(&Accessors::ScriptEvalFromFunctionName);
+ script_descriptors =
+ Factory::CopyAppendProxyDescriptor(
+ script_descriptors,
+ Factory::LookupAsciiSymbol("eval_from_function_name"),
+ proxy_eval_from_function_name,
common_attributes);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
@@ -1299,8 +1346,6 @@
ASSERT(Top::has_pending_exception() != result);
if (!result) {
Top::clear_pending_exception();
- v8::Utils::ReportApiFailure(
- "v8::Context::New()", "Error installing extension");
}
current->set_state(v8::INSTALLED);
return result;
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 15fc88d..07d2747 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -76,6 +76,24 @@
static void FreeThreadResources();
};
+
+class NativesExternalStringResource
+ : public v8::String::ExternalAsciiStringResource {
+ public:
+ explicit NativesExternalStringResource(const char* source);
+
+ const char* data() const {
+ return data_;
+ }
+
+ size_t length() const {
+ return length_;
+ }
+ private:
+ const char* data_;
+ size_t length_;
+};
+
}} // namespace v8::internal
#endif // V8_BOOTSTRAPPER_H_
diff --git a/src/builtins.cc b/src/builtins.cc
index fa1b34e..b66635c 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -380,6 +380,9 @@
{
// Leaving JavaScript.
VMState state(EXTERNAL);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ state.set_external_callback(v8::ToCData<Address>(callback_obj));
+#endif
value = callback(new_args);
}
if (value.IsEmpty()) {
@@ -446,6 +449,9 @@
{
// Leaving JavaScript.
VMState state(EXTERNAL);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ state.set_external_callback(v8::ToCData<Address>(callback_obj));
+#endif
value = callback(new_args);
}
if (value.IsEmpty()) {
diff --git a/src/checks.cc b/src/checks.cc
index f8a2f24..b5df316 100644
--- a/src/checks.cc
+++ b/src/checks.cc
@@ -36,6 +36,8 @@
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
+ fflush(stdout);
+ fflush(stderr);
fatal_error_handler_nesting_depth++;
// First time we try to print an error message
if (fatal_error_handler_nesting_depth < 2) {
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 586c948..dbc39ff 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -36,10 +36,27 @@
namespace internal {
Handle<Code> CodeStub::GetCode() {
- uint32_t key = GetKey();
- int index = Heap::code_stubs()->FindEntry(key);
- if (index == NumberDictionary::kNotFound) {
- HandleScope scope;
+ bool custom_cache = has_custom_cache();
+
+ int index = 0;
+ uint32_t key = 0;
+ if (custom_cache) {
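+    // Stubs with a custom cache bypass the Heap::code_stubs() dictionary;
+    // their code is stored and fetched via SetCustomCache/GetCustomCache.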
+ Code* cached;
+ if (GetCustomCache(&cached)) {
+ return Handle<Code>(cached);
+ } else {
+ index = NumberDictionary::kNotFound;
+ }
+ } else {
+ key = GetKey();
+ index = Heap::code_stubs()->FindEntry(key);
+    if (index != NumberDictionary::kNotFound) {
+      return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
+    }
+ }
+
+ Code* result;
+ {
+ v8::HandleScope scope;
// Update the static counter each time a new code stub is generated.
Counters::code_stubs.Increment();
@@ -79,63 +96,29 @@
}
#endif
- // Update the dictionary and the root in Heap.
- Handle<NumberDictionary> dict =
- Factory::DictionaryAtNumberPut(
- Handle<NumberDictionary>(Heap::code_stubs()),
- key,
- code);
- Heap::public_set_code_stubs(*dict);
- index = Heap::code_stubs()->FindEntry(key);
+ if (custom_cache) {
+ SetCustomCache(*code);
+ } else {
+ // Update the dictionary and the root in Heap.
+ Handle<NumberDictionary> dict =
+ Factory::DictionaryAtNumberPut(
+ Handle<NumberDictionary>(Heap::code_stubs()),
+ key,
+ code);
+ Heap::public_set_code_stubs(*dict);
+ }
+ result = *code;
}
- ASSERT(index != NumberDictionary::kNotFound);
- return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
+ return Handle<Code>(result);
}
const char* CodeStub::MajorName(CodeStub::Major major_key) {
switch (major_key) {
- case CallFunction:
- return "CallFunction";
- case GenericBinaryOp:
- return "GenericBinaryOp";
- case SmiOp:
- return "SmiOp";
- case Compare:
- return "Compare";
- case RecordWrite:
- return "RecordWrite";
- case StackCheck:
- return "StackCheck";
- case UnarySub:
- return "UnarySub";
- case RevertToNumber:
- return "RevertToNumber";
- case ToBoolean:
- return "ToBoolean";
- case Instanceof:
- return "Instanceof";
- case CounterOp:
- return "CounterOp";
- case ArgumentsAccess:
- return "ArgumentsAccess";
- case Runtime:
- return "Runtime";
- case CEntry:
- return "CEntry";
- case JSEntry:
- return "JSEntry";
- case GetProperty:
- return "GetProperty";
- case SetProperty:
- return "SetProperty";
- case InvokeBuiltin:
- return "InvokeBuiltin";
- case ConvertToDouble:
- return "ConvertToDouble";
- case WriteInt32ToHeapNumber:
- return "WriteInt32ToHeapNumber";
+#define DEF_CASE(name) case name: return #name;
+ CODE_STUB_LIST(DEF_CASE)
+#undef DEF_CASE
default:
UNREACHABLE();
return NULL;
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 91d951f..25a2d0f 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -31,32 +31,52 @@
namespace v8 {
namespace internal {
+// List of code stubs used on all platforms. The order in this list is
+// important, as only the stubs up to and including RecordWrite allow
+// nested stub calls.
+#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
+ V(CallFunction) \
+ V(GenericBinaryOp) \
+ V(StringAdd) \
+ V(SmiOp) \
+ V(Compare) \
+ V(RecordWrite) \
+ V(ConvertToDouble) \
+ V(WriteInt32ToHeapNumber) \
+ V(StackCheck) \
+ V(UnarySub) \
+ V(RevertToNumber) \
+ V(ToBoolean) \
+ V(Instanceof) \
+ V(CounterOp) \
+ V(ArgumentsAccess) \
+ V(Runtime) \
+ V(CEntry) \
+ V(JSEntry)
+
+// List of code stubs only used on ARM platforms.
+#ifdef V8_TARGET_ARCH_ARM
+#define CODE_STUB_LIST_ARM(V) \
+ V(GetProperty) \
+ V(SetProperty) \
+ V(InvokeBuiltin) \
+ V(RegExpCEntry)
+#else
+#define CODE_STUB_LIST_ARM(V)
+#endif
+
+// Combined list of code stubs.
+#define CODE_STUB_LIST(V) \
+ CODE_STUB_LIST_ALL_PLATFORMS(V) \
+ CODE_STUB_LIST_ARM(V)
// CodeStub is the base class of all stubs.
class CodeStub BASE_EMBEDDED {
public:
enum Major {
- CallFunction,
- GenericBinaryOp,
- SmiOp,
- Compare,
- RecordWrite, // Last stub that allows stub calls inside.
- ConvertToDouble,
- WriteInt32ToHeapNumber,
- StackCheck,
- UnarySub,
- RevertToNumber,
- ToBoolean,
- Instanceof,
- CounterOp,
- ArgumentsAccess,
- Runtime,
- CEntry,
- JSEntry,
- GetProperty, // ARM only
- SetProperty, // ARM only
- InvokeBuiltin, // ARM only
- RegExpCEntry, // ARM only
+#define DEF_ENUM(name) name,
+ CODE_STUB_LIST(DEF_ENUM)
+#undef DEF_ENUM
+    NoCache,  // Marker for stubs that do custom caching.
NUMBER_OF_IDS
};
@@ -73,6 +93,12 @@
virtual ~CodeStub() {}
+ // Override these methods to provide a custom caching mechanism for
+ // an individual type of code stub.
+ virtual bool GetCustomCache(Code** code_out) { return false; }
+ virtual void SetCustomCache(Code* value) { }
+ virtual bool has_custom_cache() { return false; }
+
protected:
static const int kMajorBits = 5;
static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
diff --git a/src/codegen.cc b/src/codegen.cc
index 28c0ba5..26e8d7d 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "debug.h"
#include "oprofile-agent.h"
#include "prettyprinter.h"
@@ -250,98 +251,6 @@
#endif
-// Sets the function info on a function.
-// The start_position points to the first '(' character after the function name
-// in the full script source. When counting characters in the script source the
-// the first character is number 0 (not 1).
-void CodeGenerator::SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script) {
- fun->shared()->set_length(lit->num_parameters());
- fun->shared()->set_formal_parameter_count(lit->num_parameters());
- fun->shared()->set_script(*script);
- fun->shared()->set_function_token_position(lit->function_token_position());
- fun->shared()->set_start_position(lit->start_position());
- fun->shared()->set_end_position(lit->end_position());
- fun->shared()->set_is_expression(lit->is_expression());
- fun->shared()->set_is_toplevel(is_toplevel);
- fun->shared()->set_inferred_name(*lit->inferred_name());
- fun->shared()->SetThisPropertyAssignmentsInfo(
- lit->has_only_this_property_assignments(),
- lit->has_only_simple_this_property_assignments(),
- *lit->this_property_assignments());
-}
-
-
-Handle<Code> CodeGenerator::ComputeLazyCompile(int argc) {
- CALL_HEAP_FUNCTION(StubCache::ComputeLazyCompile(argc), Code);
-}
-
-
-Handle<JSFunction> CodeGenerator::BuildBoilerplate(FunctionLiteral* node) {
-#ifdef DEBUG
- // We should not try to compile the same function literal more than
- // once.
- node->mark_as_compiled();
-#endif
-
- // Determine if the function can be lazily compiled. This is
- // necessary to allow some of our builtin JS files to be lazily
- // compiled. These builtins cannot be handled lazily by the parser,
- // since we have to know if a function uses the special natives
- // syntax, which is something the parser records.
- bool allow_lazy = node->AllowsLazyCompilation();
-
- // Generate code
- Handle<Code> code;
- if (FLAG_lazy && allow_lazy) {
- code = ComputeLazyCompile(node->num_parameters());
- } else {
- // The bodies of function literals have not yet been visited by
- // the AST optimizer/analyzer.
- if (!Rewriter::Optimize(node)) {
- return Handle<JSFunction>::null();
- }
-
- code = MakeCode(node, script_, false);
-
- // Check for stack-overflow exception.
- if (code.is_null()) {
- SetStackOverflow();
- return Handle<JSFunction>::null();
- }
-
- // Function compilation complete.
- LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *node->name()));
-
-#ifdef ENABLE_OPROFILE_AGENT
- OProfileAgent::CreateNativeCodeRegion(*node->name(),
- code->instruction_start(),
- code->instruction_size());
-#endif
- }
-
- // Create a boilerplate function.
- Handle<JSFunction> function =
- Factory::NewFunctionBoilerplate(node->name(),
- node->materialized_literal_count(),
- code);
- CodeGenerator::SetFunctionInfo(function, node, false, script_);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Notify debugger that a new function has been added.
- Debugger::OnNewFunction(function);
-#endif
-
- // Set the expected number of properties for instances and return
- // the resulting function.
- SetExpectedNofPropertiesFromEstimate(function,
- node->expected_property_count());
- return function;
-}
-
-
Handle<Code> CodeGenerator::ComputeCallInitialize(
int argc,
InLoopFlag in_loop) {
@@ -398,7 +307,8 @@
array->set_undefined(j++);
}
} else {
- Handle<JSFunction> function = BuildBoilerplate(node->fun());
+ Handle<JSFunction> function =
+ Compiler::BuildBoilerplate(node->fun(), script(), this);
// Check for stack-overflow exception.
if (HasStackOverflow()) return;
array->set(j++, *function);
@@ -433,7 +343,10 @@
{&CodeGenerator::GenerateLog, "_Log"},
{&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
{&CodeGenerator::GenerateMathSin, "_Math_sin"},
- {&CodeGenerator::GenerateMathCos, "_Math_cos"}
+ {&CodeGenerator::GenerateMathCos, "_Math_cos"},
+ {&CodeGenerator::GenerateIsObject, "_IsObject"},
+ {&CodeGenerator::GenerateIsFunction, "_IsFunction"},
+ {&CodeGenerator::GenerateStringAdd, "_StringAdd"},
};
@@ -521,6 +434,9 @@
if (FLAG_debug_info) RecordPositions(masm(), stmt->statement_pos());
}
+void CodeGenerator::CodeForDoWhileConditionPosition(DoWhileStatement* stmt) {
+ if (FLAG_debug_info) RecordPositions(masm(), stmt->condition_position());
+}
void CodeGenerator::CodeForSourcePosition(int pos) {
if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
@@ -551,4 +467,20 @@
}
+bool ApiGetterEntryStub::GetCustomCache(Code** code_out) {
+ Object* cache = info()->load_stub_cache();
+ if (cache->IsUndefined()) {
+ return false;
+ } else {
+ *code_out = Code::cast(cache);
+ return true;
+ }
+}
+
+
+void ApiGetterEntryStub::SetCustomCache(Code* value) {
+ info()->set_load_stub_cache(value);
+}
+
+
} } // namespace v8::internal
diff --git a/src/codegen.h b/src/codegen.h
index 8c1b733..85a08d5 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -38,9 +38,9 @@
// MakeCode
// MakeCodePrologue
// MakeCodeEpilogue
-// SetFunctionInfo
// masm
// frame
+// script
// has_valid_frame
// SetFrame
// DeleteFrame
@@ -69,6 +69,7 @@
// CodeForFunctionPosition
// CodeForReturnPosition
// CodeForStatementPosition
+// CodeForDoWhileConditionPosition
// CodeForSourcePosition
@@ -301,7 +302,7 @@
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- StackFrame::Type frame_type,
+ ExitFrame::Mode mode,
bool do_gc,
bool always_allocate_scope);
void GenerateThrowTOS(MacroAssembler* masm);
@@ -320,6 +321,32 @@
};
+class ApiGetterEntryStub : public CodeStub {
+ public:
+ ApiGetterEntryStub(Handle<AccessorInfo> info,
+ ApiFunction* fun)
+ : info_(info),
+ fun_(fun) { }
+ void Generate(MacroAssembler* masm);
+ virtual bool has_custom_cache() { return true; }
+ virtual bool GetCustomCache(Code** code_out);
+ virtual void SetCustomCache(Code* value);
+
+ static const int kStackSpace = 6;
+ static const int kArgc = 4;
+ private:
+ Handle<AccessorInfo> info() { return info_; }
+ ApiFunction* fun() { return fun_; }
+ Major MajorKey() { return NoCache; }
+ int MinorKey() { return 0; }
+  const char* GetName() { return "ApiGetterEntryStub"; }
+ // The accessor info associated with the function.
+ Handle<AccessorInfo> info_;
+ // The function to be called.
+ ApiFunction* fun_;
+};
+
+
class CEntryDebugBreakStub : public CEntryStub {
public:
CEntryDebugBreakStub() : CEntryStub(1) { }
diff --git a/src/compiler.cc b/src/compiler.cc
index e422bf7..48da63d 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -46,11 +46,23 @@
public:
enum CodeGenTag { NORMAL, FAST };
- CodeGenSelector() : has_supported_syntax_(true) {}
+ CodeGenSelector()
+ : has_supported_syntax_(true),
+ context_(Expression::kUninitialized) {
+ }
CodeGenTag Select(FunctionLiteral* fun);
private:
+ // Visit an expression in a given expression context.
+ void ProcessExpression(Expression* expr, Expression::Context context) {
+ Expression::Context saved = context_;
+ context_ = context;
+ Visit(expr);
+ expr->set_context(context);
+ context_ = saved;
+ }
+
void VisitDeclarations(ZoneList<Declaration*>* decls);
void VisitStatements(ZoneList<Statement*>* stmts);
@@ -61,6 +73,9 @@
bool has_supported_syntax_;
+ // The desired expression context of the currently visited expression.
+ Expression::Context context_;
+
DISALLOW_COPY_AND_ASSIGN(CodeGenSelector);
};
@@ -68,7 +83,8 @@
static Handle<Code> MakeCode(FunctionLiteral* literal,
Handle<Script> script,
Handle<Context> context,
- bool is_eval) {
+ bool is_eval,
+ Handle<SharedFunctionInfo> shared) {
ASSERT(literal != NULL);
// Rewrite the AST by introducing .result assignments where needed.
@@ -105,12 +121,24 @@
// Generate code and return it.
if (FLAG_fast_compiler) {
- CodeGenSelector selector;
- CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
- if (code_gen == CodeGenSelector::FAST) {
- return FastCodeGenerator::MakeCode(literal, script, is_eval);
+ // If there is no shared function info, try the fast code
+ // generator for code in the global scope. Otherwise obey the
+ // explicit hint in the shared function info.
+ // If always_fast_compiler is true, always try the fast compiler.
+ if (shared.is_null() && !literal->scope()->is_global_scope() &&
+ !FLAG_always_fast_compiler) {
+ if (FLAG_trace_bailout) PrintF("Non-global scope\n");
+ } else if (!shared.is_null() && !shared->try_fast_codegen() &&
+ !FLAG_always_fast_compiler) {
+ if (FLAG_trace_bailout) PrintF("No hint to try fast\n");
+ } else {
+ CodeGenSelector selector;
+ CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
+ if (code_gen == CodeGenSelector::FAST) {
+ return FastCodeGenerator::MakeCode(literal, script, is_eval);
+ }
+ ASSERT(code_gen == CodeGenSelector::NORMAL);
}
- ASSERT(code_gen == CodeGenSelector::NORMAL);
}
return CodeGenerator::MakeCode(literal, script, is_eval);
}
@@ -151,8 +179,10 @@
// called.
if (is_eval) {
JavaScriptFrameIterator it;
- script->set_eval_from_function(it.frame()->function());
- int offset = it.frame()->pc() - it.frame()->code()->instruction_start();
+ script->set_eval_from_shared(
+ JSFunction::cast(it.frame()->function())->shared());
+ int offset = static_cast<int>(
+ it.frame()->pc() - it.frame()->code()->instruction_start());
script->set_eval_from_instructions_offset(Smi::FromInt(offset));
}
}
@@ -195,7 +225,8 @@
HistogramTimerScope timer(rate);
// Compile the code.
- Handle<Code> code = MakeCode(lit, script, context, is_eval);
+ Handle<Code> code = MakeCode(lit, script, context, is_eval,
+ Handle<SharedFunctionInfo>::null());
// Check for stack-overflow exceptions.
if (code.is_null()) {
@@ -232,7 +263,7 @@
code);
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
- CodeGenerator::SetFunctionInfo(fun, lit, true, script);
+ Compiler::SetFunctionInfo(fun, lit, true, script);
// Hint to the runtime system used when allocating space for initial
// property space by setting the expected number of properties for
@@ -396,7 +427,8 @@
HistogramTimerScope timer(&Counters::compile_lazy);
// Compile the code.
- Handle<Code> code = MakeCode(lit, script, Handle<Context>::null(), false);
+ Handle<Code> code = MakeCode(lit, script, Handle<Context>::null(), false,
+ shared);
// Check for stack-overflow exception.
if (code.is_null()) {
@@ -438,7 +470,6 @@
// Set the optimization hints after performing lazy compilation, as these are
// not set when the function is set up as a lazily compiled function.
shared->SetThisPropertyAssignmentsInfo(
- lit->has_only_this_property_assignments(),
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
@@ -448,18 +479,132 @@
}
+Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
+ Handle<Script> script,
+ AstVisitor* caller) {
+#ifdef DEBUG
+ // We should not try to compile the same function literal more than
+ // once.
+ literal->mark_as_compiled();
+#endif
+
+ // Determine if the function can be lazily compiled. This is
+ // necessary to allow some of our builtin JS files to be lazily
+ // compiled. These builtins cannot be handled lazily by the parser,
+ // since we have to know if a function uses the special natives
+ // syntax, which is something the parser records.
+ bool allow_lazy = literal->AllowsLazyCompilation();
+
+ // Generate code
+ Handle<Code> code;
+ if (FLAG_lazy && allow_lazy) {
+ code = ComputeLazyCompile(literal->num_parameters());
+ } else {
+ // The bodies of function literals have not yet been visited by
+ // the AST optimizer/analyzer.
+ if (!Rewriter::Optimize(literal)) {
+ return Handle<JSFunction>::null();
+ }
+
+ // Generate code and return it.
+ bool is_compiled = false;
+ if (FLAG_fast_compiler && literal->try_fast_codegen()) {
+ CodeGenSelector selector;
+ CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
+ if (code_gen == CodeGenSelector::FAST) {
+ code = FastCodeGenerator::MakeCode(literal,
+ script,
+ false); // Not eval.
+ is_compiled = true;
+ }
+ }
+
+ if (!is_compiled) {
+ // We didn't try the fast compiler, or we failed to select it.
+ code = CodeGenerator::MakeCode(literal,
+ script,
+ false); // Not eval.
+ }
+
+ // Check for stack-overflow exception.
+ if (code.is_null()) {
+ caller->SetStackOverflow();
+ return Handle<JSFunction>::null();
+ }
+
+ // Function compilation complete.
+ LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *literal->name()));
+
+#ifdef ENABLE_OPROFILE_AGENT
+    OProfileAgent::CreateNativeCodeRegion(*literal->name(),
+                                          code->instruction_start(),
+                                          code->instruction_size());
+#endif
+ }
+
+ // Create a boilerplate function.
+ Handle<JSFunction> function =
+ Factory::NewFunctionBoilerplate(literal->name(),
+ literal->materialized_literal_count(),
+ code);
+ SetFunctionInfo(function, literal, false, script);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Notify debugger that a new function has been added.
+ Debugger::OnNewFunction(function);
+#endif
+
+ // Set the expected number of properties for instances and return
+ // the resulting function.
+ SetExpectedNofPropertiesFromEstimate(function,
+ literal->expected_property_count());
+ return function;
+}
+
+
+// Sets the function info on a function.
+// The start_position points to the first '(' character after the function name
+// in the full script source. When counting characters in the script source,
+// the first character is number 0 (not 1).
+void Compiler::SetFunctionInfo(Handle<JSFunction> fun,
+ FunctionLiteral* lit,
+ bool is_toplevel,
+ Handle<Script> script) {
+ fun->shared()->set_length(lit->num_parameters());
+ fun->shared()->set_formal_parameter_count(lit->num_parameters());
+ fun->shared()->set_script(*script);
+ fun->shared()->set_function_token_position(lit->function_token_position());
+ fun->shared()->set_start_position(lit->start_position());
+ fun->shared()->set_end_position(lit->end_position());
+ fun->shared()->set_is_expression(lit->is_expression());
+ fun->shared()->set_is_toplevel(is_toplevel);
+ fun->shared()->set_inferred_name(*lit->inferred_name());
+ fun->shared()->SetThisPropertyAssignmentsInfo(
+ lit->has_only_simple_this_property_assignments(),
+ *lit->this_property_assignments());
+ fun->shared()->set_try_fast_codegen(lit->try_fast_codegen());
+}
+
+
CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) {
Scope* scope = fun->scope();
- if (!scope->is_global_scope()) {
- if (FLAG_trace_bailout) PrintF("Non-global scope\n");
- return NORMAL;
+ if (scope->num_heap_slots() > 0) {
+ // We support functions with a local context if they do not have
+ // parameters that need to be copied into the context.
+ for (int i = 0, len = scope->num_parameters(); i < len; i++) {
+ Slot* slot = scope->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ if (FLAG_trace_bailout) {
+ PrintF("Function has context-allocated parameters.\n");
+ }
+ return NORMAL;
+ }
+ }
}
- ASSERT(scope->num_heap_slots() == 0);
- ASSERT(scope->arguments() == NULL);
has_supported_syntax_ = true;
- VisitDeclarations(fun->scope()->declarations());
+ VisitDeclarations(scope->declarations());
if (!has_supported_syntax_) return NORMAL;
VisitStatements(fun->body());
@@ -500,9 +645,20 @@
void CodeGenSelector::VisitDeclaration(Declaration* decl) {
- Variable* var = decl->proxy()->var();
- if (!var->is_global() || var->mode() == Variable::CONST) {
- BAILOUT("Non-global declaration");
+ Property* prop = decl->proxy()->AsProperty();
+ if (prop != NULL) {
+ // Property rewrites are shared, ensure we are not changing its
+ // expression context state.
+ ASSERT(prop->obj()->context() == Expression::kUninitialized ||
+ prop->obj()->context() == Expression::kValue);
+ ASSERT(prop->key()->context() == Expression::kUninitialized ||
+ prop->key()->context() == Expression::kValue);
+ ProcessExpression(prop->obj(), Expression::kValue);
+ ProcessExpression(prop->key(), Expression::kValue);
+ }
+
+ if (decl->fun() != NULL) {
+ ProcessExpression(decl->fun(), Expression::kValue);
}
}
@@ -513,10 +669,7 @@
void CodeGenSelector::VisitExpressionStatement(ExpressionStatement* stmt) {
- Expression* expr = stmt->expression();
- Visit(expr);
- CHECK_BAILOUT;
- expr->set_location(Location::Nowhere());
+ ProcessExpression(stmt->expression(), Expression::kEffect);
}
@@ -526,7 +679,11 @@
void CodeGenSelector::VisitIfStatement(IfStatement* stmt) {
- BAILOUT("IfStatement");
+ ProcessExpression(stmt->condition(), Expression::kTest);
+ CHECK_BAILOUT;
+ Visit(stmt->then_statement());
+ CHECK_BAILOUT;
+ Visit(stmt->else_statement());
}
@@ -541,7 +698,7 @@
void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) {
- Visit(stmt->expression());
+ ProcessExpression(stmt->expression(), Expression::kValue);
}
@@ -561,17 +718,39 @@
void CodeGenSelector::VisitDoWhileStatement(DoWhileStatement* stmt) {
- BAILOUT("DoWhileStatement");
+  // We do not handle loops with break or continue statements in their
+  // body. We will bail out when we hit those statements in the body.
+ ProcessExpression(stmt->cond(), Expression::kTest);
+ CHECK_BAILOUT;
+ Visit(stmt->body());
}
void CodeGenSelector::VisitWhileStatement(WhileStatement* stmt) {
- BAILOUT("WhileStatement");
+  // We do not handle loops with break or continue statements in their
+  // body. We will bail out when we hit those statements in the body.
+ ProcessExpression(stmt->cond(), Expression::kTest);
+ CHECK_BAILOUT;
+ Visit(stmt->body());
}
void CodeGenSelector::VisitForStatement(ForStatement* stmt) {
- BAILOUT("ForStatement");
+  // We do not handle loops with break or continue statements in their
+  // body. We will bail out when we hit those statements in the body.
+ if (stmt->init() != NULL) {
+ Visit(stmt->init());
+ CHECK_BAILOUT;
+ }
+ if (stmt->cond() != NULL) {
+ ProcessExpression(stmt->cond(), Expression::kTest);
+ CHECK_BAILOUT;
+ }
+ Visit(stmt->body());
+ if (stmt->next() != NULL) {
+ CHECK_BAILOUT;
+ Visit(stmt->next());
+ }
}
@@ -591,14 +770,12 @@
void CodeGenSelector::VisitDebuggerStatement(DebuggerStatement* stmt) {
- BAILOUT("DebuggerStatement");
+ // Debugger statement is supported.
}
void CodeGenSelector::VisitFunctionLiteral(FunctionLiteral* expr) {
- if (!expr->AllowsLazyCompilation()) {
- BAILOUT("FunctionLiteral does not allow lazy compilation");
- }
+ // Function literal is supported.
}
@@ -609,37 +786,92 @@
void CodeGenSelector::VisitConditional(Conditional* expr) {
- BAILOUT("Conditional");
+ ProcessExpression(expr->condition(), Expression::kTest);
+ CHECK_BAILOUT;
+ ProcessExpression(expr->then_expression(), context_);
+ CHECK_BAILOUT;
+ ProcessExpression(expr->else_expression(), context_);
}
void CodeGenSelector::VisitSlot(Slot* expr) {
- Slot::Type type = expr->type();
- if (type != Slot::PARAMETER && type != Slot::LOCAL) {
- BAILOUT("non-parameter/non-local slot reference");
- }
+ UNREACHABLE();
}
void CodeGenSelector::VisitVariableProxy(VariableProxy* expr) {
Expression* rewrite = expr->var()->rewrite();
- if (rewrite != NULL) Visit(rewrite);
+ // A rewrite of NULL indicates a global variable.
+ if (rewrite != NULL) {
+ // Non-global.
+ Slot* slot = rewrite->AsSlot();
+ if (slot != NULL) {
+ Slot::Type type = slot->type();
+ // When LOOKUP slots are enabled, some currently dead code
+ // implementing unary typeof will become live.
+ if (type == Slot::LOOKUP) {
+ BAILOUT("Lookup slot");
+ }
+ } else {
+#ifdef DEBUG
+ // Only remaining possibility is a property where the object is
+ // a slotted variable and the key is a smi.
+ Property* property = rewrite->AsProperty();
+ ASSERT_NOT_NULL(property);
+ Variable* object = property->obj()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(object);
+ ASSERT_NOT_NULL(object->slot());
+ ASSERT_NOT_NULL(property->key()->AsLiteral());
+ ASSERT(property->key()->AsLiteral()->handle()->IsSmi());
+#endif
+ }
+ }
}
void CodeGenSelector::VisitLiteral(Literal* expr) {
- // All literals are supported.
- expr->set_location(Location::Constant());
+ /* Nothing to do. */
}
void CodeGenSelector::VisitRegExpLiteral(RegExpLiteral* expr) {
- // RegexpLiterals are supported.
+ /* Nothing to do. */
}
void CodeGenSelector::VisitObjectLiteral(ObjectLiteral* expr) {
- BAILOUT("ObjectLiteral");
+ ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
+
+ for (int i = 0, len = properties->length(); i < len; i++) {
+ ObjectLiteral::Property* property = properties->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+
+ // For (non-compile-time) materialized literals and computed
+ // properties with symbolic keys we will use an IC and therefore not
+ // generate code for the key.
+ case ObjectLiteral::Property::COMPUTED: // Fall through.
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ if (property->key()->handle()->IsSymbol()) {
+ break;
+ }
+ // Fall through.
+
+ // In all other cases we need the key's value on the stack
+ // for a runtime call. (Relies on TEMP meaning STACK.)
+ case ObjectLiteral::Property::GETTER: // Fall through.
+ case ObjectLiteral::Property::SETTER: // Fall through.
+ case ObjectLiteral::Property::PROTOTYPE:
+ ProcessExpression(property->key(), Expression::kValue);
+ CHECK_BAILOUT;
+ break;
+ }
+ ProcessExpression(property->value(), Expression::kValue);
+ CHECK_BAILOUT;
+ }
}
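Stated compactly, the rule this switch implements: the key must be pushed for getters, setters, and prototype properties, and for computed or materialized-literal properties only when the key is not a symbol, since symbol keys go through an IC. A self-contained restatement (enumerator names from the patch; this models the switch, it is not V8 code):

    enum PropertyKind { CONSTANT, COMPUTED, MATERIALIZED_LITERAL,
                        GETTER, SETTER, PROTOTYPE };

    // True when the key's value must be on the stack for a runtime call.
    bool NeedsKeyOnStack(PropertyKind kind, bool key_is_symbol) {
      switch (kind) {
        case COMPUTED:
        case MATERIALIZED_LITERAL:
          return !key_is_symbol;  // symbol keys are handled through an IC
        case GETTER:
        case SETTER:
        case PROTOTYPE:
          return true;
        case CONSTANT:  // compile-time values were skipped earlier
        default:
          return false;
      }
    }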
@@ -649,7 +881,7 @@
Expression* subexpr = subexprs->at(i);
if (subexpr->AsLiteral() != NULL) continue;
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
- Visit(subexpr);
+ ProcessExpression(subexpr, Expression::kValue);
CHECK_BAILOUT;
}
}
@@ -661,10 +893,8 @@
void CodeGenSelector::VisitAssignment(Assignment* expr) {
- // We support plain non-compound assignments to parameters and
- // non-context (stack-allocated) locals.
- if (expr->starts_initialization_block()) BAILOUT("initialization block");
-
+ // We support plain non-compound assignments to properties, parameters,
+ // non-context (stack-allocated) locals, and global variables.
Token::Value op = expr->op();
if (op == Token::INIT_CONST) BAILOUT("initialize constant");
if (op != Token::ASSIGN && op != Token::INIT_VAR) {
@@ -672,17 +902,41 @@
}
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- if (var == NULL) BAILOUT("non-variable assignment");
-
- if (!var->is_global()) {
- ASSERT(var->slot() != NULL);
- Slot::Type type = var->slot()->type();
- if (type != Slot::PARAMETER && type != Slot::LOCAL) {
- BAILOUT("non-parameter/non-local slot assignment");
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(var == NULL || prop == NULL);
+ if (var != NULL) {
+ // All global variables are supported.
+ if (!var->is_global()) {
+ ASSERT(var->slot() != NULL);
+ Slot::Type type = var->slot()->type();
+ if (type == Slot::LOOKUP) {
+ BAILOUT("Lookup slot");
+ }
}
+ } else if (prop != NULL) {
+ ASSERT(prop->obj()->context() == Expression::kUninitialized ||
+ prop->obj()->context() == Expression::kValue);
+ ProcessExpression(prop->obj(), Expression::kValue);
+ CHECK_BAILOUT;
+ // We will only visit the key during code generation for keyed property
+ // stores. Leave its expression context uninitialized for named
+ // property stores.
+ Literal* lit = prop->key()->AsLiteral();
+ uint32_t ignored;
+ if (lit == NULL ||
+ !lit->handle()->IsSymbol() ||
+ String::cast(*(lit->handle()))->AsArrayIndex(&ignored)) {
+ ASSERT(prop->key()->context() == Expression::kUninitialized ||
+ prop->key()->context() == Expression::kValue);
+ ProcessExpression(prop->key(), Expression::kValue);
+ CHECK_BAILOUT;
+ }
+ } else {
+ // Any other target has been rewritten to throw a reference error.
+ BAILOUT("non-variable/non-property assignment");
}
- Visit(expr->value());
+ ProcessExpression(expr->value(), Expression::kValue);
}
@@ -692,7 +946,9 @@
void CodeGenSelector::VisitProperty(Property* expr) {
- BAILOUT("Property");
+ ProcessExpression(expr->obj(), Expression::kValue);
+ CHECK_BAILOUT;
+ ProcessExpression(expr->key(), Expression::kValue);
}
@@ -703,58 +959,154 @@
// Check for supported calls
if (var != NULL && var->is_possibly_eval()) {
- BAILOUT("Call to a function named 'eval'");
+ BAILOUT("call to the identifier 'eval'");
} else if (var != NULL && !var->is_this() && var->is_global()) {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is global
- // ----------------------------------
+ // Calls to global variables are supported.
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ BAILOUT("call to a lookup slot");
+ } else if (fun->AsProperty() != NULL) {
+ Property* prop = fun->AsProperty();
+ Literal* literal_key = prop->key()->AsLiteral();
+ if (literal_key != NULL && literal_key->handle()->IsSymbol()) {
+ ProcessExpression(prop->obj(), Expression::kValue);
+ CHECK_BAILOUT;
+ } else {
+ ProcessExpression(prop->obj(), Expression::kValue);
+ CHECK_BAILOUT;
+ ProcessExpression(prop->key(), Expression::kValue);
+ CHECK_BAILOUT;
+ }
} else {
- BAILOUT("Call to a non-global function");
+ // Otherwise the call is supported if the function expression is.
+ ProcessExpression(fun, Expression::kValue);
}
- // Check all arguments to the call
+ // Check all arguments to the call.
for (int i = 0; i < args->length(); i++) {
- Visit(args->at(i));
+ ProcessExpression(args->at(i), Expression::kValue);
CHECK_BAILOUT;
}
}
void CodeGenSelector::VisitCallNew(CallNew* expr) {
- BAILOUT("CallNew");
+ ProcessExpression(expr->expression(), Expression::kValue);
+ CHECK_BAILOUT;
+ ZoneList<Expression*>* args = expr->arguments();
+ // Check all arguments to the call.
+ for (int i = 0; i < args->length(); i++) {
+ ProcessExpression(args->at(i), Expression::kValue);
+ CHECK_BAILOUT;
+ }
}
void CodeGenSelector::VisitCallRuntime(CallRuntime* expr) {
- // In case of JS runtime function bail out.
- if (expr->function() == NULL) BAILOUT("CallRuntime");
// Check for inline runtime call
if (expr->name()->Get(0) == '_' &&
CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
- BAILOUT("InlineRuntimeCall");
+ BAILOUT("inlined runtime call");
}
+ // Check all arguments to the call. (Relies on TEMP meaning STACK.)
for (int i = 0; i < expr->arguments()->length(); i++) {
- Visit(expr->arguments()->at(i));
+ ProcessExpression(expr->arguments()->at(i), Expression::kValue);
CHECK_BAILOUT;
}
}
void CodeGenSelector::VisitUnaryOperation(UnaryOperation* expr) {
- BAILOUT("UnaryOperation");
+ switch (expr->op()) {
+ case Token::VOID:
+ ProcessExpression(expr->expression(), Expression::kEffect);
+ break;
+ case Token::NOT:
+ ProcessExpression(expr->expression(), Expression::kTest);
+ break;
+ case Token::TYPEOF:
+ ProcessExpression(expr->expression(), Expression::kValue);
+ break;
+ default:
+ BAILOUT("UnaryOperation");
+ }
}
void CodeGenSelector::VisitCountOperation(CountOperation* expr) {
- BAILOUT("CountOperation");
+ // We support postfix count operations on global variables.
+ if (expr->is_prefix()) BAILOUT("Prefix CountOperation");
+ Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+ if (var == NULL || !var->is_global()) BAILOUT("non-global postincrement");
+ ProcessExpression(expr->expression(), Expression::kValue);
}
void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
switch (expr->op()) {
- case Token::OR:
- Visit(expr->left());
+ case Token::COMMA:
+ ProcessExpression(expr->left(), Expression::kEffect);
CHECK_BAILOUT;
- Visit(expr->right());
+ ProcessExpression(expr->right(), context_);
+ break;
+
+ case Token::OR:
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect: // Fall through.
+ case Expression::kTest: // Fall through.
+ case Expression::kTestValue:
+ // The left subexpression's value is not needed; it is in a pure
+ // test context.
+ ProcessExpression(expr->left(), Expression::kTest);
+ break;
+ case Expression::kValue: // Fall through.
+ case Expression::kValueTest:
+ // The left subexpression's value is needed; it is in a hybrid
+ // value/test context.
+ ProcessExpression(expr->left(), Expression::kValueTest);
+ break;
+ }
+ CHECK_BAILOUT;
+ ProcessExpression(expr->right(), context_);
+ break;
+
+ case Token::AND:
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect: // Fall through.
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest:
+ // The left subexpression's value is not needed; it is in a pure
+ // test context.
+ ProcessExpression(expr->left(), Expression::kTest);
+ break;
+ case Expression::kValue: // Fall through.
+ case Expression::kTestValue:
+ // The left subexpression's value is needed; it is in a hybrid
+ // test/value context.
+ ProcessExpression(expr->left(), Expression::kTestValue);
+ break;
+ }
+ CHECK_BAILOUT;
+ ProcessExpression(expr->right(), context_);
+ break;
+
+ case Token::ADD:
+ case Token::SUB:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR:
+ ProcessExpression(expr->left(), Expression::kValue);
+ CHECK_BAILOUT;
+ ProcessExpression(expr->right(), Expression::kValue);
break;
default:
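The OR and AND cases above are easiest to read as a function from the operator and the outer context to the context given to the left operand; the right operand always inherits the outer context unchanged. A self-contained restatement with the enumerator names used in the patch (this models the switch, it is not V8 code):

    enum Context { kUninitialized, kEffect, kValue, kTest,
                   kValueTest, kTestValue };

    // Context for the left operand of a shortcut (||, &&) operation.
    Context LeftContext(bool is_or, Context outer) {
      switch (outer) {
        case kEffect:
        case kTest:
          return kTest;  // the left value itself is never needed
        case kValue:
          // Needed only when the left operand decides the result:
          // 'a || b' yields a when a is true, 'a && b' when a is false.
          return is_or ? kValueTest : kTestValue;
        case kValueTest:
          return is_or ? kValueTest : kTest;
        case kTestValue:
          return is_or ? kTest : kTestValue;
        default:
          return kUninitialized;  // unreachable in the selector
      }
    }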
@@ -764,12 +1116,14 @@
void CodeGenSelector::VisitCompareOperation(CompareOperation* expr) {
- BAILOUT("CompareOperation");
+ ProcessExpression(expr->left(), Expression::kValue);
+ CHECK_BAILOUT;
+ ProcessExpression(expr->right(), Expression::kValue);
}
void CodeGenSelector::VisitThisFunction(ThisFunction* expr) {
- BAILOUT("ThisFunction");
+ // ThisFunction is supported.
}
#undef BAILOUT
diff --git a/src/compiler.h b/src/compiler.h
index 579970b..546e446 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -71,6 +71,19 @@
// true on success and false if the compilation resulted in a stack
// overflow.
static bool CompileLazy(Handle<SharedFunctionInfo> shared, int loop_nesting);
+
+ // Compile a function boilerplate object (the function is possibly
+ // lazily compiled). Called recursively from a backend code
+ // generator 'caller' to build the boilerplate.
+ static Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node,
+ Handle<Script> script,
+ AstVisitor* caller);
+
+ // Set the function info for a newly compiled function.
+ static void SetFunctionInfo(Handle<JSFunction> fun,
+ FunctionLiteral* lit,
+ bool is_toplevel,
+ Handle<Script> script);
};
diff --git a/src/conversions.cc b/src/conversions.cc
index 3e66d28..fd6d38d 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -50,7 +50,7 @@
// Provide a common interface to getting a character at a certain
// index from a char* or a String object.
static inline int GetChar(const char* str, int index) {
- ASSERT(index >= 0 && index < static_cast<int>(strlen(str)));
+ ASSERT(index >= 0 && index < StrLength(str));
return str[index];
}
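StrLength itself is not part of this patch. Given the other size_t-to-int fixes in this change, it is presumably a strlen wrapper that narrows to int with a checked cast, along these lines:

    // Assumed shape of StrLength; a reconstruction, not the V8 source.
    // ASSERT is V8's debug-mode assertion macro.
    #include <cstring>

    static inline int StrLength(const char* string) {
      size_t length = strlen(string);
      ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
      return static_cast<int>(length);
    }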
@@ -61,7 +61,7 @@
static inline int GetLength(const char* str) {
- return strlen(str);
+ return StrLength(str);
}
@@ -101,7 +101,7 @@
static inline bool IsSpace(const char* str, int index) {
- ASSERT(index >= 0 && index < static_cast<int>(strlen(str)));
+ ASSERT(index >= 0 && index < StrLength(str));
return Scanner::kIsWhiteSpace.get(str[index]);
}
@@ -121,13 +121,13 @@
static inline bool SubStringEquals(String* str, int index, const char* other) {
HandleScope scope;
int str_length = str->length();
- int other_length = strlen(other);
+ int other_length = StrLength(other);
int end = index + other_length < str_length ?
index + other_length :
str_length;
- Handle<String> slice =
- Factory::NewStringSlice(Handle<String>(str), index, end);
- return slice->IsEqualTo(Vector<const char>(other, other_length));
+ Handle<String> substring =
+ Factory::NewSubString(Handle<String>(str), index, end);
+ return substring->IsEqualTo(Vector<const char>(other, other_length));
}
@@ -319,7 +319,7 @@
ReleaseCString(str, cstr);
if (result != 0.0 || end != cstr) {
// It appears that strtod worked
- index += end - cstr;
+ index += static_cast<int>(end - cstr);
} else {
// Check for {+,-,}Infinity
bool is_negative = (GetChar(str, index) == '-');
@@ -383,7 +383,7 @@
int sign;
char* decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
- int length = strlen(decimal_rep);
+ int length = StrLength(decimal_rep);
if (sign) builder.AddCharacter('-');
@@ -465,7 +465,7 @@
int decimal_point;
int sign;
char* decimal_rep = dtoa(abs_value, 3, f, &decimal_point, &sign, NULL);
- int decimal_rep_length = strlen(decimal_rep);
+ int decimal_rep_length = StrLength(decimal_rep);
// Create a representation that is padded with zeros if needed.
int zero_prefix_length = 0;
@@ -526,7 +526,8 @@
if (significant_digits != 1) {
builder.AddCharacter('.');
builder.AddString(decimal_rep + 1);
- builder.AddPadding('0', significant_digits - strlen(decimal_rep));
+ int rep_length = StrLength(decimal_rep);
+ builder.AddPadding('0', significant_digits - rep_length);
}
builder.AddCharacter('e');
@@ -553,11 +554,11 @@
char* decimal_rep = NULL;
if (f == -1) {
decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL);
- f = strlen(decimal_rep) - 1;
+ f = StrLength(decimal_rep) - 1;
} else {
decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL);
}
- int decimal_rep_length = strlen(decimal_rep);
+ int decimal_rep_length = StrLength(decimal_rep);
ASSERT(decimal_rep_length > 0);
ASSERT(decimal_rep_length <= f + 1);
USE(decimal_rep_length);
@@ -585,7 +586,7 @@
int decimal_point;
int sign;
char* decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL);
- int decimal_rep_length = strlen(decimal_rep);
+ int decimal_rep_length = StrLength(decimal_rep);
ASSERT(decimal_rep_length <= p);
int exponent = decimal_point - 1;
@@ -619,7 +620,7 @@
builder.AddCharacter('.');
const int extra = negative ? 2 : 1;
if (decimal_rep_length > decimal_point) {
- const int len = strlen(decimal_rep + decimal_point);
+ const int len = StrLength(decimal_rep + decimal_point);
const int n = Min(len, p - (builder.position() - extra));
builder.AddSubstring(decimal_rep + decimal_point, n);
}
diff --git a/src/d8.cc b/src/d8.cc
index e4658b1..dedbd55 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -159,7 +159,11 @@
printf(" ");
}
v8::String::Utf8Value str(args[i]);
- fwrite(*str, sizeof(**str), str.length(), stdout);
+ int n = fwrite(*str, sizeof(**str), str.length(), stdout);
+ if (n != str.length()) {
+ printf("Error in fwrite\n");
+ exit(1);
+ }
}
return Undefined();
}
@@ -203,7 +207,7 @@
return ThrowException(String::New("Error loading file"));
}
if (!ExecuteString(source, String::New(*file), false, false)) {
- return ThrowException(String::New("Error executing file"));
+ return ThrowException(String::New("Error executing file"));
}
}
return Undefined();
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
index 9d5cace..0701382 100644
--- a/src/debug-agent.cc
+++ b/src/debug-agent.cc
@@ -105,7 +105,7 @@
if (session_ != NULL) {
static const char* message = "Remote debugging session already active\r\n";
- client->Send(message, strlen(message));
+ client->Send(message, StrLength(message));
delete client;
return;
}
@@ -172,14 +172,15 @@
}
// Convert UTF-8 to UTF-16.
- unibrow::Utf8InputBuffer<> buf(*message, strlen(*message));
+ unibrow::Utf8InputBuffer<> buf(*message,
+ StrLength(*message));
int len = 0;
while (buf.has_more()) {
buf.GetNext();
len++;
}
int16_t* temp = NewArray<int16_t>(len + 1);
- buf.Reset(*message, strlen(*message));
+ buf.Reset(*message, StrLength(*message));
for (int i = 0; i < len; i++) {
temp[i] = buf.GetNext();
}
@@ -203,7 +204,8 @@
const char* DebuggerAgentUtil::kContentLength = "Content-Length";
-int DebuggerAgentUtil::kContentLengthSize = strlen(kContentLength);
+int DebuggerAgentUtil::kContentLengthSize =
+ StrLength(kContentLength);
SmartPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
diff --git a/src/debug-delay.js b/src/debug-delay.js
index 35f7fcd..04fde1f 100644
--- a/src/debug-delay.js
+++ b/src/debug-delay.js
@@ -1245,6 +1245,8 @@
this.suspendRequest_(request, response);
} else if (request.command == 'version') {
this.versionRequest_(request, response);
+ } else if (request.command == 'profile') {
+ this.profileRequest_(request, response);
} else {
throw new Error('Unknown command "' + request.command + '" in request');
}
@@ -1924,6 +1926,25 @@
};
+DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
+ if (!request.arguments) {
+ return response.failed('Missing arguments');
+ }
+ var modules = parseInt(request.arguments.modules);
+ if (isNaN(modules)) {
+ return response.failed('Modules is not an integer');
+ }
+ if (request.arguments.command == 'resume') {
+ %ProfilerResume(modules);
+ } else if (request.arguments.command == 'pause') {
+ %ProfilerPause(modules);
+ } else {
+ return response.failed('Unknown command');
+ }
+ response.body = {};
+};
+
+
// Check whether the previously processed command caused the VM to become
// running.
DebugCommandProcessor.prototype.isRunning = function() {
diff --git a/src/debug.cc b/src/debug.cc
index d3a6b5b..2c4552e 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -108,12 +108,13 @@
// current value of these.
if (RelocInfo::IsPosition(rmode())) {
if (RelocInfo::IsStatementPosition(rmode())) {
- statement_position_ =
- rinfo()->data() - debug_info_->shared()->start_position();
+ statement_position_ = static_cast<int>(
+ rinfo()->data() - debug_info_->shared()->start_position());
}
// Always update the position as we don't want that to be before the
// statement position.
- position_ = rinfo()->data() - debug_info_->shared()->start_position();
+ position_ = static_cast<int>(
+ rinfo()->data() - debug_info_->shared()->start_position());
ASSERT(position_ >= 0);
ASSERT(statement_position_ >= 0);
}
@@ -182,7 +183,7 @@
// Check if this break point is closer than what was previously found.
if (this->pc() < pc && pc - this->pc() < distance) {
closest_break_point = break_point();
- distance = pc - this->pc();
+ distance = static_cast<int>(pc - this->pc());
// Check whether we can't get any closer.
if (distance == 0) break;
}
@@ -1758,6 +1759,8 @@
v8::Debug::MessageHandler2 Debugger::message_handler_ = NULL;
bool Debugger::debugger_unload_pending_ = false;
v8::Debug::HostDispatchHandler Debugger::host_dispatch_handler_ = NULL;
+v8::Debug::DebugMessageDispatchHandler
+ Debugger::debug_message_dispatch_handler_ = NULL;
int Debugger::host_dispatch_micros_ = 100 * 1000;
DebuggerAgent* Debugger::agent_ = NULL;
LockingCommandMessageQueue Debugger::command_queue_(kQueueInitialSize);
@@ -2398,6 +2401,12 @@
}
+void Debugger::SetDebugMessageDispatchHandler(
+ v8::Debug::DebugMessageDispatchHandler handler) {
+ debug_message_dispatch_handler_ = handler;
+}
+
+
// Calls the registered debug message handler. This callback is part of the
// public API.
void Debugger::InvokeMessageHandler(MessageImpl message) {
@@ -2428,6 +2437,10 @@
if (!Debug::InDebugger()) {
StackGuard::DebugCommand();
}
+
+ if (Debugger::debug_message_dispatch_handler_ != NULL) {
+ Debugger::debug_message_dispatch_handler_();
+ }
}
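The new hook gives embedders a synchronous notification that a debugger command has been queued. Assuming the public v8::Debug wrapper mirrors Debugger::SetDebugMessageDispatchHandler (the typedef is referenced below but declared outside this patch), usage would look roughly like this; OnDebugMessagePending is a hypothetical name:

    // Hypothetical embedder code; the handler takes no arguments and may
    // run on a thread other than the embedder's main thread.
    static void OnDebugMessagePending() {
      // Wake the main loop so it calls into V8 to process the message;
      // avoid re-entering V8 directly from here.
    }

    v8::Debug::SetDebugMessageDispatchHandler(OnDebugMessagePending);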
diff --git a/src/debug.h b/src/debug.h
index 29c2bc2..24f0db4 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -102,7 +102,9 @@
void ClearAllDebugBreak();
- inline int code_position() { return pc() - debug_info_->code()->entry(); }
+ inline int code_position() {
+ return static_cast<int>(pc() - debug_info_->code()->entry());
+ }
inline int break_point() { return break_point_; }
inline int position() { return position_; }
inline int statement_position() { return statement_position_; }
@@ -368,15 +370,6 @@
// Garbage collection notifications.
static void AfterGarbageCollection();
- // Code generation assumptions.
- static const int kIa32CallInstructionLength = 5;
- static const int kIa32JSReturnSequenceLength = 6;
-
- // The x64 JS return sequence is padded with int3 to make it large
- // enough to hold a call instruction when the debugger patches it.
- static const int kX64CallInstructionLength = 13;
- static const int kX64JSReturnSequenceLength = 13;
-
// Code generator routines.
static void GenerateLoadICDebugBreak(MacroAssembler* masm);
static void GenerateStoreICDebugBreak(MacroAssembler* masm);
@@ -625,6 +618,8 @@
static void SetMessageHandler(v8::Debug::MessageHandler2 handler);
static void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
int period);
+ static void SetDebugMessageDispatchHandler(
+ v8::Debug::DebugMessageDispatchHandler handler);
// Invoke the message handler function.
static void InvokeMessageHandler(MessageImpl message);
@@ -685,6 +680,7 @@
static v8::Debug::MessageHandler2 message_handler_;
static bool debugger_unload_pending_; // Was message handler cleared?
static v8::Debug::HostDispatchHandler host_dispatch_handler_;
+ static v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
static int host_dispatch_micros_;
static DebuggerAgent* agent_;
diff --git a/src/disassembler.cc b/src/disassembler.cc
index e2f908d..524dbe6 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -74,7 +74,7 @@
}
if (code_ != NULL) {
- int offs = pc - code_->instruction_start();
+ int offs = static_cast<int>(pc - code_->instruction_start());
// print as code offset, if it seems reasonable
if (0 <= offs && offs < code_->instruction_size()) {
OS::SNPrintF(buffer, "%d (%p)", offs, pc);
@@ -289,7 +289,7 @@
}
delete it;
- return pc - begin;
+ return static_cast<int>(pc - begin);
}
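The static_cast<int> additions here and in debug.cc above follow one pattern: subtracting two byte pointers yields a ptrdiff_t, which is 64 bits wide on 64-bit targets, so storing the difference in an int draws implicit-truncation warnings. A self-contained illustration:

    #include <cstddef>

    // Offsets in V8 are ints; the explicit cast documents the narrowing.
    int OffsetOf(const unsigned char* pc, const unsigned char* begin) {
      std::ptrdiff_t diff = pc - begin;  // 64-bit difference on 64-bit targets
      return static_cast<int>(diff);
    }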
diff --git a/src/dtoa-config.c b/src/dtoa-config.c
index bc0a58a..a1acd2d 100644
--- a/src/dtoa-config.c
+++ b/src/dtoa-config.c
@@ -38,7 +38,7 @@
*/
#if !(defined(__APPLE__) && defined(__MACH__)) && \
- !defined(WIN32) && !defined(__FreeBSD__)
+ !defined(WIN32) && !defined(__FreeBSD__) && !defined(__OpenBSD__)
#include <endian.h>
#endif
#include <math.h>
@@ -47,14 +47,16 @@
/* The floating point word order on ARM is big endian when floating point
* emulation is used, even if the byte order is little endian */
#if !(defined(__APPLE__) && defined(__MACH__)) && !defined(WIN32) && \
- !defined(__FreeBSD__) && __FLOAT_WORD_ORDER == __BIG_ENDIAN
+ !defined(__FreeBSD__) && !defined(__OpenBSD__) && \
+ __FLOAT_WORD_ORDER == __BIG_ENDIAN
#define IEEE_MC68k
#else
#define IEEE_8087
#endif
#define __MATH_H__
-#if defined(__APPLE__) && defined(__MACH__) || defined(__FreeBSD__)
+#if defined(__APPLE__) && defined(__MACH__) || defined(__FreeBSD__) || \
+ defined(__OpenBSD__)
/* stdlib.h on FreeBSD and Apple's 10.5 and later SDKs will mangle the
* name of strtod. If it's included after strtod is redefined as
* gay_strtod, it will mangle the name of gay_strtod, which is
diff --git a/src/execution.cc b/src/execution.cc
index 229b8df..2f646a5 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -31,18 +31,8 @@
#include "api.h"
#include "codegen-inl.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/simulator-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/simulator-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/simulator-arm.h"
-#else
-#error Unsupported target architecture.
-#endif
-
#include "debug.h"
+#include "simulator.h"
#include "v8threads.h"
namespace v8 {
@@ -237,15 +227,14 @@
// If the current limits are special (eg due to a pending interrupt) then
// leave them alone.
uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(limit);
- if (thread_local_.jslimit_ == thread_local_.initial_jslimit_) {
+ if (thread_local_.jslimit_ == thread_local_.real_jslimit_) {
thread_local_.jslimit_ = jslimit;
- Heap::SetStackLimit(jslimit);
}
- if (thread_local_.climit_ == thread_local_.initial_climit_) {
+ if (thread_local_.climit_ == thread_local_.real_climit_) {
thread_local_.climit_ = limit;
}
- thread_local_.initial_climit_ = limit;
- thread_local_.initial_jslimit_ = jslimit;
+ thread_local_.real_climit_ = limit;
+ thread_local_.real_jslimit_ = jslimit;
}
@@ -354,7 +343,7 @@
char* StackGuard::RestoreStackGuard(char* from) {
ExecutionAccess access;
memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
- Heap::SetStackLimit(thread_local_.jslimit_);
+ Heap::SetStackLimits();
return from + sizeof(ThreadLocal);
}
@@ -366,33 +355,33 @@
void StackGuard::FreeThreadResources() {
Thread::SetThreadLocal(
stack_limit_key,
- reinterpret_cast<void*>(thread_local_.initial_climit_));
+ reinterpret_cast<void*>(thread_local_.real_climit_));
}
void StackGuard::ThreadLocal::Clear() {
- initial_jslimit_ = kIllegalLimit;
+ real_jslimit_ = kIllegalLimit;
jslimit_ = kIllegalLimit;
- initial_climit_ = kIllegalLimit;
+ real_climit_ = kIllegalLimit;
climit_ = kIllegalLimit;
nesting_ = 0;
postpone_interrupts_nesting_ = 0;
interrupt_flags_ = 0;
- Heap::SetStackLimit(kIllegalLimit);
+ Heap::SetStackLimits();
}
void StackGuard::ThreadLocal::Initialize() {
- if (initial_climit_ == kIllegalLimit) {
+ if (real_climit_ == kIllegalLimit) {
// Takes the address of the limit variable in order to find out where
// the top of stack is right now.
uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize;
ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize);
- initial_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
+ real_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
- initial_climit_ = limit;
+ real_climit_ = limit;
climit_ = limit;
- Heap::SetStackLimit(SimulatorStack::JsLimitFromCLimit(limit));
+ Heap::SetStackLimits();
}
nesting_ = 0;
postpone_interrupts_nesting_ = 0;
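ThreadLocal::Initialize above derives the current stack top from the address of one of its own locals. A self-contained demonstration of the trick; kLimitSize here is a hypothetical headroom value, not V8's constant:

    #include <cstdint>
    #include <cstdio>

    static const uintptr_t kLimitSize = 512 * 1024;  // hypothetical headroom

    int main() {
      // A local's address approximates the current stack top; the limit
      // sits kLimitSize bytes below it (the stack grows downwards).
      uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize;
      std::printf("limit: 0x%llx\n",
                  static_cast<unsigned long long>(limit));
      return 0;
    }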
diff --git a/src/execution.h b/src/execution.h
index ac00aa4..52198c4 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -150,10 +150,6 @@
// is assumed to grow downwards.
static void SetStackLimit(uintptr_t limit);
- static Address address_of_jslimit() {
- return reinterpret_cast<Address>(&thread_local_.jslimit_);
- }
-
// Threading support.
static char* ArchiveStackGuard(char* to);
static char* RestoreStackGuard(char* from);
@@ -181,16 +177,24 @@
#endif
static void Continue(InterruptFlag after_what);
- // This provides an asynchronous read of the stack limit for the current
+ // This provides an asynchronous read of the stack limits for the current
// thread. There are no locks protecting this, but it is assumed that you
// have the global V8 lock if you are using multiple V8 threads.
static uintptr_t climit() {
return thread_local_.climit_;
}
-
static uintptr_t jslimit() {
return thread_local_.jslimit_;
}
+ static uintptr_t real_jslimit() {
+ return thread_local_.real_jslimit_;
+ }
+ static Address address_of_jslimit() {
+ return reinterpret_cast<Address>(&thread_local_.jslimit_);
+ }
+ static Address address_of_real_jslimit() {
+ return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
+ }
private:
// You should hold the ExecutionAccess lock when calling this method.
@@ -198,17 +202,17 @@
// You should hold the ExecutionAccess lock when calling this method.
static void set_limits(uintptr_t value, const ExecutionAccess& lock) {
- Heap::SetStackLimit(value);
thread_local_.jslimit_ = value;
thread_local_.climit_ = value;
+ Heap::SetStackLimits();
}
- // Reset limits to initial values. For example after handling interrupt.
+ // Reset limits to actual values, for example after handling an interrupt.
// You should hold the ExecutionAccess lock when calling this method.
static void reset_limits(const ExecutionAccess& lock) {
- thread_local_.jslimit_ = thread_local_.initial_jslimit_;
- Heap::SetStackLimit(thread_local_.jslimit_);
- thread_local_.climit_ = thread_local_.initial_climit_;
+ thread_local_.jslimit_ = thread_local_.real_jslimit_;
+ thread_local_.climit_ = thread_local_.real_climit_;
+ Heap::SetStackLimits();
}
// Enable or disable interrupts.
@@ -232,10 +236,21 @@
// Clear.
void Initialize();
void Clear();
- uintptr_t initial_jslimit_;
+
+ // The stack limit is split into a JavaScript and a C++ stack limit. These
+ // two are the same except when running on a simulator where the C++ and
+ // JavaScript stacks are separate. Each of the two stack limits has two
+ // values. The one with the real_ prefix is the actual stack limit
+ // set for the VM. The one without the real_ prefix has the same value as
+ // the actual stack limit except when there is an interruption (e.g. debug
+ // break or preemption) in which case it is lowered to make stack checks
+ // fail. Both the generated code and the runtime system check against the
+ // one without the real_ prefix.
+ uintptr_t real_jslimit_; // Actual JavaScript stack limit set for the VM.
uintptr_t jslimit_;
- uintptr_t initial_climit_;
+ uintptr_t real_climit_; // Actual C++ stack limit set for the VM.
uintptr_t climit_;
+
int nesting_;
int postpone_interrupts_nesting_;
int interrupt_flags_;
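A minimal model of the two-limit scheme the comment above describes (a sketch, not V8 code): an interruption raises only jslimit_, so the next stack check fails and the runtime regains control, while real_jslimit_ keeps the true limit for reset_limits() to restore.

    #include <cstdint>

    struct StackGuardModel {
      uintptr_t real_jslimit_;  // actual limit, fixed per thread
      uintptr_t jslimit_;       // what stack checks compare against

      // Force every subsequent check to fail by raising the limit.
      void RequestInterrupt() { jslimit_ = ~static_cast<uintptr_t>(0); }
      void ResetLimits() { jslimit_ = real_jslimit_; }
      bool CheckPasses(uintptr_t sp) const { return sp >= jslimit_; }
    };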
diff --git a/src/factory.cc b/src/factory.cc
index 32b69db..83775ef 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -106,10 +106,10 @@
}
-Handle<String> Factory::NewStringSlice(Handle<String> str,
- int begin,
- int end) {
- CALL_HEAP_FUNCTION(str->Slice(begin, end), String);
+Handle<String> Factory::NewSubString(Handle<String> str,
+ int begin,
+ int end) {
+ CALL_HEAP_FUNCTION(str->SubString(begin, end), String);
}
@@ -189,7 +189,7 @@
script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
script->set_wrapper(*wrapper);
script->set_line_ends(Heap::undefined_value());
- script->set_eval_from_function(Heap::undefined_value());
+ script->set_eval_from_shared(Heap::undefined_value());
script->set_eval_from_instructions_offset(Smi::FromInt(0));
return script;
diff --git a/src/factory.h b/src/factory.h
index cb438e9..951c043 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -106,11 +106,10 @@
static Handle<String> NewConsString(Handle<String> first,
Handle<String> second);
- // Create a new sliced string object which represents a substring of a
- // backing string.
- static Handle<String> NewStringSlice(Handle<String> str,
- int begin,
- int end);
+ // Create a new string object which holds a substring of a string.
+ static Handle<String> NewSubString(Handle<String> str,
+ int begin,
+ int end);
// Creates a new external String object. There are two String encodings
// in the system: ASCII and two byte. Unlike other String types, it does
diff --git a/src/fast-codegen.cc b/src/fast-codegen.cc
index d0c264a..1bdc367 100644
--- a/src/fast-codegen.cc
+++ b/src/fast-codegen.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "fast-codegen.h"
#include "stub-cache.h"
#include "debug.h"
@@ -35,6 +36,8 @@
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm_)
+
Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
Handle<Script> script,
bool is_eval) {
@@ -53,6 +56,7 @@
int FastCodeGenerator::SlotOffset(Slot* slot) {
+ ASSERT(slot != NULL);
// Offset is negative because higher indexes are at lower addresses.
int offset = -slot->index() * kPointerSize;
// Adjust by a (parameter or local) base offset.
@@ -75,86 +79,52 @@
int length = declarations->length();
int globals = 0;
for (int i = 0; i < length; i++) {
- Declaration* node = declarations->at(i);
- Variable* var = node->proxy()->var();
+ Declaration* decl = declarations->at(i);
+ Variable* var = decl->proxy()->var();
Slot* slot = var->slot();
// If it was not possible to allocate the variable at compile
// time, we need to "declare" it at runtime to make sure it
// actually exists in the local context.
if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
- UNREACHABLE();
+ VisitDeclaration(decl);
} else {
// Count global variables and functions for later processing
globals++;
}
}
- // Return in case of no declared global functions or variables.
- if (globals == 0) return;
-
// Compute array of global variable and function declarations.
- Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
- for (int j = 0, i = 0; i < length; i++) {
- Declaration* node = declarations->at(i);
- Variable* var = node->proxy()->var();
- Slot* slot = var->slot();
+ // Do nothing in case of no declared global functions or variables.
+ if (globals > 0) {
+ Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
+ for (int j = 0, i = 0; i < length; i++) {
+ Declaration* decl = declarations->at(i);
+ Variable* var = decl->proxy()->var();
+ Slot* slot = var->slot();
- if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
- array->set(j++, *(var->name()));
- if (node->fun() == NULL) {
- if (var->mode() == Variable::CONST) {
- // In case this is const property use the hole.
- array->set_the_hole(j++);
+ if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
+ array->set(j++, *(var->name()));
+ if (decl->fun() == NULL) {
+ if (var->mode() == Variable::CONST) {
+ // In case this is const property use the hole.
+ array->set_the_hole(j++);
+ } else {
+ array->set_undefined(j++);
+ }
} else {
- array->set_undefined(j++);
+ Handle<JSFunction> function =
+ Compiler::BuildBoilerplate(decl->fun(), script_, this);
+ // Check for stack-overflow exception.
+ if (HasStackOverflow()) return;
+ array->set(j++, *function);
}
- } else {
- Handle<JSFunction> function = BuildBoilerplate(node->fun());
- // Check for stack-overflow exception.
- if (HasStackOverflow()) return;
- array->set(j++, *function);
}
}
+ // Invoke the platform-dependent code generator to do the actual
+ // declaration of the global variables and functions.
+ DeclareGlobals(array);
}
-
- // Invoke the platform-dependent code generator to do the actual
- // declaration the global variables and functions.
- DeclareGlobals(array);
-}
-
-Handle<JSFunction> FastCodeGenerator::BuildBoilerplate(FunctionLiteral* fun) {
-#ifdef DEBUG
- // We should not try to compile the same function literal more than
- // once.
- fun->mark_as_compiled();
-#endif
-
- // Generate code
- Handle<Code> code = CodeGenerator::ComputeLazyCompile(fun->num_parameters());
- // Check for stack-overflow exception.
- if (code.is_null()) {
- SetStackOverflow();
- return Handle<JSFunction>::null();
- }
-
- // Create a boilerplate function.
- Handle<JSFunction> function =
- Factory::NewFunctionBoilerplate(fun->name(),
- fun->materialized_literal_count(),
- code);
- CodeGenerator::SetFunctionInfo(function, fun, false, script_);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Notify debugger that a new function has been added.
- Debugger::OnNewFunction(function);
-#endif
-
- // Set the expected number of properties for instances and return
- // the resulting function.
- SetExpectedNofPropertiesFromEstimate(function,
- fun->expected_property_count());
- return function;
}
@@ -186,8 +156,91 @@
}
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
+void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
+#ifdef DEBUG
+ Expression::Context expected = Expression::kUninitialized;
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect: // Fall through.
+ case Expression::kTest:
+ // The value of the left subexpression is not needed.
+ expected = Expression::kTest;
+ break;
+ case Expression::kValue:
+ // The value of the left subexpression is needed and its specific
+ // context depends on the operator.
+ expected = (expr->op() == Token::OR)
+ ? Expression::kValueTest
+ : Expression::kTestValue;
+ break;
+ case Expression::kValueTest:
+ // The value of the left subexpression is needed for OR.
+ expected = (expr->op() == Token::OR)
+ ? Expression::kValueTest
+ : Expression::kTest;
+ break;
+ case Expression::kTestValue:
+ // The value of the left subexpression is needed for AND.
+ expected = (expr->op() == Token::OR)
+ ? Expression::kTest
+ : Expression::kTestValue;
+ break;
+ }
+ ASSERT_EQ(expected, expr->left()->context());
+ ASSERT_EQ(expr->context(), expr->right()->context());
+#endif
+
+ Label eval_right, done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+
+ // Set up the appropriate context for the left subexpression based on the
+ // operation and our own context.
+ if (expr->op() == Token::OR) {
+ // If there is no usable true label in the OR expression's context, use
+ // the end of this expression, otherwise inherit the same true label.
+ if (expr->context() == Expression::kEffect ||
+ expr->context() == Expression::kValue) {
+ true_label_ = &done;
+ }
+ // The false label is the label of the second subexpression.
+ false_label_ = &eval_right;
+ } else {
+ ASSERT_EQ(Token::AND, expr->op());
+ // The true label is the label of the second subexpression.
+ true_label_ = &eval_right;
+ // If there is no usable false label in the AND expression's context,
+ // use the end of the expression; otherwise inherit the same false
+ // label.
+ if (expr->context() == Expression::kEffect ||
+ expr->context() == Expression::kValue) {
+ false_label_ = &done;
+ }
+ }
+
+ Visit(expr->left());
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+
+ __ bind(&eval_right);
+ Visit(expr->right());
+
+ __ bind(&done);
+}
+
+
+void FastCodeGenerator::VisitBlock(Block* stmt) {
+ Comment cmnt(masm_, "[ Block");
+ SetStatementPosition(stmt);
+ VisitStatements(stmt->statements());
+}
+
+
+void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+ Comment cmnt(masm_, "[ ExpressionStatement");
+ SetStatementPosition(stmt);
+ Visit(stmt->expression());
}
@@ -198,7 +251,29 @@
void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ IfStatement");
+ // Expressions cannot recursively enter statements; there are no labels in
+ // the state.
+ ASSERT_EQ(NULL, true_label_);
+ ASSERT_EQ(NULL, false_label_);
+ Label then_part, else_part, done;
+
+ // Do not worry about optimizing for empty then or else bodies.
+ true_label_ = &then_part;
+ false_label_ = &else_part;
+ ASSERT(stmt->condition()->context() == Expression::kTest);
+ Visit(stmt->condition());
+ true_label_ = NULL;
+ false_label_ = NULL;
+
+ __ bind(&then_part);
+ Visit(stmt->then_statement());
+ __ jmp(&done);
+
+ __ bind(&else_part);
+ Visit(stmt->else_statement());
+
+ __ bind(&done);
}
@@ -228,17 +303,120 @@
void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ DoWhileStatement");
+ increment_loop_depth();
+ Label body, exit, stack_limit_hit, stack_check_success;
+
+ __ bind(&body);
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ __ StackLimitCheck(&stack_limit_hit);
+ __ bind(&stack_check_success);
+
+ // We are not in an expression context because we have been compiling
+ // statements. Set up a test expression context for the condition.
+ ASSERT_EQ(NULL, true_label_);
+ ASSERT_EQ(NULL, false_label_);
+ true_label_ = &body;
+ false_label_ = &exit;
+ ASSERT(stmt->cond()->context() == Expression::kTest);
+ Visit(stmt->cond());
+ true_label_ = NULL;
+ false_label_ = NULL;
+
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
+ __ bind(&exit);
+
+ decrement_loop_depth();
}
void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ WhileStatement");
+ increment_loop_depth();
+ Label test, body, exit, stack_limit_hit, stack_check_success;
+
+ // Emit the test at the bottom of the loop.
+ __ jmp(&test);
+
+ __ bind(&body);
+ Visit(stmt->body());
+
+ __ bind(&test);
+ // Check stack before looping.
+ __ StackLimitCheck(&stack_limit_hit);
+ __ bind(&stack_check_success);
+
+ // We are not in an expression context because we have been compiling
+ // statements. Set up a test expression context for the condition.
+ ASSERT_EQ(NULL, true_label_);
+ ASSERT_EQ(NULL, false_label_);
+ true_label_ = &body;
+ false_label_ = &exit;
+ ASSERT(stmt->cond()->context() == Expression::kTest);
+ Visit(stmt->cond());
+ true_label_ = NULL;
+ false_label_ = NULL;
+
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
+ __ bind(&exit);
+
+ decrement_loop_depth();
}
void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ ForStatement");
+ Label test, body, exit, stack_limit_hit, stack_check_success;
+ if (stmt->init() != NULL) Visit(stmt->init());
+
+ increment_loop_depth();
+ // Emit the test at the bottom of the loop (even if empty).
+ __ jmp(&test);
+ __ bind(&body);
+ Visit(stmt->body());
+
+ // Check stack before looping.
+ __ StackLimitCheck(&stack_limit_hit);
+ __ bind(&stack_check_success);
+
+ if (stmt->next() != NULL) Visit(stmt->next());
+
+ __ bind(&test);
+
+ if (stmt->cond() == NULL) {
+ // For an empty test, jump to the top of the loop.
+ __ jmp(&body);
+ } else {
+ // We are not in an expression context because we have been compiling
+ // statements. Set up a test expression context for the condition.
+ ASSERT_EQ(NULL, true_label_);
+ ASSERT_EQ(NULL, false_label_);
+
+ true_label_ = &body;
+ false_label_ = &exit;
+ ASSERT(stmt->cond()->context() == Expression::kTest);
+ Visit(stmt->cond());
+ true_label_ = NULL;
+ false_label_ = NULL;
+ }
+
+ __ bind(&stack_limit_hit);
+ StackCheckStub stack_stub;
+ __ CallStub(&stack_stub);
+ __ jmp(&stack_check_success);
+
+ __ bind(&exit);
+ decrement_loop_depth();
}
@@ -258,7 +436,12 @@
void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
- UNREACHABLE();
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ Comment cmnt(masm_, "[ DebuggerStatement");
+ SetStatementPosition(stmt);
+ __ CallRuntime(Runtime::kDebugBreak, 0);
+ // Ignore the return value.
+#endif
}
@@ -269,7 +452,37 @@
void FastCodeGenerator::VisitConditional(Conditional* expr) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ Conditional");
+ ASSERT_EQ(Expression::kTest, expr->condition()->context());
+ ASSERT_EQ(expr->context(), expr->then_expression()->context());
+ ASSERT_EQ(expr->context(), expr->else_expression()->context());
+
+ Label true_case, false_case, done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+
+ true_label_ = &true_case;
+ false_label_ = &false_case;
+ Visit(expr->condition());
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+
+ __ bind(&true_case);
+ Visit(expr->then_expression());
+ // If control flow falls through Visit, jump to done.
+ if (expr->context() == Expression::kEffect ||
+ expr->context() == Expression::kValue) {
+ __ jmp(&done);
+ }
+
+ __ bind(&false_case);
+ Visit(expr->else_expression());
+ // If control flow falls through Visit, merge it with the true case here.
+ if (expr->context() == Expression::kEffect ||
+ expr->context() == Expression::kValue) {
+ __ bind(&done);
+ }
}
@@ -280,12 +493,54 @@
void FastCodeGenerator::VisitLiteral(Literal* expr) {
- // No code is emitted (here) for simple literals.
+ Comment cmnt(masm_, "[ Literal");
+ Move(expr->context(), expr);
}
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- UNREACHABLE();
+void FastCodeGenerator::VisitAssignment(Assignment* expr) {
+ Comment cmnt(masm_, "[ Assignment");
+ ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
+
+ // Record source code position of the (possible) IC call.
+ SetSourcePosition(expr->position());
+
+ // Left-hand side can only be a property, a global, or a (parameter or local)
+ // slot. Variables rewritten to .arguments are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->target()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type = (prop->key()->context() == Expression::kUninitialized)
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ Expression* rhs = expr->value();
+ ASSERT_EQ(Expression::kValue, rhs->context());
+
+ switch (assign_type) {
+ case VARIABLE:
+ Visit(rhs);
+ EmitVariableAssignment(expr);
+ break;
+ case NAMED_PROPERTY:
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ Visit(rhs);
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case KEYED_PROPERTY:
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ Visit(prop->key());
+ ASSERT_EQ(Expression::kValue, prop->key()->context());
+ Visit(rhs);
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
}
@@ -299,34 +554,7 @@
}
-void FastCodeGenerator::VisitProperty(Property* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- UNREACHABLE();
-}
+#undef __
} } // namespace v8::internal
diff --git a/src/fast-codegen.h b/src/fast-codegen.h
index 42d6cde..9b262a7 100644
--- a/src/fast-codegen.h
+++ b/src/fast-codegen.h
@@ -39,7 +39,13 @@
class FastCodeGenerator: public AstVisitor {
public:
FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
- : masm_(masm), function_(NULL), script_(script), is_eval_(is_eval) {
+ : masm_(masm),
+ function_(NULL),
+ script_(script),
+ is_eval_(is_eval),
+ loop_depth_(0),
+ true_label_(NULL),
+ false_label_(NULL) {
}
static Handle<Code> MakeCode(FunctionLiteral* fun,
@@ -50,25 +56,79 @@
private:
int SlotOffset(Slot* slot);
+ void Move(Expression::Context destination, Register source);
+ void Move(Expression::Context destination, Slot* source, Register scratch);
+ void Move(Expression::Context destination, Literal* source);
+ void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
+ void Move(Register dst, Slot* source);
+
+ // Templated to allow for Operand on Intel and MemOperand on ARM.
+ template <typename MemoryLocation>
+ MemoryLocation CreateSlotOperand(Slot* slot, Register scratch);
+
+ // Drop the TOS, and store source to destination.
+ // If destination is TOS, just overwrite TOS with source.
+ void DropAndMove(Expression::Context destination,
+ Register source,
+ int drop_count = 1);
+
+ // Test the JavaScript value in source as if in a test context, compiling
+ // control flow to a pair of labels.
+ void TestAndBranch(Register source, Label* true_label, Label* false_label);
void VisitDeclarations(ZoneList<Declaration*>* declarations);
- Handle<JSFunction> BuildBoilerplate(FunctionLiteral* fun);
void DeclareGlobals(Handle<FixedArray> pairs);
+ // Platform-specific return sequence.
+ void EmitReturnSequence(int position);
+
+ // Platform-specific code sequences for calls.
+ void EmitCallWithStub(Call* expr);
+ void EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info);
+
+ // Platform-specific support for compiling assignments.
+
+ // Complete a variable assignment. The right-hand-side value is expected
+ // on top of the stack.
+ void EmitVariableAssignment(Assignment* expr);
+
+ // Complete a named property assignment. The receiver and right-hand-side
+ // value are expected on top of the stack.
+ void EmitNamedPropertyAssignment(Assignment* expr);
+
+ // Complete a keyed property assignment. The receiver, key, and
+ // right-hand-side value are expected on top of the stack.
+ void EmitKeyedPropertyAssignment(Assignment* expr);
+
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
void SetStatementPosition(Statement* stmt);
void SetSourcePosition(int pos);
+ int loop_depth() { return loop_depth_; }
+ void increment_loop_depth() { loop_depth_++; }
+ void decrement_loop_depth() {
+ ASSERT(loop_depth_ > 0);
+ loop_depth_--;
+ }
+
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
+ // Handles the short-circuited logical binary operations in VisitBinaryOperation.
+ void EmitLogicalOperation(BinaryOperation* expr);
+
MacroAssembler* masm_;
FunctionLiteral* function_;
Handle<Script> script_;
bool is_eval_;
+ Label return_label_;
+ int loop_depth_;
+
+ Label* true_label_;
+ Label* false_label_;
DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
};
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 42c96b6..88fda12 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -114,6 +114,8 @@
"enable use of RDTSC instruction if available")
DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
+DEFINE_bool(enable_vfp3, true,
+ "enable use of VFP3 instructions if available (ARM only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
@@ -132,8 +134,6 @@
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(trace, false, "trace function calls")
DEFINE_bool(defer_negation, true, "defer negation operation")
-DEFINE_bool(check_stack, true,
- "check stack for overflow, interrupt, breakpoint")
// codegen.cc
DEFINE_bool(lazy, true, "use lazy compilation")
@@ -147,6 +147,8 @@
"use the fast-mode compiler for some top-level code")
DEFINE_bool(trace_bailout, false,
"print reasons for failing to use fast compilation")
+DEFINE_bool(always_fast_compiler, false,
+ "always try using the fast compiler")
// compilation-cache.cc
DEFINE_bool(compilation_cache, true, "enable compilation cache")
@@ -154,9 +156,9 @@
// debug.cc
DEFINE_bool(remote_debugging, false, "enable remote debugging")
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
-DEFINE_bool(debugger_auto_break, false,
+DEFINE_bool(debugger_auto_break, true,
"automatically set the debug break flag when debugger commands are "
- "in the queue (experimental)")
+ "in the queue")
// frames.cc
DEFINE_int(max_stack_trace_source_length, 300,
@@ -198,6 +200,7 @@
// mksnapshot.cc
DEFINE_bool(h, false, "print this message")
+DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
// parser.cc
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
diff --git a/src/flags.cc b/src/flags.cc
index 5df3afd..d444c97 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -303,8 +303,8 @@
// get the value if any
if (*arg == '=') {
// make a copy so we can NUL-terminate flag name
- int n = arg - *name;
- CHECK(n < buffer_size); // buffer is too small
+ size_t n = arg - *name;
+ CHECK(n < static_cast<size_t>(buffer_size)); // buffer is too small
memcpy(buffer, *name, n);
buffer[n] = '\0';
*name = buffer;
diff --git a/src/frames.cc b/src/frames.cc
index 5cd8332..7c327dd 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -393,8 +393,19 @@
}
+Object*& ExitFrame::code_slot() const {
+ const int offset = ExitFrameConstants::kCodeOffset;
+ return Memory::Object_at(fp() + offset);
+}
+
+
Code* ExitFrame::code() const {
- return Heap::c_entry_code();
+ Object* code = code_slot();
+ if (code->IsSmi()) {
+ return Heap::c_entry_debug_break_code();
+ } else {
+ return Code::cast(code);
+ }
}
@@ -412,11 +423,6 @@
}
-Code* ExitDebugFrame::code() const {
- return Heap::c_entry_debug_break_code();
-}
-
-
Address StandardFrame::GetExpressionAddress(int n) const {
const int offset = StandardFrameConstants::kExpressionsOffset;
return fp() + offset - n * kPointerSize;
@@ -430,7 +436,7 @@
Address limit = sp();
ASSERT(base >= limit); // stack grows downwards
// Include register-allocated locals in number of expressions.
- return (base - limit) / kPointerSize;
+ return static_cast<int>((base - limit) / kPointerSize);
}
@@ -460,7 +466,7 @@
int JavaScriptFrame::ComputeParametersCount() const {
Address base = caller_sp() + JavaScriptFrameConstants::kReceiverOffset;
Address limit = fp() + JavaScriptFrameConstants::kSavedRegistersOffset;
- return (base - limit) / kPointerSize;
+ return static_cast<int>((base - limit) / kPointerSize);
}
diff --git a/src/frames.h b/src/frames.h
index 768196d..024065a 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -93,7 +93,6 @@
V(ENTRY, EntryFrame) \
V(ENTRY_CONSTRUCT, EntryConstructFrame) \
V(EXIT, ExitFrame) \
- V(EXIT_DEBUG, ExitDebugFrame) \
V(JAVA_SCRIPT, JavaScriptFrame) \
V(INTERNAL, InternalFrame) \
V(CONSTRUCT, ConstructFrame) \
@@ -119,7 +118,6 @@
bool is_entry() const { return type() == ENTRY; }
bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
bool is_exit() const { return type() == EXIT; }
- bool is_exit_debug() const { return type() == EXIT_DEBUG; }
bool is_java_script() const { return type() == JAVA_SCRIPT; }
bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
bool is_internal() const { return type() == INTERNAL; }
@@ -260,10 +258,13 @@
// Exit frames are used to exit JavaScript execution and go to C.
class ExitFrame: public StackFrame {
public:
+ enum Mode { MODE_NORMAL, MODE_DEBUG };
virtual Type type() const { return EXIT; }
virtual Code* code() const;
+ Object*& code_slot() const;
+
// Garbage collection support.
virtual void Iterate(ObjectVisitor* v) const;
@@ -289,26 +290,6 @@
};
-class ExitDebugFrame: public ExitFrame {
- public:
- virtual Type type() const { return EXIT_DEBUG; }
-
- virtual Code* code() const;
-
- static ExitDebugFrame* cast(StackFrame* frame) {
- ASSERT(frame->is_exit_debug());
- return static_cast<ExitDebugFrame*>(frame);
- }
-
- protected:
- explicit ExitDebugFrame(StackFrameIterator* iterator)
- : ExitFrame(iterator) { }
-
- private:
- friend class StackFrameIterator;
-};
-
-
class StandardFrame: public StackFrame {
public:
// Testers.
diff --git a/src/global-handles.cc b/src/global-handles.cc
index f4b69fc..1a0c982 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -44,6 +44,10 @@
callback_ = NULL;
}
+ Node() {
+ state_ = DESTROYED;
+ }
+
explicit Node(Object* object) {
Initialize(object);
// Initialize link structure.
@@ -161,6 +165,9 @@
// It's fine though to reuse nodes that were destroyed in weak callback
// as those cannot be deallocated until we are back from the callback.
set_first_free(NULL);
+ if (first_deallocated()) {
+ first_deallocated()->set_next(head());
+ }
// Leaving V8.
VMState state(EXTERNAL);
func(object, par);
@@ -200,20 +207,81 @@
};
+class GlobalHandles::Pool BASE_EMBEDDED {
+ public:
+ Pool() {
+ current_ = new Chunk();
+ current_->previous = NULL;
+ next_ = current_->nodes;
+ limit_ = current_->nodes + kNodesPerChunk;
+ }
+
+ Node* Allocate() {
+ if (next_ < limit_) {
+ return next_++;
+ }
+ return SlowAllocate();
+ }
+
+ void Release() {
+ Chunk* current = current_;
+ ASSERT(current != NULL); // At least a single chunk must be allocated
+ do {
+ Chunk* previous = current->previous;
+ delete current;
+ current = previous;
+ } while (current != NULL);
+ current_ = NULL;
+ next_ = limit_ = NULL;
+ }
+
+ private:
+ static const int kNodesPerChunk = (1 << 12) - 1;
+ struct Chunk : public Malloced {
+ Chunk* previous;
+ Node nodes[kNodesPerChunk];
+ };
+
+ Node* SlowAllocate() {
+ Chunk* chunk = new Chunk();
+ chunk->previous = current_;
+ current_ = chunk;
+
+ Node* new_nodes = current_->nodes;
+ next_ = new_nodes + 1;
+ limit_ = new_nodes + kNodesPerChunk;
+ return new_nodes;
+ }
+
+ Chunk* current_;
+ Node* next_;
+ Node* limit_;
+};
+
+
+static GlobalHandles::Pool pool_;
+
+
Handle<Object> GlobalHandles::Create(Object* value) {
Counters::global_handles.Increment();
Node* result;
- if (first_free() == NULL) {
- // Allocate a new node.
- result = new Node(value);
- result->set_next(head());
- set_head(result);
- } else {
+ if (first_free()) {
// Take the first node in the free list.
result = first_free();
set_first_free(result->next_free());
- result->Initialize(value);
+ } else if (first_deallocated()) {
+ // Next, try the deallocated list.
+ result = first_deallocated();
+ set_first_deallocated(result->next_free());
+ ASSERT(result->next() == head());
+ set_head(result);
+ } else {
+ // Allocate a new node.
+ result = pool_.Allocate();
+ result->set_next(head());
+ set_head(result);
}
+ result->Initialize(value);
return result->handle();
}
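Create now looks for a node in three places, cheapest first: the free list, the deallocated prefix, and finally the Pool, which is a chunked bump allocator. Nodes come out of the current chunk until it is exhausted, SlowAllocate then links in a fresh chunk, and Release frees every chunk at TearDown. A generic, self-contained sketch of the allocation pattern; kNodesPerChunk being one less than a power of two presumably keeps sizeof(Chunk), nodes plus the previous pointer, near a round allocation size:

    #include <cstddef>

    template <typename T, int kPerChunk>
    class BumpPool {
      struct Chunk { Chunk* previous; T nodes[kPerChunk]; };
      Chunk* current_;
      T* next_;
      T* limit_;
     public:
      BumpPool() {
        current_ = new Chunk();
        current_->previous = NULL;
        next_ = current_->nodes;
        limit_ = current_->nodes + kPerChunk;
      }
      T* Allocate() {
        if (next_ < limit_) return next_++;  // fast path: bump the pointer
        Chunk* chunk = new Chunk();          // slow path: link a new chunk
        chunk->previous = current_;
        current_ = chunk;
        next_ = chunk->nodes + 1;
        limit_ = chunk->nodes + kPerChunk;
        return chunk->nodes;
      }
      ~BumpPool() {  // mirrors Release(): free every chunk
        while (current_ != NULL) {
          Chunk* previous = current_->previous;
          delete current_;
          current_ = previous;
        }
      }
    };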
@@ -292,7 +360,7 @@
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
- // At the same time deallocate all DESTROYED nodes
+ // At the same time deallocate all DESTROYED nodes.
ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
const int initial_post_gc_processing_count = ++post_gc_processing_count;
Node** p = &head_;
@@ -310,17 +378,24 @@
// Delete the link.
Node* node = *p;
*p = node->next(); // Update the link.
- delete node;
+ if (first_deallocated()) {
+ first_deallocated()->set_next(node);
+ }
+ node->set_next_free(first_deallocated());
+ set_first_deallocated(node);
} else {
p = (*p)->next_addr();
}
}
set_first_free(NULL);
+ if (first_deallocated()) {
+ first_deallocated()->set_next(head());
+ }
}
-void GlobalHandles::IterateRoots(ObjectVisitor* v) {
- // Traversal of global handles marked as NORMAL or NEAR_DEATH.
+void GlobalHandles::IterateStrongRoots(ObjectVisitor* v) {
+ // Traversal of global handles marked as NORMAL.
for (Node* current = head_; current != NULL; current = current->next()) {
if (current->state_ == Node::NORMAL) {
v->VisitPointer(¤t->object_);
@@ -328,17 +403,22 @@
}
}
-void GlobalHandles::TearDown() {
- // Delete all the nodes in the linked list.
- Node* current = head_;
- while (current != NULL) {
- Node* n = current;
- current = current->next();
- delete n;
+
+void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
+ for (Node* current = head_; current != NULL; current = current->next()) {
+ if (current->state_ != Node::DESTROYED) {
+ v->VisitPointer(&current->object_);
+ }
}
- // Reset the head and free_list.
+}
+
+
+void GlobalHandles::TearDown() {
+ // Reset all the lists.
set_head(NULL);
set_first_free(NULL);
+ set_first_deallocated(NULL);
+ pool_.Release();
}
@@ -347,6 +427,27 @@
GlobalHandles::Node* GlobalHandles::head_ = NULL;
GlobalHandles::Node* GlobalHandles::first_free_ = NULL;
+GlobalHandles::Node* GlobalHandles::first_deallocated_ = NULL;
+
+void GlobalHandles::RecordStats(HeapStats* stats) {
+ *stats->global_handle_count = 0;
+ *stats->weak_global_handle_count = 0;
+ *stats->pending_global_handle_count = 0;
+ *stats->near_death_global_handle_count = 0;
+ *stats->destroyed_global_handle_count = 0;
+ for (Node* current = head_; current != NULL; current = current->next()) {
+ *stats->global_handle_count += 1;
+ if (current->state_ == Node::WEAK) {
+ *stats->weak_global_handle_count += 1;
+ } else if (current->state_ == Node::PENDING) {
+ *stats->pending_global_handle_count += 1;
+ } else if (current->state_ == Node::NEAR_DEATH) {
+ *stats->near_death_global_handle_count += 1;
+ } else if (current->state_ == Node::DESTROYED) {
+ *stats->destroyed_global_handle_count += 1;
+ }
+ }
+}
#ifdef DEBUG
diff --git a/src/global-handles.h b/src/global-handles.h
index feb95bf..659f86e 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -48,7 +48,8 @@
class ObjectGroup : public Malloced {
public:
ObjectGroup() : objects_(4) {}
- explicit ObjectGroup(size_t capacity) : objects_(capacity) {}
+ explicit ObjectGroup(size_t capacity)
+ : objects_(static_cast<int>(capacity)) { }
List<Object**> objects_;
};
@@ -77,6 +78,8 @@
// Returns the current number of weak handles.
static int NumberOfWeakHandles() { return number_of_weak_handles_; }
+ static void RecordStats(HeapStats* stats);
+
// Returns the current number of weak handles to global objects.
// These handles are also included in NumberOfWeakHandles().
static int NumberOfGlobalObjectWeakHandles() {
@@ -95,8 +98,11 @@
// Process pending weak handles.
static void PostGarbageCollectionProcessing();
+ // Iterates over all strong handles.
+ static void IterateStrongRoots(ObjectVisitor* v);
+
// Iterates over all handles.
- static void IterateRoots(ObjectVisitor* v);
+ static void IterateAllRoots(ObjectVisitor* v);
// Iterates over all weak roots in heap.
static void IterateWeakRoots(ObjectVisitor* v);
@@ -127,6 +133,7 @@
static void PrintStats();
static void Print();
#endif
+ class Pool;
private:
// Internal node structure, one for each global handle.
class Node;
@@ -148,6 +155,23 @@
static Node* first_free_;
static Node* first_free() { return first_free_; }
static void set_first_free(Node* value) { first_free_ = value; }
+
+ // List of deallocated nodes.
+ // Deallocated nodes form a prefix of all the nodes and
+ // |first_deallocated| points to last deallocated node before
+ // |head|. Those deallocated nodes are additionally linked
+ // by |next_free|:
+ // 1st deallocated head
+ // | |
+ // V V
+ // node node ... node node
+ // .next -> .next -> .next ->
+ // <- .next_free <- .next_free <- .next_free
+ static Node* first_deallocated_;
+ static Node* first_deallocated() { return first_deallocated_; }
+ static void set_first_deallocated(Node* value) {
+ first_deallocated_ = value;
+ }
};
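
The diagram above is precise: deallocated nodes stay threaded into the main list via next (so head stays reachable through them), while next_free links the same prefix in the opposite direction for O(1) reuse in Create(). A rough sketch of the invariant, using a simplified stand-in Node (names hypothetical):

    #include <cstddef>

    struct Node {
      Node* next;       // Main list link, pointing toward head.
      Node* next_free;  // Prefix link, pointing away from head.
    };

    // Checks the documented shape: every next_free hop must be the
    // reverse of a next pointer, and the first deallocated node must
    // link forward directly to head.
    bool PrefixIsWellFormed(Node* first_deallocated, Node* head) {
      for (Node* n = first_deallocated; n != NULL && n->next_free != NULL;
           n = n->next_free) {
        if (n->next_free->next != n) return false;
      }
      return first_deallocated == NULL || first_deallocated->next == head;
    }
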
diff --git a/src/globals.h b/src/globals.h
index efe0127..ad0539f 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -103,6 +103,10 @@
#define V8PRIxPTR "lx"
#endif
+#if defined(__APPLE__) && defined(__MACH__)
+#define USING_MAC_ABI
+#endif
+
// Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16;
typedef int32_t uc32;
@@ -170,6 +174,15 @@
#endif
+// Constants relevant to double precision floating point numbers.
+
+// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
+// other bits set.
+const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
+// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
+const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
+
+
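
A quick illustration of how these masks are meant to be used: a hedged sketch that classifies a double as a quiet NaN by its bit pattern, assuming IEEE-754 doubles and the two constants above.

    #include <cstring>
    #include <stdint.h>

    // A double is a quiet NaN when bits 51..62 are all set. Copying the
    // bits out with memcpy avoids strict-aliasing trouble.
    bool IsQuietNaN(double value) {
      const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
      uint64_t bits;
      memcpy(&bits, &value, sizeof(bits));
      return (bits & kQuietNaNMask) == kQuietNaNMask;
    }

    // The same test on the high word only, as 32-bit code paths do it.
    bool IsQuietNaNHighWord(uint32_t high_word) {
      const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
      return (high_word & kQuietNaNHighBitsMask) == kQuietNaNHighBitsMask;
    }
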
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
// (sorted alphabetically)
@@ -263,7 +276,9 @@
LO_SPACE, // Promoted large objects.
FIRST_SPACE = NEW_SPACE,
- LAST_SPACE = LO_SPACE
+ LAST_SPACE = LO_SPACE,
+ FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
+ LAST_PAGED_SPACE = CELL_SPACE
};
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
@@ -279,6 +294,8 @@
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
+enum VisitMode { VISIT_ALL, VISIT_ONLY_STRONG };
+
// A CodeDesc describes a buffer holding instructions and relocation
// information. The instructions start at the beginning of the buffer
@@ -558,6 +575,17 @@
}
+// Feature flags bit positions. They are mostly based on the CPUID spec.
+// (We assign CPUID itself to one of the currently reserved bits --
+// feel free to change this if needed.)
+enum CpuFeature { SSE3 = 32, // x86
+ SSE2 = 26, // x86
+ CMOV = 15, // x86
+ RDTSC = 4, // x86
+ CPUID = 10, // x86
+ VFP3 = 1, // ARM
+ SAHF = 0}; // x86
+
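
Because these values are bit positions rather than one-bit flags, a feature word must be at least 64 bits wide once SSE3 occupies bit 32. A sketch of the intended test, mirroring how a supported mask is typically consulted (helper names are illustrative):

    #include <stdint.h>

    enum CpuFeature { SSE3 = 32, SSE2 = 26, CMOV = 15, RDTSC = 4,
                      CPUID = 10, VFP3 = 1, SAHF = 0 };

    // 1 << SSE3 would overflow a 32-bit int, so widen before shifting.
    inline void SetFeature(uint64_t* mask, CpuFeature f) {
      *mask |= static_cast<uint64_t>(1) << f;
    }

    inline bool HasFeature(uint64_t mask, CpuFeature f) {
      return (mask & (static_cast<uint64_t>(1) << f)) != 0;
    }
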
} } // namespace v8::internal
#endif // V8_GLOBALS_H_
diff --git a/src/handles.cc b/src/handles.cc
index b764334..d551e21 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -37,6 +37,7 @@
#include "global-handles.h"
#include "natives.h"
#include "runtime.h"
+#include "stub-cache.h"
namespace v8 {
namespace internal {
@@ -49,8 +50,8 @@
int HandleScope::NumberOfHandles() {
int n = HandleScopeImplementer::instance()->blocks()->length();
if (n == 0) return 0;
- return ((n - 1) * kHandleBlockSize) +
- (current_.next - HandleScopeImplementer::instance()->blocks()->last());
+ return ((n - 1) * kHandleBlockSize) + static_cast<int>(
+ (current_.next - HandleScopeImplementer::instance()->blocks()->last()));
}
@@ -105,6 +106,21 @@
}
+Address HandleScope::current_extensions_address() {
+ return reinterpret_cast<Address>(&current_.extensions);
+}
+
+
+Address HandleScope::current_next_address() {
+ return reinterpret_cast<Address>(&current_.next);
+}
+
+
+Address HandleScope::current_limit_address() {
+ return reinterpret_cast<Address>(&current_.limit);
+}
+
+
Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray> content,
Handle<JSArray> array) {
CALL_HEAP_FUNCTION(content->AddKeysFromJSArray(*array), FixedArray);
@@ -285,7 +301,9 @@
Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
bool create_if_needed) {
- Handle<String> key = Factory::hidden_symbol();
+ Object* holder = obj->BypassGlobalProxy();
+ if (holder->IsUndefined()) return Factory::undefined_value();
+ obj = Handle<JSObject>(JSObject::cast(holder));
if (obj->HasFastProperties()) {
// If the object has fast properties, check whether the first slot
@@ -294,7 +312,7 @@
// code zero) it will always occupy the first entry if present.
DescriptorArray* descriptors = obj->map()->instance_descriptors();
if ((descriptors->number_of_descriptors() > 0) &&
- (descriptors->GetKey(0) == *key) &&
+ (descriptors->GetKey(0) == Heap::hidden_symbol()) &&
descriptors->IsProperty(0)) {
ASSERT(descriptors->GetType(0) == FIELD);
return Handle<Object>(obj->FastPropertyAt(descriptors->GetFieldIndex(0)));
@@ -304,17 +322,17 @@
// Only attempt to find the hidden properties in the local object and not
// in the prototype chain. Note that HasLocalProperty() can cause a GC in
// the general case in the presence of interceptors.
- if (!obj->HasLocalProperty(*key)) {
+ if (!obj->HasHiddenPropertiesObject()) {
// Hidden properties object not found. Allocate a new hidden properties
// object if requested. Otherwise return the undefined value.
if (create_if_needed) {
Handle<Object> hidden_obj = Factory::NewJSObject(Top::object_function());
- return SetProperty(obj, key, hidden_obj, DONT_ENUM);
+ CALL_HEAP_FUNCTION(obj->SetHiddenPropertiesObject(*hidden_obj), Object);
} else {
return Factory::undefined_value();
}
}
- return GetProperty(obj, key);
+ return Handle<Object>(obj->GetHiddenPropertiesObject());
}
@@ -338,7 +356,7 @@
Handle<String> SubString(Handle<String> str, int start, int end) {
- CALL_HEAP_FUNCTION(str->Slice(start, end), String);
+ CALL_HEAP_FUNCTION(str->SubString(start, end), String);
}
@@ -415,8 +433,8 @@
if (!script->source()->IsString()) {
ASSERT(script->source()->IsUndefined());
- script->set_line_ends(*(Factory::NewJSArray(0)));
- ASSERT(script->line_ends()->IsJSArray());
+ script->set_line_ends(*(Factory::NewFixedArray(0)));
+ ASSERT(script->line_ends()->IsFixedArray());
return;
}
@@ -449,9 +467,8 @@
}
ASSERT(array_index == line_count);
- Handle<JSArray> object = Factory::NewJSArrayWithElements(array);
- script->set_line_ends(*object);
- ASSERT(script->line_ends()->IsJSArray());
+ script->set_line_ends(*array);
+ ASSERT(script->line_ends()->IsFixedArray());
}
@@ -459,17 +476,18 @@
int GetScriptLineNumber(Handle<Script> script, int code_pos) {
InitScriptLineEnds(script);
AssertNoAllocation no_allocation;
- JSArray* line_ends_array = JSArray::cast(script->line_ends());
- const int line_ends_len = (Smi::cast(line_ends_array->length()))->value();
+ FixedArray* line_ends_array =
+ FixedArray::cast(script->line_ends());
+ const int line_ends_len = line_ends_array->length();
int line = -1;
if (line_ends_len > 0 &&
- code_pos <= (Smi::cast(line_ends_array->GetElement(0)))->value()) {
+ code_pos <= (Smi::cast(line_ends_array->get(0)))->value()) {
line = 0;
} else {
for (int i = 1; i < line_ends_len; ++i) {
- if ((Smi::cast(line_ends_array->GetElement(i - 1)))->value() < code_pos &&
- code_pos <= (Smi::cast(line_ends_array->GetElement(i)))->value()) {
+ if ((Smi::cast(line_ends_array->get(i - 1)))->value() < code_pos &&
+ code_pos <= (Smi::cast(line_ends_array->get(i)))->value()) {
line = i;
break;
}
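
The scan above is linear in the number of lines. Since line_ends is sorted ascending, the same answer falls out of a binary search; a sketch over a plain vector, independent of the FixedArray machinery used here:

    #include <algorithm>
    #include <vector>

    // line_ends[i] holds the source position of the end of line i, in
    // ascending order. Returns the 0-based line containing code_pos, or
    // -1 when code_pos lies past the last recorded line end.
    int LineNumberOf(const std::vector<int>& line_ends, int code_pos) {
      std::vector<int>::const_iterator it =
          std::lower_bound(line_ends.begin(), line_ends.end(), code_pos);
      if (it == line_ends.end()) return -1;
      return static_cast<int>(it - line_ends.begin());
    }
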
@@ -530,6 +548,12 @@
Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
KeyCollectionType type) {
Handle<FixedArray> content = Factory::empty_fixed_array();
+ Handle<JSObject> arguments_boilerplate =
+ Handle<JSObject>(
+ Top::context()->global_context()->arguments_boilerplate());
+ Handle<JSFunction> arguments_function =
+ Handle<JSFunction>(
+ JSFunction::cast(arguments_boilerplate->map()->constructor()));
// Only collect keys if access is permitted.
for (Handle<Object> p = object;
@@ -559,8 +583,21 @@
content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
}
- // Compute the property keys.
- content = UnionOfKeys(content, GetEnumPropertyKeys(current));
+ // We can cache the computed property keys if access checks are
+ // not needed and no interceptors are involved.
+ //
+ // We do not use the cache if the object has elements and
+ // therefore it does not make sense to cache the property names
+ // for arguments objects. Arguments objects will always have
+ // elements.
+ bool cache_enum_keys =
+ ((current->map()->constructor() != *arguments_function) &&
+ !current->IsAccessCheckNeeded() &&
+ !current->HasNamedInterceptor() &&
+ !current->HasIndexedInterceptor());
+ // Compute the property keys and cache them if possible.
+ content =
+ UnionOfKeys(content, GetEnumPropertyKeys(current, cache_enum_keys));
// Add the property keys from the interceptor.
if (current->HasNamedInterceptor()) {
@@ -587,7 +624,8 @@
}
-Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object) {
+Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
+ bool cache_result) {
int index = 0;
if (object->HasFastProperties()) {
if (object->map()->instance_descriptors()->HasEnumCache()) {
@@ -610,10 +648,12 @@
}
}
(*storage)->SortPairs(*sort_array, sort_array->length());
- Handle<FixedArray> bridge_storage =
- Factory::NewFixedArray(DescriptorArray::kEnumCacheBridgeLength);
- DescriptorArray* desc = object->map()->instance_descriptors();
- desc->SetEnumCache(*bridge_storage, *storage);
+ if (cache_result) {
+ Handle<FixedArray> bridge_storage =
+ Factory::NewFixedArray(DescriptorArray::kEnumCacheBridgeLength);
+ DescriptorArray* desc = object->map()->instance_descriptors();
+ desc->SetEnumCache(*bridge_storage, *storage);
+ }
ASSERT(storage->length() == index);
return storage;
} else {
@@ -672,6 +712,11 @@
}
+Handle<Code> ComputeLazyCompile(int argc) {
+ CALL_HEAP_FUNCTION(StubCache::ComputeLazyCompile(argc), Code);
+}
+
+
OptimizedObjectForAddingMultipleProperties::
~OptimizedObjectForAddingMultipleProperties() {
// Reoptimize the object to allow fast property access.
diff --git a/src/handles.h b/src/handles.h
index 5d57465..fe820d5 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -133,6 +133,13 @@
return result;
}
+ // Deallocates any extensions used by the current scope.
+ static void DeleteExtensions();
+
+ static Address current_extensions_address();
+ static Address current_next_address();
+ static Address current_limit_address();
+
private:
// Prevent heap allocation or illegal handle scopes.
HandleScope(const HandleScope&);
@@ -166,9 +173,6 @@
// Extend the handle scope making room for more handles.
static internal::Object** Extend();
- // Deallocates any extensions used by the current scope.
- static void DeleteExtensions();
-
// Zaps the handles in the half-open interval [start, end).
static void ZapRange(internal::Object** start, internal::Object** end);
@@ -273,7 +277,8 @@
Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
KeyCollectionType type);
Handle<JSArray> GetKeysFor(Handle<JSObject> object);
-Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object);
+Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
+ bool cache_result);
// Computes the union of keys and return the result.
// Used for implementing "for (n in object) { }"
@@ -304,8 +309,8 @@
Handle<Object> prototype);
-// Do lazy compilation of the given function. Returns true on success
-// and false if the compilation resulted in a stack overflow.
+// Does lazy compilation of the given function. Returns true on success and
+// false if the compilation resulted in a stack overflow.
enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
@@ -315,6 +320,9 @@
bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
+// Returns the lazy compilation stub for argc arguments.
+Handle<Code> ComputeLazyCompile(int argc);
+
// These deal with lazily loaded properties.
void SetupLazy(Handle<JSObject> obj,
int index,
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 0646878..eccd5ee 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -41,10 +41,10 @@
Object* Heap::AllocateSymbol(Vector<const char> str,
int chars,
- uint32_t length_field) {
+ uint32_t hash_field) {
unibrow::Utf8InputBuffer<> buffer(str.start(),
static_cast<unsigned>(str.length()));
- return AllocateInternalSymbol(&buffer, chars, length_field);
+ return AllocateInternalSymbol(&buffer, chars, hash_field);
}
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index bfd378d..bd1cd2d 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -78,6 +78,10 @@
}
} else if (obj->IsString()) {
return JSObjectsCluster(Heap::String_symbol());
+ } else if (obj->IsJSGlobalPropertyCell()) {
+ return JSObjectsCluster(JSObjectsCluster::GLOBAL_PROPERTY);
+ } else if (obj->IsCode() || obj->IsSharedFunctionInfo() || obj->IsScript()) {
+ return JSObjectsCluster(JSObjectsCluster::CODE);
}
return JSObjectsCluster();
}
@@ -112,6 +116,16 @@
if (FixedArray::cast(obj->elements())->length() != 0) {
size += obj->elements()->Size();
}
+ // For functions, also account for non-empty context and literals sizes.
+ if (obj->IsJSFunction()) {
+ JSFunction* f = JSFunction::cast(obj);
+ if (f->unchecked_context()->IsContext()) {
+ size += f->context()->Size();
+ }
+ if (f->literals()->length() != 0) {
+ size += f->literals()->Size();
+ }
+ }
return size;
}
@@ -127,15 +141,15 @@
}
void VisitPointer(Object** o) {
- if ((*o)->IsJSObject() || (*o)->IsString()) {
- profile_->StoreReference(cluster_, HeapObject::cast(*o));
- } else if ((*o)->IsFixedArray() && !inside_array_) {
+ if ((*o)->IsFixedArray() && !inside_array_) {
// Traverse one level deep for data members that are fixed arrays.
// This covers the case of 'elements' and 'properties' of JSObject,
// and function contexts.
inside_array_ = true;
FixedArray::cast(*o)->Iterate(this);
inside_array_ = false;
+ } else if ((*o)->IsHeapObject()) {
+ profile_->StoreReference(cluster_, HeapObject::cast(*o));
}
}
@@ -340,6 +354,8 @@
accumulator->Add("(roots)");
} else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
accumulator->Add("(global property)");
+ } else if (constructor_ == FromSpecialCase(CODE)) {
+ accumulator->Add("(code)");
} else if (constructor_ == FromSpecialCase(SELF)) {
accumulator->Add("(self)");
} else {
@@ -520,13 +536,14 @@
: zscope_(DELETE_ON_EXIT) {
JSObjectsCluster roots(JSObjectsCluster::ROOTS);
ReferencesExtractor extractor(roots, this);
- Heap::IterateRoots(&extractor);
+ Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
}
void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
HeapObject* ref) {
JSObjectsCluster ref_cluster = Clusterizer::Clusterize(ref);
+ if (ref_cluster.is_null()) return;
JSObjectsRetainerTree::Locator ref_loc;
if (retainers_tree_.Insert(ref_cluster, &ref_loc)) {
ref_loc.set_value(new JSObjectsClusterTree());
@@ -537,15 +554,10 @@
void RetainerHeapProfile::CollectStats(HeapObject* obj) {
- if (obj->IsJSObject()) {
- const JSObjectsCluster cluster = Clusterizer::Clusterize(obj);
- ReferencesExtractor extractor(cluster, this);
- obj->Iterate(&extractor);
- } else if (obj->IsJSGlobalPropertyCell()) {
- JSObjectsCluster global_prop(JSObjectsCluster::GLOBAL_PROPERTY);
- ReferencesExtractor extractor(global_prop, this);
- obj->Iterate(&extractor);
- }
+ const JSObjectsCluster cluster = Clusterizer::Clusterize(obj);
+ if (cluster.is_null()) return;
+ ReferencesExtractor extractor(cluster, this);
+ obj->Iterate(&extractor);
}
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index bd875df..f8cb04d 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -54,7 +54,8 @@
enum SpecialCase {
ROOTS = 1,
GLOBAL_PROPERTY = 2,
- SELF = 3 // This case is used in ClustersCoarser only.
+ CODE = 3,
+ SELF = 100 // This case is used in ClustersCoarser only.
};
JSObjectsCluster() : constructor_(NULL), instance_(NULL) {}
@@ -97,6 +98,7 @@
switch (special) {
case ROOTS: return Heap::result_symbol();
case GLOBAL_PROPERTY: return Heap::code_symbol();
+ case CODE: return Heap::arguments_shadow_symbol();
case SELF: return Heap::catch_var_symbol();
default:
UNREACHABLE();
diff --git a/src/heap.cc b/src/heap.cc
index 5084058..4e4cd1c 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -43,6 +43,7 @@
#include "v8threads.h"
#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
#include "regexp-macro-assembler.h"
+#include "arm/regexp-macro-assembler-arm.h"
#endif
namespace v8 {
@@ -113,6 +114,7 @@
int Heap::gc_count_ = 0;
int Heap::always_allocate_scope_depth_ = 0;
+int Heap::linear_allocation_scope_depth_ = 0;
bool Heap::context_disposed_pending_ = false;
#ifdef DEBUG
@@ -731,10 +733,7 @@
ScavengeVisitor scavenge_visitor;
// Copy roots.
- IterateRoots(&scavenge_visitor);
-
- // Copy objects reachable from weak pointers.
- GlobalHandles::IterateWeakRoots(&scavenge_visitor);
+ IterateRoots(&scavenge_visitor, VISIT_ALL);
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
@@ -1188,34 +1187,14 @@
roots_[entry.index] = Map::cast(obj);
}
- obj = AllocateMap(SHORT_STRING_TYPE, SeqTwoByteString::kAlignedSize);
+ obj = AllocateMap(STRING_TYPE, SeqTwoByteString::kAlignedSize);
if (obj->IsFailure()) return false;
- set_undetectable_short_string_map(Map::cast(obj));
+ set_undetectable_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();
- obj = AllocateMap(MEDIUM_STRING_TYPE, SeqTwoByteString::kAlignedSize);
+ obj = AllocateMap(ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
if (obj->IsFailure()) return false;
- set_undetectable_medium_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- obj = AllocateMap(LONG_STRING_TYPE, SeqTwoByteString::kAlignedSize);
- if (obj->IsFailure()) return false;
- set_undetectable_long_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- obj = AllocateMap(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
- if (obj->IsFailure()) return false;
- set_undetectable_short_ascii_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
- if (obj->IsFailure()) return false;
- set_undetectable_medium_ascii_string_map(Map::cast(obj));
- Map::cast(obj)->set_is_undetectable();
-
- obj = AllocateMap(LONG_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
- if (obj->IsFailure()) return false;
- set_undetectable_long_ascii_string_map(Map::cast(obj));
+ set_undetectable_ascii_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();
obj = AllocateMap(BYTE_ARRAY_TYPE, ByteArray::kAlignedSize);
@@ -1728,6 +1707,7 @@
// Statically ensure that it is safe to allocate proxies in paged spaces.
STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ if (always_allocate()) space = OLD_DATA_SPACE;
Object* result = Allocate(proxy_map(), space);
if (result->IsFailure()) return result;
@@ -1762,14 +1742,63 @@
}
+// Returns true for a character in a range. Both limits are inclusive.
+static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
+ // This makes use of the unsigned wraparound.
+ return character - from <= to - from;
+}
+
+
+static inline Object* MakeOrFindTwoCharacterString(uint32_t c1, uint32_t c2) {
+ String* symbol;
+ // Numeric strings have a different hash algorithm not known by
+ // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
+ if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
+ Heap::symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
+ return symbol;
+ // Now that we know the length is 2, we might as well make use of
+ // that fact when building the new string.
+ } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
+ ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
+ Object* result = Heap::AllocateRawAsciiString(2);
+ if (result->IsFailure()) return result;
+ char* dest = SeqAsciiString::cast(result)->GetChars();
+ dest[0] = c1;
+ dest[1] = c2;
+ return result;
+ } else {
+ Object* result = Heap::AllocateRawTwoByteString(2);
+ if (result->IsFailure()) return result;
+ uc16* dest = SeqTwoByteString::cast(result)->GetChars();
+ dest[0] = c1;
+ dest[1] = c2;
+ return result;
+ }
+}
+
+
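
Between() above leans on a classic trick: in unsigned arithmetic, character - from wraps around to a huge value whenever character < from, so a single comparison covers both ends of the range. A small self-contained demonstration:

    #include <cassert>
    #include <stdint.h>

    // One unsigned compare replaces (c >= '0' && c <= '9'): when c is
    // below '0', c - '0' wraps far above '9' - '0' and is rejected.
    inline bool IsDecimalDigit(uint32_t c) {
      return c - '0' <= static_cast<uint32_t>('9' - '0');
    }

    int main() {
      assert(IsDecimalDigit('5'));
      assert(!IsDecimalDigit('a'));
      assert(!IsDecimalDigit('/'));  // One below '0': wraps, rejected.
      return 0;
    }
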
Object* Heap::AllocateConsString(String* first, String* second) {
int first_length = first->length();
- if (first_length == 0) return second;
+ if (first_length == 0) {
+ return second;
+ }
int second_length = second->length();
- if (second_length == 0) return first;
+ if (second_length == 0) {
+ return first;
+ }
int length = first_length + second_length;
+
+ // Optimization for two-character strings often used as keys in a
+ // decompression dictionary. Check whether we already have the string
+ // in the symbol table to prevent creation of many unnecessary strings.
+ if (length == 2) {
+ unsigned c1 = first->Get(0);
+ unsigned c2 = second->Get(0);
+ return MakeOrFindTwoCharacterString(c1, c2);
+ }
+
bool is_ascii = first->IsAsciiRepresentation()
&& second->IsAsciiRepresentation();
@@ -1790,10 +1819,19 @@
// Copy the characters into the new object.
char* dest = SeqAsciiString::cast(result)->GetChars();
// Copy first part.
- char* src = SeqAsciiString::cast(first)->GetChars();
+ const char* src;
+ if (first->IsExternalString()) {
+ src = ExternalAsciiString::cast(first)->resource()->data();
+ } else {
+ src = SeqAsciiString::cast(first)->GetChars();
+ }
for (int i = 0; i < first_length; i++) *dest++ = src[i];
// Copy second part.
- src = SeqAsciiString::cast(second)->GetChars();
+ if (second->IsExternalString()) {
+ src = ExternalAsciiString::cast(second)->resource()->data();
+ } else {
+ src = SeqAsciiString::cast(second)->GetChars();
+ }
for (int i = 0; i < second_length; i++) *dest++ = src[i];
return result;
} else {
@@ -1807,62 +1845,17 @@
}
}
- Map* map;
- if (length <= String::kMaxShortStringSize) {
- map = is_ascii ? short_cons_ascii_string_map()
- : short_cons_string_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = is_ascii ? medium_cons_ascii_string_map()
- : medium_cons_string_map();
- } else {
- map = is_ascii ? long_cons_ascii_string_map()
- : long_cons_string_map();
- }
+ Map* map = is_ascii ? cons_ascii_string_map() : cons_string_map();
- Object* result = Allocate(map, NEW_SPACE);
+ Object* result = Allocate(map,
+ always_allocate() ? OLD_POINTER_SPACE : NEW_SPACE);
if (result->IsFailure()) return result;
- ASSERT(InNewSpace(result));
ConsString* cons_string = ConsString::cast(result);
- cons_string->set_first(first, SKIP_WRITE_BARRIER);
- cons_string->set_second(second, SKIP_WRITE_BARRIER);
+ WriteBarrierMode mode = cons_string->GetWriteBarrierMode();
cons_string->set_length(length);
- return result;
-}
-
-
-Object* Heap::AllocateSlicedString(String* buffer,
- int start,
- int end) {
- int length = end - start;
-
- // If the resulting string is small make a sub string.
- if (length <= String::kMinNonFlatLength) {
- return Heap::AllocateSubString(buffer, start, end);
- }
-
- Map* map;
- if (length <= String::kMaxShortStringSize) {
- map = buffer->IsAsciiRepresentation() ?
- short_sliced_ascii_string_map() :
- short_sliced_string_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = buffer->IsAsciiRepresentation() ?
- medium_sliced_ascii_string_map() :
- medium_sliced_string_map();
- } else {
- map = buffer->IsAsciiRepresentation() ?
- long_sliced_ascii_string_map() :
- long_sliced_string_map();
- }
-
- Object* result = Allocate(map, NEW_SPACE);
- if (result->IsFailure()) return result;
-
- SlicedString* sliced_string = SlicedString::cast(result);
- sliced_string->set_buffer(buffer);
- sliced_string->set_start(start);
- sliced_string->set_length(length);
-
+ cons_string->set_hash_field(String::kEmptyHashField);
+ cons_string->set_first(first, mode);
+ cons_string->set_second(second, mode);
return result;
}
@@ -1875,6 +1868,13 @@
if (length == 1) {
return Heap::LookupSingleCharacterStringFromCode(
buffer->Get(start));
+ } else if (length == 2) {
+ // Optimization for two-character strings often used as keys in a
+ // decompression dictionary. Check whether we already have the string
+ // in the symbol table to prevent creation of many unnecessary strings.
+ unsigned c1 = buffer->Get(start);
+ unsigned c2 = buffer->Get(start + 1);
+ return MakeOrFindTwoCharacterString(c1, c2);
}
// Make an attempt to flatten the buffer to reduce access time.
@@ -1886,43 +1886,39 @@
? AllocateRawAsciiString(length)
: AllocateRawTwoByteString(length);
if (result->IsFailure()) return result;
+ String* string_result = String::cast(result);
// Copy the characters into the new object.
- String* string_result = String::cast(result);
- StringHasher hasher(length);
- int i = 0;
- for (; i < length && hasher.is_array_index(); i++) {
- uc32 c = buffer->Get(start + i);
- hasher.AddCharacter(c);
- string_result->Set(i, c);
+ if (buffer->IsAsciiRepresentation()) {
+ ASSERT(string_result->IsAsciiRepresentation());
+ char* dest = SeqAsciiString::cast(string_result)->GetChars();
+ String::WriteToFlat(buffer, dest, start, end);
+ } else {
+ ASSERT(string_result->IsTwoByteRepresentation());
+ uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
+ String::WriteToFlat(buffer, dest, start, end);
}
- for (; i < length; i++) {
- uc32 c = buffer->Get(start + i);
- hasher.AddCharacterNoIndex(c);
- string_result->Set(i, c);
- }
- string_result->set_length_field(hasher.GetHashField());
+
return result;
}
Object* Heap::AllocateExternalStringFromAscii(
ExternalAsciiString::Resource* resource) {
- Map* map;
- int length = resource->length();
- if (length <= String::kMaxShortStringSize) {
- map = short_external_ascii_string_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = medium_external_ascii_string_map();
- } else {
- map = long_external_ascii_string_map();
+ size_t length = resource->length();
+ if (length > static_cast<size_t>(String::kMaxLength)) {
+ Top::context()->mark_out_of_memory();
+ return Failure::OutOfMemoryException();
}
- Object* result = Allocate(map, NEW_SPACE);
+ Map* map = external_ascii_string_map();
+ Object* result = Allocate(map,
+ always_allocate() ? OLD_DATA_SPACE : NEW_SPACE);
if (result->IsFailure()) return result;
ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
- external_string->set_length(length);
+ external_string->set_length(static_cast<int>(length));
+ external_string->set_hash_field(String::kEmptyHashField);
external_string->set_resource(resource);
return result;
@@ -1931,14 +1927,20 @@
Object* Heap::AllocateExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource) {
- int length = resource->length();
+ size_t length = resource->length();
+ if (length > static_cast<size_t>(String::kMaxLength)) {
+ Top::context()->mark_out_of_memory();
+ return Failure::OutOfMemoryException();
+ }
- Map* map = ExternalTwoByteString::StringMap(length);
- Object* result = Allocate(map, NEW_SPACE);
+ Map* map = Heap::external_string_map();
+ Object* result = Allocate(map,
+ always_allocate() ? OLD_DATA_SPACE : NEW_SPACE);
if (result->IsFailure()) return result;
ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
- external_string->set_length(length);
+ external_string->set_length(static_cast<int>(length));
+ external_string->set_hash_field(String::kEmptyHashField);
external_string->set_resource(resource);
return result;
@@ -2256,9 +2258,8 @@
// descriptors for these to the initial map as the object cannot be
// constructed without having these properties.
ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
- if (fun->shared()->has_only_this_property_assignments() &&
- fun->shared()->this_property_assignments_count() > 0 &&
- fun->shared()->has_only_simple_this_property_assignments()) {
+ if (fun->shared()->has_only_simple_this_property_assignments() &&
+ fun->shared()->this_property_assignments_count() > 0) {
int count = fun->shared()->this_property_assignments_count();
if (count > in_object_properties) {
count = in_object_properties;
@@ -2320,6 +2321,7 @@
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
+ if (always_allocate()) space = OLD_POINTER_SPACE;
Object* obj = Allocate(map, space);
if (obj->IsFailure()) return obj;
@@ -2579,62 +2581,12 @@
// Find the corresponding symbol map for strings.
Map* map = string->map();
-
- if (map == short_ascii_string_map()) return short_ascii_symbol_map();
- if (map == medium_ascii_string_map()) return medium_ascii_symbol_map();
- if (map == long_ascii_string_map()) return long_ascii_symbol_map();
-
- if (map == short_string_map()) return short_symbol_map();
- if (map == medium_string_map()) return medium_symbol_map();
- if (map == long_string_map()) return long_symbol_map();
-
- if (map == short_cons_string_map()) return short_cons_symbol_map();
- if (map == medium_cons_string_map()) return medium_cons_symbol_map();
- if (map == long_cons_string_map()) return long_cons_symbol_map();
-
- if (map == short_cons_ascii_string_map()) {
- return short_cons_ascii_symbol_map();
- }
- if (map == medium_cons_ascii_string_map()) {
- return medium_cons_ascii_symbol_map();
- }
- if (map == long_cons_ascii_string_map()) {
- return long_cons_ascii_symbol_map();
- }
-
- if (map == short_sliced_string_map()) return short_sliced_symbol_map();
- if (map == medium_sliced_string_map()) return medium_sliced_symbol_map();
- if (map == long_sliced_string_map()) return long_sliced_symbol_map();
-
- if (map == short_sliced_ascii_string_map()) {
- return short_sliced_ascii_symbol_map();
- }
- if (map == medium_sliced_ascii_string_map()) {
- return medium_sliced_ascii_symbol_map();
- }
- if (map == long_sliced_ascii_string_map()) {
- return long_sliced_ascii_symbol_map();
- }
-
- if (map == short_external_string_map()) {
- return short_external_symbol_map();
- }
- if (map == medium_external_string_map()) {
- return medium_external_symbol_map();
- }
- if (map == long_external_string_map()) {
- return long_external_symbol_map();
- }
-
- if (map == short_external_ascii_string_map()) {
- return short_external_ascii_symbol_map();
- }
- if (map == medium_external_ascii_string_map()) {
- return medium_external_ascii_symbol_map();
- }
- if (map == long_external_ascii_string_map()) {
- return long_external_ascii_symbol_map();
- }
+ if (map == ascii_string_map()) return ascii_symbol_map();
+ if (map == string_map()) return symbol_map();
+ if (map == cons_string_map()) return cons_symbol_map();
+ if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
+ if (map == external_string_map()) return external_symbol_map();
+ if (map == external_ascii_string_map()) return external_ascii_symbol_map();
// No match found.
return NULL;
@@ -2643,7 +2595,7 @@
Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
int chars,
- uint32_t length_field) {
+ uint32_t hash_field) {
// Ensure the chars matches the number of characters in the buffer.
ASSERT(static_cast<unsigned>(chars) == buffer->Length());
// Determine whether the string is ascii.
@@ -2658,22 +2610,10 @@
Map* map;
if (is_ascii) {
- if (chars <= String::kMaxShortStringSize) {
- map = short_ascii_symbol_map();
- } else if (chars <= String::kMaxMediumStringSize) {
- map = medium_ascii_symbol_map();
- } else {
- map = long_ascii_symbol_map();
- }
+ map = ascii_symbol_map();
size = SeqAsciiString::SizeFor(chars);
} else {
- if (chars <= String::kMaxShortStringSize) {
- map = short_symbol_map();
- } else if (chars <= String::kMaxMediumStringSize) {
- map = medium_symbol_map();
- } else {
- map = long_symbol_map();
- }
+ map = symbol_map();
size = SeqTwoByteString::SizeFor(chars);
}
@@ -2684,9 +2624,10 @@
if (result->IsFailure()) return result;
reinterpret_cast<HeapObject*>(result)->set_map(map);
- // The hash value contains the length of the string.
+ // Set length and hash fields of the allocated string.
String* answer = String::cast(result);
- answer->set_length_field(length_field);
+ answer->set_length(chars);
+ answer->set_hash_field(hash_field);
ASSERT_EQ(size, answer->Size());
@@ -2717,19 +2658,10 @@
}
if (result->IsFailure()) return result;
- // Determine the map based on the string's length.
- Map* map;
- if (length <= String::kMaxShortStringSize) {
- map = short_ascii_string_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = medium_ascii_string_map();
- } else {
- map = long_ascii_string_map();
- }
-
// Partially initialize the object.
- HeapObject::cast(result)->set_map(map);
+ HeapObject::cast(result)->set_map(ascii_string_map());
String::cast(result)->set_length(length);
+ String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
return result;
}
@@ -2754,19 +2686,10 @@
}
if (result->IsFailure()) return result;
- // Determine the map based on the string's length.
- Map* map;
- if (length <= String::kMaxShortStringSize) {
- map = short_string_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = medium_string_map();
- } else {
- map = long_string_map();
- }
-
// Partially initialize the object.
- HeapObject::cast(result)->set_map(map);
+ HeapObject::cast(result)->set_map(string_map());
String::cast(result)->set_length(length);
+ String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
return result;
}
@@ -2987,6 +2910,11 @@
last_gc_count = gc_count_;
} else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
+ // Before doing the mark-sweep collections we clear the
+ // compilation cache to avoid hanging on to source code and
+ // generated code for cached functions.
+ CompilationCache::Clear();
+
CollectAllGarbage(false);
new_space_.Shrink();
last_gc_count = gc_count_;
@@ -3116,7 +3044,7 @@
ASSERT(HasBeenSetup());
VerifyPointersVisitor visitor;
- IterateRoots(&visitor);
+ IterateRoots(&visitor, VISIT_ONLY_STRONG);
new_space_.Verify();
@@ -3243,60 +3171,57 @@
}
-#ifdef DEBUG
-#define SYNCHRONIZE_TAG(tag) v->Synchronize(tag)
-#else
-#define SYNCHRONIZE_TAG(tag)
-#endif
-
-void Heap::IterateRoots(ObjectVisitor* v) {
- IterateStrongRoots(v);
+void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
+ IterateStrongRoots(v, mode);
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
- SYNCHRONIZE_TAG("symbol_table");
+ v->Synchronize("symbol_table");
}
-void Heap::IterateStrongRoots(ObjectVisitor* v) {
+void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
- SYNCHRONIZE_TAG("strong_root_list");
+ v->Synchronize("strong_root_list");
v->VisitPointer(bit_cast<Object**, String**>(&hidden_symbol_));
- SYNCHRONIZE_TAG("symbol");
+ v->Synchronize("symbol");
Bootstrapper::Iterate(v);
- SYNCHRONIZE_TAG("bootstrapper");
+ v->Synchronize("bootstrapper");
Top::Iterate(v);
- SYNCHRONIZE_TAG("top");
+ v->Synchronize("top");
Relocatable::Iterate(v);
- SYNCHRONIZE_TAG("relocatable");
+ v->Synchronize("relocatable");
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug::Iterate(v);
#endif
- SYNCHRONIZE_TAG("debug");
+ v->Synchronize("debug");
CompilationCache::Iterate(v);
- SYNCHRONIZE_TAG("compilationcache");
+ v->Synchronize("compilationcache");
// Iterate over local handles in handle scopes.
HandleScopeImplementer::Iterate(v);
- SYNCHRONIZE_TAG("handlescope");
+ v->Synchronize("handlescope");
// Iterate over the builtin code objects and code stubs in the heap. Note
// that it is not strictly necessary to iterate over code objects on
// scavenge collections. We still do it here because this same function
// is used by the mark-sweep collector and the deserializer.
Builtins::IterateBuiltins(v);
- SYNCHRONIZE_TAG("builtins");
+ v->Synchronize("builtins");
// Iterate over global handles.
- GlobalHandles::IterateRoots(v);
- SYNCHRONIZE_TAG("globalhandles");
+ if (mode == VISIT_ONLY_STRONG) {
+ GlobalHandles::IterateStrongRoots(v);
+ } else {
+ GlobalHandles::IterateAllRoots(v);
+ }
+ v->Synchronize("globalhandles");
// Iterate over pointers being held by inactive threads.
ThreadManager::Iterate(v);
- SYNCHRONIZE_TAG("threadmanager");
+ v->Synchronize("threadmanager");
}
-#undef SYNCHRONIZE_TAG
// Flag is set when the heap has been configured. The heap can be repeatedly
@@ -3348,6 +3273,26 @@
}
+void Heap::RecordStats(HeapStats* stats) {
+ *stats->start_marker = 0xDECADE00;
+ *stats->end_marker = 0xDECADE01;
+ *stats->new_space_size = new_space_.Size();
+ *stats->new_space_capacity = new_space_.Capacity();
+ *stats->old_pointer_space_size = old_pointer_space_->Size();
+ *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
+ *stats->old_data_space_size = old_data_space_->Size();
+ *stats->old_data_space_capacity = old_data_space_->Capacity();
+ *stats->code_space_size = code_space_->Size();
+ *stats->code_space_capacity = code_space_->Capacity();
+ *stats->map_space_size = map_space_->Size();
+ *stats->map_space_capacity = map_space_->Capacity();
+ *stats->cell_space_size = cell_space_->Size();
+ *stats->cell_space_capacity = cell_space_->Capacity();
+ *stats->lo_space_size = lo_space_->Size();
+ GlobalHandles::RecordStats(stats);
+}
+
+
int Heap::PromotedSpaceSize() {
return old_pointer_space_->Size()
+ old_data_space_->Size()
@@ -3461,14 +3406,18 @@
}
-void Heap::SetStackLimit(intptr_t limit) {
+void Heap::SetStackLimits() {
// On 64 bit machines, pointers are generally out of range of Smis. We write
// something that looks like an out of range Smi to the GC.
- // Set up the special root array entry containing the stack guard.
- // This is actually an address, but the tag makes the GC ignore it.
+ // Set up the special root array entries containing the stack limits.
+ // These are actually addresses, but the tag makes the GC ignore them.
roots_[kStackLimitRootIndex] =
- reinterpret_cast<Object*>((limit & ~kSmiTagMask) | kSmiTag);
+ reinterpret_cast<Object*>(
+ (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag);
+ roots_[kRealStackLimitRootIndex] =
+ reinterpret_cast<Object*>(
+ (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
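
Disguising an address as a Smi is plain bit twiddling: forcing the low tag bit to the Smi tag makes the word look like a small integer, which the GC never follows as a heap pointer. A hedged sketch, assuming the usual V8 encoding of kSmiTag == 0 with a one-bit tag mask:

    #include <stdint.h>

    const intptr_t kSmiTag = 0;      // Assumed V8 value.
    const intptr_t kSmiTagMask = 1;  // Assumed one-bit tag.

    // Clears the tag bit so the stored limit passes for a Smi; the GC
    // skips Smi roots, so the value is never chased as a pointer.
    inline intptr_t DisguiseAsSmi(intptr_t address) {
      return (address & ~kSmiTagMask) | kSmiTag;
    }
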
@@ -3895,7 +3844,7 @@
search_for_any_global = false;
MarkRootVisitor root_visitor;
- IterateRoots(&root_visitor);
+ IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}
@@ -3907,7 +3856,7 @@
search_for_any_global = true;
MarkRootVisitor root_visitor;
- IterateRoots(&root_visitor);
+ IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}
#endif
diff --git a/src/heap.h b/src/heap.h
index cd49a8d..b37fe4b 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -38,7 +38,13 @@
// Defines all the roots in Heap.
#define UNCONDITIONAL_STRONG_ROOT_LIST(V) \
- /* Cluster the most popular ones in a few cache lines here at the top. */ \
+ /* Put the byte array map early. We need it to be in place by the time */ \
+ /* the deserializer hits the next page, since it wants to put a byte */ \
+ /* array in the unused space at the end of the page. */ \
+ V(Map, byte_array_map, ByteArrayMap) \
+ V(Map, one_pointer_filler_map, OnePointerFillerMap) \
+ V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
+ /* Cluster the most popular ones in a few cache lines here at the top. */ \
V(Smi, stack_limit, StackLimit) \
V(Object, undefined_value, UndefinedValue) \
V(Object, the_hole_value, TheHoleValue) \
@@ -53,63 +59,20 @@
V(Object, termination_exception, TerminationException) \
V(Map, hash_table_map, HashTableMap) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
- V(Map, short_string_map, ShortStringMap) \
- V(Map, medium_string_map, MediumStringMap) \
- V(Map, long_string_map, LongStringMap) \
- V(Map, short_ascii_string_map, ShortAsciiStringMap) \
- V(Map, medium_ascii_string_map, MediumAsciiStringMap) \
- V(Map, long_ascii_string_map, LongAsciiStringMap) \
- V(Map, short_symbol_map, ShortSymbolMap) \
- V(Map, medium_symbol_map, MediumSymbolMap) \
- V(Map, long_symbol_map, LongSymbolMap) \
- V(Map, short_ascii_symbol_map, ShortAsciiSymbolMap) \
- V(Map, medium_ascii_symbol_map, MediumAsciiSymbolMap) \
- V(Map, long_ascii_symbol_map, LongAsciiSymbolMap) \
- V(Map, short_cons_symbol_map, ShortConsSymbolMap) \
- V(Map, medium_cons_symbol_map, MediumConsSymbolMap) \
- V(Map, long_cons_symbol_map, LongConsSymbolMap) \
- V(Map, short_cons_ascii_symbol_map, ShortConsAsciiSymbolMap) \
- V(Map, medium_cons_ascii_symbol_map, MediumConsAsciiSymbolMap) \
- V(Map, long_cons_ascii_symbol_map, LongConsAsciiSymbolMap) \
- V(Map, short_sliced_symbol_map, ShortSlicedSymbolMap) \
- V(Map, medium_sliced_symbol_map, MediumSlicedSymbolMap) \
- V(Map, long_sliced_symbol_map, LongSlicedSymbolMap) \
- V(Map, short_sliced_ascii_symbol_map, ShortSlicedAsciiSymbolMap) \
- V(Map, medium_sliced_ascii_symbol_map, MediumSlicedAsciiSymbolMap) \
- V(Map, long_sliced_ascii_symbol_map, LongSlicedAsciiSymbolMap) \
- V(Map, short_external_symbol_map, ShortExternalSymbolMap) \
- V(Map, medium_external_symbol_map, MediumExternalSymbolMap) \
- V(Map, long_external_symbol_map, LongExternalSymbolMap) \
- V(Map, short_external_ascii_symbol_map, ShortExternalAsciiSymbolMap) \
- V(Map, medium_external_ascii_symbol_map, MediumExternalAsciiSymbolMap) \
- V(Map, long_external_ascii_symbol_map, LongExternalAsciiSymbolMap) \
- V(Map, short_cons_string_map, ShortConsStringMap) \
- V(Map, medium_cons_string_map, MediumConsStringMap) \
- V(Map, long_cons_string_map, LongConsStringMap) \
- V(Map, short_cons_ascii_string_map, ShortConsAsciiStringMap) \
- V(Map, medium_cons_ascii_string_map, MediumConsAsciiStringMap) \
- V(Map, long_cons_ascii_string_map, LongConsAsciiStringMap) \
- V(Map, short_sliced_string_map, ShortSlicedStringMap) \
- V(Map, medium_sliced_string_map, MediumSlicedStringMap) \
- V(Map, long_sliced_string_map, LongSlicedStringMap) \
- V(Map, short_sliced_ascii_string_map, ShortSlicedAsciiStringMap) \
- V(Map, medium_sliced_ascii_string_map, MediumSlicedAsciiStringMap) \
- V(Map, long_sliced_ascii_string_map, LongSlicedAsciiStringMap) \
- V(Map, short_external_string_map, ShortExternalStringMap) \
- V(Map, medium_external_string_map, MediumExternalStringMap) \
- V(Map, long_external_string_map, LongExternalStringMap) \
- V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \
- V(Map, medium_external_ascii_string_map, MediumExternalAsciiStringMap) \
- V(Map, long_external_ascii_string_map, LongExternalAsciiStringMap) \
- V(Map, undetectable_short_string_map, UndetectableShortStringMap) \
- V(Map, undetectable_medium_string_map, UndetectableMediumStringMap) \
- V(Map, undetectable_long_string_map, UndetectableLongStringMap) \
- V(Map, undetectable_short_ascii_string_map, UndetectableShortAsciiStringMap) \
- V(Map, \
- undetectable_medium_ascii_string_map, \
- UndetectableMediumAsciiStringMap) \
- V(Map, undetectable_long_ascii_string_map, UndetectableLongAsciiStringMap) \
- V(Map, byte_array_map, ByteArrayMap) \
+ V(Map, string_map, StringMap) \
+ V(Map, ascii_string_map, AsciiStringMap) \
+ V(Map, symbol_map, SymbolMap) \
+ V(Map, ascii_symbol_map, AsciiSymbolMap) \
+ V(Map, cons_symbol_map, ConsSymbolMap) \
+ V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \
+ V(Map, external_symbol_map, ExternalSymbolMap) \
+ V(Map, external_ascii_symbol_map, ExternalAsciiSymbolMap) \
+ V(Map, cons_string_map, ConsStringMap) \
+ V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
+ V(Map, external_string_map, ExternalStringMap) \
+ V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
+ V(Map, undetectable_string_map, UndetectableStringMap) \
+ V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
V(Map, pixel_array_map, PixelArrayMap) \
V(Map, external_byte_array_map, ExternalByteArrayMap) \
V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap) \
@@ -126,8 +89,6 @@
V(Map, boilerplate_function_map, BoilerplateFunctionMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, proxy_map, ProxyMap) \
- V(Map, one_pointer_filler_map, OnePointerFillerMap) \
- V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
V(Object, nan_value, NanValue) \
V(Object, minus_zero_value, MinusZeroValue) \
V(String, empty_string, EmptyString) \
@@ -145,6 +106,7 @@
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
V(Object, last_script_id, LastScriptId) \
+ V(Smi, real_stack_limit, RealStackLimit) \
#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
#define STRONG_ROOT_LIST(V) \
@@ -221,11 +183,13 @@
V(exec_symbol, "exec") \
V(zero_symbol, "0") \
V(global_eval_symbol, "GlobalEval") \
- V(identity_hash_symbol, "v8::IdentityHash")
+ V(identity_hash_symbol, "v8::IdentityHash") \
+ V(closure_symbol, "(closure)")
// Forward declaration of the GCTracer class.
class GCTracer;
+class HeapStats;
// The all static Heap captures the interface to the global object heap.
@@ -246,10 +210,10 @@
// Destroys all memory allocated by the heap.
static void TearDown();
- // Sets the stack limit in the roots_ array. Some architectures generate code
- // that looks here, because it is faster than loading from the static jslimit_
- // variable.
- static void SetStackLimit(intptr_t limit);
+ // Set the stack limit in the roots_ array. Some architectures generate
+ // code that looks here, because it is faster than loading from the static
+ // jslimit_/real_jslimit_ variable in the StackGuard.
+ static void SetStackLimits();
// Returns whether Setup has been called.
static bool HasBeenSetup();
@@ -304,6 +268,9 @@
static Address always_allocate_scope_depth_address() {
return reinterpret_cast<Address>(&always_allocate_scope_depth_);
}
+ static bool linear_allocation() {
+ return linear_allocation_scope_depth_ != 0;
+ }
static Address* NewSpaceAllocationTopAddress() {
return new_space_.allocation_top_address();
@@ -413,11 +380,11 @@
// Please note this function does not perform a garbage collection.
static inline Object* AllocateSymbol(Vector<const char> str,
int chars,
- uint32_t length_field);
+ uint32_t hash_field);
static Object* AllocateInternalSymbol(unibrow::CharacterStream* buffer,
int chars,
- uint32_t length_field);
+ uint32_t hash_field);
static Object* AllocateExternalSymbol(Vector<const char> str,
int chars);
@@ -579,16 +546,6 @@
// Please note this does not perform a garbage collection.
static Object* AllocateConsString(String* first, String* second);
- // Allocates a new sliced string object which is a slice of an underlying
- // string buffer stretching from the index start (inclusive) to the index
- // end (exclusive).
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- static Object* AllocateSlicedString(String* buffer,
- int start,
- int end);
-
// Allocates a new sub string object which is a substring of an underlying
// string buffer stretching from the index start (inclusive) to the index
// end (exclusive).
@@ -645,6 +602,7 @@
}
static Object* LookupSymbol(String* str);
static bool LookupSymbolIfExists(String* str, String** symbol);
+ static bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
// Compute the matching symbol map for a string if possible.
// NULL is returned if string is in new space or not flattened.
@@ -722,9 +680,9 @@
static String* hidden_symbol() { return hidden_symbol_; }
// Iterates over all roots in the heap.
- static void IterateRoots(ObjectVisitor* v);
+ static void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
- static void IterateStrongRoots(ObjectVisitor* v);
+ static void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
// Iterates remembered set of an old space.
static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);
@@ -749,7 +707,7 @@
static bool Contains(HeapObject* value);
// Checks whether an address/object in a space.
- // Currently used by tests and heap verification only.
+ // Currently used by tests, serialization and heap verification only.
static bool InSpace(Address addr, AllocationSpace space);
static bool InSpace(HeapObject* value, AllocationSpace space);
@@ -908,6 +866,8 @@
static RootListIndex RootIndexForExternalArrayType(
ExternalArrayType array_type);
+ static void RecordStats(HeapStats* stats);
+
private:
static int reserved_semispace_size_;
static int max_semispace_size_;
@@ -920,9 +880,13 @@
static int survived_since_last_expansion_;
static int always_allocate_scope_depth_;
+ static int linear_allocation_scope_depth_;
static bool context_disposed_pending_;
- static const int kMaxMapSpaceSize = 8*MB;
+ // The number of MapSpace pages is limited by the way we pack
+ // Map pointers during GC.
+ static const int kMaxMapSpaceSize =
+ (1 << MapWord::kMapPageIndexBits) * Page::kPageSize;
#if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 512*KB;
@@ -1135,6 +1099,32 @@
friend class Factory;
friend class DisallowAllocationFailure;
friend class AlwaysAllocateScope;
+ friend class LinearAllocationScope;
+};
+
+
+class HeapStats {
+ public:
+ int *start_marker;
+ int *new_space_size;
+ int *new_space_capacity;
+ int *old_pointer_space_size;
+ int *old_pointer_space_capacity;
+ int *old_data_space_size;
+ int *old_data_space_capacity;
+ int *code_space_size;
+ int *code_space_capacity;
+ int *map_space_size;
+ int *map_space_capacity;
+ int *cell_space_size;
+ int *cell_space_capacity;
+ int *lo_space_size;
+ int *global_handle_count;
+ int *weak_global_handle_count;
+ int *pending_global_handle_count;
+ int *near_death_global_handle_count;
+ int *destroyed_global_handle_count;
+ int *end_marker;
};
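
HeapStats is deliberately a bag of out-pointers: the caller owns the storage, typically stack locals in an out-of-memory handler, and RecordStats just writes through the pointers, with the 0xDECADE00/0xDECADE01 markers bracketing the block so it can be located in a crash dump. A hedged usage sketch with a trimmed-down struct and a stand-in recorder (both hypothetical):

    struct MiniHeapStats {
      int* start_marker;
      int* new_space_size;
      int* end_marker;
    };

    // Stand-in for Heap::RecordStats: fills whatever the pointers target.
    void RecordMiniStats(MiniHeapStats* stats, int new_space_size) {
      *stats->start_marker = 0xDECADE00;
      *stats->new_space_size = new_space_size;
      *stats->end_marker = 0xDECADE01;
    }

    void ReportOutOfMemory() {
      // Stack locals survive into a minidump even when the heap itself
      // is unusable, which is the point of this layout.
      int start = 0, size = 0, end = 0;
      MiniHeapStats stats = { &start, &size, &end };
      RecordMiniStats(&stats, 0);
    }
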
@@ -1156,6 +1146,19 @@
};
+class LinearAllocationScope {
+ public:
+ LinearAllocationScope() {
+ Heap::linear_allocation_scope_depth_++;
+ }
+
+ ~LinearAllocationScope() {
+ Heap::linear_allocation_scope_depth_--;
+ ASSERT(Heap::linear_allocation_scope_depth_ >= 0);
+ }
+};
+
+
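
LinearAllocationScope follows the same RAII pattern as AlwaysAllocateScope: construction bumps a static depth counter, destruction decrements it, and Heap::linear_allocation() simply tests for non-zero depth. A generic sketch of the pattern (class name hypothetical):

    #include <cassert>

    // Depth-counted RAII scope: nesting works because each constructor
    // increments and each destructor decrements the shared counter.
    class CountedScope {
     public:
      CountedScope() { depth_++; }
      ~CountedScope() { depth_--; assert(depth_ >= 0); }
      static bool IsActive() { return depth_ != 0; }
     private:
      static int depth_;
    };

    int CountedScope::depth_ = 0;

    void Example() {
      CountedScope outer;
      {
        CountedScope inner;  // Nested scopes stack cleanly.
        assert(CountedScope::IsActive());
      }
      assert(CountedScope::IsActive());  // Outer scope still open.
    }
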
#ifdef DEBUG
// Visitor class to verify interior pointers that do not have remembered set
// bits. All heap object pointers have to point into the heap to a location
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 5fa75ec..69f2a8d 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -89,7 +89,7 @@
}
-Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_Handle_at(pc_);
}
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 698377a..d6f5550 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -49,6 +49,7 @@
// Safe default is no features.
uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::enabled_ = 0;
+uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
// The Probe method needs executable memory, so it uses Heap::CreateCode.
@@ -56,7 +57,10 @@
void CpuFeatures::Probe() {
ASSERT(Heap::HasBeenSetup());
ASSERT(supported_ == 0);
- if (Serializer::enabled()) return; // No features if we might serialize.
+ if (Serializer::enabled()) {
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ return; // No features if we might serialize.
+ }
Assembler assm(NULL, 0);
Label cpuid, done;
@@ -124,6 +128,10 @@
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
supported_ = probe();
+ found_by_runtime_probing_ = supported_;
+ uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
+ supported_ |= os_guarantees;
+ found_by_runtime_probing_ &= ~os_guarantees;
}
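
The bookkeeping at the end of Probe() is simple set algebra: OS-guaranteed features are folded into supported_, and removed from found_by_runtime_probing_ so the serializer can tell which features were established only by executing probe code. A sketch of the same arithmetic:

    #include <stdint.h>

    struct FeatureSets {
      uint64_t supported;
      uint64_t found_by_runtime_probing;
    };

    // probed: bits discovered by running CPUID at runtime.
    // os_guarantees: bits the platform promises regardless of probing.
    FeatureSets CombineFeatures(uint64_t probed, uint64_t os_guarantees) {
      FeatureSets result;
      result.supported = probed | os_guarantees;                  // Union.
      result.found_by_runtime_probing = probed & ~os_guarantees;  // Difference.
      return result;
    }
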
@@ -360,7 +368,7 @@
void Assembler::cpuid() {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
+ ASSERT(CpuFeatures::IsEnabled(CPUID));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
@@ -712,7 +720,7 @@
void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+ ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
UNIMPLEMENTED();
@@ -723,7 +731,7 @@
void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+ ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
UNIMPLEMENTED();
@@ -734,7 +742,7 @@
void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
+ ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode: 0f 40 + cc /r
@@ -1083,7 +1091,7 @@
}
-void Assembler::sar(Register dst) {
+void Assembler::sar_cl(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xD3);
@@ -1123,7 +1131,7 @@
}
-void Assembler::shl(Register dst) {
+void Assembler::shl_cl(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xD3);
@@ -1144,24 +1152,21 @@
EnsureSpace ensure_space(this);
last_pc_ = pc_;
ASSERT(is_uint5(imm8)); // illegal shift count
- EMIT(0xC1);
- EMIT(0xE8 | dst.code());
- EMIT(imm8);
-}
-
-
-void Assembler::shr(Register dst) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0xD3);
- EMIT(0xE8 | dst.code());
+ if (imm8 == 1) {
+ EMIT(0xD1);
+ EMIT(0xE8 | dst.code());
+ } else {
+ EMIT(0xC1);
+ EMIT(0xE8 | dst.code());
+ EMIT(imm8);
+ }
}
void Assembler::shr_cl(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- EMIT(0xD1);
+ EMIT(0xD3);
EMIT(0xE8 | dst.code());
}
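
The hunk above also fixes a genuine encoding bug: shr_cl previously emitted 0xD1, the shift-by-one opcode, instead of 0xD3, the shift-by-CL opcode. The three ia32 shift forms differ only in that first byte, with the /5 ModRM extension (0xE8 | reg) selecting SHR in each. A hedged sketch of the encodings (emitter name hypothetical):

    #include <stdint.h>
    #include <vector>

    // Emits SHR on a 32-bit register in its three ia32 encodings;
    // reg_code is the 3-bit register number (0 for eax, 1 for ecx, ...).
    void EmitShr(std::vector<uint8_t>* code, int reg_code, int count,
                 bool by_cl) {
      if (by_cl) {
        code->push_back(0xD3);  // shr r/m32, cl
      } else if (count == 1) {
        code->push_back(0xD1);  // shr r/m32, 1 (one byte shorter)
      } else {
        code->push_back(0xC1);  // shr r/m32, imm8
      }
      code->push_back(static_cast<uint8_t>(0xE8 | reg_code));
      if (!by_cl && count != 1) {
        code->push_back(static_cast<uint8_t>(count));
      }
    }
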
@@ -1316,7 +1321,7 @@
void Assembler::rdtsc() {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::RDTSC));
+ ASSERT(CpuFeatures::IsEnabled(RDTSC));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
@@ -1662,7 +1667,7 @@
void Assembler::fisttp_s(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3));
+ ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xDB);
@@ -1923,7 +1928,7 @@
void Assembler::cvttss2si(Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
@@ -1934,7 +1939,7 @@
void Assembler::cvttsd2si(Register dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -1945,7 +1950,7 @@
void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -1956,7 +1961,7 @@
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -1967,7 +1972,7 @@
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -1978,7 +1983,7 @@
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -1989,7 +1994,7 @@
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@@ -2000,7 +2005,7 @@
void Assembler::comisd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@@ -2025,7 +2030,7 @@
void Assembler::movsd(const Operand& dst, XMMRegister src ) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2); // double
@@ -2036,7 +2041,7 @@
void Assembler::movsd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2); // double
@@ -2245,10 +2250,15 @@
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
ASSERT(rmode != RelocInfo::NONE);
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
- !Serializer::enabled() &&
- !FLAG_debug_code) {
- return;
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ if (!Serializer::enabled() && !FLAG_debug_code) {
+ return;
+ }
}
RelocInfo rinfo(pc_, rmode, data);
reloc_info_writer.Write(&rinfo);
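
The reworked guard keeps the old fast path (external references are not recorded when neither serializing nor emitting debug code) but adds a debug-only latch: skipping the reference makes any later snapshot incomplete, so the code records that it is now too late to enable the serializer. Condensed:

    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
    #ifdef DEBUG
      // This reference will be missing from the reloc table if skipped,
      // so flipping the serializer on afterwards must be an error.
      if (!Serializer::enabled()) Serializer::TooLateToEnableNow();
    #endif
      if (!Serializer::enabled() && !FLAG_debug_code) return;
    }
    // Otherwise fall through and write the RelocInfo entry.
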
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 4d9f08b..662ebc9 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -37,6 +37,8 @@
#ifndef V8_IA32_ASSEMBLER_IA32_H_
#define V8_IA32_ASSEMBLER_IA32_H_
+#include "serialize.h"
+
namespace v8 {
namespace internal {
@@ -358,15 +360,11 @@
// }
class CpuFeatures : public AllStatic {
public:
- // Feature flags bit positions. They are mostly based on the CPUID spec.
- // (We assign CPUID itself to one of the currently reserved bits --
- // feel free to change this if needed.)
- enum Feature { SSE3 = 32, SSE2 = 26, CMOV = 15, RDTSC = 4, CPUID = 10 };
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe();
// Check whether a feature is supported by the target CPU.
- static bool IsSupported(Feature f) {
+ static bool IsSupported(CpuFeature f) {
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
@@ -374,29 +372,32 @@
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
}
// Check whether a feature is currently enabled.
- static bool IsEnabled(Feature f) {
+ static bool IsEnabled(CpuFeature f) {
return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
}
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
- explicit Scope(Feature f) {
+ explicit Scope(CpuFeature f) {
+ uint64_t mask = static_cast<uint64_t>(1) << f;
ASSERT(CpuFeatures::IsSupported(f));
+ ASSERT(!Serializer::enabled() || (found_by_runtime_probing_ & mask) == 0);
old_enabled_ = CpuFeatures::enabled_;
- CpuFeatures::enabled_ |= (static_cast<uint64_t>(1) << f);
+ CpuFeatures::enabled_ |= mask;
}
~Scope() { CpuFeatures::enabled_ = old_enabled_; }
private:
uint64_t old_enabled_;
#else
public:
- explicit Scope(Feature f) {}
+ explicit Scope(CpuFeature f) {}
#endif
};
private:
static uint64_t supported_;
static uint64_t enabled_;
+ static uint64_t found_by_runtime_probing_;
};
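
With the per-port Feature enum replaced by the shared CpuFeature type, and serialize.h included so the Scope assertion can consult Serializer::enabled(), the call-site pattern stays the same; what is new is that the DEBUG Scope constructor asserts that code generated while serializing never depends on a probed-only feature:

    // Typical guarded code generation, as used at call sites in this patch.
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatures::Scope use_sse2(SSE2);   // DEBUG: asserts snapshot safety
      // ... emit SSE2 instructions (movsd, addsd, ...) ...
    }
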
@@ -439,6 +440,23 @@
inline static Address target_address_at(Address pc);
inline static void set_target_address_at(Address pc, Address target);
+ // This sets the branch destination (which is in the instruction on x86).
+ // This is for calls and branches within generated code.
+ inline static void set_target_at(Address instruction_payload,
+ Address target) {
+ set_target_address_at(instruction_payload, target);
+ }
+
+ // This sets the branch destination (which is in the instruction on x86).
+ // This is for calls and branches to runtime code.
+ inline static void set_external_target_at(Address instruction_payload,
+ Address target) {
+ set_target_address_at(instruction_payload, target);
+ }
+
+ static const int kCallTargetSize = kPointerSize;
+ static const int kExternalTargetSize = kPointerSize;
+
// Distance between the address of the code target in the call instruction
// and the return address
static const int kCallTargetAddressOffset = kPointerSize;
@@ -446,6 +464,8 @@
// to jump to.
static const int kPatchReturnSequenceAddressOffset = 1; // JMP imm32.
+ static const int kCallInstructionLength = 5;
+ static const int kJSReturnSequenceLength = 6;
// ---------------------------------------------------------------------------
// Code generation
@@ -579,19 +599,18 @@
void rcl(Register dst, uint8_t imm8);
void sar(Register dst, uint8_t imm8);
- void sar(Register dst);
+ void sar_cl(Register dst);
void sbb(Register dst, const Operand& src);
void shld(Register dst, const Operand& src);
void shl(Register dst, uint8_t imm8);
- void shl(Register dst);
+ void shl_cl(Register dst);
void shrd(Register dst, const Operand& src);
void shr(Register dst, uint8_t imm8);
- void shr(Register dst);
void shr_cl(Register dst);
void subb(const Operand& dst, int8_t imm8);
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index ad44026..a164cfa 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -462,6 +462,8 @@
const int kGlobalIndex =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ mov(ebx, FieldOperand(esi, kGlobalIndex));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+ __ mov(ebx, FieldOperand(ebx, kGlobalIndex));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
@@ -520,48 +522,31 @@
__ push(Operand(ebp, 2 * kPointerSize)); // push arguments
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
- if (FLAG_check_stack) {
- // We need to catch preemptions right here, otherwise an unlucky preemption
- // could show up as a failed apply.
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- Label retry_preemption;
- Label no_preemption;
- __ bind(&retry_preemption);
- __ mov(edi, Operand::StaticVariable(stack_guard_limit));
- __ cmp(esp, Operand(edi));
- __ j(above, &no_preemption, taken);
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit();
+ __ mov(edi, Operand::StaticVariable(real_stack_limit));
+ // Make ecx the space we have left. The stack might already have overflowed
+ // here, which will cause ecx to become negative.
+ __ mov(ecx, Operand(esp));
+ __ sub(ecx, Operand(edi));
+ // Make edx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ mov(edx, Operand(eax));
+ __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, Operand(edx));
+ __ j(greater, &okay, taken); // Signed comparison.
- // Preemption!
- // Because builtins always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack.
- __ push(eax);
- __ push(Immediate(Smi::FromInt(0)));
-
- // Do call to runtime routine.
- __ CallRuntime(Runtime::kStackGuard, 1);
- __ pop(eax);
- __ jmp(&retry_preemption);
-
- __ bind(&no_preemption);
-
- Label okay;
- // Make ecx the space we have left.
- __ mov(ecx, Operand(esp));
- __ sub(ecx, Operand(edi));
- // Make edx the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(edx, Operand(eax));
- __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
- __ cmp(ecx, Operand(edx));
- __ j(greater, &okay, taken);
-
- // Too bad: Out of stack space.
- __ push(Operand(ebp, 4 * kPointerSize)); // push this
- __ push(eax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- }
+ // Out of stack space.
+ __ push(Operand(ebp, 4 * kPointerSize)); // push this
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+ // End of stack check.
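
In pseudo-C the replacement check computes signed headroom against the real (not interrupt-adjusted) limit; a sketch under the register assignments above, with smi_argc naming the smi-tagged argument count in eax:

    // Signed throughout, so an already-overflowed stack (esp below the
    // limit) also falls into the overflow path.
    intptr_t space_left   = esp - real_stack_limit;                        // ecx
    intptr_t space_needed = smi_argc << (kPointerSizeLog2 - kSmiTagSize);  // edx
    // The single shift both untags the smi and scales by kPointerSize.
    if (!(space_left > space_needed)) {
      // Invoke APPLY_OVERFLOW: report a stack overflow to the caller.
    }
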
// Push current index and limit.
const int kLimitOffset =
@@ -606,6 +591,8 @@
const int kGlobalOffset =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ mov(ebx, FieldOperand(esi, kGlobalOffset));
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+ __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
@@ -894,7 +881,7 @@
// be preserved.
static void ArrayNativeCode(MacroAssembler* masm,
bool construct_call,
- Label *call_generic_code) {
+ Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call;
// Push the constructor and argc. No need to tag argc as a smi, as there will
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index a339e90..7c8ff31 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "parser.h"
@@ -75,7 +76,6 @@
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
- typeof_state_(NOT_INSIDE_TYPEOF),
destination_(NULL),
previous_(NULL) {
owner_->set_state(this);
@@ -83,10 +83,8 @@
CodeGenState::CodeGenState(CodeGenerator* owner,
- TypeofState typeof_state,
ControlDestination* destination)
: owner_(owner),
- typeof_state_(typeof_state),
destination_(destination),
previous_(owner->state()) {
owner_->set_state(this);
@@ -415,13 +413,12 @@
// partially compiled) into control flow to the control destination.
// If force_control is true, control flow is forced.
void CodeGenerator::LoadCondition(Expression* x,
- TypeofState typeof_state,
ControlDestination* dest,
bool force_control) {
ASSERT(!in_spilled_code());
int original_height = frame_->height();
- { CodeGenState new_state(this, typeof_state, dest);
+ { CodeGenState new_state(this, dest);
Visit(x);
// If we hit a stack overflow, we may not have actually visited
@@ -450,17 +447,16 @@
}
-void CodeGenerator::LoadAndSpill(Expression* expression,
- TypeofState typeof_state) {
+void CodeGenerator::LoadAndSpill(Expression* expression) {
ASSERT(in_spilled_code());
set_in_spilled_code(false);
- Load(expression, typeof_state);
+ Load(expression);
frame_->SpillAll();
set_in_spilled_code(true);
}
-void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
@@ -468,7 +464,7 @@
JumpTarget true_target;
JumpTarget false_target;
ControlDestination dest(&true_target, &false_target, true);
- LoadCondition(x, typeof_state, &dest, false);
+ LoadCondition(expr, &dest, false);
if (dest.false_was_fall_through()) {
// The false target was just bound.
@@ -543,23 +539,25 @@
}
-// TODO(1241834): Get rid of this function in favor of just using Load, now
-// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
-// variables w/o reference errors elsewhere.
-void CodeGenerator::LoadTypeofExpression(Expression* x) {
- Variable* variable = x->AsVariableProxy()->AsVariable();
+void CodeGenerator::LoadTypeofExpression(Expression* expr) {
+ // Special handling of identifiers as subexpressions of typeof.
+ Variable* variable = expr->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // NOTE: This is somewhat nasty. We force the compiler to load
- // the variable as if through '<global>.<variable>' to make sure we
- // do not get reference errors.
+ // For a global variable we build the property reference
+ // <global>.<variable> and perform a (regular non-contextual) property
+ // load to make sure we do not get reference errors.
Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
Literal key(variable->name());
- // TODO(1241834): Fetch the position from the variable instead of using
- // no position.
Property property(&global, &key, RelocInfo::kNoPosition);
- Load(&property);
+ Reference ref(this, &property);
+ ref.GetValue();
+ } else if (variable != NULL && variable->slot() != NULL) {
+ // For a variable that rewrites to a slot, we signal it is the immediate
+ // subexpression of a typeof.
+ LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
} else {
- Load(x, INSIDE_TYPEOF);
+ // Anything else can be handled normally.
+ Load(expr);
}
}
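
The rewrite makes the three typeof cases explicit instead of threading a TypeofState through every Load. Roughly:

    // Decision tree of LoadTypeofExpression (pseudocode).
    if (var != NULL && !var->is_this() && var->is_global()) {
      // typeof someGlobal: load <global>.<name> as an ordinary property,
      // so an unresolvable name yields undefined, not a ReferenceError.
    } else if (var != NULL && var->slot() != NULL) {
      // typeof localVar: a slot load flagged INSIDE_TYPEOF, which selects
      // the lookup path that does not throw reference errors.
    } else {
      // typeof <any other expression>: a normal Load().
    }
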
@@ -1190,12 +1188,12 @@
// Perform the operation.
switch (op) {
case Token::SAR:
- __ sar(answer.reg());
+ __ sar_cl(answer.reg());
// No checks of result necessary
break;
case Token::SHR: {
Label result_ok;
- __ shr(answer.reg());
+ __ shr_cl(answer.reg());
// Check that the *unsigned* result fits in a smi. Neither of
// the two high-order bits can be set:
// * 0x80000000: high bit would be lost when smi tagging.
@@ -1216,7 +1214,7 @@
}
case Token::SHL: {
Label result_ok;
- __ shl(answer.reg());
+ __ shl_cl(answer.reg());
// Check that the *signed* result fits in a smi.
__ cmp(answer.reg(), 0xc0000000);
__ j(positive, &result_ok);
@@ -1970,27 +1968,6 @@
}
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#ifdef DEBUG
- void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
-#endif
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
// Call the function just below TOS on the stack with the given
// arguments. The receiver is the TOS.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
@@ -2027,7 +2004,7 @@
// Load the apply function onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
Reference ref(this, apply);
- ref.GetValue(NOT_INSIDE_TYPEOF);
+ ref.GetValue();
ASSERT(ref.type() == Reference::NAMED);
// Load the receiver and the existing arguments object onto the
@@ -2203,14 +2180,12 @@
void CodeGenerator::CheckStack() {
- if (FLAG_check_stack) {
- DeferredStackCheck* deferred = new DeferredStackCheck;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
- deferred->Branch(below);
- deferred->BindExit();
- }
+ DeferredStackCheck* deferred = new DeferredStackCheck;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ deferred->Branch(below);
+ deferred->BindExit();
}
@@ -2368,7 +2343,7 @@
JumpTarget then;
JumpTarget else_;
ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.false_was_fall_through()) {
// The else target was bound, so we compile the else part first.
@@ -2395,7 +2370,7 @@
ASSERT(!has_else_stm);
JumpTarget then;
ControlDestination dest(&then, &exit, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.false_was_fall_through()) {
// The exit label was bound. We may have dangling jumps to the
@@ -2415,7 +2390,7 @@
ASSERT(!has_then_stm);
JumpTarget else_;
ControlDestination dest(&exit, &else_, false);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.true_was_fall_through()) {
// The exit label was bound. We may have dangling jumps to the
@@ -2437,7 +2412,7 @@
// or control flow effect). LoadCondition is called without
// forcing control flow.
ControlDestination dest(&exit, &exit, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
+ LoadCondition(node->condition(), &dest, false);
if (!dest.is_used()) {
// We got a value on the frame rather than (or in addition to)
// control flow.
@@ -2474,6 +2449,7 @@
CodeForStatementPosition(node);
Load(node->expression());
Result return_value = frame_->Pop();
+ masm()->WriteRecordedPositions();
if (function_return_is_shadowed_) {
function_return_.Jump(&return_value);
} else {
@@ -2514,7 +2490,7 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check that the size of the code used for returning matches what is
// expected by the debugger.
- ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
+ ASSERT_EQ(Assembler::kJSReturnSequenceLength,
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
@@ -2737,8 +2713,10 @@
node->continue_target()->Bind();
}
if (has_valid_frame()) {
+ Comment cmnt(masm_, "[ DoWhileCondition");
+ CodeForDoWhileConditionPosition(node);
ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
}
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
@@ -2793,7 +2771,7 @@
// Compile the test with the body as the true target and preferred
// fall-through and with the break target as the false target.
ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
if (dest.false_was_fall_through()) {
// If we got the break target as fall-through, the test may have
@@ -2840,7 +2818,7 @@
// The break target is the fall-through (body is a backward
// jump from here and thus an invalid fall-through).
ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
}
} else {
// If we have chosen not to recompile the test at the bottom,
@@ -2931,7 +2909,7 @@
// Compile the test with the body as the true target and preferred
// fall-through and with the break target as the false target.
ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
if (dest.false_was_fall_through()) {
// If we got the break target as fall-through, the test may have
@@ -3001,7 +2979,7 @@
// The break target is the fall-through (body is a backward
// jump from here).
ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
}
} else {
// Otherwise, jump back to the test at the top.
@@ -3078,13 +3056,59 @@
jsobject.Bind();
// Get the set of properties (as a FixedArray or Map).
// eax: value to be iterated over
- frame_->EmitPush(eax); // push the object being iterated over (slot 4)
+ frame_->EmitPush(eax); // Push the object being iterated over.
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ JumpTarget call_runtime;
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+ JumpTarget check_prototype;
+ JumpTarget use_cache;
+ __ mov(ecx, eax);
+ loop.Bind();
+ // Check that there are no elements.
+ __ mov(edx, FieldOperand(ecx, JSObject::kElementsOffset));
+ __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
+ call_runtime.Branch(not_equal);
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in ebx for the subsequent
+ // prototype load.
+ __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
+ __ cmp(Operand(edx), Immediate(Factory::empty_descriptor_array()));
+ call_runtime.Branch(equal);
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors. This is the case if the next enumeration index
+ // field does not contain a smi.
+ __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
+ __ test(edx, Immediate(kSmiTagMask));
+ call_runtime.Branch(zero);
+ // For all objects but the receiver, check that the cache is empty.
+ __ cmp(ecx, Operand(eax));
+ check_prototype.Branch(equal);
+ __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
+ call_runtime.Branch(not_equal);
+ check_prototype.Bind();
+ // Load the prototype from the map and loop if non-null.
+ __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
+ __ cmp(Operand(ecx), Immediate(Factory::null_value()));
+ loop.Branch(not_equal);
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+ use_cache.Jump();
+
+ call_runtime.Bind();
+ // Call the runtime to get the property names for the object.
frame_->EmitPush(eax); // push the Object (slot 4) for the runtime call
frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
- // If we got a Map, we can do a fast modification check.
- // Otherwise, we got a FixedArray, and we have to do a slow check.
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
// eax: map or fixed array (result from call to
// Runtime::kGetPropertyNamesFast)
__ mov(edx, Operand(eax));
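
The inline walk above is the generated-code twin of the JSObject::IsSimpleEnum fast case. In pseudocode, with approximate field accessor names:

    // ecx walks the prototype chain starting at the receiver (eax).
    for (Object* o = receiver; o != null; o = o->map()->prototype()) {
      if (o->elements() != empty_fixed_array) goto call_runtime;
      DescriptorArray* d = o->map()->instance_descriptors();
      if (d == empty_descriptor_array) goto call_runtime;
      if (d->enumeration_index_field()->IsSmi()) goto call_runtime;  // no cache
      // Prototypes (but not the receiver) must have an empty bridge cache.
      if (o != receiver && d->enum_cache_bridge_cache() != empty_fixed_array)
        goto call_runtime;
    }
    // Every object checked out: iterate using receiver->map()'s enum cache.
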
@@ -3092,9 +3116,13 @@
__ cmp(ecx, Factory::meta_map());
fixed_array.Branch(not_equal);
+ use_cache.Bind();
// Get enum cache
- // eax: map (result from call to Runtime::kGetPropertyNamesFast)
+ // eax: map (either the result from a call to
+ // Runtime::kGetPropertyNamesFast or has been fetched directly from
+ // the object)
__ mov(ecx, Operand(eax));
+
__ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
// Get the bridge array held in the enumeration index field.
__ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
@@ -3576,7 +3604,8 @@
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(node, script_, this);
// Check for stack-overflow exception.
if (HasStackOverflow()) return;
InstantiateBoilerplate(boilerplate);
@@ -3596,25 +3625,25 @@
JumpTarget else_;
JumpTarget exit;
ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.false_was_fall_through()) {
// The else target was bound, so we compile the else part first.
- Load(node->else_expression(), typeof_state());
+ Load(node->else_expression());
if (then.is_linked()) {
exit.Jump();
then.Bind();
- Load(node->then_expression(), typeof_state());
+ Load(node->then_expression());
}
} else {
// The then target was bound, so we compile the then part first.
- Load(node->then_expression(), typeof_state());
+ Load(node->then_expression());
if (else_.is_linked()) {
exit.Jump();
else_.Bind();
- Load(node->else_expression(), typeof_state());
+ Load(node->else_expression());
}
}
@@ -3936,7 +3965,7 @@
void CodeGenerator::VisitSlot(Slot* node) {
Comment cmnt(masm_, "[ Slot");
- LoadFromSlotCheckForArguments(node, typeof_state());
+ LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
}
@@ -3949,7 +3978,7 @@
} else {
ASSERT(var->is_global());
Reference ref(this, node);
- ref.GetValue(typeof_state());
+ ref.GetValue();
}
}
@@ -3960,12 +3989,28 @@
}
-void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
+void CodeGenerator::PushUnsafeSmi(Handle<Object> value) {
+ ASSERT(value->IsSmi());
+ int bits = reinterpret_cast<int>(*value);
+ __ push(Immediate(bits & 0x0000FFFF));
+ __ or_(Operand(esp, 0), Immediate(bits & 0xFFFF0000));
+}
+
+
+void CodeGenerator::StoreUnsafeSmiToLocal(int offset, Handle<Object> value) {
+ ASSERT(value->IsSmi());
+ int bits = reinterpret_cast<int>(*value);
+ __ mov(Operand(ebp, offset), Immediate(bits & 0x0000FFFF));
+ __ or_(Operand(ebp, offset), Immediate(bits & 0xFFFF0000));
+}
+
+
+void CodeGenerator::MoveUnsafeSmi(Register target, Handle<Object> value) {
ASSERT(target.is_valid());
ASSERT(value->IsSmi());
int bits = reinterpret_cast<int>(*value);
__ Set(target, Immediate(bits & 0x0000FFFF));
- __ xor_(target, bits & 0xFFFF0000);
+ __ or_(target, bits & 0xFFFF0000);
}
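
All three helpers materialize an "unsafe" smi in two 16-bit halves, so the full 32-bit pattern never appears as a single immediate in generated code (presumably to keep attacker-chosen constants out of executable memory). The switch from xor_ to or_ in MoveUnsafeSmi is behavior-preserving, since the halves are disjoint, but states the intent. The common splitting:

    int bits = reinterpret_cast<int>(*value);  // raw smi bit pattern
    int lo = bits & 0x0000FFFF;   // written first (push/mov/Set)
    int hi = bits & 0xFFFF0000;   // then OR'ed in
    // lo | hi == bits, yet neither instruction embeds the whole value.
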
@@ -4356,9 +4401,9 @@
// the target, with an implicit promise that it will be written to again
// before it is read.
if (literal != NULL || (right_var != NULL && right_var != var)) {
- target.TakeValue(NOT_INSIDE_TYPEOF);
+ target.TakeValue();
} else {
- target.GetValue(NOT_INSIDE_TYPEOF);
+ target.GetValue();
}
Load(node->value());
GenericBinaryOperation(node->binary_op(),
@@ -4406,7 +4451,7 @@
void CodeGenerator::VisitProperty(Property* node) {
Comment cmnt(masm_, "[ Property");
Reference property(this, node);
- property.GetValue(typeof_state());
+ property.GetValue();
}
@@ -4591,7 +4636,7 @@
// Load the function to call from the property through a reference.
Reference ref(this, property);
- ref.GetValue(NOT_INSIDE_TYPEOF);
+ ref.GetValue();
// Pass receiver to called function.
if (property->is_synthetic()) {
@@ -4701,10 +4746,10 @@
// This generates code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
-// It can handle flat and sliced strings, 8 and 16 bit characters and
-// cons strings where the answer is found in the left hand branch of the
-// cons. The slow case will flatten the string, which will ensure that
-// the answer is in the left hand side the next time around.
+// It can handle flat strings with 8 and 16 bit characters, and cons strings
+// where the answer is found in the left hand branch of the cons. The slow
+// case will flatten the string, which ensures that the answer is in the left
+// hand side the next time around.
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
Comment(masm_, "[ GenerateFastCharCodeAt");
ASSERT(args->length() == 2);
@@ -4712,7 +4757,6 @@
Label slow_case;
Label end;
Label not_a_flat_string;
- Label a_cons_string;
Label try_again_with_new_string;
Label ascii_string;
Label got_char_code;
@@ -4783,18 +4827,8 @@
__ test(ecx, Immediate(kIsNotStringMask));
__ j(not_zero, &slow_case);
- // Here we make assumptions about the tag values and the shifts needed.
- // See the comment in objects.h.
- ASSERT(kLongStringTag == 0);
- ASSERT(kMediumStringTag + String::kLongLengthShift ==
- String::kMediumLengthShift);
- ASSERT(kShortStringTag + String::kLongLengthShift ==
- String::kShortLengthShift);
- __ and_(ecx, kStringSizeMask);
- __ add(Operand(ecx), Immediate(String::kLongLengthShift));
// Fetch the length field into the temporary register.
__ mov(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
- __ shr(temp.reg()); // The shift amount in ecx is implicit operand.
// Check for index out of range.
__ cmp(index.reg(), Operand(temp.reg()));
__ j(greater_equal, &slow_case);
@@ -4834,21 +4868,16 @@
__ bind(¬_a_flat_string);
__ and_(temp.reg(), kStringRepresentationMask);
__ cmp(temp.reg(), kConsStringTag);
- __ j(equal, &a_cons_string);
- __ cmp(temp.reg(), kSlicedStringTag);
__ j(not_equal, &slow_case);
- // SlicedString.
- // Add the offset to the index and trigger the slow case on overflow.
- __ add(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
- __ j(overflow, &slow_case);
- // Getting the underlying string is done by running the cons string code.
-
// ConsString.
- __ bind(&a_cons_string);
- // Get the first of the two strings. Both sliced and cons strings
- // store their source string at the same offset.
- ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
+ // Check that the right hand side is the empty string (i.e. if this is really a
+ // flat string in a cons string). If that is not the case we would rather go
+ // to the runtime system now, to flatten the string.
+ __ mov(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
+ __ cmp(Operand(temp.reg()), Immediate(Handle<String>(Heap::empty_string())));
+ __ j(not_equal, &slow_case);
+ // Get the first of the two strings.
__ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
__ jmp(&try_again_with_new_string);
@@ -4881,6 +4910,55 @@
}
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+
+ __ test(obj.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+ __ cmp(obj.reg(), Factory::null_value());
+ destination()->true_target()->Branch(equal);
+
+ Result map = allocator()->Allocate();
+ ASSERT(map.is_valid());
+ __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
+ __ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
+ __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
+ destination()->false_target()->Branch(less);
+ __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
+ obj.Unuse();
+ map.Unuse();
+ destination()->Split(less_equal);
+}
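+
+
Spelled out, with approximate accessor names, the inlined %_IsObject test is:

    // Pseudocode of GenerateIsObject's branch structure.
    if (IsSmi(arg)) return false;
    if (arg == null_value) return true;        // typeof null == 'object'
    Map* map = arg->map();
    if (map->is_undetectable()) return false;  // acts like undefined
    InstanceType t = map->instance_type();
    return FIRST_JS_OBJECT_TYPE <= t && t <= LAST_JS_OBJECT_TYPE;
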
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (%_ClassOf(arg) === 'Function')
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ __ test(obj.reg(), Immediate(kSmiTagMask));
+ destination()->false_target()->Branch(zero);
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, temp.reg());
+ obj.Unuse();
+ temp.Unuse();
+ destination()->Split(equal);
+}
+
+
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
@@ -5184,6 +5262,18 @@
}
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
+}
+
+
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (CheckForInlineRuntimeCall(node)) {
return;
@@ -5226,9 +5316,6 @@
void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- // Note that because of NOT and an optimization in comparison of a typeof
- // expression to a literal string, this function can fail to leave a value
- // on top of the frame or in the cc register.
Comment cmnt(masm_, "[ UnaryOperation");
Token::Value op = node->op();
@@ -5237,7 +5324,7 @@
// Swap the true and false targets but keep the same actual label
// as the fall through.
destination()->Invert();
- LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
+ LoadCondition(node->expression(), destination(), true);
// Swap the labels back.
destination()->Invert();
@@ -5487,7 +5574,7 @@
if (!is_postfix) frame_->Push(Smi::FromInt(0));
return;
}
- target.TakeValue(NOT_INSIDE_TYPEOF);
+ target.TakeValue();
Result new_value = frame_->Pop();
new_value.ToRegister();
@@ -5565,9 +5652,6 @@
void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- // Note that due to an optimization in comparison operations (typeof
- // compared to a string literal), we can evaluate a binary expression such
- // as AND or OR and not leave a value on the frame or in the cc register.
Comment cmnt(masm_, "[ BinaryOperation");
Token::Value op = node->op();
@@ -5583,7 +5667,7 @@
if (op == Token::AND) {
JumpTarget is_true;
ControlDestination dest(&is_true, destination()->false_target(), true);
- LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+ LoadCondition(node->left(), &dest, false);
if (dest.false_was_fall_through()) {
// The current false target was used as the fall-through. If
@@ -5602,7 +5686,7 @@
is_true.Bind();
// The left subexpression compiled to control flow, so the
// right one is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have actually just jumped to or bound the current false
// target but the current control destination is not marked as
@@ -5613,7 +5697,7 @@
} else if (dest.is_used()) {
// The left subexpression compiled to control flow (and is_true
// was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have a materialized value on the frame, so we exit with
@@ -5646,7 +5730,7 @@
} else if (op == Token::OR) {
JumpTarget is_false;
ControlDestination dest(destination()->true_target(), &is_false, false);
- LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+ LoadCondition(node->left(), &dest, false);
if (dest.true_was_fall_through()) {
// The current true target was used as the fall-through. If
@@ -5665,7 +5749,7 @@
is_false.Bind();
// The left subexpression compiled to control flow, so the
// right one is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have just jumped to or bound the current true target but
// the current control destination is not marked as used.
@@ -5675,7 +5759,7 @@
} else if (dest.is_used()) {
// The left subexpression compiled to control flow (and is_false
// was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have a materialized value on the frame, so we exit with
@@ -5807,6 +5891,9 @@
destination()->false_target()->Branch(zero);
frame_->Spill(answer.reg());
__ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
+ destination()->true_target()->Branch(equal);
+ // Regular expressions are callable so typeof == 'function'.
+ __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
answer.Unuse();
destination()->Split(equal);
@@ -5816,10 +5903,13 @@
__ cmp(answer.reg(), Factory::null_value());
destination()->true_target()->Branch(equal);
- // It can be an undetectable object.
Result map = allocator()->Allocate();
ASSERT(map.is_valid());
- __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
+ // Regular expressions are typeof == 'function', not 'object'.
+ __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, map.reg());
+ destination()->false_target()->Branch(equal);
+
+ // It can be an undetectable object.
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
__ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
destination()->false_target()->Branch(not_zero);
@@ -6068,7 +6158,7 @@
}
-void Reference::GetValue(TypeofState typeof_state) {
+void Reference::GetValue() {
ASSERT(!cgen_->in_spilled_code());
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
@@ -6085,17 +6175,11 @@
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
- cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
+ cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
break;
}
case NAMED: {
- // TODO(1241834): Make sure that it is safe to ignore the
- // distinction between expressions in a typeof and not in a
- // typeof. If there is a chance that reference errors can be
- // thrown below, we must distinguish between the two kinds of
- // loads (typeof expression loads must not throw a reference
- // error).
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
@@ -6165,8 +6249,6 @@
}
case KEYED: {
- // TODO(1241834): Make sure that this it is safe to ignore the
- // distinction between expressions in a typeof and not in a typeof.
Comment cmnt(masm, "[ Load from keyed Property");
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
@@ -6285,13 +6367,13 @@
}
-void Reference::TakeValue(TypeofState typeof_state) {
+void Reference::TakeValue() {
// For non-constant frame-allocated slots, we invalidate the value in the
// slot. For all others, we fall back on GetValue.
ASSERT(!cgen_->in_spilled_code());
ASSERT(!is_illegal());
if (type_ != SLOT) {
- GetValue(typeof_state);
+ GetValue();
return;
}
@@ -6301,7 +6383,7 @@
slot->type() == Slot::CONTEXT ||
slot->var()->mode() == Variable::CONST ||
slot->is_arguments()) {
- GetValue(typeof_state);
+ GetValue();
return;
}
@@ -6472,11 +6554,8 @@
// String value => false iff empty.
__ cmp(ecx, FIRST_NONSTRING_TYPE);
__ j(above_equal, ¬_string);
- __ and_(ecx, kStringSizeMask);
- __ cmp(ecx, kShortStringTag);
- __ j(not_equal, &true_result); // Empty string is always short.
__ mov(edx, FieldOperand(eax, String::kLengthOffset));
- __ shr(edx, String::kShortLengthShift);
+ __ test(edx, Operand(edx));
__ j(zero, &false_result);
__ jmp(&true_result);
@@ -6510,42 +6589,47 @@
__ push(right);
} else {
// The calling convention with registers is left in edx and right in eax.
- __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
- if (!(left.is(edx) && right.is(eax))) {
- if (left.is(eax) && right.is(edx)) {
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (!(left.is(left_arg) && right.is(right_arg))) {
+ if (left.is(right_arg) && right.is(left_arg)) {
if (IsOperationCommutative()) {
SetArgsReversed();
} else {
__ xchg(left, right);
}
- } else if (left.is(edx)) {
- __ mov(eax, right);
- } else if (left.is(eax)) {
+ } else if (left.is(left_arg)) {
+ __ mov(right_arg, right);
+ } else if (left.is(right_arg)) {
if (IsOperationCommutative()) {
- __ mov(edx, right);
+ __ mov(left_arg, right);
SetArgsReversed();
} else {
- __ mov(edx, left);
- __ mov(eax, right);
+ // Order of moves is important to avoid destroying the left argument.
+ __ mov(left_arg, left);
+ __ mov(right_arg, right);
}
- } else if (right.is(edx)) {
+ } else if (right.is(left_arg)) {
if (IsOperationCommutative()) {
- __ mov(eax, left);
+ __ mov(right_arg, left);
SetArgsReversed();
} else {
- __ mov(eax, right);
- __ mov(edx, left);
+ // Order of moves is important to avoid destroying the right argument.
+ __ mov(right_arg, right);
+ __ mov(left_arg, left);
}
- } else if (right.is(eax)) {
- __ mov(edx, left);
+ } else if (right.is(right_arg)) {
+ __ mov(left_arg, left);
} else {
- __ mov(edx, left);
- __ mov(eax, right);
+ // Order of moves is not important.
+ __ mov(left_arg, left);
+ __ mov(right_arg, right);
}
}
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
}
// Call the stub.
@@ -6562,19 +6646,22 @@
__ push(left);
__ push(Immediate(right));
} else {
- // Adapt arguments to the calling convention left in edx and right in eax.
- if (left.is(edx)) {
- __ mov(eax, Immediate(right));
- } else if (left.is(eax) && IsOperationCommutative()) {
- __ mov(edx, Immediate(right));
+ // The calling convention with registers is left in edx and right in eax.
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (left.is(left_arg)) {
+ __ mov(right_arg, Immediate(right));
+ } else if (left.is(right_arg) && IsOperationCommutative()) {
+ __ mov(left_arg, Immediate(right));
SetArgsReversed();
} else {
- __ mov(edx, left);
- __ mov(eax, Immediate(right));
+ __ mov(left_arg, left);
+ __ mov(right_arg, Immediate(right));
}
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
}
// Call the stub.
@@ -6591,18 +6678,21 @@
__ push(Immediate(left));
__ push(right);
} else {
- // Adapt arguments to the calling convention left in edx and right in eax.
- bool is_commutative = (op_ == (Token::ADD) || (op_ == Token::MUL));
- if (right.is(eax)) {
- __ mov(edx, Immediate(left));
- } else if (right.is(edx) && is_commutative) {
- __ mov(eax, Immediate(left));
+ // The calling convention with registers is left in edx and right in eax.
+ Register left_arg = edx;
+ Register right_arg = eax;
+ if (right.is(right_arg)) {
+ __ mov(left_arg, Immediate(left));
+ } else if (right.is(left_arg) && IsOperationCommutative()) {
+ __ mov(right_arg, Immediate(left));
+ SetArgsReversed();
} else {
- __ mov(edx, Immediate(left));
- __ mov(eax, right);
+ __ mov(left_arg, Immediate(left));
+ __ mov(right_arg, right);
}
// Update flags to indicate that arguments are in registers.
SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
}
// Call the stub.
@@ -6719,11 +6809,11 @@
// Perform the operation.
switch (op_) {
case Token::SAR:
- __ sar(eax);
+ __ sar_cl(eax);
// No checks of result necessary
break;
case Token::SHR:
- __ shr(eax);
+ __ shr_cl(eax);
// Check that the *unsigned* result fits in a smi.
// Neither of the two high-order bits can be set:
// - 0x80000000: high bit would be lost when smi tagging.
@@ -6734,7 +6824,7 @@
__ j(not_zero, slow, not_taken);
break;
case Token::SHL:
- __ shl(eax);
+ __ shl_cl(eax);
// Check that the *signed* result fits in a smi.
__ cmp(eax, 0xc0000000);
__ j(sign, slow, not_taken);
@@ -6784,8 +6874,8 @@
// eax: y
// edx: x
- if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
- CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
switch (op_) {
@@ -6880,7 +6970,7 @@
if (use_sse3_) {
// Truncate the operands to 32-bit integers and check for
// exceptions in doing so.
- CpuFeatures::Scope scope(CpuFeatures::SSE3);
+ CpuFeatures::Scope scope(SSE3);
__ fisttp_s(Operand(esp, 0 * kPointerSize));
__ fisttp_s(Operand(esp, 1 * kPointerSize));
__ fnstsw_ax();
@@ -6909,9 +6999,9 @@
case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
- case Token::SAR: __ sar(eax); break;
- case Token::SHL: __ shl(eax); break;
- case Token::SHR: __ shr(eax); break;
+ case Token::SAR: __ sar_cl(eax); break;
+ case Token::SHL: __ shl_cl(eax); break;
+ case Token::SHR: __ shr_cl(eax); break;
default: UNREACHABLE();
}
if (op_ == Token::SHR) {
@@ -6926,7 +7016,7 @@
// Tag smi result and return.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(eax, Operand(eax, eax, times_1, kSmiTag));
- __ ret(2 * kPointerSize);
+ GenerateReturn(masm);
// All ops except SHR return a signed int32 that we load in a HeapNumber.
if (op_ != Token::SHR) {
@@ -6953,7 +7043,7 @@
__ mov(Operand(esp, 1 * kPointerSize), ebx);
__ fild_s(Operand(esp, 1 * kPointerSize));
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(2 * kPointerSize);
+ GenerateReturn(masm);
}
// Clear the FPU exception flag and reset the stack before calling
@@ -6985,7 +7075,7 @@
// If all else fails, use the runtime system to get the correct
// result. If arguments was passed in registers now place them on the
- // stack in the correct order.
+ // stack in the correct order below the return address.
__ bind(&call_runtime);
if (HasArgumentsInRegisters()) {
__ pop(ecx);
@@ -7001,7 +7091,7 @@
switch (op_) {
case Token::ADD: {
// Test for string arguments before calling runtime.
- Label not_strings, both_strings, not_string1, string1;
+ Label not_strings, not_string1, string1;
Result answer;
__ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
__ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
@@ -7016,8 +7106,9 @@
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
__ j(above_equal, &string1);
- // First and second argument are strings.
- __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+ // First and second argument are strings. Jump to the string add stub.
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&stub);
// Only first argument is a string.
__ bind(&string1);
@@ -7400,20 +7491,19 @@
// not NaN.
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
+ // We only accept QNaNs, which have bit 51 set.
// Read top bits of double representation (second word of value).
- __ mov(eax, FieldOperand(edx, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ not_(eax);
- __ test(eax, Immediate(0x7ff00000));
- __ j(not_zero, &return_equal);
- __ not_(eax);
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ shl(eax, 12);
- // Or with all low-bits of mantissa.
- __ or_(eax, FieldOperand(edx, HeapNumber::kMantissaOffset));
- // Return zero equal if all bits in mantissa is zero (it's an Infinity)
- // and non-zero if not (it's a NaN).
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+ __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(eax, Operand(eax));
+ // Shift value and mask so kQuietNaNHighBitsMask applies to topmost bits.
+ __ add(edx, Operand(edx));
+ __ cmp(edx, kQuietNaNHighBitsMask << 1);
+ __ setcc(above_equal, eax);
__ ret(0);
__ bind(¬_identical);
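
The rewritten identity check classifies a heap number as NaN with one unsigned compare on the high word. A host-side sketch, assuming kQuietNaNHighBitsMask covers the exponent bits plus mantissa bit 51 as the comment above describes:

    bool IsQuietNaNHighWord(uint32_t hi) {
      // Shift out the sign bit; if what remains is >= the shifted mask,
      // all exponent bits and the quiet bit (bit 51) must be set.
      return (hi << 1) >= (kQuietNaNHighBitsMask << 1);
    }

A signalling NaN (quiet bit clear) would compare as an ordinary, equal value; the "We only accept QNaNs" comment suggests the engine relies on never materializing signalling NaNs.
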
@@ -7508,9 +7598,9 @@
// Call builtin if operands are not floating point or smi.
Label check_for_symbols;
Label unordered;
- if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
- CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
- CpuFeatures::Scope use_cmov(CpuFeatures::CMOV);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ CpuFeatures::Scope use_cmov(CMOV);
FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols);
__ comisd(xmm0, xmm1);
@@ -7699,11 +7789,84 @@
}
+// If true, a Handle<T> passed by value is passed and returned by
+// using the location_ field directly. If false, it is passed and
+// returned as a pointer to a handle.
+#ifdef USING_MAC_ABI
+static const bool kPassHandlesDirectly = true;
+#else
+static const bool kPassHandlesDirectly = false;
+#endif
+
+
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+ Label get_result;
+ Label prologue;
+ Label promote_scheduled_exception;
+ __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc);
+ ASSERT_EQ(kArgc, 4);
+ if (kPassHandlesDirectly) {
+ // When handles are passed directly we don't have to allocate extra
+ // space for, and pass, an out parameter.
+ __ mov(Operand(esp, 0 * kPointerSize), ebx); // name.
+ __ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer.
+ } else {
+ // The function expects three arguments to be passed but we allocate
+ // four to get space for the output cell. The argument slots are filled
+ // as follows:
+ //
+ // 3: output cell
+ // 2: arguments pointer
+ // 1: name
+ // 0: pointer to the output cell
+ //
+ // Note that this is one more "argument" than the function expects
+ // so the out cell will have to be popped explicitly after returning
+ // from the function.
+ __ mov(Operand(esp, 1 * kPointerSize), ebx); // name.
+ __ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer.
+ __ mov(ebx, esp);
+ __ add(Operand(ebx), Immediate(3 * kPointerSize));
+ __ mov(Operand(esp, 0 * kPointerSize), ebx); // output
+ __ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell.
+ }
+ // Call the api function!
+ __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY);
+ // Check if the function scheduled an exception.
+ ExternalReference scheduled_exception_address =
+ ExternalReference::scheduled_exception_address();
+ __ cmp(Operand::StaticVariable(scheduled_exception_address),
+ Immediate(Factory::the_hole_value()));
+ __ j(not_equal, &promote_scheduled_exception, not_taken);
+ if (!kPassHandlesDirectly) {
+ // The returned value is a pointer to the handle holding the result.
+ // Dereference this to get to the location.
+ __ mov(eax, Operand(eax, 0));
+ }
+ // Check if the result handle holds 0.
+ __ test(eax, Operand(eax));
+ __ j(not_zero, &get_result, taken);
+ // It was zero; the result is undefined.
+ __ mov(eax, Factory::undefined_value());
+ __ jmp(&prologue);
+ // It was non-zero. Dereference to get the result value.
+ __ bind(&get_result);
+ __ mov(eax, Operand(eax, 0));
+ __ bind(&prologue);
+ __ LeaveExitFrame(ExitFrame::MODE_NORMAL);
+ __ ret(0);
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(ExternalReference(Runtime::kPromoteScheduledException),
+ 0,
+ 1);
+}
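+
+
The stub bridges two conventions for returning a Handle from the C++ getter, selected by kPassHandlesDirectly. A rough, hypothetical view of the stack slots it fills and the post-call handling (not real prototypes):

    // Direct (Mac ABI):           Indirect (elsewhere):
    //   [esp+0] name                [esp+0]  &out_cell  (extra argument 0)
    //   [esp+4] args pointer        [esp+4]  name
    //                               [esp+8]  args pointer
    //                               [esp+12] out_cell = 0
    Object** handle = result_in_eax;
    if (!kPassHandlesDirectly) handle = *(Object***)handle;  // extra hop
    Object* value = (handle == NULL) ? Heap::undefined_value() : *handle;
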
+
+
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- StackFrame::Type frame_type,
+ ExitFrame::Mode mode,
bool do_gc,
bool always_allocate_scope) {
// eax: result parameter for PerformGC, if any
@@ -7753,7 +7916,7 @@
__ j(zero, &failure_returned, not_taken);
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(frame_type);
+ __ LeaveExitFrame(mode);
__ ret(0);
// Handling of failure.
@@ -7852,12 +8015,12 @@
// of a proper result. The builtin entry handles this by performing
// a garbage collection and retrying the builtin (twice).
- StackFrame::Type frame_type = is_debug_break ?
- StackFrame::EXIT_DEBUG :
- StackFrame::EXIT;
+ ExitFrame::Mode mode = is_debug_break
+ ? ExitFrame::MODE_DEBUG
+ : ExitFrame::MODE_NORMAL;
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(frame_type);
+ __ EnterExitFrame(mode);
// eax: result parameter for PerformGC, if any (setup below)
// ebx: pointer to builtin function (C callee-saved)
@@ -7875,7 +8038,7 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
false,
false);
@@ -7884,7 +8047,7 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
false);
@@ -7895,7 +8058,7 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
true);
@@ -8072,6 +8235,224 @@
return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
}
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+ Label string_add_runtime;
+
+ // Load the two arguments.
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
+
+ // Make sure that both arguments are strings if not known in advance.
+ if (string_check_) {
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &string_add_runtime);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
+ __ j(above_equal, &string_add_runtime);
+
+ // First argument is a string, test second.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &string_add_runtime);
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
+ __ j(above_equal, &string_add_runtime);
+ }
+
+ // Both arguments are strings.
+ // eax: first string
+ // edx: second string
+ // Check if either of the strings are empty. In that case return the other.
+ Label second_not_zero_length, both_not_zero_length;
+ __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
+ __ test(ecx, Operand(ecx));
+ __ j(not_zero, &second_not_zero_length);
+ // Second string is empty, result is first string which is already in eax.
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&second_not_zero_length);
+ __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+ __ test(ebx, Operand(ebx));
+ __ j(not_zero, &both_not_zero_length);
+ // First string is empty, result is second string which is in edx.
+ __ mov(eax, edx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Both strings are non-empty.
+ // eax: first string
+ // ebx: length of first string
+ // ecx: length of second string
+ // edx: second string
+ // Look at the length of the result of adding the two strings.
+ Label string_add_flat_result;
+ __ bind(&both_not_zero_length);
+ __ add(ebx, Operand(ecx));
+ // Use the runtime system when adding two one character strings, as it
+ // contains optimizations for this specific case using the symbol table.
+ __ cmp(ebx, 2);
+ __ j(equal, &string_add_runtime);
+ // Check if resulting string will be flat.
+ __ cmp(ebx, String::kMinNonFlatLength);
+ __ j(below, &string_add_flat_result);
+ // Handle exceptionally long strings in the runtime system.
+ ASSERT((String::kMaxLength & 0x80000000) == 0);
+ __ cmp(ebx, String::kMaxLength);
+ __ j(above, &string_add_runtime);
+
+ // If the result is not supposed to be flat, allocate a cons string object.
+ // If both strings are ascii the result is an ascii cons string.
+ Label non_ascii, allocated;
+ __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
+ __ and_(ecx, Operand(edi));
+ __ test(ecx, Immediate(kAsciiStringTag));
+ __ j(zero, &non_ascii);
+ // Allocate an ascii cons string.
+ __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
+ __ bind(&allocated);
+ // Fill the fields of the cons string.
+ __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
+ __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+ __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
+ __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
+ __ mov(eax, ecx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&non_ascii);
+ // Allocate a two byte cons string.
+ __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
+ __ jmp(&allocated);
+
+ // Handle creating a flat result. First check that both strings are not
+ // external strings.
+ // eax: first string
+ // ebx: length of resulting flat string
+ // edx: second string
+ __ bind(&string_add_flat_result);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ and_(ecx, kStringRepresentationMask);
+ __ cmp(ecx, kExternalStringTag);
+ __ j(equal, &string_add_runtime);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ and_(ecx, kStringRepresentationMask);
+ __ cmp(ecx, kExternalStringTag);
+ __ j(equal, &string_add_runtime);
+ // Now check if both strings are ascii strings.
+ // eax: first string
+ // ebx: length of resulting flat string
+ // edx: second string
+ Label non_ascii_string_add_flat_result;
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ ASSERT(kAsciiStringTag != 0);
+ __ test(ecx, Immediate(kAsciiStringTag));
+ __ j(zero, &non_ascii_string_add_flat_result);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ test(ecx, Immediate(kAsciiStringTag));
+ __ j(zero, &string_add_runtime);
+ // Both strings are ascii strings. As they are short they are both flat.
+ __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+ // eax: result string
+ __ mov(ecx, eax);
+ // Locate first character of result.
+ __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Load first argument and locate first character.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: first character of result
+ // edx: first char of first argument
+ // edi: length of first argument
+ GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
+ // Load second argument and locate first character.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: next character of result
+ // edx: first char of second argument
+ // edi: length of second argument
+ GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Handle creating a flat two byte result.
+ // eax: first string - known to be two byte
+ // ebx: length of resulting flat string
+ // edx: second string
+ __ bind(&non_ascii_string_add_flat_result);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ and_(ecx, kAsciiStringTag);
+ __ j(not_zero, &string_add_runtime);
+ // Both strings are two byte strings. As they are short, they are both
+ // flat.
+ __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+ // eax: result string
+ __ mov(ecx, eax);
+ // Locate first character of result.
+ __ add(Operand(ecx),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Load first argument and locate first character.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ add(Operand(edx),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: first character of result
+ // edx: first char of first argument
+ // edi: length of first argument
+ GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
+ // Load second argument and locate first character.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ add(Operand(edx),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: next character of result
+ // edx: first char of second argument
+ // edi: length of second argument
+ GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Just jump to runtime to add the two strings.
+ __ bind(&string_add_runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+}
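
The stub above picks its strategy purely from the combined length: a total of
two characters goes back to the runtime (which can find the pair in the symbol
table), anything below String::kMinNonFlatLength gets a flat result, anything
above String::kMaxLength goes to the runtime, and the remaining range gets a
cons string. A minimal C++ sketch of that dispatch; the constant values below
are illustrative placeholders, not the real String limits:

    #include <cstdio>

    enum AddStrategy { kRuntime, kFlatResult, kConsString };

    // Illustrative placeholders for String::kMinNonFlatLength and
    // String::kMaxLength.
    const int kMinNonFlatLength = 13;
    const int kMaxLength = 1 << 30;

    AddStrategy ChooseStrategy(int first_length, int second_length) {
      int combined = first_length + second_length;
      if (combined == 2) return kRuntime;          // Symbol table may have the pair.
      if (combined < kMinNonFlatLength) return kFlatResult;
      if (combined > kMaxLength) return kRuntime;  // Exceptionally long strings.
      return kConsString;                          // Defer flattening.
    }

    int main() {
      printf("%d %d %d\n", ChooseStrategy(1, 1),   // 0: runtime
             ChooseStrategy(3, 4),                 // 1: flat
             ChooseStrategy(100, 100));            // 2: cons
    }
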
+
+
+void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ Label loop;
+ __ bind(&loop);
+ // This loop just copies one character at a time, as it is only used for very
+ // short strings.
+ if (ascii) {
+ __ mov_b(scratch, Operand(src, 0));
+ __ mov_b(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(1));
+ __ add(Operand(dest), Immediate(1));
+ } else {
+ __ mov_w(scratch, Operand(src, 0));
+ __ mov_w(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(2));
+ __ add(Operand(dest), Immediate(2));
+ }
+ __ sub(Operand(count), Immediate(1));
+ __ j(not_zero, &loop);
+}
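
GenerateCopyCharacters emits a one-character-at-a-time copy, one byte for ascii
and two for two-byte strings, which is acceptable because the stub only builds
flat results for short strings. A plain C++ rendering of the same loop:

    #include <cstdint>
    #include <cstddef>

    // C++ equivalent of the emitted copy loop: one character per iteration,
    // element width picked by 'ascii'. As in the stub, the body runs before
    // the counter is tested, so 'count' must be non-zero.
    void CopyCharacters(void* dest, const void* src, size_t count, bool ascii) {
      if (ascii) {
        const uint8_t* s = static_cast<const uint8_t*>(src);
        uint8_t* d = static_cast<uint8_t*>(dest);
        do { *d++ = *s++; } while (--count != 0);
      } else {
        const uint16_t* s = static_cast<const uint16_t*>(src);
        uint16_t* d = static_cast<uint16_t*>(dest);
        do { *d++ = *s++; } while (--count != 0);
      }
    }

    int main() {
      char dst[4] = {0};
      CopyCharacters(dst, "abc", 3, true);
      return dst[0] == 'a' ? 0 : 1;
    }
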
+
+
#undef __
} } // namespace v8::internal
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index a37bffe..11a5163 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -77,12 +77,12 @@
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is left in place with its value above it.
- void GetValue(TypeofState typeof_state);
+ void GetValue();
// Like GetValue except that the slot is expected to be written to before
// being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
- void TakeValue(TypeofState typeof_state);
+ void TakeValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
@@ -241,28 +241,20 @@
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
- // state. The new state may or may not be inside a typeof, and has its
- // own control destination.
- CodeGenState(CodeGenerator* owner,
- TypeofState typeof_state,
- ControlDestination* destination);
+ // state. The new state has its own control destination.
+ CodeGenState(CodeGenerator* owner, ControlDestination* destination);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
// Accessors for the state.
- TypeofState typeof_state() const { return typeof_state_; }
ControlDestination* destination() const { return destination_; }
private:
// The owning code generator.
CodeGenerator* owner_;
- // A flag indicating whether we are compiling the immediate subexpression
- // of a typeof expression.
- TypeofState typeof_state_;
-
// A control destination in case the expression has a control-flow
// effect.
ControlDestination* destination_;
@@ -307,17 +299,12 @@
static bool ShouldGenerateLog(Expression* type);
#endif
- static void SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
static void RecordPositions(MacroAssembler* masm, int pos);
// Accessors
MacroAssembler* masm() { return masm_; }
-
VirtualFrame* frame() const { return frame_; }
+ Handle<Script> script() { return script_; }
bool has_valid_frame() const { return frame_ != NULL; }
@@ -352,7 +339,6 @@
void ProcessDeferred();
// State
- TypeofState typeof_state() const { return state_->typeof_state(); }
ControlDestination* destination() const { return state_->destination(); }
// Track loop nesting level.
@@ -412,18 +398,16 @@
}
void LoadCondition(Expression* x,
- TypeofState typeof_state,
ControlDestination* destination,
bool force_control);
- void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ void Load(Expression* expr);
void LoadGlobal();
void LoadGlobalReceiver();
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
- void LoadAndSpill(Expression* expression,
- TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ void LoadAndSpill(Expression* expression);
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
@@ -484,9 +468,11 @@
// than 16 bits.
static const int kMaxSmiInlinedBits = 16;
bool IsUnsafeSmi(Handle<Object> value);
- // Load an integer constant x into a register target using
+ // Load an integer constant x into a register target or into the stack using
// at most 16 bits of user-controlled data per assembly operation.
- void LoadUnsafeSmi(Register target, Handle<Object> value);
+ void MoveUnsafeSmi(Register target, Handle<Object> value);
+ void StoreUnsafeSmiToLocal(int offset, Handle<Object> value);
+ void PushUnsafeSmi(Handle<Object> value);
void CallWithArguments(ZoneList<Expression*>* arguments, int position);
@@ -511,8 +497,6 @@
const InlineRuntimeLUT& new_entry,
InlineRuntimeLUT* old_entry);
- static Handle<Code> ComputeLazyCompile(int argc);
- Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
@@ -528,6 +512,8 @@
void GenerateIsSmi(ZoneList<Expression*>* args);
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
+ void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsFunction(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -560,6 +546,9 @@
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
+ // Fast support for StringAdd.
+ void GenerateStringAdd(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -574,6 +563,7 @@
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Statement* stmt);
+ void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
@@ -626,6 +616,27 @@
};
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop)
+ : argc_(argc), in_loop_(in_loop) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+
+#ifdef DEBUG
+ void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
+#endif
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() { return argc_; }
+ InLoopFlag InLoop() { return in_loop_; }
+};
+
+
class ToBooleanStub: public CodeStub {
public:
ToBooleanStub() { }
@@ -638,7 +649,7 @@
};
-// Flag that indicates whether how to generate code for the stub.
+// Flag that indicates how to generate code for the GenericBinaryOpStub.
enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0,
NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
@@ -647,15 +658,15 @@
class GenericBinaryOpStub: public CodeStub {
public:
- GenericBinaryOpStub(Token::Value operation,
+ GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
GenericBinaryFlags flags)
- : op_(operation),
+ : op_(op),
mode_(mode),
flags_(flags),
args_in_registers_(false),
args_reversed_(false) {
- use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
+ use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -729,6 +740,37 @@
};
+// Flag that indicates how to generate code for the StringAddStub.
+enum StringAddFlags {
+ NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+};
+
+
+class StringAddStub: public CodeStub {
+ public:
+ explicit StringAddStub(StringAddFlags flags) {
+ string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+ }
+
+ private:
+ Major MajorKey() { return StringAdd; }
+ int MinorKey() { return string_check_ ? 0 : 1; }
+
+ void Generate(MacroAssembler* masm);
+
+ void GenerateCopyCharacters(MacroAssembler* masm,
+ Register desc,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Should the stub check whether arguments are strings?
+ bool string_check_;
+};
+
+
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
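
The new StringAddStub folds its flag into a boolean at construction time and
recovers it as the stub's minor key, so the checked and unchecked variants are
compiled and cached separately. A minimal self-contained model of that pattern:

    #include <cassert>

    // Model of the flag handling above: the flag collapses to a boolean and
    // resurfaces as the minor key used for stub caching.
    enum StringAddFlags {
      NO_STRING_ADD_FLAGS = 0,
      NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
    };

    class StubModel {
     public:
      explicit StubModel(StringAddFlags flags)
          : string_check_((flags & NO_STRING_CHECK_IN_STUB) == 0) {}
      int MinorKey() const { return string_check_ ? 0 : 1; }
     private:
      bool string_check_;
    };

    int main() {
      assert(StubModel(NO_STRING_ADD_FLAGS).MinorKey() == 0);
      assert(StubModel(NO_STRING_CHECK_IN_STUB).MinorKey() == 1);
    }
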
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 2d20117..5ebe1e0 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -45,17 +45,17 @@
// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-ia32.cc
// for the precise return instructions sequence.
void BreakLocationIterator::SetDebugBreakAtReturn() {
- ASSERT(Debug::kIa32JSReturnSequenceLength >=
- Debug::kIa32CallInstructionLength);
+ ASSERT(Assembler::kJSReturnSequenceLength >=
+ Assembler::kCallInstructionLength);
rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(),
- Debug::kIa32JSReturnSequenceLength - Debug::kIa32CallInstructionLength);
+ Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
// Restore the JS frame exit code.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
rinfo()->PatchCode(original_rinfo()->pc(),
- Debug::kIa32JSReturnSequenceLength);
+ Assembler::kJSReturnSequenceLength);
}
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index adedf34..df5a28a 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -204,7 +204,7 @@
InstructionDesc* id = &instructions_[bm[i].b];
id->mnem = bm[i].mnem;
id->op_order_ = bm[i].op_order_;
- assert(id->type == NO_INSTR); // Information already entered
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
id->type = type;
}
}
@@ -216,7 +216,7 @@
const char* mnem) {
for (byte b = start; b <= end; b++) {
InstructionDesc* id = &instructions_[b];
- assert(id->type == NO_INSTR); // Information already entered
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
id->mnem = mnem;
id->type = type;
}
@@ -226,7 +226,7 @@
void InstructionTable::AddJumpConditionalShort() {
for (byte b = 0x70; b <= 0x7F; b++) {
InstructionDesc* id = &instructions_[b];
- assert(id->type == NO_INSTR); // Information already entered
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered.
id->mnem = jump_conditional_mnem[b & 0x0F];
id->type = JUMP_CONDITIONAL_SHORT_INSTR;
}
@@ -272,6 +272,17 @@
};
+ enum ShiftOpcodeExtension {
+ kROL = 0,
+ kROR = 1,
+ kRCL = 2,
+ kRCR = 3,
+ kSHL = 4,
+ kSHR = 5,
+ kSAR = 7
+ };
+
+
const char* NameOfCPURegister(int reg) const {
return converter_.NameOfCPURegister(reg);
}
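
The ShiftOpcodeExtension enum names the reg/op field values that select the
operation in the 0xD1/0xD3/0xC1 shift group, replacing the earlier cases that
reused register names. A short sketch of how that field is pulled out of a
ModR/M byte and mapped to a mnemonic:

    #include <cstdio>

    // The reg/op field is bits 3..5 of the ModR/M byte; for the shift group
    // it selects the shift or rotate operation, as named by the enum above.
    const char* ShiftMnemonic(unsigned char modrm) {
      switch ((modrm >> 3) & 0x7) {
        case 0: return "rol";
        case 1: return "ror";
        case 2: return "rcl";
        case 3: return "rcr";
        case 4: return "shl";
        case 5: return "shr";
        case 7: return "sar";
        default: return "?";  // 6 is not used by the disassembler.
      }
    }

    int main() {
      printf("%s %s\n", ShiftMnemonic(0xE0), ShiftMnemonic(0xF8));  // shl sar
    }
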
@@ -321,6 +332,8 @@
int SetCC(byte* data);
int CMov(byte* data);
int FPUInstruction(byte* data);
+ int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
+ int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
void AppendToBuffer(const char* format, ...);
@@ -493,7 +506,7 @@
// Returns number of bytes used, including *data.
int DisassemblerIA32::F7Instruction(byte* data) {
- assert(*data == 0xF7);
+ ASSERT_EQ(0xF7, *data);
byte modrm = *(data+1);
int mod, regop, rm;
get_modrm(modrm, &mod, &regop, &rm);
@@ -526,7 +539,7 @@
int DisassemblerIA32::D1D3C1Instruction(byte* data) {
byte op = *data;
- assert(op == 0xD1 || op == 0xD3 || op == 0xC1);
+ ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
byte modrm = *(data+1);
int mod, regop, rm;
get_modrm(modrm, &mod, &regop, &rm);
@@ -534,33 +547,24 @@
int num_bytes = 2;
if (mod == 3) {
const char* mnem = NULL;
+ switch (regop) {
+ case kROL: mnem = "rol"; break;
+ case kROR: mnem = "ror"; break;
+ case kRCL: mnem = "rcl"; break;
+ case kSHL: mnem = "shl"; break;
+ case kSHR: mnem = "shr"; break;
+ case kSAR: mnem = "sar"; break;
+ default: UnimplementedInstruction();
+ }
if (op == 0xD1) {
imm8 = 1;
- switch (regop) {
- case edx: mnem = "rcl"; break;
- case edi: mnem = "sar"; break;
- case esp: mnem = "shl"; break;
- default: UnimplementedInstruction();
- }
} else if (op == 0xC1) {
imm8 = *(data+2);
num_bytes = 3;
- switch (regop) {
- case edx: mnem = "rcl"; break;
- case esp: mnem = "shl"; break;
- case ebp: mnem = "shr"; break;
- case edi: mnem = "sar"; break;
- default: UnimplementedInstruction();
- }
} else if (op == 0xD3) {
- switch (regop) {
- case esp: mnem = "shl"; break;
- case ebp: mnem = "shr"; break;
- case edi: mnem = "sar"; break;
- default: UnimplementedInstruction();
- }
+ // Shift/rotate by cl.
}
- assert(mnem != NULL);
+ ASSERT_NE(NULL, mnem);
AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
if (imm8 > 0) {
AppendToBuffer("%d", imm8);
@@ -576,7 +580,7 @@
// Returns number of bytes used, including *data.
int DisassemblerIA32::JumpShort(byte* data) {
- assert(*data == 0xEB);
+ ASSERT_EQ(0xEB, *data);
byte b = *(data+1);
byte* dest = data + static_cast<int8_t>(b) + 2;
AppendToBuffer("jmp %s", NameOfAddress(dest));
@@ -586,7 +590,7 @@
// Returns number of bytes used, including *data.
int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
- assert(*data == 0x0F);
+ ASSERT_EQ(0x0F, *data);
byte cond = *(data+1) & 0x0F;
byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
const char* mnem = jump_conditional_mnem[cond];
@@ -614,18 +618,18 @@
// Returns number of bytes used, including *data.
int DisassemblerIA32::SetCC(byte* data) {
- assert(*data == 0x0F);
+ ASSERT_EQ(0x0F, *data);
byte cond = *(data+1) & 0x0F;
const char* mnem = set_conditional_mnem[cond];
AppendToBuffer("%s ", mnem);
PrintRightByteOperand(data+2);
- return 3; // includes 0x0F
+ return 3; // Includes 0x0F.
}
// Returns number of bytes used, including *data.
int DisassemblerIA32::CMov(byte* data) {
- assert(*data == 0x0F);
+ ASSERT_EQ(0x0F, *data);
byte cond = *(data + 1) & 0x0F;
const char* mnem = conditional_move_mnem[cond];
int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
@@ -635,107 +639,165 @@
// Returns number of bytes used, including *data.
int DisassemblerIA32::FPUInstruction(byte* data) {
- byte b1 = *data;
- byte b2 = *(data + 1);
- if (b1 == 0xD9) {
- const char* mnem = NULL;
- switch (b2) {
- case 0xE8: mnem = "fld1"; break;
- case 0xEE: mnem = "fldz"; break;
- case 0xE1: mnem = "fabs"; break;
- case 0xE0: mnem = "fchs"; break;
- case 0xF8: mnem = "fprem"; break;
- case 0xF5: mnem = "fprem1"; break;
- case 0xF7: mnem = "fincstp"; break;
- case 0xE4: mnem = "ftst"; break;
- }
- if (mnem != NULL) {
- AppendToBuffer("%s", mnem);
- return 2;
- } else if ((b2 & 0xF8) == 0xC8) {
- AppendToBuffer("fxch st%d", b2 & 0x7);
- return 2;
- } else {
- int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case eax: mnem = "fld_s"; break;
- case ebx: mnem = "fstp_s"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- }
- } else if (b1 == 0xDD) {
- if ((b2 & 0xF8) == 0xC0) {
- AppendToBuffer("ffree st%d", b2 & 0x7);
- return 2;
- } else {
- int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case eax: mnem = "fld_d"; break;
- case ebx: mnem = "fstp_d"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- }
- } else if (b1 == 0xDB) {
- int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case eax: mnem = "fild_s"; break;
- case edx: mnem = "fist_s"; break;
- case ebx: mnem = "fistp_s"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- } else if (b1 == 0xDF) {
- if (b2 == 0xE0) {
- AppendToBuffer("fnstsw_ax");
- return 2;
- }
- int mod, regop, rm;
- get_modrm(*(data+1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case ebp: mnem = "fild_d"; break;
- case edi: mnem = "fistp_d"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- } else if (b1 == 0xDC || b1 == 0xDE) {
- bool is_pop = (b1 == 0xDE);
- if (is_pop && b2 == 0xD9) {
- AppendToBuffer("fcompp");
- return 2;
- }
- const char* mnem = "FP0xDC";
- switch (b2 & 0xF8) {
- case 0xC0: mnem = "fadd"; break;
- case 0xE8: mnem = "fsub"; break;
- case 0xC8: mnem = "fmul"; break;
- case 0xF8: mnem = "fdiv"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
- return 2;
- } else if (b1 == 0xDA && b2 == 0xE9) {
- const char* mnem = "fucompp";
- AppendToBuffer("%s", mnem);
- return 2;
+ byte escape_opcode = *data;
+ ASSERT_EQ(0xD8, escape_opcode & 0xF8);
+ byte modrm_byte = *(data+1);
+
+ if (modrm_byte >= 0xC0) {
+ return RegisterFPUInstruction(escape_opcode, modrm_byte);
+ } else {
+ return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
}
- AppendToBuffer("Unknown FP instruction");
+}
+
+int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
+ int modrm_byte,
+ byte* modrm_start) {
+ const char* mnem = "?";
+ int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
+ switch (escape_opcode) {
+ case 0xD9: switch (regop) {
+ case 0: mnem = "fld_s"; break;
+ case 3: mnem = "fstp_s"; break;
+ case 7: mnem = "fstcw"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDB: switch (regop) {
+ case 0: mnem = "fild_s"; break;
+ case 1: mnem = "fisttp_s"; break;
+ case 2: mnem = "fist_s"; break;
+ case 3: mnem = "fistp_s"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDD: switch (regop) {
+ case 0: mnem = "fld_d"; break;
+ case 3: mnem = "fstp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDF: switch (regop) {
+ case 5: mnem = "fild_d"; break;
+ case 7: mnem = "fistp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(modrm_start);
+ return count + 1;
+}
+
+int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
+ byte modrm_byte) {
+ bool has_register = false; // Is the FPU register encoded in modrm_byte?
+ const char* mnem = "?";
+
+ switch (escape_opcode) {
+ case 0xD8:
+ UnimplementedInstruction();
+ break;
+
+ case 0xD9:
+ switch (modrm_byte & 0xF8) {
+ case 0xC8:
+ mnem = "fxch";
+ has_register = true;
+ break;
+ default:
+ switch (modrm_byte) {
+ case 0xE0: mnem = "fchs"; break;
+ case 0xE1: mnem = "fabs"; break;
+ case 0xE4: mnem = "ftst"; break;
+ case 0xE8: mnem = "fld1"; break;
+ case 0xEE: mnem = "fldz"; break;
+ case 0xF5: mnem = "fprem1"; break;
+ case 0xF7: mnem = "fincstp"; break;
+ case 0xF8: mnem = "fprem"; break;
+ case 0xFE: mnem = "fsin"; break;
+ case 0xFF: mnem = "fcos"; break;
+ default: UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0xDA:
+ if (modrm_byte == 0xE9) {
+ mnem = "fucompp";
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDB:
+ if ((modrm_byte & 0xF8) == 0xE8) {
+ mnem = "fucomi";
+ has_register = true;
+ } else if (modrm_byte == 0xE2) {
+ mnem = "fclex";
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDC:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "fadd"; break;
+ case 0xE8: mnem = "fsub"; break;
+ case 0xC8: mnem = "fmul"; break;
+ case 0xF8: mnem = "fdiv"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDD:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "ffree"; break;
+ case 0xD8: mnem = "fstp"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDE:
+ if (modrm_byte == 0xD9) {
+ mnem = "fcompp";
+ } else {
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "faddp"; break;
+ case 0xE8: mnem = "fsubp"; break;
+ case 0xC8: mnem = "fmulp"; break;
+ case 0xF8: mnem = "fdivp"; break;
+ default: UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0xDF:
+ if (modrm_byte == 0xE0) {
+ mnem = "fnstsw_ax";
+ } else if ((modrm_byte & 0xF8) == 0xE8) {
+ mnem = "fucomip";
+ has_register = true;
+ }
+ break;
+
+ default: UnimplementedInstruction();
+ }
+
+ if (has_register) {
+ AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
+ } else {
+ AppendToBuffer("%s", mnem);
+ }
return 2;
}
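
The rewritten FPUInstruction splits decoding on the ModR/M byte that follows
the escape opcode: values of 0xC0 and above have no memory operand, so the
byte encodes an FPU stack register and RegisterFPUInstruction handles it;
lower values carry an addressing mode and go to MemoryFPUInstruction. The
split in isolation:

    #include <cstdio>

    // After an FPU escape opcode (0xD8..0xDF), mod == 3 (byte >= 0xC0) means
    // the operand is an FPU stack register in the low three bits; anything
    // lower is a memory operand whose reg/op field selects the operation.
    void DescribeFPUModRM(unsigned char modrm_byte) {
      if (modrm_byte >= 0xC0) {
        printf("register form: st%d\n", modrm_byte & 0x7);
      } else {
        printf("memory form: reg/op %d\n", (modrm_byte >> 3) & 0x7);
      }
    }

    int main() {
      DescribeFPUModRM(0xC9);  // register form: st1 (fxch st1 under 0xD9).
      DescribeFPUModRM(0x1D);  // memory form: reg/op 3 (fstp_s under 0xD9).
    }
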
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc
index 663d136..807ebd4 100644
--- a/src/ia32/fast-codegen-ia32.cc
+++ b/src/ia32/fast-codegen-ia32.cc
@@ -28,8 +28,10 @@
#include "v8.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "fast-codegen.h"
#include "parser.h"
+#include "debug.h"
namespace v8 {
namespace internal {
@@ -60,50 +62,445 @@
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = fun->scope()->num_stack_slots();
- for (int i = 0; i < locals_count; i++) {
+ if (locals_count == 1) {
__ push(Immediate(Factory::undefined_value()));
+ } else if (locals_count > 1) {
+ __ mov(eax, Immediate(Factory::undefined_value()));
+ for (int i = 0; i < locals_count; i++) {
+ __ push(eax);
+ }
}
}
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ if (fun->scope()->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is still in edi.
+ __ push(edi);
+ __ CallRuntime(Runtime::kNewContext, 1);
+ function_in_register = false;
+ // Context is returned in both eax and esi. It replaces the context
+ // passed to us. It's saved on the stack and kept live in esi.
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+
+ // Copy parameters into context if necessary.
+ int num_parameters = fun->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = fun->scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ mov(eax, Operand(ebp, parameter_offset));
+ // Store it in the context.
+ __ mov(Operand(esi, Context::SlotOffset(slot->index())), eax);
+ }
+ }
+ }
+
+ Variable* arguments = fun->scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (function_in_register) {
+ __ push(edi);
+ } else {
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ // Receiver is just before the parameters on the caller's stack.
+ __ lea(edx, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
+ fun->num_parameters() * kPointerSize));
+ __ push(edx);
+ __ push(Immediate(Smi::FromInt(fun->num_parameters())));
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite the receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ __ mov(ecx, eax); // Duplicate result.
+ Move(arguments->slot(), eax, ebx, edx);
+ Slot* dot_arguments_slot =
+ fun->scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, ecx, ebx, edx);
+ }
+
+ { Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(fun->scope()->declarations());
+ }
+
{ Comment cmnt(masm_, "[ Stack check");
Label ok;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, taken);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
}
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(fun->scope()->declarations());
- }
-
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
{ Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
VisitStatements(fun->body());
+ ASSERT(loop_depth() == 0);
}
{ Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the
- // body.
+ // Emit a 'return undefined' in case control fell off the end of the body.
__ mov(eax, Factory::undefined_value());
- SetReturnPosition(fun);
+ EmitReturnSequence(function_->end_position());
+ }
+}
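
The prologue above initializes locals through eax once there is more than one
of them, instead of pushing the undefined immediate repeatedly. A sketch of
that choice, with printf standing in for the assembler calls; the byte counts
are the standard ia32 encodings:

    #include <cstdio>

    // push imm32 is 5 bytes and push reg is 1 byte, so loading undefined
    // into eax once (5 bytes) pays off for two or more locals.
    void AllocateLocals(int locals_count) {
      if (locals_count == 1) {
        printf("push undefined_value  ; 5 bytes\n");
      } else if (locals_count > 1) {
        printf("mov eax, undefined_value  ; 5 bytes\n");
        for (int i = 0; i < locals_count; i++) {
          printf("push eax  ; 1 byte\n");
        }
      }
    }

    int main() {
      AllocateLocals(3);  // 8 bytes of code instead of 15.
    }
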
+
+void FastCodeGenerator::EmitReturnSequence(int position) {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ } else {
+ // Common return label.
+ __ bind(&return_label_);
if (FLAG_trace) {
__ push(eax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
+#ifdef DEBUG
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
+ CodeGenerator::RecordPositions(masm_, position);
__ RecordJSReturn();
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
__ mov(esp, ebp);
__ pop(ebp);
- __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+ __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger.
+ ASSERT_EQ(Assembler::kJSReturnSequenceLength,
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+#endif
+ }
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context, Register source) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ push(source);
+ break;
+ case Expression::kTest:
+ TestAndBranch(source, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ push(source);
+ TestAndBranch(source, true_label_, &discard);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ push(source);
+ TestAndBranch(source, &discard, false_label_);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(true_label_);
+ }
+ }
+}
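
Move is the central compile-time dispatch on what the surrounding code expects
from an expression: nothing (kEffect), a value on the stack (kValue), a branch
(kTest), or a branch that also keeps the value alive on one edge (kValueTest,
kTestValue). A compact model of the stack discipline, with printf standing in
for emitted instructions:

    #include <cstdio>

    // Model of the Expression::Context dispatch in Move above; what matters
    // is which contexts leave the value on the stack and which branch on it.
    enum Context { kEffect, kValue, kTest, kValueTest, kTestValue };

    void MoveSource(Context context) {
      switch (context) {
        case kEffect:
          break;                              // Result is discarded outright.
        case kValue:
          printf("push source\n");            // Result stays on the stack.
          break;
        case kTest:
          printf("branch on source\n");       // Pure control flow, no value.
          break;
        case kValueTest:
          printf("push source\n");            // Value survives on the true edge;
          printf("branch; pop on false\n");   // dropped before jumping false.
          break;
        case kTestValue:
          printf("push source\n");            // Value survives on the false edge;
          printf("branch; pop on true\n");    // dropped before jumping true.
          break;
      }
    }

    int main() {
      MoveSource(kValueTest);
    }
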
+
+
+template <>
+Operand FastCodeGenerator::CreateSlotOperand<Operand>(Slot* source,
+ Register scratch) {
+ switch (source->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return Operand(ebp, SlotOffset(source));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(source->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, source->index());
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ // Fall-through.
+ default:
+ UNREACHABLE();
+ return Operand(eax, 0); // Dead code to make the compiler happy.
+ }
+}
+
+
+void FastCodeGenerator::Move(Register dst, Slot* source) {
+ Operand location = CreateSlotOperand<Operand>(source, dst);
+ __ mov(dst, location);
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context,
+ Slot* source,
+ Register scratch) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue: {
+ Operand location = CreateSlotOperand<Operand>(source, scratch);
+ __ push(location);
+ break;
+ }
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest: // Fall through.
+ case Expression::kTestValue:
+ Move(scratch, source);
+ Move(context, scratch);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context, Literal* expr) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ push(Immediate(expr->handle()));
+ break;
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest: // Fall through.
+ case Expression::kTestValue:
+ __ mov(eax, expr->handle());
+ Move(context, eax);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Slot* dst,
+ Register src,
+ Register scratch1,
+ Register scratch2) {
+ switch (dst->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ __ mov(Operand(ebp, SlotOffset(dst)), src);
+ break;
+ case Slot::CONTEXT: {
+ ASSERT(!src.is(scratch1));
+ ASSERT(!src.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
+ int context_chain_length =
+ function_->scope()->ContextChainLength(dst->var()->scope());
+ __ LoadContext(scratch1, context_chain_length);
+ __ mov(Operand(scratch1, Context::SlotOffset(dst->index())), src);
+ int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
+ __ RecordWrite(scratch1, offset, src, scratch2);
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FastCodeGenerator::DropAndMove(Expression::Context context,
+ Register source,
+ int count) {
+ ASSERT(count > 0);
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ __ add(Operand(esp), Immediate(count * kPointerSize));
+ break;
+ case Expression::kValue:
+ if (count > 1) {
+ __ add(Operand(esp), Immediate((count - 1) * kPointerSize));
+ }
+ __ mov(Operand(esp, 0), source);
+ break;
+ case Expression::kTest:
+ ASSERT(!source.is(esp));
+ __ add(Operand(esp), Immediate(count * kPointerSize));
+ TestAndBranch(source, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (count > 1) {
+ __ add(Operand(esp), Immediate((count - 1) * kPointerSize));
+ }
+ __ mov(Operand(esp, 0), source);
+ TestAndBranch(source, true_label_, &discard);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (count > 1) {
+ __ add(Operand(esp), Immediate((count - 1) * kPointerSize));
+ }
+ __ mov(Operand(esp, 0), source);
+ TestAndBranch(source, &discard, false_label_);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
+ }
+}
+
+
+void FastCodeGenerator::TestAndBranch(Register source,
+ Label* true_label,
+ Label* false_label) {
+ ASSERT_NE(NULL, true_label);
+ ASSERT_NE(NULL, false_label);
+ // Use the shared ToBoolean stub to compile the value in the register into
+ // control flow to the code generator's true and false labels. Perform
+ // the fast checks assumed by the stub.
+ __ cmp(source, Factory::undefined_value()); // The undefined value is false.
+ __ j(equal, false_label);
+ __ cmp(source, Factory::true_value()); // True is true.
+ __ j(equal, true_label);
+ __ cmp(source, Factory::false_value()); // False is false.
+ __ j(equal, false_label);
+ ASSERT_EQ(0, kSmiTag);
+ __ test(source, Operand(source)); // The smi zero is false.
+ __ j(zero, false_label);
+ __ test(source, Immediate(kSmiTagMask)); // All other smis are true.
+ __ j(zero, true_label);
+
+ // Call the stub for all other cases.
+ __ push(source);
+ ToBooleanStub stub;
+ __ CallStub(&stub);
+ __ test(eax, Operand(eax)); // The stub returns nonzero for true.
+ __ j(not_zero, true_label);
+ __ jmp(false_label);
+}
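
TestAndBranch performs inline the fast checks the ToBooleanStub assumes:
undefined, true, false, and the smi zero are decided without a call, any other
smi is true, and everything else falls back to the stub. The truth table in
plain C++, over a hypothetical tagged-value model:

    #include <cstdio>
    #include <cstdint>

    // Hypothetical tagged-value model: smis carry a zero tag bit
    // (kSmiTag == 0); the odd sentinels stand in for heap objects. The
    // decision order mirrors TestAndBranch.
    const uintptr_t kSmiTagMask = 1;
    const uintptr_t kUndefined = 0x11;
    const uintptr_t kTrue = 0x13;
    const uintptr_t kFalse = 0x15;

    enum Outcome { kFalseBranch, kTrueBranch, kSlowPath };

    Outcome ToBooleanFast(uintptr_t value) {
      if (value == kUndefined) return kFalseBranch;        // Undefined is false.
      if (value == kTrue) return kTrueBranch;              // True is true.
      if (value == kFalse) return kFalseBranch;            // False is false.
      if (value == 0) return kFalseBranch;                 // The smi zero is false.
      if ((value & kSmiTagMask) == 0) return kTrueBranch;  // Other smis are true.
      return kSlowPath;                                    // Defer to ToBooleanStub.
    }

    int main() {
      printf("%d %d %d\n", ToBooleanFast(0), ToBooleanFast(4 << 1),
             ToBooleanFast(0x21));  // 0 1 2
    }
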
+
+
+void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+ Comment cmnt(masm_, "[ Declaration");
+ Variable* var = decl->proxy()->var();
+ ASSERT(var != NULL); // Must have been resolved.
+ Slot* slot = var->slot();
+ Property* prop = var->AsProperty();
+
+ if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::PARAMETER: // Fall through.
+ case Slot::LOCAL:
+ if (decl->mode() == Variable::CONST) {
+ __ mov(Operand(ebp, SlotOffset(var->slot())),
+ Immediate(Factory::the_hole_value()));
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(Operand(ebp, SlotOffset(var->slot())));
+ }
+ break;
+
+ case Slot::CONTEXT:
+ // The variable in the decl always resides in the current context.
+ ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ if (FLAG_debug_code) {
+ // Check if we have the correct context pointer.
+ __ mov(ebx,
+ CodeGenerator::ContextOperand(esi, Context::FCONTEXT_INDEX));
+ __ cmp(ebx, Operand(esi));
+ __ Check(equal, "Unexpected declaration in current context.");
+ }
+ if (decl->mode() == Variable::CONST) {
+ __ mov(eax, Immediate(Factory::the_hole_value()));
+ __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
+ // No write barrier since the hole value is in old space.
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(eax);
+ __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
+ int offset = Context::SlotOffset(slot->index());
+ __ RecordWrite(esi, offset, eax, ecx);
+ }
+ break;
+
+ case Slot::LOOKUP: {
+ __ push(esi);
+ __ push(Immediate(var->name()));
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(decl->mode() == Variable::VAR ||
+ decl->mode() == Variable::CONST);
+ PropertyAttributes attr =
+ (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+ __ push(Immediate(Smi::FromInt(attr)));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (decl->mode() == Variable::CONST) {
+ __ push(Immediate(Factory::the_hole_value()));
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ } else {
+ __ push(Immediate(Smi::FromInt(0))); // No initial value!
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+
+ } else if (prop != NULL) {
+ if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+ // We are declaring a function or constant that rewrites to a
+ // property. Use (keyed) IC to set the initial value.
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->key()->context());
+ Visit(prop->key());
+
+ if (decl->fun() != NULL) {
+ ASSERT_EQ(Expression::kValue, decl->fun()->context());
+ Visit(decl->fun());
+ __ pop(eax);
+ } else {
+ __ Set(eax, Immediate(Factory::the_hole_value()));
+ }
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Absence of a test eax instruction following the call
+ // indicates that none of the store was inlined.
+
+ // Value in eax is ignored (declarations are statements). Receiver
+ // and key on stack are discarded.
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ }
}
}
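
The LOOKUP case above passes the variable's attributes and an initial value to
Runtime::kDeclareContextSlot, both derived from the declaration mode. A small
sketch of that argument selection; the attribute values here are illustrative,
not V8's actual PropertyAttributes encoding:

    #include <cstdio>

    // Mode mirrors Variable::VAR / Variable::CONST.
    enum Mode { VAR, CONST };
    enum Attr { NONE, READ_ONLY };

    Attr AttributesFor(Mode mode) {
      return mode == VAR ? NONE : READ_ONLY;
    }

    const char* InitialValueFor(Mode mode, bool has_function) {
      if (mode == CONST) return "the hole";       // Uninitialized const marker.
      if (has_function) return "function value";  // Function declarations.
      return "smi 0";  // No initial value: a legal redeclaration must not
                       // clobber the current value.
    }

    int main() {
      printf("%d %s\n", AttributesFor(CONST), InitialValueFor(CONST, false));
    }
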
@@ -118,47 +515,17 @@
}
-void FastCodeGenerator::VisitBlock(Block* stmt) {
- Comment cmnt(masm_, "[ Block");
- SetStatementPosition(stmt);
- VisitStatements(stmt->statements());
-}
-
-
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Comment cmnt(masm_, "[ ExpressionStatement");
- SetStatementPosition(stmt);
- Visit(stmt->expression());
-}
-
-
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
- SetStatementPosition(stmt);
Expression* expr = stmt->expression();
- Visit(expr);
-
- // Complete the statement based on the location of the subexpression.
- Location source = expr->location();
- ASSERT(!source.is_nowhere());
- if (source.is_temporary()) {
- __ pop(eax);
- } else {
- ASSERT(source.is_constant());
- ASSERT(expr->AsLiteral() != NULL);
+ if (expr->AsLiteral() != NULL) {
__ mov(eax, expr->AsLiteral()->handle());
+ } else {
+ ASSERT_EQ(Expression::kValue, expr->context());
+ Visit(expr);
+ __ pop(eax);
}
- if (FLAG_trace) {
- __ push(eax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
- __ RecordJSReturn();
-
- // Do not use the leave instruction here because it is too short to
- // patch with the code required by the debugger.
- __ mov(esp, ebp);
- __ pop(ebp);
- __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+ EmitReturnSequence(stmt->statement_pos());
}
@@ -166,7 +533,8 @@
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(expr, script_, this);
if (HasStackOverflow()) return;
ASSERT(boilerplate->IsBoilerplate());
@@ -175,12 +543,7 @@
__ push(esi);
__ push(Immediate(boilerplate));
__ CallRuntime(Runtime::kNewClosure, 2);
-
- if (expr->location().is_temporary()) {
- __ push(eax);
- } else {
- ASSERT(expr->location().is_nowhere());
- }
+ Move(expr->context(), eax);
}
@@ -188,6 +551,7 @@
Comment cmnt(masm_, "[ VariableProxy");
Expression* rewrite = expr->var()->rewrite();
if (rewrite == NULL) {
+ ASSERT(expr->var()->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object on the stack.
@@ -195,34 +559,76 @@
__ mov(ecx, expr->name());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ // By emitting a nop we make sure that we do not have a test eax
+ // instruction after the call, as it is treated specially by the LoadIC code.
+ // Remember that the assembler may choose to do peephole optimization
+ // (eg, push/pop elimination).
+ __ nop();
- // A test eax instruction following the call is used by the IC to
- // indicate that the inobject property case was inlined. Ensure there
- // is no test eax instruction here. Remember that the assembler may
- // choose to do peephole optimization (eg, push/pop elimination).
- if (expr->location().is_temporary()) {
- // Replace the global object with the result.
- __ mov(Operand(esp, 0), eax);
- } else {
- ASSERT(expr->location().is_nowhere());
- __ add(Operand(esp), Immediate(kPointerSize));
- }
-
- } else {
- Comment cmnt(masm_, "Stack slot");
+ DropAndMove(expr->context(), eax);
+ } else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
- ASSERT(slot != NULL);
- if (expr->location().is_temporary()) {
- __ push(Operand(ebp, SlotOffset(slot)));
- } else {
- ASSERT(expr->location().is_nowhere());
+ if (FLAG_debug_code) {
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER: {
+ Comment cmnt(masm_, "Stack slot");
+ break;
+ }
+ case Slot::CONTEXT: {
+ Comment cmnt(masm_, "Context slot");
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+ Move(expr->context(), slot, eax);
+ } else {
+ Comment cmnt(masm_, "Variable rewritten to Property");
+ // A variable has been rewritten into an explicit access to
+ // an object property.
+ Property* property = rewrite->AsProperty();
+ ASSERT_NOT_NULL(property);
+
+ // Currently the only parameter expressions that can occur are
+ // of the form "slot[literal]".
+
+ // Check that the object is in a slot.
+ Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(object_var);
+ Slot* object_slot = object_var->slot();
+ ASSERT_NOT_NULL(object_slot);
+
+ // Load the object.
+ Move(Expression::kValue, object_slot, eax);
+
+ // Check that the key is a smi.
+ Literal* key_literal = property->key()->AsLiteral();
+ ASSERT_NOT_NULL(key_literal);
+ ASSERT(key_literal->handle()->IsSmi());
+
+ // Load the key.
+ Move(Expression::kValue, key_literal);
+
+ // Do a KEYED property load.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Notice: We must not have a "test eax, ..." instruction after
+ // the call. It is treated specially by the LoadIC code.
+ __ nop();
+
+ // Drop key and object left on the stack by IC, and push the result.
+ DropAndMove(expr->context(), eax, 2);
}
}
void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExp Literal");
+ Comment cmnt(masm_, "[ RegExpLiteral");
Label done;
// Registers will be used as follows:
// edi = JS function.
@@ -244,10 +650,130 @@
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
// Label done:
__ bind(&done);
- if (expr->location().is_temporary()) {
- __ push(eax);
+ Move(expr->context(), eax);
+}
+
+
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+ Label exists;
+ // Registers will be used as follows:
+ // edi = JS function.
+ // ebx = literals array.
+ // eax = boilerplate
+
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ mov(eax, FieldOperand(ebx, literal_offset));
+ __ cmp(eax, Factory::undefined_value());
+ __ j(not_equal, &exists);
+ // Create boilerplate if it does not exist.
+ // Literal array (0).
+ __ push(ebx);
+ // Literal index (1).
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ // Constant properties (2).
+ __ push(Immediate(expr->constant_properties()));
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ bind(&exists);
+ // eax contains boilerplate.
+ // Clone boilerplate.
+ __ push(eax);
+ if (expr->depth() == 1) {
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
} else {
- ASSERT(expr->location().is_nowhere());
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ }
+
+ // If result_saved == true: The result is saved on top of the
+ // stack and in eax.
+ // If result_saved == false: The result not on the stack, just in eax.
+ bool result_saved = false;
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(eax); // Save result on the stack.
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL: // fall through
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->handle()->IsSymbol()) {
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ pop(eax);
+ __ mov(ecx, Immediate(key->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // StoreIC leaves the receiver on the stack.
+ __ mov(eax, Operand(esp, 0)); // Restore result into eax.
+ break;
+ }
+ // fall through
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ push(eax);
+ Visit(key);
+ ASSERT_EQ(Expression::kValue, key->context());
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ CallRuntime(Runtime::kSetProperty, 3);
+ __ mov(eax, Operand(esp, 0)); // Restore result into eax.
+ break;
+ case ObjectLiteral::Property::SETTER: // fall through
+ case ObjectLiteral::Property::GETTER:
+ __ push(eax);
+ Visit(key);
+ ASSERT_EQ(Expression::kValue, key->context());
+ __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
+ Smi::FromInt(1) :
+ Smi::FromInt(0)));
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ __ mov(eax, Operand(esp, 0)); // Restore result into eax.
+ break;
+ default: UNREACHABLE();
+ }
+ }
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ if (result_saved) __ add(Operand(esp), Immediate(kPointerSize));
+ break;
+ case Expression::kValue:
+ if (!result_saved) __ push(eax);
+ break;
+ case Expression::kTest:
+ if (result_saved) __ pop(eax);
+ TestAndBranch(eax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (!result_saved) __ push(eax);
+ TestAndBranch(eax, true_label_, &discard);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (!result_saved) __ push(eax);
+ TestAndBranch(eax, &discard, false_label_);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
}
}
@@ -300,7 +826,7 @@
result_saved = true;
}
Visit(subexpr);
- ASSERT(subexpr->location().is_temporary());
+ ASSERT_EQ(Expression::kValue, subexpr->context());
// Store the subexpression value in the array's elements.
__ pop(eax); // Subexpression value.
@@ -313,233 +839,851 @@
__ RecordWrite(ebx, offset, eax, ecx);
}
- Location destination = expr->location();
- if (destination.is_nowhere() && result_saved) {
- __ add(Operand(esp), Immediate(kPointerSize));
- } else if (destination.is_temporary() && !result_saved) {
- __ push(eax);
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ if (result_saved) __ add(Operand(esp), Immediate(kPointerSize));
+ break;
+ case Expression::kValue:
+ if (!result_saved) __ push(eax);
+ break;
+ case Expression::kTest:
+ if (result_saved) __ pop(eax);
+ TestAndBranch(eax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (!result_saved) __ push(eax);
+ TestAndBranch(eax, true_label_, &discard);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (!result_saved) __ push(eax);
+ TestAndBranch(eax, &discard, false_label_);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
}
}
-void FastCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
- Expression* rhs = expr->value();
- Visit(rhs);
-
- // Left-hand side can only be a global or a (parameter or local) slot.
+void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
-
- // Complete the assignment based on the location of the right-hand-side
- // value and the desired location of the assignment value.
- Location destination = expr->location();
- Location source = rhs->location();
- ASSERT(!destination.is_constant());
- ASSERT(!source.is_nowhere());
-
if (var->is_global()) {
- // Assignment to a global variable, use inline caching. Right-hand-side
- // value is passed in eax, variable name in ecx, and the global object
- // on the stack.
- if (source.is_temporary()) {
- __ pop(eax);
- } else {
- ASSERT(source.is_constant());
- ASSERT(rhs->AsLiteral() != NULL);
- __ mov(eax, rhs->AsLiteral()->handle());
- }
+ // Assignment to a global variable. Use inline caching for the
+ // assignment. Right-hand-side value is passed in eax, variable name in
+ // ecx, and the global object on the stack.
+ __ pop(eax);
__ mov(ecx, var->name());
__ push(CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
- // Overwrite the global object on the stack with the result if needed.
- if (destination.is_temporary()) {
- __ mov(Operand(esp, 0), eax);
- } else {
- ASSERT(destination.is_nowhere());
- __ add(Operand(esp), Immediate(kPointerSize));
- }
+ // Overwrite the receiver on the stack with the result if needed.
+ DropAndMove(expr->context(), eax);
- } else {
- // Local or parameter assignment.
- if (source.is_temporary()) {
- if (destination.is_temporary()) {
- // Case 'temp1 <- (var = temp0)'. Preserve right-hand-side
- // temporary on the stack.
- __ mov(eax, Operand(esp, 0));
- __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
- } else {
- ASSERT(destination.is_nowhere());
- // Case 'var = temp'. Discard right-hand-side temporary.
- __ pop(Operand(ebp, SlotOffset(var->slot())));
+ } else if (var->slot() != NULL) {
+ Slot* slot = var->slot();
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER: {
+ Operand target = Operand(ebp, SlotOffset(var->slot()));
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Perform assignment and discard value.
+ __ pop(target);
+ break;
+ case Expression::kValue:
+ // Perform assignment and preserve value.
+ __ mov(eax, Operand(esp, 0));
+ __ mov(target, eax);
+ break;
+ case Expression::kTest:
+ // Perform assignment and test (and discard) value.
+ __ pop(eax);
+ __ mov(target, eax);
+ TestAndBranch(eax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ mov(eax, Operand(esp, 0));
+ __ mov(target, eax);
+ TestAndBranch(eax, true_label_, &discard);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ mov(eax, Operand(esp, 0));
+ __ mov(target, eax);
+ TestAndBranch(eax, &discard, false_label_);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
+ }
+ break;
}
- } else {
- ASSERT(source.is_constant());
- ASSERT(rhs->AsLiteral() != NULL);
- // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
- // discarded result. Always perform the assignment.
- __ mov(eax, rhs->AsLiteral()->handle());
- __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
- if (destination.is_temporary()) {
- // Case 'temp <- (var = constant)'. Save result.
- __ push(eax);
+
+ case Slot::CONTEXT: {
+ int chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ if (chain_length > 0) {
+ // Move up the context chain to the context containing the slot.
+ __ mov(eax,
+ Operand(esi, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ // Load the function context (which is the incoming, outer context).
+ __ mov(eax, FieldOperand(eax, JSFunction::kContextOffset));
+ for (int i = 1; i < chain_length; i++) {
+ __ mov(eax,
+ Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ mov(eax, FieldOperand(eax, JSFunction::kContextOffset));
+ }
+ } else { // Slot is in the current context. Generate optimized code.
+ __ mov(eax, esi); // RecordWrite destroys the object register.
+ }
+ if (FLAG_debug_code) {
+ __ cmp(eax,
+ Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ __ Check(equal, "Context Slot chain length wrong.");
+ }
+ __ pop(ecx);
+ __ mov(Operand(eax, Context::SlotOffset(slot->index())), ecx);
+
+ // RecordWrite may destroy all its register arguments.
+ if (expr->context() == Expression::kValue) {
+ __ push(ecx);
+ } else if (expr->context() != Expression::kEffect) {
+ __ mov(edx, ecx);
+ }
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ RecordWrite(eax, offset, ecx, ebx);
+ if (expr->context() != Expression::kEffect &&
+ expr->context() != Expression::kValue) {
+ Move(expr->context(), edx);
+ }
+ break;
}
+
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ break;
}
}
}
-void FastCodeGenerator::VisitCall(Call* expr) {
- Expression* fun = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
- Variable* var = fun->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL && !var->is_this() && var->is_global());
- ASSERT(!var->is_possibly_eval());
+void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
- __ push(Immediate(var->name()));
- // Push global object (receiver).
- __ push(CodeGenerator::GlobalObject());
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ push(Operand(esp, kPointerSize)); // Receiver is under value.
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ __ pop(eax);
+ __ mov(ecx, prop->key()->AsLiteral()->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(eax); // Result of assignment, saved even if not needed.
+ __ push(Operand(esp, kPointerSize)); // Receiver is under value.
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(eax);
+ }
+
+ DropAndMove(expr->context(), eax);
+}
+
+
+void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ // Receiver is under the key and value.
+ __ push(Operand(esp, 2 * kPointerSize));
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ __ pop(eax);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(eax); // Result of assignment, saved even if not needed.
+ // Receiver is under the key and value.
+ __ push(Operand(esp, 2 * kPointerSize));
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(eax);
+ }
+
+ // Receiver and key are still on stack.
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ Move(expr->context(), eax);
+}
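
Both property-assignment emitters bracket runs of assignments to a fresh
object with the same runtime protocol: switch the receiver to slow
(dictionary) properties when the block starts, so repeated stores avoid the
quadratic cost of growing fast properties, and switch back when it ends. The
sequence, sketched with printf standing in for the Runtime::kToSlowProperties
and Runtime::kToFastProperties calls and the IC store:

    #include <cstdio>

    // Sketch of the initialization-block protocol shared by the named and
    // keyed emitters above.
    void EmitPropertyAssignment(bool starts_block, bool ends_block) {
      if (starts_block) printf("call ToSlowProperties(receiver)\n");
      printf("IC store into property\n");
      if (ends_block) printf("call ToFastProperties(receiver)\n");
    }

    int main() {
      // A run of assignments to one object: the first store opens the block,
      // the last closes it, and the stores in between stay cheap because the
      // receiver is in dictionary mode.
      EmitPropertyAssignment(true, false);
      EmitPropertyAssignment(false, false);
      EmitPropertyAssignment(false, true);
    }
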
+
+
+void FastCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+ uint32_t dummy;
+
+ // Record the source position for the property load.
+ SetSourcePosition(expr->position());
+
+ // Evaluate receiver.
+ Visit(expr->obj());
+
+ if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
+ !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
+ // Do a NAMED property load.
+ // The IC expects the property name in ecx and the receiver on the stack.
+ __ mov(ecx, Immediate(key->AsLiteral()->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // By emitting a nop we make sure that we do not have a "test eax, ..."
+ // instruction after the call, which the LoadIC code treats specially.
+ __ nop();
+ } else {
+ // Do a KEYED property load.
+ Visit(expr->key());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // By emitting a nop we make sure that we do not have a "test eax,..."
+ // instruction after the call, which the LoadIC code treats specially.
+ __ nop();
+ // Drop key left on the stack by IC.
+ __ add(Operand(esp), Immediate(kPointerSize));
+ }
+ DropAndMove(expr->context(), eax);
+}
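
The nop after each IC call is part of a patching protocol: the IC inspects the instruction that follows its call site, and a test-eax marker means the full code generator inlined a fast path there. A standalone sketch (the opcode values are real ia32 encodings, but the dispatch itself is a toy, not V8's IC code):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint8_t kNop = 0x90;       // ia32 'nop'
      const uint8_t kTestEax = 0xA9;   // ia32 'test eax, imm32'
      uint8_t after_call_site = kNop;  // what the fast code generator emits
      // The IC only patches inlined code when it sees the marker.
      bool has_inlined_code = (after_call_site == kTestEax);
      assert(!has_inlined_code);
      return 0;
    }
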
+
+
+void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Visit(args->at(i));
- ASSERT(!args->at(i)->location().is_nowhere());
- if (args->at(i)->location().is_constant()) {
- ASSERT(args->at(i)->AsLiteral() != NULL);
- __ push(Immediate(args->at(i)->AsLiteral()->handle()));
- }
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
}
- // Record source position for debugger
+ // Record source position for debugger.
SetSourcePosition(expr->position());
// Call the IC initialization code.
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
- __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ __ call(ic, reloc_info);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- if (expr->location().is_temporary()) {
- __ mov(Operand(esp, 0), eax);
- } else {
- ASSERT(expr->location().is_nowhere());
- __ add(Operand(esp), Immediate(kPointerSize));
+ DropAndMove(expr->context(), eax);
+}
+
+
+void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
}
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NOT_IN_LOOP);
+ __ CallStub(&stub);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), eax);
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+ Comment cmnt(masm_, "[ Call");
+ Expression* fun = expr->expression();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+
+ if (var != NULL && var->is_possibly_eval()) {
+ // Call to the identifier 'eval'.
+ UNREACHABLE();
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Call to a global variable.
+ __ push(Immediate(var->name()));
+ // Push global object as receiver for the call IC lookup.
+ __ push(CodeGenerator::GlobalObject());
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // Call to a lookup slot.
+ UNREACHABLE();
+ } else if (fun->AsProperty() != NULL) {
+ // Call to an object property.
+ Property* prop = fun->AsProperty();
+ Literal* key = prop->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsSymbol()) {
+ // Call to a named property, use call IC.
+ __ push(Immediate(key->handle()));
+ Visit(prop->obj());
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
+ } else {
+ // Call to a keyed property, use keyed load IC followed by function
+ // call.
+ Visit(prop->obj());
+ Visit(prop->key());
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // By emitting a nop we make sure that we do not have a "test eax,..."
+ // instruction after the call, which the LoadIC code treats specially.
+ __ nop();
+ // Drop key left on the stack by IC.
+ __ add(Operand(esp), Immediate(kPointerSize));
+ // Pop receiver.
+ __ pop(ebx);
+ // Push result (function).
+ __ push(eax);
+ // Push receiver object on stack.
+ if (prop->is_synthetic()) {
+ __ push(CodeGenerator::GlobalObject());
+ } else {
+ __ push(ebx);
+ }
+ EmitCallWithStub(expr);
+ }
+ } else {
+ // Call to some other expression. If the expression is an anonymous
+ // function literal not called in a loop, mark it as one that should
+ // also use the fast code generator.
+ FunctionLiteral* lit = fun->AsFunctionLiteral();
+ if (lit != NULL &&
+ lit->name()->Equals(Heap::empty_string()) &&
+ loop_depth() == 0) {
+ lit->set_try_fast_codegen(true);
+ }
+ Visit(fun);
+ // Load global receiver object.
+ __ mov(ebx, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+ // Emit function call.
+ EmitCallWithStub(expr);
+ }
+}
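
The register shuffle in the keyed-property case is easier to follow as data movement. A runnable model (assumption: a std::vector stands in for the expression stack, TOS at back()):

    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> stack = {"receiver", "key"};
      std::string eax = "function";   // result of the keyed load IC
      stack.pop_back();               // add esp, kPointerSize: drop the key
      std::string ebx = stack.back();
      stack.pop_back();               // pop ebx: receiver
      stack.push_back(eax);           // push eax: function
      stack.push_back(ebx);           // push ebx: receiver on top again
      // Layout expected by the call stub: function below, receiver on top.
      assert(stack[0] == "function" && stack[1] == "receiver");
      return 0;
    }
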
+
+
+void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+ // Push function on the stack.
+ Visit(expr->expression());
+ ASSERT_EQ(Expression::kValue, expr->expression()->context());
+
+ // Push global object (receiver).
+ __ push(CodeGenerator::GlobalObject());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ // In a value context the argument is already on the stack, so there
+ // is nothing more to do here.
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function, arg_count into edi and eax.
+ __ Set(eax, Immediate(arg_count));
+ // Function is in esp[arg_count + 1].
+ __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+
+ Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+ // Replace function on TOS with result in eax, or pop it.
+ DropAndMove(expr->context(), eax);
}
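
A quick check of the operand arithmetic used to find the function: with arg_count arguments and the receiver pushed after the function, the function sits arg_count + 1 slots below TOS. Runnable, with ia32's 4-byte pointers assumed:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kPointerSize = 4;  // ia32
      uint32_t esp = 0x1000, arg_count = 3;
      // Operand(esp, eax, times_pointer_size, kPointerSize), eax == arg_count:
      uint32_t function_slot = esp + arg_count * kPointerSize + kPointerSize;
      assert(function_slot == esp + (arg_count + 1) * kPointerSize);
      return 0;
    }
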
void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
- Runtime::Function* function = expr->function();
- ASSERT(function != NULL);
+ if (expr->is_jsruntime()) {
+ // Prepare for calling JS runtime function.
+ __ push(Immediate(expr->name()));
+ __ mov(eax, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
+ }
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Visit(args->at(i));
- ASSERT(!args->at(i)->location().is_nowhere());
- if (args->at(i)->location().is_constant()) {
- ASSERT(args->at(i)->AsLiteral() != NULL);
- __ push(Immediate(args->at(i)->AsLiteral()->handle()));
- } else {
- ASSERT(args->at(i)->location().is_temporary());
- // If location is temporary, it is already on the stack,
- // so nothing to do here.
- }
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
}
- __ CallRuntime(function, arg_count);
- if (expr->location().is_temporary()) {
- __ push(eax);
+ if (expr->is_jsruntime()) {
+ // Call the JS runtime function.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), eax);
} else {
- ASSERT(expr->location().is_nowhere());
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ Move(expr->context(), eax);
+ }
+}
+
+
+void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ Visit(expr->expression());
+ ASSERT_EQ(Expression::kEffect, expr->expression()->context());
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ push(Immediate(Factory::undefined_value()));
+ break;
+ case Expression::kTestValue:
+ // The value (undefined) is false, so a TestValue context needs it.
+ __ push(Immediate(Factory::undefined_value()));
+ // Fall through.
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest:
+ __ jmp(false_label_);
+ break;
+ }
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ ASSERT_EQ(Expression::kTest, expr->expression()->context());
+
+ Label push_true;
+ Label push_false;
+ Label done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ true_label_ = &push_false;
+ false_label_ = &push_true;
+ Visit(expr->expression());
+ __ bind(&push_true);
+ __ push(Immediate(Factory::true_value()));
+ __ jmp(&done);
+ __ bind(&push_false);
+ __ push(Immediate(Factory::false_value()));
+ __ bind(&done);
+ break;
+
+ case Expression::kEffect:
+ true_label_ = &done;
+ false_label_ = &done;
+ Visit(expr->expression());
+ __ bind(&done);
+ break;
+
+ case Expression::kTest:
+ true_label_ = saved_false;
+ false_label_ = saved_true;
+ Visit(expr->expression());
+ break;
+
+ case Expression::kValueTest:
+ true_label_ = saved_false;
+ false_label_ = &push_true;
+ Visit(expr->expression());
+ __ bind(&push_true);
+ __ push(Immediate(Factory::true_value()));
+ __ jmp(saved_true);
+ break;
+
+ case Expression::kTestValue:
+ true_label_ = &push_false;
+ false_label_ = saved_true;
+ Visit(expr->expression());
+ __ bind(&push_false);
+ __ push(Immediate(Factory::false_value()));
+ __ jmp(saved_false);
+ break;
+ }
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ ASSERT_EQ(Expression::kValue, expr->expression()->context());
+
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != NULL &&
+ !proxy->var()->is_this() &&
+ proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ push(CodeGenerator::GlobalObject());
+ __ mov(ecx, Immediate(proxy->name()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ mov(Operand(esp, 0), eax);
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ push(esi);
+ __ push(Immediate(proxy->name()));
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ push(eax);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ Visit(expr->expression());
+ }
+
+ __ CallRuntime(Runtime::kTypeof, 1);
+ Move(expr->context(), eax);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
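
The NOT cases above never negate a value; they compile the operand as a test with the true and false targets swapped. A toy sketch (labels modeled as function pointers; nothing here is V8 machinery):

    #include <cstdio>

    typedef void (*Label)();
    static void PushTrue()  { std::puts("push true_value"); }
    static void PushFalse() { std::puts("push false_value"); }

    // "Compile" a test of an operand whose value we know: jump to a target.
    static void VisitAsTest(bool operand, Label true_target, Label false_target) {
      (operand ? true_target : false_target)();
    }

    int main() {
      bool operand = true;
      // For !operand in value context, hand the operand swapped targets:
      VisitAsTest(operand, /*true_label_=*/PushFalse, /*false_label_=*/PushTrue);
      return 0;  // prints "push false_value": the negation cost nothing
    }
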
+
+
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ Comment cmnt(masm_, "[ CountOperation");
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ ASSERT(proxy->AsVariable() != NULL);
+ ASSERT(proxy->AsVariable()->is_global());
+
+ Visit(proxy);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kValue: // Fall through
+ case Expression::kTest: // Fall through
+ case Expression::kTestValue: // Fall through
+ case Expression::kValueTest:
+ // Duplicate the result on the stack.
+ __ push(eax);
+ break;
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ }
+ // Call runtime for +1/-1.
+ __ push(eax);
+ __ push(Immediate(Smi::FromInt(1)));
+ if (expr->op() == Token::INC) {
+ __ CallRuntime(Runtime::kNumberAdd, 2);
+ } else {
+ __ CallRuntime(Runtime::kNumberSub, 2);
+ }
+ // Call Store IC.
+ __ mov(ecx, proxy->AsVariable()->name());
+ __ push(CodeGenerator::GlobalObject());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Clean up the stack after the store IC.
+ __ add(Operand(esp), Immediate(kPointerSize));
+
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect: // Fall through
+ case Expression::kValue:
+ // Do nothing. The result is either on the stack (value context) or
+ // discarded (effect context).
+ break;
+ case Expression::kTest:
+ __ pop(eax);
+ TestAndBranch(eax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ mov(eax, Operand(esp, 0));
+ TestAndBranch(eax, true_label_, &discard);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ mov(eax, Operand(esp, 0));
+ TestAndBranch(eax, &discard, false_label_);
+ __ bind(&discard);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
}
}
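
The Smi::FromInt(1) pushed above is just the integer shifted into smi form. A standalone sketch of the encoding this code relies on (ia32 assumptions: kSmiTag == 0, kSmiTagSize == 1):

    #include <cassert>
    #include <cstdint>

    static int32_t SmiFromInt(int32_t v) {
      return static_cast<int32_t>(static_cast<uint32_t>(v) << 1);  // tag
    }
    static int32_t SmiToInt(int32_t smi) { return smi >> 1; }      // untag

    int main() {
      assert(SmiFromInt(1) == 2);
      assert(SmiToInt(SmiFromInt(-42)) == -42);
      assert((SmiFromInt(123) & 1) == 0);  // tag bit clear marks a smi
      return 0;
    }
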
void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- // Compile a short-circuited boolean or operation in a non-test
- // context.
- ASSERT(expr->op() == Token::OR);
- // Compile (e0 || e1) as if it were
- // (let (temp = e0) temp ? temp : e1).
+ Comment cmnt(masm_, "[ BinaryOperation");
+ switch (expr->op()) {
+ case Token::COMMA:
+ ASSERT_EQ(Expression::kEffect, expr->left()->context());
+ ASSERT_EQ(expr->context(), expr->right()->context());
+ Visit(expr->left());
+ Visit(expr->right());
+ break;
- Label eval_right, done;
- Location destination = expr->location();
- ASSERT(!destination.is_constant());
+ case Token::OR:
+ case Token::AND:
+ EmitLogicalOperation(expr);
+ break;
- Expression* left = expr->left();
- Location left_source = left->location();
- ASSERT(!left_source.is_nowhere());
+ case Token::ADD:
+ case Token::SUB:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ ASSERT_EQ(Expression::kValue, expr->left()->context());
+ ASSERT_EQ(Expression::kValue, expr->right()->context());
- Expression* right = expr->right();
- Location right_source = right->location();
- ASSERT(!right_source.is_nowhere());
+ Visit(expr->left());
+ Visit(expr->right());
+ GenericBinaryOpStub stub(expr->op(),
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS);
+ __ CallStub(&stub);
+ Move(expr->context(), eax);
- Visit(left);
- // Use the shared ToBoolean stub to find the boolean value of the
- // left-hand subexpression. Load the value into eax to perform some
- // inlined checks assumed by the stub.
- if (left_source.is_temporary()) {
- if (destination.is_temporary()) {
- // Copy the left-hand value into eax because we may need it as the
- // final result.
- __ mov(eax, Operand(esp, 0));
- } else {
- // Pop the left-hand value into eax because we will not need it as the
- // final result.
- __ pop(eax);
+ break;
}
- } else {
- // Load the left-hand value into eax. Put it on the stack if we may
- // need it.
- ASSERT(left->AsLiteral() != NULL);
- __ mov(eax, left->AsLiteral()->handle());
- if (destination.is_temporary()) __ push(eax);
+ default:
+ UNREACHABLE();
}
- // The left-hand value is in eax. It is also on the stack iff the
- // destination location is temporary.
-
- // Perform fast checks assumed by the stub.
- __ cmp(eax, Factory::undefined_value()); // The undefined value is false.
- __ j(equal, &eval_right);
- __ cmp(eax, Factory::true_value()); // True is true.
- __ j(equal, &done);
- __ cmp(eax, Factory::false_value()); // False is false.
- __ j(equal, &eval_right);
- ASSERT(kSmiTag == 0);
- __ test(eax, Operand(eax)); // The smi zero is false.
- __ j(zero, &eval_right);
- __ test(eax, Immediate(kSmiTagMask)); // All other smis are true.
- __ j(zero, &done);
-
- // Call the stub for all other cases.
- __ push(eax);
- ToBooleanStub stub;
- __ CallStub(&stub);
- __ test(eax, Operand(eax)); // The stub returns nonzero for true.
- __ j(not_zero, &done);
-
- __ bind(&eval_right);
- // Discard the left-hand value if present on the stack.
- if (destination.is_temporary()) {
- __ add(Operand(esp), Immediate(kPointerSize));
- }
- Visit(right);
-
- // Save or discard the right-hand value as needed.
- if (destination.is_temporary() && right_source.is_constant()) {
- ASSERT(right->AsLiteral() != NULL);
- __ push(Immediate(right->AsLiteral()->handle()));
- } else if (destination.is_nowhere() && right_source.is_temporary()) {
- __ add(Operand(esp), Immediate(kPointerSize));
- }
-
- __ bind(&done);
}
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ ASSERT_EQ(Expression::kValue, expr->left()->context());
+ ASSERT_EQ(Expression::kValue, expr->right()->context());
+ Visit(expr->left());
+ Visit(expr->right());
+
+ // Convert current context to test context: Pre-test code.
+ Label push_true;
+ Label push_false;
+ Label done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ true_label_ = &push_true;
+ false_label_ = &push_false;
+ break;
+
+ case Expression::kEffect:
+ true_label_ = &done;
+ false_label_ = &done;
+ break;
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ true_label_ = &push_true;
+ break;
+
+ case Expression::kTestValue:
+ false_label_ = &push_false;
+ break;
+ }
+ // Convert current context to test context: End pre-test code.
+
+ switch (expr->op()) {
+ case Token::IN: {
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ __ cmp(eax, Factory::true_value());
+ __ j(equal, true_label_);
+ __ jmp(false_label_);
+ break;
+ }
+
+ case Token::INSTANCEOF: {
+ InstanceofStub stub;
+ __ CallStub(&stub);
+ __ test(eax, Operand(eax));
+ __ j(zero, true_label_); // The stub returns 0 for true.
+ __ jmp(false_label_);
+ break;
+ }
+
+ default: {
+ Condition cc = no_condition;
+ bool strict = false;
+ switch (expr->op()) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through
+ case Token::EQ:
+ cc = equal;
+ __ pop(eax);
+ __ pop(edx);
+ break;
+ case Token::LT:
+ cc = less;
+ __ pop(eax);
+ __ pop(edx);
+ break;
+ case Token::GT:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = less;
+ __ pop(edx);
+ __ pop(eax);
+ break;
+ case Token::LTE:
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = greater_equal;
+ __ pop(edx);
+ __ pop(eax);
+ break;
+ case Token::GTE:
+ cc = greater_equal;
+ __ pop(eax);
+ __ pop(edx);
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+
+ // The comparison stub expects the smi vs. smi case to be handled
+ // before it is called.
+ Label slow_case;
+ __ mov(ecx, Operand(edx));
+ __ or_(ecx, Operand(eax));
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow_case, not_taken);
+ __ cmp(edx, Operand(eax));
+ __ j(cc, true_label_);
+ __ jmp(false_label_);
+
+ __ bind(&slow_case);
+ CompareStub stub(cc, strict);
+ __ CallStub(&stub);
+ __ test(eax, Operand(eax));
+ __ j(cc, true_label_);
+ __ jmp(false_label_);
+ }
+ }
+
+ // Convert current context to test context: Post-test code.
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ __ bind(&push_true);
+ __ push(Immediate(Factory::true_value()));
+ __ jmp(&done);
+ __ bind(&push_false);
+ __ push(Immediate(Factory::false_value()));
+ __ bind(&done);
+ break;
+
+ case Expression::kEffect:
+ __ bind(&done);
+ break;
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ __ bind(&push_true);
+ __ push(Immediate(Factory::true_value()));
+ __ jmp(saved_true);
+ break;
+
+ case Expression::kTestValue:
+ __ bind(&push_false);
+ __ push(Immediate(Factory::false_value()));
+ __ jmp(saved_false);
+ break;
+ }
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ // Convert current context to test context: End post-test code.
+}
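
The slow-case guard ORs the operands and tests the tag bit: both values are smis exactly when the OR of their low bits is zero. Runnable equivalent (assuming ia32's kSmiTagMask == 1):

    #include <cassert>
    #include <cstdint>

    static bool BothSmis(int32_t a, int32_t b) { return ((a | b) & 1) == 0; }

    int main() {
      int32_t smi_2 = 2 << 1, smi_3 = 3 << 1;  // tagged smis
      assert(BothSmis(smi_2, smi_3));          // compare directly, no stub
      assert(!BothSmis(smi_2, smi_3 | 1));     // tagged pointer: slow case
      return 0;
    }
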
+
+
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ Move(expr->context(), eax);
+}
+
+#undef __
+
+
} } // namespace v8::internal
diff --git a/src/ia32/frames-ia32.cc b/src/ia32/frames-ia32.cc
index dea439f..5c900be 100644
--- a/src/ia32/frames-ia32.cc
+++ b/src/ia32/frames-ia32.cc
@@ -56,19 +56,14 @@
state->fp = fp;
state->sp = sp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
- // Determine frame type.
- if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
- return EXIT_DEBUG;
- } else {
- return EXIT;
- }
+ return EXIT;
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
- // Exit frames on IA-32 do not contain any pointers. The arguments
- // are traversed as part of the expression stack of the calling
- // frame.
+ v->VisitPointer(&code_slot());
+ // The arguments are traversed as part of the expression stack of
+ // the calling frame.
}
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 3a7c86b..c3fe6c7 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -76,7 +76,7 @@
class ExitFrameConstants : public AllStatic {
public:
- static const int kDebugMarkOffset = -2 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize;
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 3aa3c34..6988fe0 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -31,6 +31,7 @@
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
+#include "utils.h"
namespace v8 {
namespace internal {
@@ -108,7 +109,7 @@
StringDictionary::kElementsStartIndex * kPointerSize;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ mov(r1, FieldOperand(name, String::kLengthOffset));
+ __ mov(r1, FieldOperand(name, String::kHashFieldOffset));
__ shr(r1, String::kHashShift);
if (i > 0) {
__ add(Operand(r1), Immediate(StringDictionary::GetProbeOffset(i)));
@@ -216,18 +217,6 @@
}
-#ifdef DEBUG
-// For use in assert below.
-static int TenToThe(int exponent) {
- ASSERT(exponent <= 9);
- ASSERT(exponent >= 1);
- int answer = 10;
- for (int i = 1; i < exponent; i++) answer *= 10;
- return answer;
-}
-#endif
-
-
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- esp[0] : return address
@@ -309,7 +298,7 @@
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
__ j(above_equal, &slow);
// Is the string an array index, with cached numeric value?
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+ __ mov(ebx, FieldOperand(eax, String::kHashFieldOffset));
__ test(ebx, Immediate(String::kIsArrayIndexMask));
__ j(not_zero, &index_string, not_taken);
@@ -324,20 +313,16 @@
__ mov(eax, Operand(ecx));
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
- // Array index string: If short enough use cache in length/hash field (ebx).
- // We assert that there are enough bits in an int32_t after the hash shift
- // bits have been subtracted to allow space for the length and the cached
- // array index.
+ // If the hash field contains an array index, pick it out. The assert checks
+ // that the constant for the maximum number of digits of an array index
+ // cached in the hash field does not conflict with the number of bits
+ // reserved for it.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << (String::kShortLengthShift - String::kHashShift)));
+ (1 << String::kArrayIndexValueBits));
__ bind(&index_string);
- const int kLengthFieldLimit =
- (String::kMaxCachedArrayIndexLength + 1) << String::kShortLengthShift;
- __ cmp(ebx, kLengthFieldLimit);
- __ j(above_equal, &slow);
__ mov(eax, Operand(ebx));
- __ and_(eax, (1 << String::kShortLengthShift) - 1);
- __ shr(eax, String::kLongLengthShift);
+ __ and_(eax, String::kArrayIndexHashMask);
+ __ shr(eax, String::kHashShift);
__ jmp(&index_int);
}
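
The reworked assert compares a decimal-digit bound against a bit-width bound. A runnable restatement (TenToThe now comes from utils.h; the implementation and both constant values below are stand-ins for illustration, not the real objects.h values):

    #include <cassert>

    static int TenToThe(int exponent) {
      int answer = 1;
      for (int i = 0; i < exponent; i++) answer *= 10;
      return answer;
    }

    int main() {
      const int kMaxCachedArrayIndexLength = 7;  // illustrative value
      const int kArrayIndexValueBits = 24;       // illustrative value
      // Every index of up to the max digit count must fit in the value bits.
      assert(TenToThe(kMaxCachedArrayIndexLength) < (1 << kArrayIndexValueBits));
      return 0;
    }
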
@@ -403,13 +388,13 @@
__ movsx_b(eax, Operand(ecx, eax, times_1, 0));
break;
case kExternalUnsignedByteArray:
- __ mov_b(eax, Operand(ecx, eax, times_1, 0));
+ __ movzx_b(eax, Operand(ecx, eax, times_1, 0));
break;
case kExternalShortArray:
__ movsx_w(eax, Operand(ecx, eax, times_2, 0));
break;
case kExternalUnsignedShortArray:
- __ mov_w(eax, Operand(ecx, eax, times_2, 0));
+ __ movzx_w(eax, Operand(ecx, eax, times_2, 0));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 08c4c0c..b91caa8 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -213,6 +213,13 @@
}
+void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
+ cmp(esp,
+ Operand::StaticVariable(ExternalReference::address_of_stack_limit()));
+ j(below, on_stack_overflow);
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::SaveRegistersToMemory(RegList regs) {
ASSERT((regs & ~kJSCallerSaved) == 0);
@@ -319,7 +326,7 @@
void MacroAssembler::FCmp() {
- if (CpuFeatures::IsSupported(CpuFeatures::CMOV)) {
+ if (CpuFeatures::IsSupported(CMOV)) {
fucomip();
ffree(0);
fincstp();
@@ -355,10 +362,7 @@
leave();
}
-
-void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
- ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
-
+void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
// Setup the frame structure on the stack.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
@@ -369,23 +373,24 @@
// Reserve room for entry stack pointer and push the debug marker.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // saved entry sp, patched before call
- push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+ if (mode == ExitFrame::MODE_DEBUG) {
+ push(Immediate(0));
+ } else {
+ push(Immediate(CodeObject()));
+ }
// Save the frame pointer and the context in top.
ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
ExternalReference context_address(Top::k_context_address);
mov(Operand::StaticVariable(c_entry_fp_address), ebp);
mov(Operand::StaticVariable(context_address), esi);
+}
- // Setup argc and argv in callee-saved registers.
- int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- mov(edi, Operand(eax));
- lea(esi, Operand(ebp, eax, times_4, offset));
-
+void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
// location. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// TODO(1243899): This should be symmetric to
// CopyRegistersFromStackToMemory() but it isn't! esp is assumed
// correct here, but computed for the other call. Very error
@@ -396,8 +401,8 @@
}
#endif
- // Reserve space for two arguments: argc and argv.
- sub(Operand(esp), Immediate(2 * kPointerSize));
+ // Reserve space for arguments.
+ sub(Operand(esp), Immediate(argc * kPointerSize));
// Get the required frame alignment for the OS.
static const int kFrameAlignment = OS::ActivationFrameAlignment();
@@ -411,15 +416,39 @@
}
-void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
+ EnterExitFramePrologue(mode);
+
+ // Setup argc and argv in callee-saved registers.
+ int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ mov(edi, Operand(eax));
+ lea(esi, Operand(ebp, eax, times_4, offset));
+
+ EnterExitFrameEpilogue(mode, 2);
+}
+
+
+void MacroAssembler::EnterApiExitFrame(ExitFrame::Mode mode,
+ int stack_space,
+ int argc) {
+ EnterExitFramePrologue(mode);
+
+ int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ lea(esi, Operand(ebp, (stack_space * kPointerSize) + offset));
+
+ EnterExitFrameEpilogue(mode, argc);
+}
+
+
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// It's okay to clobber register ebx below because we don't need
// the function pointer after this.
const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
- int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+ int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
lea(ebx, Operand(ebp, kOffset));
CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved);
}
@@ -658,6 +687,11 @@
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch) {
+ if (FLAG_debug_code) {
+ test(result_end, Immediate(kObjectAlignmentMask));
+ Check(zero, "Unaligned allocation in new space");
+ }
+
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
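
The new debug check asserts the updated allocation top stays object-aligned. The same test in plain C++ (assuming 4-byte object alignment on ia32, so the mask is 0x3):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kObjectAlignmentMask = 0x3;  // assumed ia32 value
      uintptr_t aligned_top = 0x10000;
      uintptr_t unaligned_top = 0x10002;
      assert((aligned_top & kObjectAlignmentMask) == 0);
      assert((unaligned_top & kObjectAlignmentMask) != 0);  // would hit Check
      return 0;
    }
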
@@ -791,6 +825,109 @@
}
+void MacroAssembler::AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ mov(scratch1, length);
+ ASSERT(kShortSize == 2);
+ shl(scratch1, 1);
+ add(Operand(scratch1), Immediate(kObjectAlignmentMask));
+ and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
+
+ // Allocate two byte string in new space.
+ AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
+ times_1,
+ scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(Factory::string_map()));
+ mov(FieldOperand(result, String::kLengthOffset), length);
+ mov(FieldOperand(result, String::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+}
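
The shl/add/and sequence rounds the character payload up to object alignment before AllocateInNewSpace adds the header. The same arithmetic, runnable (alignment mask assumed as above):

    #include <cassert>

    int main() {
      const int kObjectAlignmentMask = 3;  // assumed ia32 value
      const int kShortSize = 2;            // bytes per two-byte character
      int length = 5;
      int data_size =
          (length * kShortSize + kObjectAlignmentMask) & ~kObjectAlignmentMask;
      assert(data_size == 12);  // 10 bytes of characters rounded up to 12
      return 0;
    }
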
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ mov(scratch1, length);
+ ASSERT(kCharSize == 1);
+ add(Operand(scratch1), Immediate(kObjectAlignmentMask));
+ and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
+
+ // Allocate ASCII string in new space.
+ AllocateInNewSpace(SeqAsciiString::kHeaderSize,
+ times_1,
+ scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(Factory::ascii_string_map()));
+ mov(FieldOperand(result, String::kLengthOffset), length);
+ mov(FieldOperand(result, String::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+}
+
+
+void MacroAssembler::AllocateConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ // Allocate cons string object in new space.
+ AllocateInNewSpace(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map. The other fields are left uninitialized.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(Factory::cons_string_map()));
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ // Allocate ASCII cons string object in new space.
+ AllocateInNewSpace(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map. The other fields are left uninitialized.
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(Factory::cons_ascii_string_map()));
+}
+
+
void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
Register result,
Register op,
@@ -884,6 +1021,12 @@
}
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+ ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
+ jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
@@ -931,6 +1074,52 @@
}
+void MacroAssembler::PushHandleScope(Register scratch) {
+ // Push the number of extensions, smi-tagged so the gc will ignore it.
+ ExternalReference extensions_address =
+ ExternalReference::handle_scope_extensions_address();
+ mov(scratch, Operand::StaticVariable(extensions_address));
+ ASSERT_EQ(0, kSmiTag);
+ shl(scratch, kSmiTagSize);
+ push(scratch);
+ mov(Operand::StaticVariable(extensions_address), Immediate(0));
+ // Push next and limit pointers which will be wordsize aligned and
+ // hence automatically smi tagged.
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address();
+ push(Operand::StaticVariable(next_address));
+ ExternalReference limit_address =
+ ExternalReference::handle_scope_limit_address();
+ push(Operand::StaticVariable(limit_address));
+}
+
+
+void MacroAssembler::PopHandleScope(Register saved, Register scratch) {
+ ExternalReference extensions_address =
+ ExternalReference::handle_scope_extensions_address();
+ Label write_back;
+ mov(scratch, Operand::StaticVariable(extensions_address));
+ cmp(Operand(scratch), Immediate(0));
+ j(equal, &write_back);
+ // Calling a runtime function clobbers registers, so we save and
+ // restore the register we were asked not to change.
+ if (saved.is_valid()) push(saved);
+ CallRuntime(Runtime::kDeleteHandleScopeExtensions, 0);
+ if (saved.is_valid()) pop(saved);
+
+ bind(&write_back);
+ ExternalReference limit_address =
+ ExternalReference::handle_scope_limit_address();
+ pop(Operand::StaticVariable(limit_address));
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address();
+ pop(Operand::StaticVariable(next_address));
+ pop(scratch);
+ shr(scratch, kSmiTagSize);
+ mov(Operand::StaticVariable(extensions_address), scratch);
+}
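
The shl in PushHandleScope and the shr in PopHandleScope are a smi tag/untag pair: a smi-tagged count on the stack looks like a smi to the GC, which therefore never tries to treat it as a pointer. A minimal demonstration (same ia32 smi assumptions as earlier):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kSmiTagSize = 1;
      uint32_t extensions = 7;
      uint32_t on_stack = extensions << kSmiTagSize;    // what gets pushed
      assert((on_stack & 1) == 0);                      // GC-safe: a smi
      assert((on_stack >> kSmiTagSize) == extensions);  // PopHandleScope
      return 0;
    }
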
+
+
void MacroAssembler::JumpToRuntime(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
@@ -1117,6 +1306,26 @@
}
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ mov(dst, Operand(esi, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ // Load the function context (which is the incoming, outer context).
+ mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ for (int i = 1; i < context_chain_length; i++) {
+ mov(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ }
+ // The context may be an intermediate context, not a function context.
+ mov(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ } else { // Slot is in the current function context.
+ // The context may be an intermediate context, not a function context.
+ mov(dst, Operand(esi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ }
+}
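
A pointer-chasing sketch of the walk LoadContext performs (toy structs, not V8's Context layout; each hop below collapses the closure load and the context load into one field):

    #include <cassert>

    struct ToyContext {
      ToyContext* closure_context;  // CLOSURE_INDEX slot's function context
      ToyContext* fcontext;         // FCONTEXT_INDEX: nearest function context
    };

    static ToyContext* LoadContext(ToyContext* esi, int chain_length) {
      ToyContext* dst = esi;
      for (int i = 0; i < chain_length; i++) dst = dst->closure_context;
      return dst->fcontext;  // resolve a possibly intermediate context
    }

    int main() {
      ToyContext fn_ctx = {nullptr, nullptr};
      fn_ctx.fcontext = &fn_ctx;
      ToyContext inner = {&fn_ctx, &fn_ctx};
      assert(LoadContext(&inner, 1) == &fn_ctx);
      assert(LoadContext(&fn_ctx, 0) == &fn_ctx);
      return 0;
    }
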
+
+
void MacroAssembler::Ret() {
ret(0);
}
@@ -1184,11 +1393,15 @@
RecordComment(msg);
}
#endif
+ // Disable stub call restrictions to always allow calls to abort.
+ set_allow_stub_calls(true);
+
push(eax);
push(Immediate(p0));
push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
CallRuntime(Runtime::kAbort, 2);
// will not return here
+ int3();
}
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index a0a2428..a41d42e 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -69,6 +69,12 @@
#endif
// ---------------------------------------------------------------------------
+ // Stack limit support
+
+ // Do a simple test for stack overflow. This does not handle an overflow.
+ void StackLimitCheck(Label* on_stack_limit_hit);
+
+ // ---------------------------------------------------------------------------
// Activation frames
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
@@ -77,17 +83,21 @@
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
- // Enter specific kind of exit frame; either EXIT or
- // EXIT_DEBUG. Expects the number of arguments in register eax and
+ // Enter specific kind of exit frame; either in normal or debug mode.
+ // Expects the number of arguments in register eax and
// sets up the number of arguments in register edi and the pointer
// to the first argument in register esi.
- void EnterExitFrame(StackFrame::Type type);
+ void EnterExitFrame(ExitFrame::Mode mode);
+
+ void EnterApiExitFrame(ExitFrame::Mode mode, int stack_space, int argc);
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
// argument in register esi.
- void LeaveExitFrame(StackFrame::Type type);
+ void LeaveExitFrame(ExitFrame::Mode mode);
+ // Find the function context up the context chain.
+ void LoadContext(Register dst, int context_chain_length);
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -173,7 +183,7 @@
// scratch can be passed as no_reg in which case an additional object
// reference will be added to the reloc info. The returned pointers in result
// and result_end have not yet been tagged as heap objects. If
- // result_contains_top_on_entry is true the contnt of result is known to be
+ // result_contains_top_on_entry is true the content of result is known to be
// the allocation top on entry (could be result_end from a previous call to
// AllocateInNewSpace). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
@@ -215,6 +225,32 @@
Register scratch2,
Label* gc_required);
+ // Allocate a sequential string. All the header fields of the string object
+ // are initialized.
+ void AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+
+ // Allocate a raw cons string object. Only the map field of the result is
+ // initialized.
+ void AllocateConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -252,6 +288,9 @@
// Call a code stub.
void CallStub(CodeStub* stub);
+ // Tail call a code stub (jump).
+ void TailCallStub(CodeStub* stub);
+
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
@@ -269,6 +308,12 @@
int num_arguments,
int result_size);
+ void PushHandleScope(Register scratch);
+
+ // Pops a handle scope using the specified scratch register and
+ // ensuring that the saved register, if it is not no_reg, is left unchanged.
+ void PopHandleScope(Register saved, Register scratch);
+
// Jump to a runtime routine.
void JumpToRuntime(const ExternalReference& ext);
@@ -346,6 +391,9 @@
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
+ void EnterExitFramePrologue(ExitFrame::Mode mode);
+ void EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc);
+
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
Register result_end,
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 7af4e89..2e13d8a 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -598,10 +598,10 @@
Label stack_limit_hit;
Label stack_ok;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
__ mov(ecx, esp);
- __ sub(ecx, Operand::StaticVariable(stack_guard_limit));
+ __ sub(ecx, Operand::StaticVariable(stack_limit));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit, not_taken);
// Check if there is room for the variable number of registers above
@@ -1081,9 +1081,9 @@
void RegExpMacroAssemblerIA32::CheckPreemption() {
// Check for preemption.
Label no_preempt;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above, &no_preempt, taken);
SafeCall(&check_preempt_label_);
@@ -1093,17 +1093,15 @@
void RegExpMacroAssemblerIA32::CheckStackLimit() {
- if (FLAG_check_stack) {
- Label no_stack_overflow;
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit();
- __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
- __ j(above, &no_stack_overflow);
+ Label no_stack_overflow;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit();
+ __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
+ __ j(above, &no_stack_overflow);
- SafeCall(&stack_overflow_label_);
+ SafeCall(&stack_overflow_label_);
- __ bind(&no_stack_overflow);
- }
+ __ bind(&no_stack_overflow);
}
@@ -1163,10 +1161,6 @@
}
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- __ int3(); // Unused on ia32.
-}
-
#undef __
#endif // V8_NATIVE_REGEXP
diff --git a/src/ia32/register-allocator-ia32.cc b/src/ia32/register-allocator-ia32.cc
index 2914960..0bad87d 100644
--- a/src/ia32/register-allocator-ia32.cc
+++ b/src/ia32/register-allocator-ia32.cc
@@ -42,7 +42,7 @@
Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
ASSERT(fresh.is_valid());
if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
- CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
+ CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle());
} else {
CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
Immediate(handle()));
@@ -64,7 +64,7 @@
} else {
ASSERT(is_constant());
if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
- CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
+ CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle());
} else {
CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
Immediate(handle()));
diff --git a/src/ia32/simulator-ia32.h b/src/ia32/simulator-ia32.h
index 8fa4287..ce7ed0e 100644
--- a/src/ia32/simulator-ia32.h
+++ b/src/ia32/simulator-ia32.h
@@ -43,6 +43,12 @@
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return c_limit;
}
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch() { }
};
// Call the generated regexp code directly. The entry function pointer should
@@ -50,4 +56,7 @@
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
entry(p0, p1, p2, p3, p4, p5, p6)
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
+
#endif // V8_IA32_SIMULATOR_IA32_H_
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index ca4e142..425c51d 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -126,7 +126,7 @@
__ j(zero, &miss, not_taken);
// Get the map of the receiver and compute the hash.
- __ mov(scratch, FieldOperand(name, String::kLengthOffset));
+ __ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
__ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, flags);
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
@@ -135,7 +135,7 @@
ProbeTable(masm, flags, kPrimary, name, scratch, extra);
// Primary miss: Compute hash for secondary probe.
- __ mov(scratch, FieldOperand(name, String::kLengthOffset));
+ __ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
__ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, flags);
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
@@ -234,13 +234,9 @@
// scratch register.
GenerateStringCheck(masm, receiver, scratch, miss, &check_wrapper);
- // Load length directly from the string.
+ // Load length from the string and convert to a smi.
__ bind(&load_length);
- __ and_(scratch, kStringSizeMask);
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
- // ecx is also the receiver.
- __ lea(ecx, Operand(scratch, String::kLongLengthShift));
- __ shr(eax); // ecx is implicit shift register.
__ shl(eax, kSmiTagSize);
__ ret(0);
@@ -776,20 +772,40 @@
CheckPrototypes(object, receiver, holder,
scratch1, scratch2, name, miss);
- // Push the arguments on the JS stack of the caller.
- __ pop(scratch2); // remove return address
+ Handle<AccessorInfo> callback_handle(callback);
+
+ Register other = reg.is(scratch1) ? scratch2 : scratch1;
+ __ EnterInternalFrame();
+ __ PushHandleScope(other);
+ // Push the stack address where the list of arguments ends.
+ __ mov(other, esp);
+ __ sub(Operand(other), Immediate(2 * kPointerSize));
+ __ push(other);
__ push(receiver); // receiver
__ push(reg); // holder
- __ mov(reg, Immediate(Handle<AccessorInfo>(callback))); // callback data
- __ push(reg);
- __ push(FieldOperand(reg, AccessorInfo::kDataOffset));
+ __ mov(other, Immediate(callback_handle));
+ __ push(other);
+ __ push(FieldOperand(other, AccessorInfo::kDataOffset)); // data
__ push(name_reg); // name
- __ push(scratch2); // restore return address
+ // Save a pointer to where we pushed the arguments pointer.
+ // This will be passed as the const Arguments& to the C++ callback.
+ __ mov(eax, esp);
+ __ add(Operand(eax), Immediate(5 * kPointerSize));
+ __ mov(ebx, esp);
- // Do tail-call to the runtime system.
- ExternalReference load_callback_property =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(load_callback_property, 5, 1);
+ // Do call through the api.
+ ASSERT_EQ(6, ApiGetterEntryStub::kStackSpace);
+ Address getter_address = v8::ToCData<Address>(callback->getter());
+ ApiFunction fun(getter_address);
+ ApiGetterEntryStub stub(callback_handle, &fun);
+ __ CallStub(&stub);
+
+ // We need to avoid using eax since that now holds the result.
+ Register tmp = other.is(eax) ? reg : other;
+ __ PopHandleScope(eax, tmp);
+ __ LeaveInternalFrame();
+
+ __ ret(0);
}
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index 980cec8..e770cdd 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -75,10 +75,7 @@
case FrameElement::CONSTANT:
if (cgen()->IsUnsafeSmi(element.handle())) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- cgen()->LoadUnsafeSmi(temp.reg(), element.handle());
- __ mov(Operand(ebp, fp_relative(index)), temp.reg());
+ cgen()->StoreUnsafeSmiToLocal(fp_relative(index), element.handle());
} else {
__ Set(Operand(ebp, fp_relative(index)),
Immediate(element.handle()));
@@ -127,10 +124,7 @@
case FrameElement::CONSTANT:
if (cgen()->IsUnsafeSmi(element.handle())) {
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- cgen()->LoadUnsafeSmi(temp.reg(), element.handle());
- __ push(temp.reg());
+ cgen()->PushUnsafeSmi(element.handle());
} else {
__ push(Immediate(element.handle()));
}
@@ -161,7 +155,7 @@
// on the stack.
int start = Min(begin, stack_pointer_ + 1);
- // Emit normal 'push' instructions for elements above stack pointer
+ // Emit normal push instructions for elements above stack pointer
// and use mov instructions if we are below stack pointer.
for (int i = start; i <= end; i++) {
if (!elements_[i].is_synced()) {
@@ -199,7 +193,7 @@
// Emit a move.
if (element.is_constant()) {
if (cgen()->IsUnsafeSmi(element.handle())) {
- cgen()->LoadUnsafeSmi(fresh.reg(), element.handle());
+ cgen()->MoveUnsafeSmi(fresh.reg(), element.handle());
} else {
__ Set(fresh.reg(), Immediate(element.handle()));
}
@@ -300,7 +294,7 @@
if (!source.is_synced()) {
if (cgen()->IsUnsafeSmi(source.handle())) {
esi_caches = i;
- cgen()->LoadUnsafeSmi(esi, source.handle());
+ cgen()->MoveUnsafeSmi(esi, source.handle());
__ mov(Operand(ebp, fp_relative(i)), esi);
} else {
__ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle()));
@@ -408,7 +402,7 @@
case FrameElement::CONSTANT:
if (cgen()->IsUnsafeSmi(source.handle())) {
- cgen()->LoadUnsafeSmi(target_reg, source.handle());
+ cgen()->MoveUnsafeSmi(target_reg, source.handle());
} else {
__ Set(target_reg, Immediate(source.handle()));
}
diff --git a/src/ic.cc b/src/ic.cc
index c12dba7..2779356 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -126,7 +126,8 @@
// Return the address in the original code. This is the place where
// the call which has been overwritten by the DebugBreakXXX resides
// and the place where the inline cache system should look.
- int delta = original_code->instruction_start() - code->instruction_start();
+ intptr_t delta =
+ original_code->instruction_start() - code->instruction_start();
return addr + delta;
}
#endif
diff --git a/src/interpreter-irregexp.cc b/src/interpreter-irregexp.cc
index ae914d3..a904447 100644
--- a/src/interpreter-irregexp.cc
+++ b/src/interpreter-irregexp.cc
@@ -117,17 +117,17 @@
}
-#define BYTECODE(name) \
- case BC_##name: \
- TraceInterpreter(code_base, \
- pc, \
- backtrack_sp - backtrack_stack_base, \
- current, \
- current_char, \
- BC_##name##_LENGTH, \
+#define BYTECODE(name) \
+ case BC_##name: \
+ TraceInterpreter(code_base, \
+ pc, \
+ static_cast<int>(backtrack_sp - backtrack_stack_base), \
+ current, \
+ current_char, \
+ BC_##name##_LENGTH, \
#name);
#else
-#define BYTECODE(name) \
+#define BYTECODE(name) \
case BC_##name:
#endif
@@ -250,13 +250,14 @@
pc += BC_SET_CP_TO_REGISTER_LENGTH;
break;
BYTECODE(SET_REGISTER_TO_SP)
- registers[insn >> BYTECODE_SHIFT] = backtrack_sp - backtrack_stack_base;
+ registers[insn >> BYTECODE_SHIFT] =
+ static_cast<int>(backtrack_sp - backtrack_stack_base);
pc += BC_SET_REGISTER_TO_SP_LENGTH;
break;
BYTECODE(SET_SP_TO_REGISTER)
backtrack_sp = backtrack_stack_base + registers[insn >> BYTECODE_SHIFT];
backtrack_stack_space = backtrack_stack.max_size() -
- (backtrack_sp - backtrack_stack_base);
+ static_cast<int>(backtrack_sp - backtrack_stack_base);
pc += BC_SET_SP_TO_REGISTER_LENGTH;
break;
BYTECODE(POP_CP)
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index c77f32d..04d1944 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -2432,16 +2432,19 @@
}
-void TextNode::MakeCaseIndependent() {
+void TextNode::MakeCaseIndependent(bool is_ascii) {
int element_count = elms_->length();
for (int i = 0; i < element_count; i++) {
TextElement elm = elms_->at(i);
if (elm.type == TextElement::CHAR_CLASS) {
RegExpCharacterClass* cc = elm.data.u_char_class;
+ // None of the standard character classes is different in the case-
+ // independent case, and it slows us down if we don't know that.
+ if (cc->is_standard()) continue;
ZoneList<CharacterRange>* ranges = cc->ranges();
int range_count = ranges->length();
- for (int i = 0; i < range_count; i++) {
- ranges->at(i).AddCaseEquivalents(ranges);
+ for (int j = 0; j < range_count; j++) {
+ ranges->at(j).AddCaseEquivalents(ranges, is_ascii);
}
}
}
@@ -3912,19 +3915,31 @@
}
-void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges) {
+static void AddUncanonicals(ZoneList<CharacterRange>* ranges,
+ int bottom,
+ int top);
+
+
+void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
+ bool is_ascii) {
+ uc16 bottom = from();
+ uc16 top = to();
+ if (is_ascii) {
+ if (bottom > String::kMaxAsciiCharCode) return;
+ if (top > String::kMaxAsciiCharCode) top = String::kMaxAsciiCharCode;
+ }
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- if (IsSingleton()) {
+ if (top == bottom) {
// If this is a singleton we just expand the one character.
- int length = uncanonicalize.get(from(), '\0', chars);
+ int length = uncanonicalize.get(bottom, '\0', chars);
for (int i = 0; i < length; i++) {
uc32 chr = chars[i];
- if (chr != from()) {
+ if (chr != bottom) {
ranges->Add(CharacterRange::Singleton(chars[i]));
}
}
- } else if (from() <= kRangeCanonicalizeMax &&
- to() <= kRangeCanonicalizeMax) {
+ } else if (bottom <= kRangeCanonicalizeMax &&
+ top <= kRangeCanonicalizeMax) {
// If this is a range we expand the characters block by block,
// expanding contiguous subranges (blocks) one at a time.
// The approach is as follows. For a given start character we
@@ -3943,14 +3958,14 @@
// completely contained in a block we do this for all the blocks
// covered by the range.
unibrow::uchar range[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- // First, look up the block that contains the 'from' character.
- int length = canonrange.get(from(), '\0', range);
+ // First, look up the block that contains the 'bottom' character.
+ int length = canonrange.get(bottom, '\0', range);
if (length == 0) {
- range[0] = from();
+ range[0] = bottom;
} else {
ASSERT_EQ(1, length);
}
- int pos = from();
+ int pos = bottom;
// The start of the current block. Note that except for the first
// iteration 'start' is always equal to 'pos'.
int start;
@@ -3961,10 +3976,10 @@
} else {
start = pos;
}
- // Then we add the ranges on at a time, incrementing the current
+ // Then we add the ranges one at a time, incrementing the current
// position to be after the last block each time. The position
// always points to the start of a block.
- while (pos < to()) {
+ while (pos < top) {
length = canonrange.get(start, '\0', range);
if (length == 0) {
range[0] = start;
@@ -3975,20 +3990,122 @@
// The start point of a block contains the distance to the end
// of the range.
int block_end = start + (range[0] & kPayloadMask) - 1;
- int end = (block_end > to()) ? to() : block_end;
+ int end = (block_end > top) ? top : block_end;
length = uncanonicalize.get(start, '\0', range);
for (int i = 0; i < length; i++) {
uc32 c = range[i];
uc16 range_from = c + (pos - start);
uc16 range_to = c + (end - start);
- if (!(from() <= range_from && range_to <= to())) {
+ if (!(bottom <= range_from && range_to <= top)) {
ranges->Add(CharacterRange(range_from, range_to));
}
}
start = pos = block_end + 1;
}
} else {
- // TODO(plesner) when we've fixed the 2^11 bug in unibrow.
+ // Unibrow ranges don't work for high characters due to the "2^11 bug".
+ // Therefore we do something dumber for these ranges.
+ AddUncanonicals(ranges, bottom, top);
+ }
+}
+
+
+static void AddUncanonicals(ZoneList<CharacterRange>* ranges,
+ int bottom,
+ int top) {
+ unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+ // Zones with no case mappings. There is a DEBUG-mode loop to assert that
+ // this table is correct.
+ // 0x0600 - 0x0fff
+ // 0x1100 - 0x1cff
+ // 0x2000 - 0x20ff
+ // 0x2200 - 0x23ff
+ // 0x2500 - 0x2bff
+ // 0x2e00 - 0xa5ff
+ // 0xa800 - 0xfaff
+ // 0xfc00 - 0xfeff
+ const int boundary_count = 18;
+ // The ASCII boundary and the kRangeCanonicalizeMax boundary are also in this
+ // array. This is to split up big ranges, not because they actually denote
+ // a case-mapping-free zone.
+ ASSERT(CharacterRange::kRangeCanonicalizeMax < 0x600);
+ const int kFirstRealCaselessZoneIndex = 2;
+ int boundaries[] = {0x80, CharacterRange::kRangeCanonicalizeMax,
+ 0x600, 0x1000, 0x1100, 0x1d00, 0x2000, 0x2100, 0x2200, 0x2400, 0x2500,
+ 0x2c00, 0x2e00, 0xa600, 0xa800, 0xfb00, 0xfc00, 0xff00};
+
+ // The special ASCII rule from the spec can save us some work here.
+ if (bottom == 0x80 && top == 0xffff) return;
+
+ // We have optimized support for this range.
+ if (top <= CharacterRange::kRangeCanonicalizeMax) {
+ CharacterRange range(bottom, top);
+ range.AddCaseEquivalents(ranges, false);
+ return;
+ }
+
+ // Split up very large ranges. This helps remove ranges where there are no
+ // case mappings.
+ for (int i = 0; i < boundary_count; i++) {
+ if (bottom < boundaries[i] && top >= boundaries[i]) {
+ AddUncanonicals(ranges, bottom, boundaries[i] - 1);
+ AddUncanonicals(ranges, boundaries[i], top);
+ return;
+ }
+ }
+
+ // If we are completely in a zone with no case mappings then we are done.
+ // We start at kFirstRealCaselessZoneIndex so as not to treat the ASCII
+ // range as a caseless zone.
+ for (int i = kFirstRealCaselessZoneIndex; i < boundary_count; i += 2) {
+ if (bottom >= boundaries[i] && top < boundaries[i + 1]) {
+#ifdef DEBUG
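+ // Check the assumption that this zone really has no case mappings:
+ // every character must uncanonicalize only to itself.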
+ for (int j = bottom; j <= top; j++) {
+ unsigned current_char = j;
+ int length = uncanonicalize.get(current_char, '\0', chars);
+ for (int k = 0; k < length; k++) {
+ ASSERT(chars[k] == current_char);
+ }
+ }
+#endif
+ return;
+ }
+ }
+
+ // Step through the range finding equivalent characters.
+ ZoneList<unibrow::uchar>* characters = new ZoneList<unibrow::uchar>(100);
+ for (int i = bottom; i <= top; i++) {
+ int length = uncanonicalize.get(i, '\0', chars);
+ for (int j = 0; j < length; j++) {
+ uc32 chr = chars[j];
+ if (chr != i && (chr < bottom || chr > top)) {
+ characters->Add(chr);
+ }
+ }
+ }
+
+ // Step through the equivalent characters finding simple ranges and
+ // adding ranges to the character class.
+ if (characters->length() > 0) {
+ int new_from = characters->at(0);
+ int new_to = new_from;
+ for (int i = 1; i < characters->length(); i++) {
+ int chr = characters->at(i);
+ if (chr == new_to + 1) {
+ new_to++;
+ } else {
+ if (new_to == new_from) {
+ ranges->Add(CharacterRange::Singleton(new_from));
+ } else {
+ ranges->Add(CharacterRange(new_from, new_to));
+ }
+ new_from = new_to = chr;
+ }
+ }
+ if (new_to == new_from) {
+ ranges->Add(CharacterRange::Singleton(new_from));
+ } else {
+ ranges->Add(CharacterRange(new_from, new_to));
+ }
}
}
@@ -4234,7 +4351,7 @@
void Analysis::VisitText(TextNode* that) {
if (ignore_case_) {
- that->MakeCaseIndependent();
+ that->MakeCaseIndependent(is_ascii_);
}
EnsureAnalyzed(that->on_success());
if (!has_failed()) {
@@ -4452,7 +4569,7 @@
}
}
data->node = node;
- Analysis analysis(ignore_case);
+ Analysis analysis(ignore_case, is_ascii);
analysis.EnsureAnalyzed(node);
if (analysis.has_failed()) {
const char* error_message = analysis.error_message();
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 84f8d98..b681119 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -200,7 +200,7 @@
bool is_valid() { return from_ <= to_; }
bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
bool IsSingleton() { return (from_ == to_); }
- void AddCaseEquivalents(ZoneList<CharacterRange>* ranges);
+ void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_ascii);
static void Split(ZoneList<CharacterRange>* base,
Vector<const uc16> overlay,
ZoneList<CharacterRange>** included,
@@ -703,7 +703,7 @@
int characters_filled_in,
bool not_at_start);
ZoneList<TextElement>* elements() { return elms_; }
- void MakeCaseIndependent();
+ void MakeCaseIndependent(bool is_ascii);
virtual int GreedyLoopTextLength();
virtual TextNode* Clone() {
TextNode* result = new TextNode(*this);
@@ -1212,8 +1212,10 @@
// +-------+ +------------+
class Analysis: public NodeVisitor {
public:
- explicit Analysis(bool ignore_case)
- : ignore_case_(ignore_case), error_message_(NULL) { }
+ Analysis(bool ignore_case, bool is_ascii)
+ : ignore_case_(ignore_case),
+ is_ascii_(is_ascii),
+ error_message_(NULL) { }
void EnsureAnalyzed(RegExpNode* node);
#define DECLARE_VISIT(Type) \
@@ -1232,6 +1234,7 @@
}
private:
bool ignore_case_;
+ bool is_ascii_;
const char* error_message_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
diff --git a/src/list.h b/src/list.h
index 25211d9..aff63c3 100644
--- a/src/list.h
+++ b/src/list.h
@@ -48,6 +48,7 @@
class List {
public:
+ List() { Initialize(0); }
INLINE(explicit List(int capacity)) { Initialize(capacity); }
INLINE(~List()) { DeleteData(data_); }
@@ -58,7 +59,9 @@
Initialize(0);
}
- INLINE(void* operator new(size_t size)) { return P::New(size); }
+ INLINE(void* operator new(size_t size)) {
+ return P::New(static_cast<int>(size));
+ }
INLINE(void operator delete(void* p, size_t)) { return P::Delete(p); }
// Returns a reference to the element at index i. This reference is
diff --git a/src/log-inl.h b/src/log-inl.h
index 1844d2b..1500252 100644
--- a/src/log-inl.h
+++ b/src/log-inl.h
@@ -55,7 +55,7 @@
}
}
-VMState::VMState(StateTag state) : disabled_(true) {
+VMState::VMState(StateTag state) : disabled_(true), external_callback_(NULL) {
if (!Logger::is_logging()) {
return;
}
diff --git a/src/log-utils.cc b/src/log-utils.cc
index f327a0a..fd95604 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -155,7 +155,7 @@
ASSERT(!IsEnabled());
output_buffer_ = new LogDynamicBuffer(
kDynamicBufferBlockSize, kMaxDynamicBufferSize,
- kDynamicBufferSeal, strlen(kDynamicBufferSeal));
+ kDynamicBufferSeal, StrLength(kDynamicBufferSeal));
Write = WriteToMemory;
Init();
}
@@ -195,7 +195,7 @@
// Find previous log line boundary.
char* end_pos = dest_buf + actual_size - 1;
while (end_pos >= dest_buf && *end_pos != '\n') --end_pos;
- actual_size = end_pos - dest_buf + 1;
+ actual_size = static_cast<int>(end_pos - dest_buf + 1);
ASSERT(actual_size <= max_size);
return actual_size;
}
@@ -352,7 +352,7 @@
void LogMessageBuilder::WriteCStringToLogFile(const char* str) {
- const int len = strlen(str);
+ const int len = StrLength(str);
const int written = Log::Write(str, len);
if (written != len && write_failure_handler != NULL) {
write_failure_handler();
@@ -461,7 +461,7 @@
--data_ptr;
}
const intptr_t truncated_len = prev_end - prev_ptr;
- const int copy_from_pos = data_ptr - data.start();
+ const int copy_from_pos = static_cast<int>(data_ptr - data.start());
// Check if the length of compressed tail is enough.
if (truncated_len <= kMaxBackwardReferenceSize
&& truncated_len <= GetBackwardReferenceSize(distance, copy_from_pos)) {
@@ -493,7 +493,7 @@
prev_record->start() + unchanged_len, best.backref_size + 1);
PrintBackwardReference(backref, best.distance, best.copy_from_pos);
ASSERT(strlen(backref.start()) - best.backref_size == 0);
- prev_record->Truncate(unchanged_len + best.backref_size);
+ prev_record->Truncate(static_cast<int>(unchanged_len + best.backref_size));
}
return true;
}
diff --git a/src/log-utils.h b/src/log-utils.h
index 117f098..3e25b0e 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -129,9 +129,10 @@
// Implementation of writing to a log file.
static int WriteToFile(const char* msg, int length) {
ASSERT(output_handle_ != NULL);
- int rv = fwrite(msg, 1, length, output_handle_);
- ASSERT(length == rv);
- return rv;
+ size_t rv = fwrite(msg, 1, length, output_handle_);
+ ASSERT(static_cast<size_t>(length) == rv);
+ USE(rv);
+ return length;
}
// Implementation of writing to a memory buffer.
diff --git a/src/log.cc b/src/log.cc
index d1d9a31..bbce926 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -30,6 +30,7 @@
#include "v8.h"
#include "bootstrapper.h"
+#include "global-handles.h"
#include "log.h"
#include "macro-assembler.h"
#include "serialize.h"
@@ -125,6 +126,9 @@
bool overflow_; // Tells whether a buffer overflow has occurred.
Semaphore* buffer_semaphore_; // Semaphore used for buffer synchronization.
+ // Tells whether the profiler is engaged, that is, the processing thread is started.
+ bool engaged_;
+
// Tells whether worker thread should continue running.
bool running_;
@@ -151,12 +155,18 @@
return;
}
+ int i = 0;
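+ // If we are inside an external (API) callback it has no JS frame on the
+ // stack, so record the callback's address as the topmost sample entry.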
+ const Address callback = Logger::current_state_ != NULL ?
+ Logger::current_state_->external_callback() : NULL;
+ if (callback != NULL) {
+ sample->stack[i++] = callback;
+ }
+
SafeStackTraceFrameIterator it(
reinterpret_cast<Address>(sample->fp),
reinterpret_cast<Address>(sample->sp),
reinterpret_cast<Address>(sample->sp),
js_entry_sp);
- int i = 0;
while (!it.done() && i < TickSample::kMaxFramesCount) {
sample->stack[i++] = it.frame()->pc();
it.Advance();
@@ -243,17 +253,25 @@
//
// Profiler implementation.
//
-Profiler::Profiler() {
- buffer_semaphore_ = OS::CreateSemaphore(0);
- head_ = 0;
- tail_ = 0;
- overflow_ = false;
- running_ = false;
+Profiler::Profiler()
+ : head_(0),
+ tail_(0),
+ overflow_(false),
+ buffer_semaphore_(OS::CreateSemaphore(0)),
+ engaged_(false),
+ running_(false) {
}
void Profiler::Engage() {
- OS::LogSharedLibraryAddresses();
+ if (engaged_) return;
+ engaged_ = true;
+
+ // TODO(mnaganov): This is actually "Chromium" mode. Flags need to be revised.
+ // http://code.google.com/p/v8/issues/detail?id=487
+ if (!FLAG_prof_lazy) {
+ OS::LogSharedLibraryAddresses();
+ }
// Start thread processing the profiler buffer.
running_ = true;
@@ -268,6 +286,8 @@
void Profiler::Disengage() {
+ if (!engaged_) return;
+
// Stop receiving ticks.
Logger::ticker_->ClearProfiler();
@@ -660,6 +680,55 @@
#endif // ENABLE_LOGGING_AND_PROFILING
+#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::CallbackEventInternal(const char* prefix, const char* name,
+ Address entry_point) {
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
+ LogMessageBuilder msg;
+ msg.Append("%s,%s,",
+ log_events_[CODE_CREATION_EVENT], log_events_[CALLBACK_TAG]);
+ msg.AppendAddress(entry_point);
+ msg.Append(",1,\"%s%s\"", prefix, name);
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
+ msg.WriteToLogFile();
+}
+#endif
+
+
+void Logger::CallbackEvent(String* name, Address entry_point) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
+ SmartPointer<char> str =
+ name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ CallbackEventInternal("", *str, entry_point);
+#endif
+}
+
+
+void Logger::GetterCallbackEvent(String* name, Address entry_point) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
+ SmartPointer<char> str =
+ name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ CallbackEventInternal("get ", *str, entry_point);
+#endif
+}
+
+
+void Logger::SetterCallbackEvent(String* name, Address entry_point) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!Log::IsEnabled() || !FLAG_log_code) return;
+ SmartPointer<char> str =
+ name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ CallbackEventInternal("set ", *str, entry_point);
+#endif
+}
+
+
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
const char* comment) {
@@ -902,8 +971,9 @@
// Event starts with comma, so we don't have it in the format string.
static const char* event_text = "heap-js-ret-item,%s";
// We take placeholder strings into account, but it's OK to be conservative.
- static const int event_text_len = strlen(event_text);
- const int cons_len = strlen(constructor), event_len = strlen(event);
+ static const int event_text_len = StrLength(event_text);
+ const int cons_len = StrLength(constructor);
+ const int event_len = StrLength(event);
int pos = 0;
// Retainer lists can be long. We may need to split them into multiple events.
do {
@@ -1053,9 +1123,11 @@
}
if (modules_to_enable & PROFILER_MODULE_CPU) {
if (FLAG_prof_lazy) {
+ profiler_->Engage();
LOG(UncheckedStringEvent("profiler", "resume"));
FLAG_log_code = true;
LogCompiledFunctions();
+ LogAccessorCallbacks();
if (!FLAG_sliding_state_window) ticker_->Start();
}
profiler_->resume();
@@ -1106,6 +1178,48 @@
}
+void Logger::LogCodeObject(Object* object) {
+ if (FLAG_log_code) {
+ Code* code_object = Code::cast(object);
+ LogEventsAndTags tag = Logger::STUB_TAG;
+ const char* description = "Unknown code from the snapshot";
+ switch (code_object->kind()) {
+ case Code::FUNCTION:
+ return; // We log this later using LogCompiledFunctions.
+ case Code::STUB:
+ description = CodeStub::MajorName(code_object->major_key());
+ tag = Logger::STUB_TAG;
+ break;
+ case Code::BUILTIN:
+ description = "A builtin from the snapshot";
+ tag = Logger::BUILTIN_TAG;
+ break;
+ case Code::KEYED_LOAD_IC:
+ description = "A keyed load IC from the snapshot";
+ tag = Logger::KEYED_LOAD_IC_TAG;
+ break;
+ case Code::LOAD_IC:
+ description = "A load IC from the snapshot";
+ tag = Logger::LOAD_IC_TAG;
+ break;
+ case Code::STORE_IC:
+ description = "A store IC from the snapshot";
+ tag = Logger::STORE_IC_TAG;
+ break;
+ case Code::KEYED_STORE_IC:
+ description = "A keyed store IC from the snapshot";
+ tag = Logger::KEYED_STORE_IC_TAG;
+ break;
+ case Code::CALL_IC:
+ description = "A call IC from the snapshot";
+ tag = Logger::CALL_IC_TAG;
+ break;
+ }
+ LOG(CodeCreateEvent(tag, code_object, description));
+ }
+}
+
+
void Logger::LogCompiledFunctions() {
HandleScope scope;
const int compiled_funcs_count = EnumerateCompiledFunctions(NULL);
@@ -1134,16 +1248,52 @@
LOG(CodeCreateEvent(Logger::SCRIPT_TAG,
shared->code(), *script_name));
}
- continue;
+ } else {
+ LOG(CodeCreateEvent(
+ Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
}
+ } else if (shared->function_data()->IsFunctionTemplateInfo()) {
+ // API function.
+ FunctionTemplateInfo* fun_data =
+ FunctionTemplateInfo::cast(shared->function_data());
+ Object* raw_call_data = fun_data->call_code();
+ if (!raw_call_data->IsUndefined()) {
+ CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
+ Object* callback_obj = call_data->callback();
+ Address entry_point = v8::ToCData<Address>(callback_obj);
+ LOG(CallbackEvent(*func_name, entry_point));
+ }
+ } else {
+ LOG(CodeCreateEvent(
+ Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
}
- // If no script or script has no name.
- LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
}
DeleteArray(sfis);
}
+
+void Logger::LogAccessorCallbacks() {
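+ // Walk the heap and emit getter/setter callback events for every
+ // AccessorInfo object that has a string name.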
+ AssertNoAllocation no_alloc;
+ HeapIterator iterator;
+ while (iterator.has_next()) {
+ HeapObject* obj = iterator.next();
+ ASSERT(obj != NULL);
+ if (!obj->IsAccessorInfo()) continue;
+ AccessorInfo* ai = AccessorInfo::cast(obj);
+ if (!ai->name()->IsString()) continue;
+ String* name = String::cast(ai->name());
+ Address getter_entry = v8::ToCData<Address>(ai->getter());
+ if (getter_entry != 0) {
+ LOG(GetterCallbackEvent(name, getter_entry));
+ }
+ Address setter_entry = v8::ToCData<Address>(ai->setter());
+ if (setter_entry != 0) {
+ LOG(SetterCallbackEvent(name, setter_entry));
+ }
+ }
+}
+
#endif
@@ -1245,7 +1395,9 @@
} else {
is_logging_ = true;
}
- profiler_->Engage();
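+ // With --prof-lazy the profiler is engaged lazily, when profiling is
+ // actually resumed.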
+ if (!FLAG_prof_lazy) {
+ profiler_->Engage();
+ }
}
LogMessageBuilder::set_write_failure_handler(StopLoggingAndProfiling);
diff --git a/src/log.h b/src/log.h
index 13d45d2..4d5acce 100644
--- a/src/log.h
+++ b/src/log.h
@@ -91,15 +91,20 @@
class VMState BASE_EMBEDDED {
#ifdef ENABLE_LOGGING_AND_PROFILING
public:
- inline explicit VMState(StateTag state);
+ inline VMState(StateTag state);
inline ~VMState();
StateTag state() { return state_; }
+ Address external_callback() { return external_callback_; }
+ void set_external_callback(Address external_callback) {
+ external_callback_ = external_callback;
+ }
private:
bool disabled_;
StateTag state_;
VMState* previous_;
+ Address external_callback_;
#else
public:
explicit VMState(StateTag state) {}
@@ -122,6 +127,7 @@
V(CALL_MISS_TAG, "CallMiss", "cm") \
V(CALL_NORMAL_TAG, "CallNormal", "cn") \
V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic", "cpm") \
+ V(CALLBACK_TAG, "Callback", "cb") \
V(EVAL_TAG, "Eval", "e") \
V(FUNCTION_TAG, "Function", "f") \
V(KEYED_LOAD_IC_TAG, "KeyedLoadIC", "klic") \
@@ -200,6 +206,10 @@
// ==== Events logged by --log-code. ====
+ // Emits a code event for a callback function.
+ static void CallbackEvent(String* name, Address entry_point);
+ static void GetterCallbackEvent(String* name, Address entry_point);
+ static void SetterCallbackEvent(String* name, Address entry_point);
// Emits a code create event.
static void CodeCreateEvent(LogEventsAndTags tag,
Code* code, const char* source);
@@ -265,6 +275,10 @@
// Logs all compiled functions found in the heap.
static void LogCompiledFunctions();
+ // Logs all accessor callbacks found in the heap.
+ static void LogAccessorCallbacks();
+ // Used for logging stubs found in the snapshot.
+ static void LogCodeObject(Object* code_object);
private:
@@ -277,6 +291,11 @@
// Emits the profiler's first message.
static void ProfilerBeginEvent();
+ // Emits callback event messages.
+ static void CallbackEventInternal(const char* prefix,
+ const char* name,
+ Address entry_point);
+
// Emits aliases for compressed messages.
static void LogAliases();
@@ -328,6 +347,7 @@
friend class TimeLog;
friend class Profiler;
friend class SlidingStateWindow;
+ friend class StackTracer;
friend class VMState;
friend class LoggerTestHelper;
diff --git a/src/macros.py b/src/macros.py
index ddd2f13..5b06099 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -77,12 +77,12 @@
macro IS_NULL(arg) = (arg === null);
macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
macro IS_UNDEFINED(arg) = (typeof(arg) === 'undefined');
-macro IS_FUNCTION(arg) = (typeof(arg) === 'function');
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_STRING(arg) = (typeof(arg) === 'string');
-macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
+macro IS_OBJECT(arg) = (%_IsObject(arg));
macro IS_ARRAY(arg) = (%_IsArray(arg));
+macro IS_FUNCTION(arg) = (%_IsFunction(arg));
macro IS_REGEXP(arg) = (%_ClassOf(arg) === 'RegExp');
macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date');
macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 5a3ab89..81819b7 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -572,9 +572,8 @@
void MarkCompactCollector::MarkSymbolTable() {
// Objects reachable from symbols are marked as live so as to ensure
// that if the symbol itself remains alive after GC for any reason,
- // and if it is a sliced string or a cons string backed by an
- // external string (even indirectly), then the external string does
- // not receive a weak reference callback.
+ // and if it is a cons string backed by an external string (even indirectly),
+ // then the external string does not receive a weak reference callback.
SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
// Mark the symbol table itself.
SetMark(symbol_table);
@@ -593,7 +592,7 @@
void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
- Heap::IterateStrongRoots(visitor);
+ Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
// Handle the symbol table specially.
MarkSymbolTable();
@@ -1074,7 +1073,7 @@
}
#endif
if (!is_prev_alive) { // Transition from non-live to live.
- EncodeFreeRegion(free_start, current - free_start);
+ EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
is_prev_alive = true;
}
} else { // Non-live object.
@@ -1088,7 +1087,9 @@
}
// If we ended on a free region, mark it.
- if (!is_prev_alive) EncodeFreeRegion(free_start, end - free_start);
+ if (!is_prev_alive) {
+ EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
+ }
}
@@ -1169,7 +1170,7 @@
object->ClearMark();
MarkCompactCollector::tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
- dealloc(free_start, current - free_start);
+ dealloc(free_start, static_cast<int>(current - free_start));
is_previous_alive = true;
}
} else {
@@ -1189,7 +1190,7 @@
// If the last region was not live we need to deallocate from
// free_start to the allocation top in the page.
if (!is_previous_alive) {
- int free_size = p->AllocationTop() - free_start;
+ int free_size = static_cast<int>(p->AllocationTop() - free_start);
if (free_size > 0) {
dealloc(free_start, free_size);
}
@@ -1455,7 +1456,7 @@
state_ = UPDATE_POINTERS;
#endif
UpdatingVisitor updating_visitor;
- Heap::IterateRoots(&updating_visitor);
+ Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
GlobalHandles::IterateWeakRoots(&updating_visitor);
int live_maps = IterateLiveObjects(Heap::map_space(),
diff --git a/src/math.js b/src/math.js
index e3d266e..7191896 100644
--- a/src/math.js
+++ b/src/math.js
@@ -29,7 +29,6 @@
// Keep reference to original values of some global properties. This
// has the added benefit that the code in this file is isolated from
// changes to these properties.
-const $Infinity = global.Infinity;
const $floor = MathFloor;
const $random = MathRandom;
const $abs = MathAbs;
@@ -118,10 +117,16 @@
// ECMA 262 - 15.8.2.11
function MathMax(arg1, arg2) { // length == 2
- var r = -$Infinity;
var length = %_ArgumentsLength();
- for (var i = 0; i < length; i++) {
- var n = ToNumber(%_Arguments(i));
+ if (length == 0) {
+ return -1/0; // Compiler constant-folds this to -Infinity.
+ }
+ var r = arg1;
+ if (!IS_NUMBER(r)) r = ToNumber(r);
+ if (NUMBER_IS_NAN(r)) return r;
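+ // r already holds arg1, so start scanning at the second argument.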
+ for (var i = 1; i < length; i++) {
+ var n = %_Arguments(i);
+ if (!IS_NUMBER(n)) n = ToNumber(n);
if (NUMBER_IS_NAN(n)) return n;
// Make sure +0 is considered greater than -0.
if (n > r || (r === 0 && n === 0 && !%_IsSmi(r))) r = n;
@@ -131,10 +136,16 @@
// ECMA 262 - 15.8.2.12
function MathMin(arg1, arg2) { // length == 2
- var r = $Infinity;
var length = %_ArgumentsLength();
- for (var i = 0; i < length; i++) {
- var n = ToNumber(%_Arguments(i));
+ if (length == 0) {
+ return 1/0; // Compiler constant-folds this to Infinity.
+ }
+ var r = arg1;
+ if (!IS_NUMBER(r)) r = ToNumber(r);
+ if (NUMBER_IS_NAN(r)) return r;
+ for (var i = 1; i < length; i++) {
+ var n = %_Arguments(i);
+ if (!IS_NUMBER(n)) n = ToNumber(n);
if (NUMBER_IS_NAN(n)) return n;
// Make sure -0 is considered less than +0.
if (n < r || (r === 0 && n === 0 && !%_IsSmi(n))) r = n;
diff --git a/src/messages.js b/src/messages.js
index 2720792..1e5053d 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -238,14 +238,15 @@
Script.prototype.lineFromPosition = function(position) {
var lower = 0;
var upper = this.lineCount() - 1;
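+ // Cache the line_ends property in a local; it may be backed by an
+ // accessor, so repeated property loads are best avoided.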
+ var line_ends = this.line_ends;
// We'll never find invalid positions so bail right away.
- if (position > this.line_ends[upper]) {
+ if (position > line_ends[upper]) {
return -1;
}
// This means we don't have to safe-guard indexing line_ends[i - 1].
- if (position <= this.line_ends[0]) {
+ if (position <= line_ends[0]) {
return 0;
}
@@ -253,9 +254,9 @@
while (upper >= 1) {
var i = (lower + upper) >> 1;
- if (position > this.line_ends[i]) {
+ if (position > line_ends[i]) {
lower = i + 1;
- } else if (position <= this.line_ends[i - 1]) {
+ } else if (position <= line_ends[i - 1]) {
upper = i - 1;
} else {
return i;
@@ -278,8 +279,9 @@
if (line == -1) return null;
// Determine start, end and column.
- var start = line == 0 ? 0 : this.line_ends[line - 1] + 1;
- var end = this.line_ends[line];
+ var line_ends = this.line_ends;
+ var start = line == 0 ? 0 : line_ends[line - 1] + 1;
+ var end = line_ends[line];
if (end > 0 && StringCharAt.call(this.source, end - 1) == '\r') end--;
var column = position - start;
@@ -368,8 +370,9 @@
return null;
}
- var from_position = from_line == 0 ? 0 : this.line_ends[from_line - 1] + 1;
- var to_position = to_line == 0 ? 0 : this.line_ends[to_line - 1] + 1;
+ var line_ends = this.line_ends;
+ var from_position = from_line == 0 ? 0 : line_ends[from_line - 1] + 1;
+ var to_position = to_line == 0 ? 0 : line_ends[to_line - 1] + 1;
// Return a source slice with line numbers re-adjusted to the resource.
return new SourceSlice(this, from_line + this.line_offset, to_line + this.line_offset,
@@ -391,8 +394,9 @@
}
// Return the source line.
- var start = line == 0 ? 0 : this.line_ends[line - 1] + 1;
- var end = this.line_ends[line];
+ var line_ends = this.line_ends;
+ var start = line == 0 ? 0 : line_ends[line - 1] + 1;
+ var end = line_ends[line];
return StringSubstring.call(this.source, start, end);
}
@@ -625,10 +629,7 @@
CallSite.prototype.getEvalOrigin = function () {
var script = %FunctionGetScript(this.fun);
- if (!script || script.compilation_type != 1)
- return null;
- return new CallSite(null, script.eval_from_function,
- script.eval_from_position);
+ return FormatEvalOrigin(script);
};
CallSite.prototype.getFunction = function () {
@@ -696,7 +697,7 @@
if (script) {
location = script.locationFromPosition(this.pos, true);
}
- return location ? location.column : null;
+ return location ? location.column + 1 : null;
};
CallSite.prototype.isNative = function () {
@@ -715,12 +716,44 @@
return this.fun === constructor;
};
+function FormatEvalOrigin(script) {
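+ // Builds a description of the eval site, e.g. "name (eval at ...)" or
+ // "name (file:line:column)", recursing when the eval site is itself an
+ // eval script.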
+ var eval_origin = "";
+ if (script.eval_from_function_name) {
+ eval_origin += script.eval_from_function_name;
+ } else {
+ eval_origin += "<anonymous>";
+ }
+
+ var eval_from_script = script.eval_from_script;
+ if (eval_from_script) {
+ if (eval_from_script.compilation_type == 1) {
+ // eval script originated from another eval.
+ eval_origin += " (eval at " + FormatEvalOrigin(eval_from_script) + ")";
+ } else {
+ // eval script originated from "real" source.
+ if (eval_from_script.name) {
+ eval_origin += " (" + eval_from_script.name;
+ var location = eval_from_script.locationFromPosition(
+ script.eval_from_script_position, true);
+ if (location) {
+ eval_origin += ":" + (location.line + 1);
+ eval_origin += ":" + (location.column + 1);
+ }
+ eval_origin += ")"
+ } else {
+ eval_origin += " (unknown source)";
+ }
+ }
+ }
+
+ return eval_origin;
+};
+
function FormatSourcePosition(frame) {
var fileLocation = "";
if (frame.isNative()) {
fileLocation = "native";
} else if (frame.isEval()) {
- fileLocation = "eval at " + FormatSourcePosition(frame.getEvalOrigin());
+ fileLocation = "eval at " + frame.getEvalOrigin();
} else {
var fileName = frame.getFileName();
if (fileName) {
diff --git a/src/mirror-delay.js b/src/mirror-delay.js
index cde5534..ba663b2 100644
--- a/src/mirror-delay.js
+++ b/src/mirror-delay.js
@@ -849,6 +849,33 @@
/**
+ * Returns the script source position for the function. Only makes sense
+ * for functions that have a script defined.
+ * @return {Number or undefined} in-script position for the function
+ */
+FunctionMirror.prototype.sourcePosition_ = function() {
+ // Return script if function is resolved. Otherwise just fall through
+ // to return undefined.
+ if (this.resolved()) {
+ return %FunctionGetScriptSourcePosition(this.value_);
+ }
+};
+
+
+/**
+ * Returns the script source location object for the function. Only makes sense
+ * for functions that have a script defined.
+ * @return {Location or undefined} in-script location for the start of the function
+ */
+FunctionMirror.prototype.sourceLocation = function() {
+ if (this.resolved() && this.script()) {
+ return this.script().locationFromPosition(this.sourcePosition_(),
+ true);
+ }
+};
+
+
+/**
* Returns objects constructed by this function.
* @param {number} opt_max_instances Optional parameter specifying the maximum
* number of instances to return.
@@ -1766,16 +1793,21 @@
};
-ScriptMirror.prototype.evalFromFunction = function() {
- return MakeMirror(this.script_.eval_from_function);
+ScriptMirror.prototype.evalFromScript = function() {
+ return MakeMirror(this.script_.eval_from_script);
+};
+
+
+ScriptMirror.prototype.evalFromFunctionName = function() {
+ return MakeMirror(this.script_.eval_from_function_name);
};
ScriptMirror.prototype.evalFromLocation = function() {
- var eval_from_function = this.evalFromFunction();
- if (!eval_from_function.isUndefined()) {
- var position = this.script_.eval_from_position;
- return eval_from_function.script().locationFromPosition(position, true);
+ var eval_from_script = this.evalFromScript();
+ if (!eval_from_script.isUndefined()) {
+ var position = this.script_.eval_from_script_position;
+ return eval_from_script.locationFromPosition(position, true);
}
};
@@ -2053,12 +2085,15 @@
// For compilation type eval emit information on the script from which
// eval was called if a script is present.
if (mirror.compilationType() == 1 &&
- mirror.evalFromFunction().script()) {
+ mirror.evalFromScript()) {
content.evalFromScript =
- this.serializeReference(mirror.evalFromFunction().script());
+ this.serializeReference(mirror.evalFromScript());
var evalFromLocation = mirror.evalFromLocation()
content.evalFromLocation = { line: evalFromLocation.line,
column: evalFromLocation.column}
+ if (mirror.evalFromFunctionName()) {
+ content.evalFromFunctionName = mirror.evalFromFunctionName();
+ }
}
if (mirror.context()) {
content.context = this.serializeReference(mirror.context());
@@ -2119,6 +2154,9 @@
}
if (mirror.script()) {
content.script = this.serializeReference(mirror.script());
+ content.scriptId = mirror.script().id();
+
+ serializeLocationFields(mirror.sourceLocation(), content);
}
}
@@ -2151,6 +2189,31 @@
/**
+ * Serialize location information to the following JSON format:
+ *
+ * "position":"<position>",
+ * "line":"<line>",
+ * "column":"<column>",
+ *
+ * @param {SourceLocation} location The location to serialize, may be undefined.
+ * @param {Object} content The object to which the location fields are added.
+ */
+function serializeLocationFields(location, content) {
+ if (!location) {
+ return;
+ }
+ content.position = location.position;
+ var line = location.line;
+ if (!IS_UNDEFINED(line)) {
+ content.line = line;
+ }
+ var column = location.column;
+ if (!IS_UNDEFINED(column)) {
+ content.column = column;
+ }
+}
+
+
+/**
* Serialize property information to the following JSON format for building the
* array of properties.
*
@@ -2218,15 +2281,7 @@
x[i] = local;
}
content.locals = x;
- content.position = mirror.sourcePosition();
- var line = mirror.sourceLine();
- if (!IS_UNDEFINED(line)) {
- content.line = line;
- }
- var column = mirror.sourceColumn();
- if (!IS_UNDEFINED(column)) {
- content.column = column;
- }
+ serializeLocationFields(mirror.sourceLocation(), content);
var source_line_text = mirror.sourceLineText();
if (!IS_UNDEFINED(source_line_text)) {
content.sourceLineText = source_line_text;
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index 80789eb..eb743f8 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -87,57 +87,53 @@
// We statically allocate a set of local counters to be used if we
// don't want to store the stats in a memory-mapped file
static CounterCollection local_counters;
-static CounterCollection* counters = &local_counters;
typedef std::map<std::string, int*> CounterMap;
typedef std::map<std::string, int*>::iterator CounterMapIterator;
static CounterMap counter_table_;
-// Callback receiver when v8 has a counter to track.
-static int* counter_callback(const char* name) {
- std::string counter = name;
- // See if this counter name is already known.
- if (counter_table_.find(counter) != counter_table_.end())
- return counter_table_[counter];
- Counter* ctr = counters->GetNextCounter();
- if (ctr == NULL) return NULL;
- int* ptr = ctr->Bind(name);
- counter_table_[counter] = ptr;
- return ptr;
-}
-
-
-// Write C++ code that defines Snapshot::snapshot_ to contain the snapshot
-// to the file given by filename. Only the first size chars are written.
-static int WriteInternalSnapshotToFile(const char* filename,
- const v8::internal::byte* bytes,
- int size) {
- FILE* f = i::OS::FOpen(filename, "wb");
- if (f == NULL) {
- i::OS::PrintError("Cannot open file %s for reading.\n", filename);
- return 0;
+class CppByteSink : public i::SnapshotByteSink {
+ public:
+ explicit CppByteSink(const char* snapshot_file) : bytes_written_(0) {
+ fp_ = i::OS::FOpen(snapshot_file, "wb");
+ if (fp_ == NULL) {
+ i::PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
+ exit(1);
+ }
+ fprintf(fp_, "// Autogenerated snapshot file. Do not edit.\n\n");
+ fprintf(fp_, "#include \"v8.h\"\n");
+ fprintf(fp_, "#include \"platform.h\"\n\n");
+ fprintf(fp_, "#include \"snapshot.h\"\n\n");
+ fprintf(fp_, "namespace v8 {\nnamespace internal {\n\n");
+ fprintf(fp_, "const byte Snapshot::data_[] = {");
}
- fprintf(f, "// Autogenerated snapshot file. Do not edit.\n\n");
- fprintf(f, "#include \"v8.h\"\n");
- fprintf(f, "#include \"platform.h\"\n\n");
- fprintf(f, "#include \"snapshot.h\"\n\n");
- fprintf(f, "namespace v8 {\nnamespace internal {\n\n");
- fprintf(f, "const byte Snapshot::data_[] = {");
- int written = 0;
- written += fprintf(f, "0x%x", bytes[0]);
- for (int i = 1; i < size; ++i) {
- written += fprintf(f, ",0x%x", bytes[i]);
- // The following is needed to keep the line length low on Visual C++:
- if (i % 512 == 0) fprintf(f, "\n");
+
+ virtual ~CppByteSink() {
+ if (fp_ != NULL) {
+ fprintf(fp_, "};\n\n");
+ fprintf(fp_, "int Snapshot::size_ = %d;\n\n", bytes_written_);
+ fprintf(fp_, "} } // namespace v8::internal\n");
+ fclose(fp_);
+ }
}
- fprintf(f, "};\n\n");
- fprintf(f, "int Snapshot::size_ = %d;\n\n", size);
- fprintf(f, "} } // namespace v8::internal\n");
- fclose(f);
- return written;
-}
+
+ virtual void Put(int byte, const char* description) {
+ if (bytes_written_ != 0) {
+ fprintf(fp_, ",");
+ }
+ fprintf(fp_, "%d", byte);
+ bytes_written_++;
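+ // Start a new line every 64 bytes to keep the generated lines short.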
+ if ((bytes_written_ & 0x3f) == 0) {
+ fprintf(fp_, "\n");
+ }
+ }
+
+ private:
+ FILE* fp_;
+ int bytes_written_;
+};
int main(int argc, char** argv) {
@@ -153,34 +149,20 @@
i::FlagList::PrintHelp();
return !i::FLAG_help;
}
-
- v8::V8::SetCounterFunction(counter_callback);
- v8::HandleScope scope;
-
- const int kExtensionCount = 1;
- const char* extension_list[kExtensionCount] = { "v8/gc" };
- v8::ExtensionConfiguration extensions(kExtensionCount, extension_list);
-
i::Serializer::Enable();
- v8::Context::New(&extensions);
-
+ Persistent<Context> context = v8::Context::New();
// Make sure all builtin scripts are cached.
{ HandleScope scope;
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
i::Bootstrapper::NativesSourceLookup(i);
}
}
- // Get rid of unreferenced scripts with a global GC.
- i::Heap::CollectAllGarbage(false);
- i::Serializer ser;
+ context.Dispose();
+ CppByteSink sink(argv[1]);
+ i::Serializer ser(&sink);
+ // This results in a somewhat smaller snapshot, probably because it gets rid
+ // of some things that are cached between garbage collections.
+ i::Heap::CollectAllGarbage(true);
ser.Serialize();
- v8::internal::byte* bytes;
- int len;
- ser.Finalize(&bytes, &len);
-
- WriteInternalSnapshotToFile(argv[1], bytes, len);
-
- i::DeleteArray(bytes);
-
return 0;
}
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 0188134..36f65ee 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -547,54 +547,18 @@
case INVALID_TYPE: return "INVALID";
case MAP_TYPE: return "MAP";
case HEAP_NUMBER_TYPE: return "HEAP_NUMBER";
- case SHORT_SYMBOL_TYPE:
- case MEDIUM_SYMBOL_TYPE:
- case LONG_SYMBOL_TYPE: return "SYMBOL";
- case SHORT_ASCII_SYMBOL_TYPE:
- case MEDIUM_ASCII_SYMBOL_TYPE:
- case LONG_ASCII_SYMBOL_TYPE: return "ASCII_SYMBOL";
- case SHORT_SLICED_SYMBOL_TYPE:
- case MEDIUM_SLICED_SYMBOL_TYPE:
- case LONG_SLICED_SYMBOL_TYPE: return "SLICED_SYMBOL";
- case SHORT_SLICED_ASCII_SYMBOL_TYPE:
- case MEDIUM_SLICED_ASCII_SYMBOL_TYPE:
- case LONG_SLICED_ASCII_SYMBOL_TYPE: return "SLICED_ASCII_SYMBOL";
- case SHORT_CONS_SYMBOL_TYPE:
- case MEDIUM_CONS_SYMBOL_TYPE:
- case LONG_CONS_SYMBOL_TYPE: return "CONS_SYMBOL";
- case SHORT_CONS_ASCII_SYMBOL_TYPE:
- case MEDIUM_CONS_ASCII_SYMBOL_TYPE:
- case LONG_CONS_ASCII_SYMBOL_TYPE: return "CONS_ASCII_SYMBOL";
- case SHORT_EXTERNAL_ASCII_SYMBOL_TYPE:
- case MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE:
- case LONG_EXTERNAL_ASCII_SYMBOL_TYPE:
- case SHORT_EXTERNAL_SYMBOL_TYPE:
- case MEDIUM_EXTERNAL_SYMBOL_TYPE:
- case LONG_EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
- case SHORT_ASCII_STRING_TYPE:
- case MEDIUM_ASCII_STRING_TYPE:
- case LONG_ASCII_STRING_TYPE: return "ASCII_STRING";
- case SHORT_STRING_TYPE:
- case MEDIUM_STRING_TYPE:
- case LONG_STRING_TYPE: return "TWO_BYTE_STRING";
- case SHORT_CONS_STRING_TYPE:
- case MEDIUM_CONS_STRING_TYPE:
- case LONG_CONS_STRING_TYPE:
- case SHORT_CONS_ASCII_STRING_TYPE:
- case MEDIUM_CONS_ASCII_STRING_TYPE:
- case LONG_CONS_ASCII_STRING_TYPE: return "CONS_STRING";
- case SHORT_SLICED_STRING_TYPE:
- case MEDIUM_SLICED_STRING_TYPE:
- case LONG_SLICED_STRING_TYPE:
- case SHORT_SLICED_ASCII_STRING_TYPE:
- case MEDIUM_SLICED_ASCII_STRING_TYPE:
- case LONG_SLICED_ASCII_STRING_TYPE: return "SLICED_STRING";
- case SHORT_EXTERNAL_ASCII_STRING_TYPE:
- case MEDIUM_EXTERNAL_ASCII_STRING_TYPE:
- case LONG_EXTERNAL_ASCII_STRING_TYPE:
- case SHORT_EXTERNAL_STRING_TYPE:
- case MEDIUM_EXTERNAL_STRING_TYPE:
- case LONG_EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
+ case SYMBOL_TYPE: return "SYMBOL";
+ case ASCII_SYMBOL_TYPE: return "ASCII_SYMBOL";
+ case CONS_SYMBOL_TYPE: return "CONS_SYMBOL";
+ case CONS_ASCII_SYMBOL_TYPE: return "CONS_ASCII_SYMBOL";
+ case EXTERNAL_ASCII_SYMBOL_TYPE:
+ case EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
+ case ASCII_STRING_TYPE: return "ASCII_STRING";
+ case STRING_TYPE: return "TWO_BYTE_STRING";
+ case CONS_STRING_TYPE:
+ case CONS_ASCII_STRING_TYPE: return "CONS_STRING";
+ case EXTERNAL_ASCII_STRING_TYPE:
+ case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
case PIXEL_ARRAY_TYPE: return "PIXEL_ARRAY";
@@ -796,8 +760,6 @@
PrintF("\n - debug info = ");
debug_info()->ShortPrint();
PrintF("\n - length = %d", length());
- PrintF("\n - has_only_this_property_assignments = %d",
- has_only_this_property_assignments());
PrintF("\n - has_only_simple_this_property_assignments = %d",
has_only_simple_this_property_assignments());
PrintF("\n - this_property_assignments = ");
@@ -979,6 +941,7 @@
VerifyPointer(name());
VerifyPointer(data());
VerifyPointer(flag());
+ VerifyPointer(load_stub_cache());
}
void AccessorInfo::AccessorInfoPrint() {
@@ -1172,6 +1135,20 @@
type()->ShortPrint();
PrintF("\n - id: ");
id()->ShortPrint();
+ PrintF("\n - data: ");
+ data()->ShortPrint();
+ PrintF("\n - context data: ");
+ context_data()->ShortPrint();
+ PrintF("\n - wrapper: ");
+ wrapper()->ShortPrint();
+ PrintF("\n - compilation type: ");
+ compilation_type()->ShortPrint();
+ PrintF("\n - line ends: ");
+ line_ends()->ShortPrint();
+ PrintF("\n - eval from shared: ");
+ eval_from_shared()->ShortPrint();
+ PrintF("\n - eval from instructions offset: ");
+ eval_from_instructions_offset()->ShortPrint();
PrintF("\n");
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 1ada583..8514a41 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -163,11 +163,6 @@
}
-#ifdef DEBUG
-// These are for cast checks. If you need one of these in release
-// mode you should consider using a StringShape before moving it out
-// of the ifdef
-
bool Object::IsSeqString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential();
@@ -208,15 +203,6 @@
}
-bool Object::IsSlicedString() {
- if (!IsString()) return false;
- return StringShape(String::cast(this)).IsSliced();
-}
-
-
-#endif // DEBUG
-
-
StringShape::StringShape(String* str)
: type_(str->map()->instance_type()) {
set_valid();
@@ -246,9 +232,6 @@
bool String::IsAsciiRepresentation() {
uint32_t type = map()->instance_type();
- if ((type & kStringRepresentationMask) == kSlicedStringTag) {
- return SlicedString::cast(this)->buffer()->IsAsciiRepresentation();
- }
if ((type & kStringRepresentationMask) == kConsStringTag &&
ConsString::cast(this)->second()->length() == 0) {
return ConsString::cast(this)->first()->IsAsciiRepresentation();
@@ -259,9 +242,7 @@
bool String::IsTwoByteRepresentation() {
uint32_t type = map()->instance_type();
- if ((type & kStringRepresentationMask) == kSlicedStringTag) {
- return SlicedString::cast(this)->buffer()->IsTwoByteRepresentation();
- } else if ((type & kStringRepresentationMask) == kConsStringTag &&
+ if ((type & kStringRepresentationMask) == kConsStringTag &&
ConsString::cast(this)->second()->length() == 0) {
return ConsString::cast(this)->first()->IsTwoByteRepresentation();
}
@@ -274,11 +255,6 @@
}
-bool StringShape::IsSliced() {
- return (type_ & kStringRepresentationMask) == kSlicedStringTag;
-}
-
-
bool StringShape::IsExternal() {
return (type_ & kStringRepresentationMask) == kExternalStringTag;
}
@@ -304,11 +280,6 @@
Internals::kFullStringRepresentationMask);
-uint32_t StringShape::size_tag() {
- return (type_ & kStringSizeMask);
-}
-
-
bool StringShape::IsSequentialAscii() {
return full_representation_tag() == (kSeqStringTag | kAsciiStringTag);
}
@@ -879,7 +850,7 @@
requested = static_cast<intptr_t>(
(~static_cast<uintptr_t>(0)) >> (tag_bits + 1));
}
- int value = (requested << kSpaceTagSize) | NEW_SPACE;
+ int value = static_cast<int>(requested << kSpaceTagSize) | NEW_SPACE;
return Construct(RETRY_AFTER_GC, value);
}
@@ -1014,9 +985,9 @@
int MapWord::DecodeOffset() {
// The offset field is represented in the kForwardingOffsetBits
// most-significant bits.
- int offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
- ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
- return offset;
+ uintptr_t offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
+ ASSERT(offset < static_cast<uintptr_t>(Page::kObjectAreaSize));
+ return static_cast<int>(offset);
}
@@ -1591,7 +1562,6 @@
CAST_ACCESSOR(SeqAsciiString)
CAST_ACCESSOR(SeqTwoByteString)
CAST_ACCESSOR(ConsString)
-CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalAsciiString)
CAST_ACCESSOR(ExternalTwoByteString)
@@ -1641,6 +1611,19 @@
INT_ACCESSORS(Array, length, kLengthOffset)
+INT_ACCESSORS(String, length, kLengthOffset)
+
+
+uint32_t String::hash_field() {
+ return READ_UINT32_FIELD(this, kHashFieldOffset);
+}
+
+
+void String::set_hash_field(uint32_t value) {
+ WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
+}
+
+
bool String::Equals(String* other) {
if (other == this) return true;
if (StringShape(this).IsSymbol() && StringShape(other).IsSymbol()) {
@@ -1650,38 +1633,6 @@
}
-int String::length() {
- uint32_t len = READ_INT_FIELD(this, kLengthOffset);
-
- ASSERT(kShortStringTag + kLongLengthShift == kShortLengthShift);
- ASSERT(kMediumStringTag + kLongLengthShift == kMediumLengthShift);
- ASSERT(kLongStringTag == 0);
-
- return len >> (StringShape(this).size_tag() + kLongLengthShift);
-}
-
-
-void String::set_length(int value) {
- ASSERT(kShortStringTag + kLongLengthShift == kShortLengthShift);
- ASSERT(kMediumStringTag + kLongLengthShift == kMediumLengthShift);
- ASSERT(kLongStringTag == 0);
-
- WRITE_INT_FIELD(this,
- kLengthOffset,
- value << (StringShape(this).size_tag() + kLongLengthShift));
-}
-
-
-uint32_t String::length_field() {
- return READ_UINT32_FIELD(this, kLengthOffset);
-}
-
-
-void String::set_length_field(uint32_t value) {
- WRITE_UINT32_FIELD(this, kLengthOffset, value);
-}
-
-
Object* String::TryFlattenIfNotFlat() {
// We don't need to flatten strings that are already flat. Since this code
// is inlined, it can be helpful in the flat case to not call out to Flatten.
@@ -1702,9 +1653,6 @@
case kConsStringTag | kAsciiStringTag:
case kConsStringTag | kTwoByteStringTag:
return ConsString::cast(this)->ConsStringGet(index);
- case kSlicedStringTag | kAsciiStringTag:
- case kSlicedStringTag | kTwoByteStringTag:
- return SlicedString::cast(this)->SlicedStringGet(index);
case kExternalStringTag | kAsciiStringTag:
return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index);
case kExternalStringTag | kTwoByteStringTag:
@@ -1735,11 +1683,6 @@
// Only flattened strings have second part empty.
return second->length() == 0;
}
- case kSlicedStringTag: {
- StringRepresentationTag tag =
- StringShape(SlicedString::cast(this)->buffer()).representation_tag();
- return tag == kSeqStringTag || tag == kExternalStringTag;
- }
default:
return true;
}
@@ -1793,30 +1736,12 @@
int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
uint32_t length = READ_INT_FIELD(this, kLengthOffset);
-
- ASSERT(kShortStringTag + kLongLengthShift == kShortLengthShift);
- ASSERT(kMediumStringTag + kLongLengthShift == kMediumLengthShift);
- ASSERT(kLongStringTag == 0);
-
- // Use the map (and not 'this') to compute the size tag, since
- // TwoByteStringSize is called during GC when maps are encoded.
- length >>= StringShape(instance_type).size_tag() + kLongLengthShift;
-
return SizeFor(length);
}
int SeqAsciiString::SeqAsciiStringSize(InstanceType instance_type) {
uint32_t length = READ_INT_FIELD(this, kLengthOffset);
-
- ASSERT(kShortStringTag + kLongLengthShift == kShortLengthShift);
- ASSERT(kMediumStringTag + kLongLengthShift == kMediumLengthShift);
- ASSERT(kLongStringTag == 0);
-
- // Use the map (and not 'this') to compute the size tag, since
- // AsciiStringSize is called during GC when maps are encoded.
- length >>= StringShape(instance_type).size_tag() + kLongLengthShift;
-
return SizeFor(length);
}
@@ -1853,27 +1778,6 @@
}
-String* SlicedString::buffer() {
- return String::cast(READ_FIELD(this, kBufferOffset));
-}
-
-
-void SlicedString::set_buffer(String* buffer) {
- WRITE_FIELD(this, kBufferOffset, buffer);
- WRITE_BARRIER(this, kBufferOffset);
-}
-
-
-int SlicedString::start() {
- return READ_INT_FIELD(this, kStartOffset);
-}
-
-
-void SlicedString::set_start(int start) {
- WRITE_INT_FIELD(this, kStartOffset, start);
-}
-
-
ExternalAsciiString::Resource* ExternalAsciiString::resource() {
return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}
@@ -1885,34 +1789,6 @@
}
-Map* ExternalAsciiString::StringMap(int length) {
- Map* map;
- // Number of characters: determines the map.
- if (length <= String::kMaxShortStringSize) {
- map = Heap::short_external_ascii_string_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = Heap::medium_external_ascii_string_map();
- } else {
- map = Heap::long_external_ascii_string_map();
- }
- return map;
-}
-
-
-Map* ExternalAsciiString::SymbolMap(int length) {
- Map* map;
- // Number of characters: determines the map.
- if (length <= String::kMaxShortStringSize) {
- map = Heap::short_external_ascii_symbol_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = Heap::medium_external_ascii_symbol_map();
- } else {
- map = Heap::long_external_ascii_symbol_map();
- }
- return map;
-}
-
-
ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}
@@ -1924,34 +1800,6 @@
}
-Map* ExternalTwoByteString::StringMap(int length) {
- Map* map;
- // Number of characters: determines the map.
- if (length <= String::kMaxShortStringSize) {
- map = Heap::short_external_string_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = Heap::medium_external_string_map();
- } else {
- map = Heap::long_external_string_map();
- }
- return map;
-}
-
-
-Map* ExternalTwoByteString::SymbolMap(int length) {
- Map* map;
- // Number of characters: determines the map.
- if (length <= String::kMaxShortStringSize) {
- map = Heap::short_external_symbol_map();
- } else if (length <= String::kMaxMediumStringSize) {
- map = Heap::medium_external_symbol_map();
- } else {
- map = Heap::long_external_symbol_map();
- }
- return map;
-}
-
-
byte ByteArray::get(int index) {
ASSERT(index >= 0 && index < this->length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
@@ -2417,6 +2265,7 @@
ACCESSORS(AccessorInfo, data, Object, kDataOffset)
ACCESSORS(AccessorInfo, name, Object, kNameOffset)
ACCESSORS(AccessorInfo, flag, Smi, kFlagOffset)
+ACCESSORS(AccessorInfo, load_stub_cache, Object, kLoadStubCacheOffset)
ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
@@ -2476,7 +2325,7 @@
ACCESSORS(Script, type, Smi, kTypeOffset)
ACCESSORS(Script, compilation_type, Smi, kCompilationTypeOffset)
ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
-ACCESSORS(Script, eval_from_function, Object, kEvalFromFunctionOffset)
+ACCESSORS(Script, eval_from_shared, Object, kEvalFromSharedOffset)
ACCESSORS(Script, eval_from_instructions_offset, Smi,
kEvalFrominstructionsOffsetOffset)
@@ -2514,12 +2363,12 @@
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
kIsTopLevelBit)
BOOL_GETTER(SharedFunctionInfo, compiler_hints,
- has_only_this_property_assignments,
- kHasOnlyThisPropertyAssignments)
-BOOL_GETTER(SharedFunctionInfo, compiler_hints,
has_only_simple_this_property_assignments,
kHasOnlySimpleThisPropertyAssignments)
-
+BOOL_ACCESSORS(SharedFunctionInfo,
+ compiler_hints,
+ try_fast_codegen,
+ kTryFastCodegen)
INT_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
INT_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
@@ -2933,13 +2782,13 @@
bool String::HasHashCode() {
- return (length_field() & kHashComputedMask) != 0;
+ return (hash_field() & kHashComputedMask) != 0;
}
uint32_t String::Hash() {
// Fast case: has hash code already been computed?
- uint32_t field = length_field();
+ uint32_t field = hash_field();
if (field & kHashComputedMask) return field >> kHashShift;
// Slow case: compute hash code and set it.
return ComputeAndSetHash();
@@ -2956,7 +2805,7 @@
bool StringHasher::has_trivial_hash() {
- return length_ > String::kMaxMediumStringSize;
+ return length_ > String::kMaxHashCalcLength;
}
@@ -3012,7 +2861,7 @@
bool String::AsArrayIndex(uint32_t* index) {
- uint32_t field = length_field();
+ uint32_t field = hash_field();
if ((field & kHashComputedMask) && !(field & kIsArrayIndexMask)) return false;
return SlowAsArrayIndex(index);
}
@@ -3027,6 +2876,43 @@
return GetPropertyAttributeWithReceiver(this, key);
}
+// TODO(504): this may be useful in other places too where JSGlobalProxy
+// is used.
+Object* JSObject::BypassGlobalProxy() {
+ if (IsJSGlobalProxy()) {
+ Object* proto = GetPrototype();
+ if (proto->IsNull()) return Heap::undefined_value();
+ ASSERT(proto->IsJSGlobalObject());
+ return proto;
+ }
+ return this;
+}
+
+
+bool JSObject::HasHiddenPropertiesObject() {
+ ASSERT(!IsJSGlobalProxy());
+ return GetPropertyAttributePostInterceptor(this,
+ Heap::hidden_symbol(),
+ false) != ABSENT;
+}
+
+
+Object* JSObject::GetHiddenPropertiesObject() {
+ ASSERT(!IsJSGlobalProxy());
+ PropertyAttributes attributes;
+ return GetLocalPropertyPostInterceptor(this,
+ Heap::hidden_symbol(),
+ &attributes);
+}
+
+
+Object* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) {
+ ASSERT(!IsJSGlobalProxy());
+ return SetPropertyPostInterceptor(Heap::hidden_symbol(),
+ hidden_obj,
+ DONT_ENUM);
+}
+
bool JSObject::HasElement(uint32_t index) {
return HasElementWithReceiver(this, index);
@@ -3099,8 +2985,19 @@
void JSArray::EnsureSize(int required_size) {
ASSERT(HasFastElements());
- if (elements()->length() >= required_size) return;
- Expand(required_size);
+ Array* elts = elements();
+ const int kArraySizeThatFitsComfortablyInNewSpace = 128;
+ if (elts->length() < required_size) {
+ // Doubling in size would be overkill, but leave some slack to avoid
+ // constantly growing.
+ Expand(required_size + (required_size >> 3));
+ // It's a performance benefit to keep a frequently used array in new-space.
+ } else if (!Heap::new_space()->Contains(elts) &&
+ required_size < kArraySizeThatFitsComfortablyInNewSpace) {
+ // Expand will allocate a new backing store in new space even if the size
+ // we asked for isn't larger than what we had before.
+ Expand(required_size);
+ }
}
diff --git a/src/objects.cc b/src/objects.cc
index af1a0e5..0f8dca3 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -37,6 +37,7 @@
#include "scanner.h"
#include "scopeinfo.h"
#include "string-stream.h"
+#include "utils.h"
#ifdef ENABLE_DISASSEMBLER
#include "disassembler.h"
@@ -683,23 +684,6 @@
#endif
switch (StringShape(this).representation_tag()) {
- case kSlicedStringTag: {
- SlicedString* ss = SlicedString::cast(this);
- // The SlicedString constructor should ensure that there are no
- // SlicedStrings that are constructed directly on top of other
- // SlicedStrings.
- String* buf = ss->buffer();
- ASSERT(!buf->IsSlicedString());
- Object* ok = buf->TryFlatten();
- if (ok->IsFailure()) return ok;
- // Under certain circumstances (TryFlattenIfNotFlat fails in
- // String::Slice) we can have a cons string under a slice.
- // In this case we need to get the flat string out of the cons!
- if (StringShape(String::cast(ok)).IsCons()) {
- ss->set_buffer(ConsString::cast(ok)->first());
- }
- return this;
- }
case kConsStringTag: {
ConsString* cs = ConsString::cast(this);
if (cs->second()->length() == 0) {
@@ -771,19 +755,21 @@
ASSERT(size >= ExternalString::kSize);
bool is_symbol = this->IsSymbol();
int length = this->length();
+ int hash_field = this->hash_field();
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
- this->set_map(ExternalTwoByteString::StringMap(length));
+ this->set_map(Heap::external_string_map());
ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
self->set_length(length);
+ self->set_hash_field(hash_field);
self->set_resource(resource);
// Additionally make the object into an external symbol if the original string
// was a symbol to start with.
if (is_symbol) {
self->Hash(); // Force regeneration of the hash value.
// Now morph this external string into an external symbol.
- self->set_map(ExternalTwoByteString::SymbolMap(length));
+ this->set_map(Heap::external_symbol_map());
}
// Fill the remainder of the string with dead wood.
@@ -815,19 +801,21 @@
ASSERT(size >= ExternalString::kSize);
bool is_symbol = this->IsSymbol();
int length = this->length();
+ int hash_field = this->hash_field();
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
- this->set_map(ExternalAsciiString::StringMap(length));
+ this->set_map(Heap::external_ascii_string_map());
ExternalAsciiString* self = ExternalAsciiString::cast(this);
self->set_length(length);
+ self->set_hash_field(hash_field);
self->set_resource(resource);
// Additionally make the object into an external symbol if the original string
// was a symbol to start with.
if (is_symbol) {
self->Hash(); // Force regeneration of the hash value.
// Now morph this external string into an external symbol.
- self->set_map(ExternalAsciiString::SymbolMap(length));
+ this->set_map(Heap::external_ascii_symbol_map());
}
// Fill the remainder of the string with dead wood.
@@ -839,7 +827,7 @@
void String::StringShortPrint(StringStream* accumulator) {
int len = length();
- if (len > kMaxMediumStringSize) {
+ if (len > kMaxShortPrintLength) {
accumulator->Add("<Very long string[%u]>", len);
return;
}
@@ -1135,8 +1123,14 @@
case kConsStringTag:
reinterpret_cast<ConsString*>(this)->ConsStringIterateBody(v);
break;
- case kSlicedStringTag:
- reinterpret_cast<SlicedString*>(this)->SlicedStringIterateBody(v);
+ case kExternalStringTag:
+ if ((type & kStringEncodingMask) == kAsciiStringTag) {
+ reinterpret_cast<ExternalAsciiString*>(this)->
+ ExternalAsciiStringIterateBody(v);
+ } else {
+ reinterpret_cast<ExternalTwoByteString*>(this)->
+ ExternalTwoByteStringIterateBody(v);
+ }
break;
}
return;
@@ -1251,7 +1245,8 @@
String* JSObject::constructor_name() {
if (IsJSFunction()) {
- return Heap::function_class_symbol();
+ return JSFunction::cast(this)->IsBoilerplate() ?
+ Heap::function_class_symbol() : Heap::closure_symbol();
}
if (map()->constructor()->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(map()->constructor());
@@ -1473,8 +1468,8 @@
Object* JSObject::ReplaceSlowProperty(String* name,
- Object* value,
- PropertyAttributes attributes) {
+ Object* value,
+ PropertyAttributes attributes) {
StringDictionary* dictionary = property_dictionary();
int old_index = dictionary->FindEntry(name);
int new_enumeration_index = 0; // 0 means "Use the next available index."
@@ -1488,6 +1483,7 @@
return SetNormalizedProperty(name, value, new_details);
}
+
Object* JSObject::ConvertDescriptorToFieldAndMapTransition(
String* name,
Object* new_value,
@@ -1879,6 +1875,14 @@
// interceptor calls.
AssertNoContextChange ncc;
+ // Optimization for 2-byte strings often used as keys in a decompression
+ // dictionary. We make these short keys into symbols to avoid constantly
+ // reallocating them.
+ if (!name->IsSymbol() && name->length() <= 2) {
+ Object* symbol_version = Heap::LookupSymbol(name);
+ if (!symbol_version->IsFailure()) name = String::cast(symbol_version);
+ }
+
// Check access rights if needed.
if (IsAccessCheckNeeded()
&& !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
@@ -2629,33 +2633,24 @@
// Tests for the fast common case for property enumeration:
-// - this object has an enum cache
-// - this object has no elements
-// - no prototype has enumerable properties/elements
-// - neither this object nor any prototype has interceptors
+// - This object and all prototypes have an enum cache (which means that they
+// have no interceptors and need no access checks).
+// - This object has no elements.
+// - No prototype has enumerable properties/elements.
bool JSObject::IsSimpleEnum() {
- JSObject* arguments_boilerplate =
- Top::context()->global_context()->arguments_boilerplate();
- JSFunction* arguments_function =
- JSFunction::cast(arguments_boilerplate->map()->constructor());
- if (IsAccessCheckNeeded()) return false;
- if (map()->constructor() == arguments_function) return false;
-
for (Object* o = this;
o != Heap::null_value();
o = JSObject::cast(o)->GetPrototype()) {
JSObject* curr = JSObject::cast(o);
- if (!curr->HasFastProperties()) return false;
if (!curr->map()->instance_descriptors()->HasEnumCache()) return false;
+ ASSERT(!curr->HasNamedInterceptor());
+ ASSERT(!curr->HasIndexedInterceptor());
+ ASSERT(!curr->IsAccessCheckNeeded());
if (curr->NumberOfEnumElements() > 0) return false;
- if (curr->HasNamedInterceptor()) return false;
- if (curr->HasIndexedInterceptor()) return false;
if (curr != this) {
FixedArray* curr_fixed_array =
FixedArray::cast(curr->map()->instance_descriptors()->GetEnumCache());
- if (curr_fixed_array->length() > 0) {
- return false;
- }
+ if (curr_fixed_array->length() > 0) return false;
}
}
return true;
@@ -3561,12 +3556,7 @@
int length = this->length();
StringRepresentationTag string_tag = StringShape(this).representation_tag();
String* string = this;
- if (string_tag == kSlicedStringTag) {
- SlicedString* sliced = SlicedString::cast(string);
- offset += sliced->start();
- string = sliced->buffer();
- string_tag = StringShape(string).representation_tag();
- } else if (string_tag == kConsStringTag) {
+ if (string_tag == kConsStringTag) {
ConsString* cons = ConsString::cast(string);
ASSERT(cons->second()->length() == 0);
string = cons->first();
@@ -3592,12 +3582,7 @@
int length = this->length();
StringRepresentationTag string_tag = StringShape(this).representation_tag();
String* string = this;
- if (string_tag == kSlicedStringTag) {
- SlicedString* sliced = SlicedString::cast(string);
- offset += sliced->start();
- string = String::cast(sliced->buffer());
- string_tag = StringShape(string).representation_tag();
- } else if (string_tag == kConsStringTag) {
+ if (string_tag == kConsStringTag) {
ConsString* cons = ConsString::cast(string);
ASSERT(cons->second()->length() == 0);
string = cons->first();
@@ -3688,17 +3673,6 @@
case kExternalStringTag:
return ExternalTwoByteString::cast(this)->
ExternalTwoByteStringGetData(start);
- case kSlicedStringTag: {
- SlicedString* sliced_string = SlicedString::cast(this);
- String* buffer = sliced_string->buffer();
- if (StringShape(buffer).IsCons()) {
- ConsString* cs = ConsString::cast(buffer);
- // Flattened string.
- ASSERT(cs->second()->length() == 0);
- buffer = cs->first();
- }
- return buffer->GetTwoByteData(start + sliced_string->start());
- }
case kConsStringTag:
UNREACHABLE();
return NULL;
@@ -3853,22 +3827,6 @@
}
-const unibrow::byte* SlicedString::SlicedStringReadBlock(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- String* backing = buffer();
- unsigned offset = start() + *offset_ptr;
- unsigned length = backing->length();
- if (max_chars > length - offset) {
- max_chars = length - offset;
- }
- const unibrow::byte* answer =
- String::ReadBlock(backing, rbb, &offset, max_chars);
- *offset_ptr = offset - start();
- return answer;
-}
-
-
uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
ASSERT(index >= 0 && index < length());
return resource()->data()[index];
@@ -3992,10 +3950,6 @@
return ConsString::cast(input)->ConsStringReadBlock(rbb,
offset_ptr,
max_chars);
- case kSlicedStringTag:
- return SlicedString::cast(input)->SlicedStringReadBlock(rbb,
- offset_ptr,
- max_chars);
case kExternalStringTag:
if (input->IsAsciiRepresentation()) {
return ExternalAsciiString::cast(input)->ExternalAsciiStringReadBlock(
@@ -4138,20 +4092,15 @@
offset_ptr,
max_chars);
return;
- case kSlicedStringTag:
- SlicedString::cast(input)->SlicedStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return;
case kExternalStringTag:
if (input->IsAsciiRepresentation()) {
- ExternalAsciiString::cast(input)->
- ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars);
- } else {
- ExternalTwoByteString::cast(input)->
- ExternalTwoByteStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
+ ExternalAsciiString::cast(input)->
+ ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars);
+ } else {
+ ExternalTwoByteString::cast(input)->
+ ExternalTwoByteStringReadBlockIntoBuffer(rbb,
+ offset_ptr,
+ max_chars);
}
return;
default:
@@ -4257,20 +4206,6 @@
}
-void SlicedString::SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- String* backing = buffer();
- unsigned offset = start() + *offset_ptr;
- unsigned length = backing->length();
- if (max_chars > length - offset) {
- max_chars = length - offset;
- }
- String::ReadBlockIntoBuffer(backing, rbb, &offset, max_chars);
- *offset_ptr = offset - start();
-}
-
-
void ConsString::ConsStringIterateBody(ObjectVisitor* v) {
IteratePointers(v, kFirstOffset, kSecondOffset + kPointerSize);
}
@@ -4349,15 +4284,6 @@
to - from);
return;
}
- case kAsciiStringTag | kSlicedStringTag:
- case kTwoByteStringTag | kSlicedStringTag: {
- SlicedString* sliced_string = SlicedString::cast(source);
- int start = sliced_string->start();
- from += start;
- to += start;
- source = String::cast(sliced_string->buffer());
- break;
- }
case kAsciiStringTag | kConsStringTag:
case kTwoByteStringTag | kConsStringTag: {
ConsString* cons_string = ConsString::cast(source);
@@ -4393,18 +4319,23 @@
}
-void SlicedString::SlicedStringIterateBody(ObjectVisitor* v) {
- IteratePointer(v, kBufferOffset);
+#define FIELD_ADDR(p, offset) \
+ (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
+
+void ExternalAsciiString::ExternalAsciiStringIterateBody(ObjectVisitor* v) {
+ typedef v8::String::ExternalAsciiStringResource Resource;
+ v->VisitExternalAsciiString(
+ reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
-uint16_t SlicedString::SlicedStringGet(int index) {
- ASSERT(index >= 0 && index < this->length());
- // Delegate to the buffer string.
- String* underlying = buffer();
- return underlying->Get(start() + index);
+void ExternalTwoByteString::ExternalTwoByteStringIterateBody(ObjectVisitor* v) {
+ typedef v8::String::ExternalStringResource Resource;
+ v->VisitExternalTwoByteString(
+ reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
+#undef FIELD_ADDR
template <typename IteratorA, typename IteratorB>
static inline bool CompareStringContents(IteratorA* ia, IteratorB* ib) {
@@ -4549,23 +4480,11 @@
if (StringShape(this).IsSymbol()) return false;
Map* map = this->map();
- if (map == Heap::short_string_map()) {
- this->set_map(Heap::undetectable_short_string_map());
+ if (map == Heap::string_map()) {
+ this->set_map(Heap::undetectable_string_map());
return true;
- } else if (map == Heap::medium_string_map()) {
- this->set_map(Heap::undetectable_medium_string_map());
- return true;
- } else if (map == Heap::long_string_map()) {
- this->set_map(Heap::undetectable_long_string_map());
- return true;
- } else if (map == Heap::short_ascii_string_map()) {
- this->set_map(Heap::undetectable_short_ascii_string_map());
- return true;
- } else if (map == Heap::medium_ascii_string_map()) {
- this->set_map(Heap::undetectable_medium_ascii_string_map());
- return true;
- } else if (map == Heap::long_ascii_string_map()) {
- this->set_map(Heap::undetectable_long_ascii_string_map());
+ } else if (map == Heap::ascii_string_map()) {
+ this->set_map(Heap::undetectable_ascii_string_map());
return true;
}
// Rest cannot be marked as undetectable
@@ -4588,17 +4507,17 @@
uint32_t String::ComputeAndSetHash() {
// Should only be called if hash code has not yet been computed.
- ASSERT(!(length_field() & kHashComputedMask));
+ ASSERT(!(hash_field() & kHashComputedMask));
// Compute the hash code.
StringInputBuffer buffer(this);
- uint32_t field = ComputeLengthAndHashField(&buffer, length());
+ uint32_t field = ComputeHashField(&buffer, length());
// Store the hash code in the object.
- set_length_field(field);
+ set_hash_field(field);
// Check the hash code is there.
- ASSERT(length_field() & kHashComputedMask);
+ ASSERT(hash_field() & kHashComputedMask);
uint32_t result = field >> kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
@@ -4638,9 +4557,10 @@
bool String::SlowAsArrayIndex(uint32_t* index) {
if (length() <= kMaxCachedArrayIndexLength) {
Hash(); // force computation of hash code
- uint32_t field = length_field();
+ uint32_t field = hash_field();
if ((field & kIsArrayIndexMask) == 0) return false;
- *index = (field & ((1 << kShortLengthShift) - 1)) >> kLongLengthShift;
+ // Isolate the array index from the full hash field.
+ *index = (kArrayIndexHashMask & field) >> kHashShift;
return true;
} else {
StringInputBuffer buffer(this);
@@ -4649,37 +4569,42 @@
}
-static inline uint32_t HashField(uint32_t hash, bool is_array_index) {
+static inline uint32_t HashField(uint32_t hash,
+ bool is_array_index,
+ int length = -1) {
uint32_t result =
- (hash << String::kLongLengthShift) | String::kHashComputedMask;
- if (is_array_index) result |= String::kIsArrayIndexMask;
+ (hash << String::kHashShift) | String::kHashComputedMask;
+ if (is_array_index) {
+ // For array indexes, mix the length into the hash, since the array index
+ // itself can be zero and the stored hash value must never be.
+ ASSERT(length > 0);
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ result |= String::kIsArrayIndexMask;
+ result |= length << String::kArrayIndexHashLengthShift;
+ }
return result;
}
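A worked example of the new packing, as a standalone sketch (constants copied
from the objects.h hunks below; kHashComputedMask is assumed to be bit 0). It
shows why the length must be mixed in: the string "0" has array index 0,
whose payload alone would carry no set bits above the flags.

    #include <cassert>
    #include <stdint.h>

    static const uint32_t kHashComputedMask = 1;       // assumed: bit 0
    static const uint32_t kIsArrayIndexMask = 1 << 1;  // from this patch
    static const int kHashShift = 2;                   // kNofLengthBitFields
    static const int kArrayIndexHashLengthShift = 24 + 2;
    static const uint32_t kArrayIndexHashMask =
        (1u << kArrayIndexHashLengthShift) - 1;

    int main() {
      uint32_t index = 0;   // the array index of the string "0"
      uint32_t length = 1;  // its length, mixed into the field
      uint32_t field = (index << kHashShift) | kHashComputedMask |
                       kIsArrayIndexMask |
                       (length << kArrayIndexHashLengthShift);
      // The mixed-in length keeps the stored hash value non-zero...
      assert((field >> kHashShift) != 0);
      // ...and SlowAsArrayIndex can still recover the index exactly.
      assert(((field & kArrayIndexHashMask) >> kHashShift) == index);
      return 0;
    }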
uint32_t StringHasher::GetHashField() {
ASSERT(is_valid());
- if (length_ <= String::kMaxShortStringSize) {
- uint32_t payload;
+ if (length_ <= String::kMaxHashCalcLength) {
if (is_array_index()) {
- payload = v8::internal::HashField(array_index(), true);
+ return v8::internal::HashField(array_index(), true, length_);
} else {
- payload = v8::internal::HashField(GetHash(), false);
+ return v8::internal::HashField(GetHash(), false);
}
- return (payload & ((1 << String::kShortLengthShift) - 1)) |
- (length_ << String::kShortLengthShift);
- } else if (length_ <= String::kMaxMediumStringSize) {
uint32_t payload = v8::internal::HashField(GetHash(), false);
- return (payload & ((1 << String::kMediumLengthShift) - 1)) |
- (length_ << String::kMediumLengthShift);
+ return payload;
} else {
return v8::internal::HashField(length_, false);
}
}
-uint32_t String::ComputeLengthAndHashField(unibrow::CharacterStream* buffer,
- int length) {
+uint32_t String::ComputeHashField(unibrow::CharacterStream* buffer,
+ int length) {
StringHasher hasher(length);
// Very long strings have a trivial hash that doesn't inspect the
@@ -4704,43 +4629,10 @@
}
-Object* String::Slice(int start, int end) {
+Object* String::SubString(int start, int end) {
if (start == 0 && end == length()) return this;
- if (StringShape(this).representation_tag() == kSlicedStringTag) {
- // Translate slices of a SlicedString into slices of the
- // underlying string buffer.
- SlicedString* str = SlicedString::cast(this);
- String* buf = str->buffer();
- return Heap::AllocateSlicedString(buf,
- str->start() + start,
- str->start() + end);
- }
- Object* result = Heap::AllocateSlicedString(this, start, end);
- if (result->IsFailure()) {
- return result;
- }
- // Due to the way we retry after GC on allocation failure we are not allowed
- // to fail on allocation after this point. This is the one-allocation rule.
-
- // Try to flatten a cons string that is under the sliced string.
- // This is to avoid memory leaks and possible stack overflows caused by
- // building 'towers' of sliced strings on cons strings.
- // This may fail due to an allocation failure (when a GC is needed), but it
- // will succeed often enough to avoid the problem. We only have to do this
- // if Heap::AllocateSlicedString actually returned a SlicedString. It will
- // return flat strings for small slices for efficiency reasons.
- String* answer = String::cast(result);
- if (StringShape(answer).IsSliced() &&
- StringShape(this).representation_tag() == kConsStringTag) {
- TryFlatten();
- // If the flatten succeeded we might as well make the sliced string point
- // to the flat string rather than the cons string.
- String* second = ConsString::cast(this)->second();
- if (second->length() == 0) {
- SlicedString::cast(answer)->set_buffer(ConsString::cast(this)->first());
- }
- }
- return answer;
+ Object* result = Heap::AllocateSubString(this, start, end);
+ return result;
}
@@ -4920,13 +4812,9 @@
void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
- bool only_this_property_assignments,
bool only_simple_this_property_assignments,
FixedArray* assignments) {
set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlyThisPropertyAssignments,
- only_this_property_assignments));
- set_compiler_hints(BooleanBit::set(compiler_hints(),
kHasOnlySimpleThisPropertyAssignments,
only_simple_this_property_assignments));
set_this_property_assignments(assignments);
@@ -4936,9 +4824,6 @@
void SharedFunctionInfo::ClearThisPropertyAssignmentsInfo() {
set_compiler_hints(BooleanBit::set(compiler_hints(),
- kHasOnlyThisPropertyAssignments,
- false));
- set_compiler_hints(BooleanBit::set(compiler_hints(),
kHasOnlySimpleThisPropertyAssignments,
false));
set_this_property_assignments(Heap::undefined_value());
@@ -4993,7 +4878,7 @@
return;
}
- // Get the slice of the source for this function.
+ // Get the source for the script which this function came from.
// Don't use String::cast because we don't want more assertion errors while
// we are already creating a stack dump.
String* script_source =
@@ -5082,7 +4967,7 @@
}
-void Code::Relocate(int delta) {
+void Code::Relocate(intptr_t delta) {
for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
it.rinfo()->apply(delta);
}
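The int -> intptr_t change matters on 64-bit hosts, where a moved code
object's displacement can exceed the 32-bit range. A standalone illustration
(the addresses are made up):

    #include <stdint.h>

    int main() {
      int64_t old_start = 0x100000000LL;  // hypothetical code addresses
      int64_t new_start = 0x280000000LL;
      int64_t delta = new_start - old_start;   // 0x180000000: needs 33 bits
      int narrowed = static_cast<int>(delta);  // what Relocate(int) kept
      return narrowed == delta ? 0 : 1;        // returns 1: int truncates
    }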
@@ -5148,8 +5033,9 @@
// Only look at positions after the current pc.
if (it.rinfo()->pc() < pc) {
// Get position and distance.
- int dist = pc - it.rinfo()->pc();
- int pos = it.rinfo()->data();
+
+ int dist = static_cast<int>(pc - it.rinfo()->pc());
+ int pos = static_cast<int>(it.rinfo()->data());
// If this position is closer than the current candidate or if it has the
// same distance as the current candidate and the position is higher then
// this position is the new candidate.
@@ -5176,7 +5062,7 @@
RelocIterator it(this, RelocInfo::kPositionMask);
while (!it.done()) {
if (RelocInfo::IsStatementPosition(it.rinfo()->rmode())) {
- int p = it.rinfo()->data();
+ int p = static_cast<int>(it.rinfo()->data());
if (statement_position < p && p <= position) {
statement_position = p;
}
@@ -5353,9 +5239,7 @@
Handle<JSArray> self(this);
Handle<FixedArray> old_backing(FixedArray::cast(elements()));
int old_size = old_backing->length();
- // Doubling in size would be overkill, but leave some slack to avoid
- // constantly growing.
- int new_size = required_size + (required_size >> 3);
+ int new_size = required_size > old_size ? required_size : old_size;
Handle<FixedArray> new_backing = Factory::NewFixedArray(new_size);
// Can't use this any more now because we may have had a GC!
for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
@@ -6284,6 +6168,18 @@
}
+Object* JSObject::GetLocalPropertyPostInterceptor(
+ JSObject* receiver,
+ String* name,
+ PropertyAttributes* attributes) {
+ // Check local property in holder, ignore interceptor.
+ LookupResult result;
+ LocalLookupRealNamedProperty(name, &result);
+ if (!result.IsValid()) return Heap::undefined_value();
+ return GetProperty(receiver, &result, name, attributes);
+}
+
+
Object* JSObject::GetPropertyWithInterceptor(
JSObject* receiver,
String* name,
@@ -6573,6 +6469,15 @@
int JSObject::NumberOfEnumElements() {
+ // Fast case for objects with no elements.
+ if (!IsJSValue() && HasFastElements()) {
+ uint32_t length = IsJSArray() ?
+ static_cast<uint32_t>(
+ Smi::cast(JSArray::cast(this)->length())->value()) :
+ static_cast<uint32_t>(FixedArray::cast(elements())->length());
+ if (length == 0) return 0;
+ }
+ // Compute the number of enumerable elements.
return NumberOfLocalElements(static_cast<PropertyAttributes>(DONT_ENUM));
}
@@ -6832,19 +6737,19 @@
class Utf8SymbolKey : public HashTableKey {
public:
explicit Utf8SymbolKey(Vector<const char> string)
- : string_(string), length_field_(0) { }
+ : string_(string), hash_field_(0) { }
bool IsMatch(Object* string) {
return String::cast(string)->IsEqualTo(string_);
}
uint32_t Hash() {
- if (length_field_ != 0) return length_field_ >> String::kHashShift;
+ if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
unibrow::Utf8InputBuffer<> buffer(string_.start(),
static_cast<unsigned>(string_.length()));
chars_ = buffer.Length();
- length_field_ = String::ComputeLengthAndHashField(&buffer, chars_);
- uint32_t result = length_field_ >> String::kHashShift;
+ hash_field_ = String::ComputeHashField(&buffer, chars_);
+ uint32_t result = hash_field_ >> String::kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
}
@@ -6854,12 +6759,12 @@
}
Object* AsObject() {
- if (length_field_ == 0) Hash();
- return Heap::AllocateSymbol(string_, chars_, length_field_);
+ if (hash_field_ == 0) Hash();
+ return Heap::AllocateSymbol(string_, chars_, hash_field_);
}
Vector<const char> string_;
- uint32_t length_field_;
+ uint32_t hash_field_;
int chars_; // Caches the number of characters when computing the hash code.
};
@@ -6900,7 +6805,7 @@
StringInputBuffer buffer(string_);
return Heap::AllocateInternalSymbol(&buffer,
string_->length(),
- string_->length_field());
+ string_->hash_field());
}
static uint32_t StringHash(Object* obj) {
@@ -7429,6 +7334,67 @@
}
+// This class is used for looking up two-character strings in the symbol
+// table. If we don't have a hit, we don't want to waste much time, so we
+// unroll the string hash calculation loop here for speed. This doesn't work
+// if the two characters form a decimal integer, since such strings have a
+// different hash algorithm.
+class TwoCharHashTableKey : public HashTableKey {
+ public:
+ TwoCharHashTableKey(uint32_t c1, uint32_t c2)
+ : c1_(c1), c2_(c2) {
+ // Char 1.
+ uint32_t hash = c1 + (c1 << 10);
+ hash ^= hash >> 6;
+ // Char 2.
+ hash += c2;
+ hash += hash << 10;
+ hash ^= hash >> 6;
+ // GetHash.
+ hash += hash << 3;
+ hash ^= hash >> 11;
+ hash += hash << 15;
+ if (hash == 0) hash = 27;
+#ifdef DEBUG
+ StringHasher hasher(2);
+ hasher.AddCharacter(c1);
+ hasher.AddCharacter(c2);
+ // If this assert fails then we failed to reproduce the two-character
+ // version of the string hashing algorithm above. One reason could be
+ // that we were passed two digits as characters, since the hash
+ // algorithm is different in that case.
+ ASSERT_EQ(static_cast<int>(hasher.GetHash()), static_cast<int>(hash));
+#endif
+ hash_ = hash;
+ }
+
+ bool IsMatch(Object* o) {
+ if (!o->IsString()) return false;
+ String* other = String::cast(o);
+ if (other->length() != 2) return false;
+ if (other->Get(0) != c1_) return false;
+ return other->Get(1) == c2_;
+ }
+
+ uint32_t Hash() { return hash_; }
+ uint32_t HashForObject(Object* key) {
+ if (!key->IsString()) return 0;
+ return String::cast(key)->Hash();
+ }
+
+ Object* AsObject() {
+ // The TwoCharHashTableKey is only used for looking in the symbol
+ // table, not for adding to it.
+ UNREACHABLE();
+ return NULL;
+ }
+ private:
+ uint32_t c1_;
+ uint32_t c2_;
+ uint32_t hash_;
+};
+
+
bool SymbolTable::LookupSymbolIfExists(String* string, String** symbol) {
SymbolKey key(string);
int entry = FindEntry(&key);
@@ -7443,6 +7409,22 @@
}
+bool SymbolTable::LookupTwoCharsSymbolIfExists(uint32_t c1,
+ uint32_t c2,
+ String** symbol) {
+ TwoCharHashTableKey key(c1, c2);
+ int entry = FindEntry(&key);
+ if (entry == kNotFound) {
+ return false;
+ } else {
+ String* result = String::cast(KeyAt(entry));
+ ASSERT(StringShape(result).IsSymbol());
+ *symbol = result;
+ return true;
+ }
+}
+
+
Object* SymbolTable::LookupSymbol(Vector<const char> str, Object** s) {
Utf8SymbolKey key(str);
return LookupKey(&key, s);
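A hedged usage sketch for the new fast path (the helper and the
Heap::symbol_table() accessor are assumptions; only the
LookupTwoCharsSymbolIfExists signature comes from this patch):

    static String* FindTwoCharSymbol(uint32_t c1, uint32_t c2) {
      String* symbol = NULL;
      if (Heap::symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2,
                                                             &symbol)) {
        return symbol;  // hit: reuse the existing symbol, no allocation
      }
      // Miss: the key's AsObject() is deliberately UNREACHABLE, so any
      // insertion must go through the ordinary, allocating lookup instead.
      // Caveat from the class comment: digit pairs such as "42" hash as
      // array indexes and must not be probed this way.
      return NULL;
    }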
diff --git a/src/objects.h b/src/objects.h
index 68bed6c..671978a 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -78,7 +78,6 @@
// - SeqAsciiString
// - SeqTwoByteString
// - ConsString
-// - SlicedString
// - ExternalString
// - ExternalAsciiString
// - ExternalTwoByteString
@@ -210,7 +209,7 @@
// considered TWO_BYTE. It is not mentioned in the name. ASCII encoding is
// mentioned explicitly in the name. Likewise, the default representation is
// considered sequential. It is not mentioned in the name. The other
-// representations (eg, CONS, SLICED, EXTERNAL) are explicitly mentioned.
+// representations (eg, CONS, EXTERNAL) are explicitly mentioned.
// Finally, the string is either a SYMBOL_TYPE (if it is a symbol) or a
// STRING_TYPE (if it is not a symbol).
//
@@ -222,308 +221,128 @@
// NOTE: Everything following JS_VALUE_TYPE is considered a
// JSObject for GC purposes. The first four entries here have typeof
// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-#define INSTANCE_TYPE_LIST_ALL(V) \
- V(SHORT_SYMBOL_TYPE) \
- V(MEDIUM_SYMBOL_TYPE) \
- V(LONG_SYMBOL_TYPE) \
- V(SHORT_ASCII_SYMBOL_TYPE) \
- V(MEDIUM_ASCII_SYMBOL_TYPE) \
- V(LONG_ASCII_SYMBOL_TYPE) \
- V(SHORT_CONS_SYMBOL_TYPE) \
- V(MEDIUM_CONS_SYMBOL_TYPE) \
- V(LONG_CONS_SYMBOL_TYPE) \
- V(SHORT_CONS_ASCII_SYMBOL_TYPE) \
- V(MEDIUM_CONS_ASCII_SYMBOL_TYPE) \
- V(LONG_CONS_ASCII_SYMBOL_TYPE) \
- V(SHORT_SLICED_SYMBOL_TYPE) \
- V(MEDIUM_SLICED_SYMBOL_TYPE) \
- V(LONG_SLICED_SYMBOL_TYPE) \
- V(SHORT_SLICED_ASCII_SYMBOL_TYPE) \
- V(MEDIUM_SLICED_ASCII_SYMBOL_TYPE) \
- V(LONG_SLICED_ASCII_SYMBOL_TYPE) \
- V(SHORT_EXTERNAL_SYMBOL_TYPE) \
- V(MEDIUM_EXTERNAL_SYMBOL_TYPE) \
- V(LONG_EXTERNAL_SYMBOL_TYPE) \
- V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE) \
- V(MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE) \
- V(LONG_EXTERNAL_ASCII_SYMBOL_TYPE) \
- V(SHORT_STRING_TYPE) \
- V(MEDIUM_STRING_TYPE) \
- V(LONG_STRING_TYPE) \
- V(SHORT_ASCII_STRING_TYPE) \
- V(MEDIUM_ASCII_STRING_TYPE) \
- V(LONG_ASCII_STRING_TYPE) \
- V(SHORT_CONS_STRING_TYPE) \
- V(MEDIUM_CONS_STRING_TYPE) \
- V(LONG_CONS_STRING_TYPE) \
- V(SHORT_CONS_ASCII_STRING_TYPE) \
- V(MEDIUM_CONS_ASCII_STRING_TYPE) \
- V(LONG_CONS_ASCII_STRING_TYPE) \
- V(SHORT_SLICED_STRING_TYPE) \
- V(MEDIUM_SLICED_STRING_TYPE) \
- V(LONG_SLICED_STRING_TYPE) \
- V(SHORT_SLICED_ASCII_STRING_TYPE) \
- V(MEDIUM_SLICED_ASCII_STRING_TYPE) \
- V(LONG_SLICED_ASCII_STRING_TYPE) \
- V(SHORT_EXTERNAL_STRING_TYPE) \
- V(MEDIUM_EXTERNAL_STRING_TYPE) \
- V(LONG_EXTERNAL_STRING_TYPE) \
- V(SHORT_EXTERNAL_ASCII_STRING_TYPE) \
- V(MEDIUM_EXTERNAL_ASCII_STRING_TYPE) \
- V(LONG_EXTERNAL_ASCII_STRING_TYPE) \
- V(LONG_PRIVATE_EXTERNAL_ASCII_STRING_TYPE) \
- \
- V(MAP_TYPE) \
- V(HEAP_NUMBER_TYPE) \
- V(FIXED_ARRAY_TYPE) \
- V(CODE_TYPE) \
- V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
- V(ODDBALL_TYPE) \
- V(PROXY_TYPE) \
- V(BYTE_ARRAY_TYPE) \
- V(PIXEL_ARRAY_TYPE) \
- /* Note: the order of these external array */ \
- /* types is relied upon in */ \
- /* Object::IsExternalArray(). */ \
- V(EXTERNAL_BYTE_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE) \
- V(EXTERNAL_SHORT_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE) \
- V(EXTERNAL_INT_ARRAY_TYPE) \
- V(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE) \
- V(EXTERNAL_FLOAT_ARRAY_TYPE) \
- V(FILLER_TYPE) \
- \
- V(ACCESSOR_INFO_TYPE) \
- V(ACCESS_CHECK_INFO_TYPE) \
- V(INTERCEPTOR_INFO_TYPE) \
- V(SHARED_FUNCTION_INFO_TYPE) \
- V(CALL_HANDLER_INFO_TYPE) \
- V(FUNCTION_TEMPLATE_INFO_TYPE) \
- V(OBJECT_TEMPLATE_INFO_TYPE) \
- V(SIGNATURE_INFO_TYPE) \
- V(TYPE_SWITCH_INFO_TYPE) \
- V(SCRIPT_TYPE) \
- \
- V(JS_VALUE_TYPE) \
- V(JS_OBJECT_TYPE) \
- V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
- V(JS_GLOBAL_OBJECT_TYPE) \
- V(JS_BUILTINS_OBJECT_TYPE) \
- V(JS_GLOBAL_PROXY_TYPE) \
- V(JS_ARRAY_TYPE) \
- V(JS_REGEXP_TYPE) \
- \
- V(JS_FUNCTION_TYPE) \
+#define INSTANCE_TYPE_LIST_ALL(V) \
+ V(SYMBOL_TYPE) \
+ V(ASCII_SYMBOL_TYPE) \
+ V(CONS_SYMBOL_TYPE) \
+ V(CONS_ASCII_SYMBOL_TYPE) \
+ V(EXTERNAL_SYMBOL_TYPE) \
+ V(EXTERNAL_ASCII_SYMBOL_TYPE) \
+ V(STRING_TYPE) \
+ V(ASCII_STRING_TYPE) \
+ V(CONS_STRING_TYPE) \
+ V(CONS_ASCII_STRING_TYPE) \
+ V(EXTERNAL_STRING_TYPE) \
+ V(EXTERNAL_ASCII_STRING_TYPE) \
+ V(PRIVATE_EXTERNAL_ASCII_STRING_TYPE) \
+ \
+ V(MAP_TYPE) \
+ V(HEAP_NUMBER_TYPE) \
+ V(FIXED_ARRAY_TYPE) \
+ V(CODE_TYPE) \
+ V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
+ V(ODDBALL_TYPE) \
+ V(PROXY_TYPE) \
+ V(BYTE_ARRAY_TYPE) \
+ V(PIXEL_ARRAY_TYPE) \
+ /* Note: the order of these external array */ \
+ /* types is relied upon in */ \
+ /* Object::IsExternalArray(). */ \
+ V(EXTERNAL_BYTE_ARRAY_TYPE) \
+ V(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE) \
+ V(EXTERNAL_SHORT_ARRAY_TYPE) \
+ V(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE) \
+ V(EXTERNAL_INT_ARRAY_TYPE) \
+ V(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE) \
+ V(EXTERNAL_FLOAT_ARRAY_TYPE) \
+ V(FILLER_TYPE) \
+ \
+ V(ACCESSOR_INFO_TYPE) \
+ V(ACCESS_CHECK_INFO_TYPE) \
+ V(INTERCEPTOR_INFO_TYPE) \
+ V(SHARED_FUNCTION_INFO_TYPE) \
+ V(CALL_HANDLER_INFO_TYPE) \
+ V(FUNCTION_TEMPLATE_INFO_TYPE) \
+ V(OBJECT_TEMPLATE_INFO_TYPE) \
+ V(SIGNATURE_INFO_TYPE) \
+ V(TYPE_SWITCH_INFO_TYPE) \
+ V(SCRIPT_TYPE) \
+ \
+ V(JS_VALUE_TYPE) \
+ V(JS_OBJECT_TYPE) \
+ V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
+ V(JS_GLOBAL_OBJECT_TYPE) \
+ V(JS_BUILTINS_OBJECT_TYPE) \
+ V(JS_GLOBAL_PROXY_TYPE) \
+ V(JS_ARRAY_TYPE) \
+ V(JS_REGEXP_TYPE) \
+ \
+ V(JS_FUNCTION_TYPE) \
#ifdef ENABLE_DEBUGGER_SUPPORT
-#define INSTANCE_TYPE_LIST_DEBUGGER(V) \
- V(DEBUG_INFO_TYPE) \
+#define INSTANCE_TYPE_LIST_DEBUGGER(V) \
+ V(DEBUG_INFO_TYPE) \
V(BREAK_POINT_INFO_TYPE)
#else
#define INSTANCE_TYPE_LIST_DEBUGGER(V)
#endif
-#define INSTANCE_TYPE_LIST(V) \
- INSTANCE_TYPE_LIST_ALL(V) \
+#define INSTANCE_TYPE_LIST(V) \
+ INSTANCE_TYPE_LIST_ALL(V) \
INSTANCE_TYPE_LIST_DEBUGGER(V)
// Since string types are not consecutive, this macro is used to
// iterate over them.
#define STRING_TYPE_LIST(V) \
- V(SHORT_SYMBOL_TYPE, \
+ V(SYMBOL_TYPE, \
SeqTwoByteString::kAlignedSize, \
- short_symbol, \
- ShortSymbol) \
- V(MEDIUM_SYMBOL_TYPE, \
+ symbol, \
+ Symbol) \
+ V(ASCII_SYMBOL_TYPE, \
+ SeqAsciiString::kAlignedSize, \
+ ascii_symbol, \
+ AsciiSymbol) \
+ V(CONS_SYMBOL_TYPE, \
+ ConsString::kSize, \
+ cons_symbol, \
+ ConsSymbol) \
+ V(CONS_ASCII_SYMBOL_TYPE, \
+ ConsString::kSize, \
+ cons_ascii_symbol, \
+ ConsAsciiSymbol) \
+ V(EXTERNAL_SYMBOL_TYPE, \
+ ExternalTwoByteString::kSize, \
+ external_symbol, \
+ ExternalSymbol) \
+ V(EXTERNAL_ASCII_SYMBOL_TYPE, \
+ ExternalAsciiString::kSize, \
+ external_ascii_symbol, \
+ ExternalAsciiSymbol) \
+ V(STRING_TYPE, \
SeqTwoByteString::kAlignedSize, \
- medium_symbol, \
- MediumSymbol) \
- V(LONG_SYMBOL_TYPE, \
- SeqTwoByteString::kAlignedSize, \
- long_symbol, \
- LongSymbol) \
- V(SHORT_ASCII_SYMBOL_TYPE, \
+ string, \
+ String) \
+ V(ASCII_STRING_TYPE, \
SeqAsciiString::kAlignedSize, \
- short_ascii_symbol, \
- ShortAsciiSymbol) \
- V(MEDIUM_ASCII_SYMBOL_TYPE, \
- SeqAsciiString::kAlignedSize, \
- medium_ascii_symbol, \
- MediumAsciiSymbol) \
- V(LONG_ASCII_SYMBOL_TYPE, \
- SeqAsciiString::kAlignedSize, \
- long_ascii_symbol, \
- LongAsciiSymbol) \
- V(SHORT_CONS_SYMBOL_TYPE, \
+ ascii_string, \
+ AsciiString) \
+ V(CONS_STRING_TYPE, \
ConsString::kSize, \
- short_cons_symbol, \
- ShortConsSymbol) \
- V(MEDIUM_CONS_SYMBOL_TYPE, \
+ cons_string, \
+ ConsString) \
+ V(CONS_ASCII_STRING_TYPE, \
ConsString::kSize, \
- medium_cons_symbol, \
- MediumConsSymbol) \
- V(LONG_CONS_SYMBOL_TYPE, \
- ConsString::kSize, \
- long_cons_symbol, \
- LongConsSymbol) \
- V(SHORT_CONS_ASCII_SYMBOL_TYPE, \
- ConsString::kSize, \
- short_cons_ascii_symbol, \
- ShortConsAsciiSymbol) \
- V(MEDIUM_CONS_ASCII_SYMBOL_TYPE, \
- ConsString::kSize, \
- medium_cons_ascii_symbol, \
- MediumConsAsciiSymbol) \
- V(LONG_CONS_ASCII_SYMBOL_TYPE, \
- ConsString::kSize, \
- long_cons_ascii_symbol, \
- LongConsAsciiSymbol) \
- V(SHORT_SLICED_SYMBOL_TYPE, \
- SlicedString::kSize, \
- short_sliced_symbol, \
- ShortSlicedSymbol) \
- V(MEDIUM_SLICED_SYMBOL_TYPE, \
- SlicedString::kSize, \
- medium_sliced_symbol, \
- MediumSlicedSymbol) \
- V(LONG_SLICED_SYMBOL_TYPE, \
- SlicedString::kSize, \
- long_sliced_symbol, \
- LongSlicedSymbol) \
- V(SHORT_SLICED_ASCII_SYMBOL_TYPE, \
- SlicedString::kSize, \
- short_sliced_ascii_symbol, \
- ShortSlicedAsciiSymbol) \
- V(MEDIUM_SLICED_ASCII_SYMBOL_TYPE, \
- SlicedString::kSize, \
- medium_sliced_ascii_symbol, \
- MediumSlicedAsciiSymbol) \
- V(LONG_SLICED_ASCII_SYMBOL_TYPE, \
- SlicedString::kSize, \
- long_sliced_ascii_symbol, \
- LongSlicedAsciiSymbol) \
- V(SHORT_EXTERNAL_SYMBOL_TYPE, \
+ cons_ascii_string, \
+ ConsAsciiString) \
+ V(EXTERNAL_STRING_TYPE, \
ExternalTwoByteString::kSize, \
- short_external_symbol, \
- ShortExternalSymbol) \
- V(MEDIUM_EXTERNAL_SYMBOL_TYPE, \
- ExternalTwoByteString::kSize, \
- medium_external_symbol, \
- MediumExternalSymbol) \
- V(LONG_EXTERNAL_SYMBOL_TYPE, \
- ExternalTwoByteString::kSize, \
- long_external_symbol, \
- LongExternalSymbol) \
- V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE, \
+ external_string, \
+ ExternalString) \
+ V(EXTERNAL_ASCII_STRING_TYPE, \
ExternalAsciiString::kSize, \
- short_external_ascii_symbol, \
- ShortExternalAsciiSymbol) \
- V(MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE, \
- ExternalAsciiString::kSize, \
- medium_external_ascii_symbol, \
- MediumExternalAsciiSymbol) \
- V(LONG_EXTERNAL_ASCII_SYMBOL_TYPE, \
- ExternalAsciiString::kSize, \
- long_external_ascii_symbol, \
- LongExternalAsciiSymbol) \
- V(SHORT_STRING_TYPE, \
- SeqTwoByteString::kAlignedSize, \
- short_string, \
- ShortString) \
- V(MEDIUM_STRING_TYPE, \
- SeqTwoByteString::kAlignedSize, \
- medium_string, \
- MediumString) \
- V(LONG_STRING_TYPE, \
- SeqTwoByteString::kAlignedSize, \
- long_string, \
- LongString) \
- V(SHORT_ASCII_STRING_TYPE, \
- SeqAsciiString::kAlignedSize, \
- short_ascii_string, \
- ShortAsciiString) \
- V(MEDIUM_ASCII_STRING_TYPE, \
- SeqAsciiString::kAlignedSize, \
- medium_ascii_string, \
- MediumAsciiString) \
- V(LONG_ASCII_STRING_TYPE, \
- SeqAsciiString::kAlignedSize, \
- long_ascii_string, \
- LongAsciiString) \
- V(SHORT_CONS_STRING_TYPE, \
- ConsString::kSize, \
- short_cons_string, \
- ShortConsString) \
- V(MEDIUM_CONS_STRING_TYPE, \
- ConsString::kSize, \
- medium_cons_string, \
- MediumConsString) \
- V(LONG_CONS_STRING_TYPE, \
- ConsString::kSize, \
- long_cons_string, \
- LongConsString) \
- V(SHORT_CONS_ASCII_STRING_TYPE, \
- ConsString::kSize, \
- short_cons_ascii_string, \
- ShortConsAsciiString) \
- V(MEDIUM_CONS_ASCII_STRING_TYPE, \
- ConsString::kSize, \
- medium_cons_ascii_string, \
- MediumConsAsciiString) \
- V(LONG_CONS_ASCII_STRING_TYPE, \
- ConsString::kSize, \
- long_cons_ascii_string, \
- LongConsAsciiString) \
- V(SHORT_SLICED_STRING_TYPE, \
- SlicedString::kSize, \
- short_sliced_string, \
- ShortSlicedString) \
- V(MEDIUM_SLICED_STRING_TYPE, \
- SlicedString::kSize, \
- medium_sliced_string, \
- MediumSlicedString) \
- V(LONG_SLICED_STRING_TYPE, \
- SlicedString::kSize, \
- long_sliced_string, \
- LongSlicedString) \
- V(SHORT_SLICED_ASCII_STRING_TYPE, \
- SlicedString::kSize, \
- short_sliced_ascii_string, \
- ShortSlicedAsciiString) \
- V(MEDIUM_SLICED_ASCII_STRING_TYPE, \
- SlicedString::kSize, \
- medium_sliced_ascii_string, \
- MediumSlicedAsciiString) \
- V(LONG_SLICED_ASCII_STRING_TYPE, \
- SlicedString::kSize, \
- long_sliced_ascii_string, \
- LongSlicedAsciiString) \
- V(SHORT_EXTERNAL_STRING_TYPE, \
- ExternalTwoByteString::kSize, \
- short_external_string, \
- ShortExternalString) \
- V(MEDIUM_EXTERNAL_STRING_TYPE, \
- ExternalTwoByteString::kSize, \
- medium_external_string, \
- MediumExternalString) \
- V(LONG_EXTERNAL_STRING_TYPE, \
- ExternalTwoByteString::kSize, \
- long_external_string, \
- LongExternalString) \
- V(SHORT_EXTERNAL_ASCII_STRING_TYPE, \
- ExternalAsciiString::kSize, \
- short_external_ascii_string, \
- ShortExternalAsciiString) \
- V(MEDIUM_EXTERNAL_ASCII_STRING_TYPE, \
- ExternalAsciiString::kSize, \
- medium_external_ascii_string, \
- MediumExternalAsciiString) \
- V(LONG_EXTERNAL_ASCII_STRING_TYPE, \
- ExternalAsciiString::kSize, \
- long_external_ascii_string, \
- LongExternalAsciiString)
+ external_ascii_string, \
+ ExternalAsciiString) \
// A struct is a simple object with a set of object-valued fields. Including an
// object type in this causes the compiler to generate most of the boilerplate
@@ -534,27 +353,27 @@
// Note that for subtle reasons related to the ordering or numerical values of
// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
// manually.
-#define STRUCT_LIST_ALL(V) \
- V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
- V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
- V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
- V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info) \
- V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
- V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
- V(SIGNATURE_INFO, SignatureInfo, signature_info) \
- V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
+#define STRUCT_LIST_ALL(V) \
+ V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
+ V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
+ V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
+ V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info) \
+ V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
+ V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
+ V(SIGNATURE_INFO, SignatureInfo, signature_info) \
+ V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
V(SCRIPT, Script, script)
#ifdef ENABLE_DEBUGGER_SUPPORT
-#define STRUCT_LIST_DEBUGGER(V) \
- V(DEBUG_INFO, DebugInfo, debug_info) \
+#define STRUCT_LIST_DEBUGGER(V) \
+ V(DEBUG_INFO, DebugInfo, debug_info) \
V(BREAK_POINT_INFO, BreakPointInfo, break_point_info)
#else
#define STRUCT_LIST_DEBUGGER(V)
#endif
-#define STRUCT_LIST(V) \
- STRUCT_LIST_ALL(V) \
+#define STRUCT_LIST(V) \
+ STRUCT_LIST_ALL(V) \
STRUCT_LIST_DEBUGGER(V)
// We use the full 8 bits of the instance_type field to encode heap object
@@ -570,15 +389,6 @@
const uint32_t kNotSymbolTag = 0x0;
const uint32_t kSymbolTag = 0x20;
-// If bit 7 is clear, bits 3 and 4 are the string's size (short, medium or
-// long). These values are very special in that they are also used to shift
-// the length field to get the length, removing the hash value. This avoids
-// using if or switch when getting the length of a string.
-const uint32_t kStringSizeMask = 0x18;
-const uint32_t kShortStringTag = 0x18;
-const uint32_t kMediumStringTag = 0x10;
-const uint32_t kLongStringTag = 0x00;
-
// If bit 7 is clear then bit 2 indicates whether the string consists of
// two-byte characters or one-byte characters.
const uint32_t kStringEncodingMask = 0x4;
@@ -591,7 +401,6 @@
enum StringRepresentationTag {
kSeqStringTag = 0x0,
kConsStringTag = 0x1,
- kSlicedStringTag = 0x2,
kExternalStringTag = 0x3
};
@@ -609,78 +418,20 @@
enum InstanceType {
- SHORT_SYMBOL_TYPE = kShortStringTag | kSymbolTag | kSeqStringTag,
- MEDIUM_SYMBOL_TYPE = kMediumStringTag | kSymbolTag | kSeqStringTag,
- LONG_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kSeqStringTag,
- SHORT_ASCII_SYMBOL_TYPE =
- kShortStringTag | kAsciiStringTag | kSymbolTag | kSeqStringTag,
- MEDIUM_ASCII_SYMBOL_TYPE =
- kMediumStringTag | kAsciiStringTag | kSymbolTag | kSeqStringTag,
- LONG_ASCII_SYMBOL_TYPE =
- kLongStringTag | kAsciiStringTag | kSymbolTag | kSeqStringTag,
- SHORT_CONS_SYMBOL_TYPE = kShortStringTag | kSymbolTag | kConsStringTag,
- MEDIUM_CONS_SYMBOL_TYPE = kMediumStringTag | kSymbolTag | kConsStringTag,
- LONG_CONS_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kConsStringTag,
- SHORT_CONS_ASCII_SYMBOL_TYPE =
- kShortStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag,
- MEDIUM_CONS_ASCII_SYMBOL_TYPE =
- kMediumStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag,
- LONG_CONS_ASCII_SYMBOL_TYPE =
- kLongStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag,
- SHORT_SLICED_SYMBOL_TYPE = kShortStringTag | kSymbolTag | kSlicedStringTag,
- MEDIUM_SLICED_SYMBOL_TYPE = kMediumStringTag | kSymbolTag | kSlicedStringTag,
- LONG_SLICED_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kSlicedStringTag,
- SHORT_SLICED_ASCII_SYMBOL_TYPE =
- kShortStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag,
- MEDIUM_SLICED_ASCII_SYMBOL_TYPE =
- kMediumStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag,
- LONG_SLICED_ASCII_SYMBOL_TYPE =
- kLongStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag,
- SHORT_EXTERNAL_SYMBOL_TYPE =
- kShortStringTag | kSymbolTag | kExternalStringTag,
- MEDIUM_EXTERNAL_SYMBOL_TYPE =
- kMediumStringTag | kSymbolTag | kExternalStringTag,
- LONG_EXTERNAL_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kExternalStringTag,
- SHORT_EXTERNAL_ASCII_SYMBOL_TYPE =
- kShortStringTag | kAsciiStringTag | kSymbolTag | kExternalStringTag,
- MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE =
- kMediumStringTag | kAsciiStringTag | kSymbolTag | kExternalStringTag,
- LONG_EXTERNAL_ASCII_SYMBOL_TYPE =
- kLongStringTag | kAsciiStringTag | kSymbolTag | kExternalStringTag,
- SHORT_STRING_TYPE = kShortStringTag | kSeqStringTag,
- MEDIUM_STRING_TYPE = kMediumStringTag | kSeqStringTag,
- LONG_STRING_TYPE = kLongStringTag | kSeqStringTag,
- SHORT_ASCII_STRING_TYPE = kShortStringTag | kAsciiStringTag | kSeqStringTag,
- MEDIUM_ASCII_STRING_TYPE = kMediumStringTag | kAsciiStringTag | kSeqStringTag,
- LONG_ASCII_STRING_TYPE = kLongStringTag | kAsciiStringTag | kSeqStringTag,
- SHORT_CONS_STRING_TYPE = kShortStringTag | kConsStringTag,
- MEDIUM_CONS_STRING_TYPE = kMediumStringTag | kConsStringTag,
- LONG_CONS_STRING_TYPE = kLongStringTag | kConsStringTag,
- SHORT_CONS_ASCII_STRING_TYPE =
- kShortStringTag | kAsciiStringTag | kConsStringTag,
- MEDIUM_CONS_ASCII_STRING_TYPE =
- kMediumStringTag | kAsciiStringTag | kConsStringTag,
- LONG_CONS_ASCII_STRING_TYPE =
- kLongStringTag | kAsciiStringTag | kConsStringTag,
- SHORT_SLICED_STRING_TYPE = kShortStringTag | kSlicedStringTag,
- MEDIUM_SLICED_STRING_TYPE = kMediumStringTag | kSlicedStringTag,
- LONG_SLICED_STRING_TYPE = kLongStringTag | kSlicedStringTag,
- SHORT_SLICED_ASCII_STRING_TYPE =
- kShortStringTag | kAsciiStringTag | kSlicedStringTag,
- MEDIUM_SLICED_ASCII_STRING_TYPE =
- kMediumStringTag | kAsciiStringTag | kSlicedStringTag,
- LONG_SLICED_ASCII_STRING_TYPE =
- kLongStringTag | kAsciiStringTag | kSlicedStringTag,
- SHORT_EXTERNAL_STRING_TYPE = kShortStringTag | kExternalStringTag,
- MEDIUM_EXTERNAL_STRING_TYPE = kMediumStringTag | kExternalStringTag,
- LONG_EXTERNAL_STRING_TYPE = kLongStringTag | kExternalStringTag,
- SHORT_EXTERNAL_ASCII_STRING_TYPE =
- kShortStringTag | kAsciiStringTag | kExternalStringTag,
- MEDIUM_EXTERNAL_ASCII_STRING_TYPE =
- kMediumStringTag | kAsciiStringTag | kExternalStringTag,
- LONG_EXTERNAL_ASCII_STRING_TYPE =
- kLongStringTag | kAsciiStringTag | kExternalStringTag,
- LONG_PRIVATE_EXTERNAL_ASCII_STRING_TYPE = LONG_EXTERNAL_ASCII_STRING_TYPE,
+ SYMBOL_TYPE = kSymbolTag | kSeqStringTag,
+ ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
+ CONS_SYMBOL_TYPE = kSymbolTag | kConsStringTag,
+ CONS_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kConsStringTag,
+ EXTERNAL_SYMBOL_TYPE = kSymbolTag | kExternalStringTag,
+ EXTERNAL_ASCII_SYMBOL_TYPE =
+ kAsciiStringTag | kSymbolTag | kExternalStringTag,
+ STRING_TYPE = kSeqStringTag,
+ ASCII_STRING_TYPE = kAsciiStringTag | kSeqStringTag,
+ CONS_STRING_TYPE = kConsStringTag,
+ CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag,
+ EXTERNAL_STRING_TYPE = kExternalStringTag,
+ EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag,
+ PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
MAP_TYPE = kNotStringTag,
HEAP_NUMBER_TYPE,
@@ -790,16 +541,13 @@
inline bool IsHeapNumber();
inline bool IsString();
inline bool IsSymbol();
-#ifdef DEBUG
// See objects-inl.h for more details
inline bool IsSeqString();
- inline bool IsSlicedString();
inline bool IsExternalString();
inline bool IsExternalTwoByteString();
inline bool IsExternalAsciiString();
inline bool IsSeqTwoByteString();
inline bool IsSeqAsciiString();
-#endif // DEBUG
inline bool IsConsString();
inline bool IsNumber();
@@ -1082,7 +830,6 @@
// View this map word as a forwarding address.
inline HeapObject* ToForwardingAddress();
-
// Marking phase of full collection: the map word of live objects is
// marked, and may be marked as overflowed (eg, the object is live, its
// children have not been visited, and it does not fit in the marking
@@ -1481,6 +1228,9 @@
Object* GetPropertyPostInterceptor(JSObject* receiver,
String* name,
PropertyAttributes* attributes);
+ Object* GetLocalPropertyPostInterceptor(JSObject* receiver,
+ String* name,
+ PropertyAttributes* attributes);
Object* GetLazyProperty(Object* receiver,
LookupResult* result,
String* name,
@@ -1502,6 +1252,27 @@
return GetLocalPropertyAttribute(name) != ABSENT;
}
+ // If the receiver is a JSGlobalProxy this method will return its prototype,
+ // otherwise the result is the receiver itself.
+ inline Object* BypassGlobalProxy();
+
+ // Accessors for hidden properties object.
+ //
+ // Hidden properties are not local properties of the object itself.
+ // Instead they are stored on an auxiliary JSObject, held as a local
+ // property with the special name Heap::hidden_symbol(). But if the
+ // receiver is a JSGlobalProxy then the auxiliary object is a property
+ // of its prototype.
+ //
+ // The Has/Get/SetHiddenPropertiesObject methods don't allow the holder to
+ // be a JSGlobalProxy. Use the BypassGlobalProxy method above to get to the
+ // real holder.
+ //
+ // These accessors do not touch interceptors or accessors.
+ inline bool HasHiddenPropertiesObject();
+ inline Object* GetHiddenPropertiesObject();
+ inline Object* SetHiddenPropertiesObject(Object* hidden_obj);
+
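A hedged sketch (hypothetical helper built only from the declarations above)
of how these accessors compose with BypassGlobalProxy:

    static Object* GetHiddenPropertiesFor(JSObject* receiver) {
      // Never read the hidden-properties object off a global proxy itself;
      // step to the real holder first.
      Object* holder = receiver->BypassGlobalProxy();
      if (!holder->IsJSObject()) return Heap::undefined_value();
      JSObject* real = JSObject::cast(holder);
      if (!real->HasHiddenPropertiesObject()) return Heap::undefined_value();
      // Neither interceptors nor accessors are touched on this path.
      return real->GetHiddenPropertiesObject();
    }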
Object* DeleteProperty(String* name, DeleteMode mode);
Object* DeleteElement(uint32_t index, DeleteMode mode);
Object* DeleteLazyProperty(LookupResult* result,
@@ -2238,6 +2009,7 @@
// true if it is found, assigning the symbol to the given output
// parameter.
bool LookupSymbolIfExists(String* str, String** symbol);
+ bool LookupTwoCharsSymbolIfExists(uint32_t c1, uint32_t c2, String** symbol);
// Casting.
static inline SymbolTable* cast(Object* obj);
@@ -2864,7 +2636,7 @@
// Relocate the code by delta bytes. Called to signal that this code
// object has been moved by delta bytes.
- void Relocate(int delta);
+ void Relocate(intptr_t delta);
// Migrate code described by desc.
void CopyFrom(const CodeDesc& desc);
@@ -2901,7 +2673,8 @@
void CodeVerify();
#endif
// Code entry points are aligned to 32 bytes.
- static const int kCodeAlignment = 32;
+ static const int kCodeAlignmentBits = 5;
+ static const int kCodeAlignment = 1 << kCodeAlignmentBits;
static const int kCodeAlignmentMask = kCodeAlignment - 1;
// Layout description.
@@ -3229,12 +3002,12 @@
// [compilation]: how the script was compiled.
DECL_ACCESSORS(compilation_type, Smi)
- // [line_ends]: array of line ends positions.
+ // [line_ends]: FixedArray of line ends positions.
DECL_ACCESSORS(line_ends, Object)
- // [eval_from_function]: for eval scripts the funcion from which eval was
- // called.
- DECL_ACCESSORS(eval_from_function, Object)
+ // [eval_from_shared]: for eval scripts the shared function info for the
+ // function from which eval was called.
+ DECL_ACCESSORS(eval_from_shared, Object)
// [eval_from_instructions_offset]: the offset, within the code of the
// function from which eval was called, of the instruction at which the
// eval call was made.
@@ -3262,9 +3035,9 @@
static const int kCompilationTypeOffset = kTypeOffset + kPointerSize;
static const int kLineEndsOffset = kCompilationTypeOffset + kPointerSize;
static const int kIdOffset = kLineEndsOffset + kPointerSize;
- static const int kEvalFromFunctionOffset = kIdOffset + kPointerSize;
+ static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
static const int kEvalFrominstructionsOffsetOffset =
- kEvalFromFunctionOffset + kPointerSize;
+ kEvalFromSharedOffset + kPointerSize;
static const int kSize = kEvalFrominstructionsOffsetOffset + kPointerSize;
private:
@@ -3362,7 +3135,6 @@
// Add information on assignments of the form this.x = ...;
void SetThisPropertyAssignmentsInfo(
- bool has_only_this_property_assignments,
bool has_only_simple_this_property_assignments,
FixedArray* this_property_assignments);
@@ -3370,13 +3142,12 @@
void ClearThisPropertyAssignmentsInfo();
// Indicate that this function only consists of assignments of the form
- // this.x = ...;.
- inline bool has_only_this_property_assignments();
-
- // Indicate that this function only consists of assignments of the form
// this.x = y; where y is either a constant or refers to an argument.
inline bool has_only_simple_this_property_assignments();
+ inline bool try_fast_codegen();
+ inline void set_try_fast_codegen(bool flag);
+
// For functions which only contains this property assignments this provides
// access to the names for the properties assigned.
DECL_ACCESSORS(this_property_assignments, Object)
@@ -3455,8 +3226,8 @@
static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
// Bit positions in compiler_hints.
- static const int kHasOnlyThisPropertyAssignments = 0;
- static const int kHasOnlySimpleThisPropertyAssignments = 1;
+ static const int kHasOnlySimpleThisPropertyAssignments = 0;
+ static const int kTryFastCodegen = 1;
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
@@ -3886,6 +3657,7 @@
bool is_array_index_;
bool is_first_char_;
bool is_valid_;
+ friend class TwoCharHashTableKey;
};
@@ -3908,7 +3680,6 @@
inline bool IsSequential();
inline bool IsExternal();
inline bool IsCons();
- inline bool IsSliced();
inline bool IsExternalAscii();
inline bool IsExternalTwoByte();
inline bool IsSequentialAscii();
@@ -3949,12 +3720,9 @@
inline int length();
inline void set_length(int value);
- // Get and set the uninterpreted length field of the string. Notice
- // that the length field is also used to cache the hash value of
- // strings. In order to get or set the actual length of the string
- // use the length() and set_length methods.
- inline uint32_t length_field();
- inline void set_length_field(uint32_t value);
+ // Get and set the hash field of the string.
+ inline uint32_t hash_field();
+ inline void set_hash_field(uint32_t value);
inline bool IsAsciiRepresentation();
inline bool IsTwoByteRepresentation();
@@ -3966,9 +3734,8 @@
inline uint16_t Get(int index);
// Try to flatten the top level ConsString that is hiding behind this
- // string. This is a no-op unless the string is a ConsString or a
- // SlicedString. Flatten mutates the ConsString and might return a
- // failure.
+ // string. This is a no-op unless the string is a ConsString. Flatten
+ // mutates the ConsString and might return a failure.
Object* TryFlatten();
// Try to flatten the string. Checks first inline to see if it is necessary.
@@ -3984,8 +3751,8 @@
// ascii and two byte string types.
bool MarkAsUndetectable();
- // Slice the string and return a substring.
- Object* Slice(int from, int to);
+ // Return a substring.
+ Object* SubString(int from, int to);
// String equality operations.
inline bool Equals(String* other);
@@ -4026,8 +3793,8 @@
// Returns a hash value used for the property table
inline uint32_t Hash();
- static uint32_t ComputeLengthAndHashField(unibrow::CharacterStream* buffer,
- int length);
+ static uint32_t ComputeHashField(unibrow::CharacterStream* buffer,
+ int length);
static bool ComputeArrayIndex(unibrow::CharacterStream* buffer,
uint32_t* index,
@@ -4058,13 +3825,12 @@
// Layout description.
static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kSize = kLengthOffset + kIntSize;
+ static const int kHashFieldOffset = kLengthOffset + kIntSize;
+ static const int kSize = kHashFieldOffset + kIntSize;
// Notice: kSize is not pointer-size aligned if pointers are 64-bit.
- // Limits on sizes of different types of strings.
- static const int kMaxShortStringSize = 63;
- static const int kMaxMediumStringSize = 16383;
-
+ // Maximum number of characters to consider when trying to convert a string
+ // value into an array index.
static const int kMaxArrayIndexSize = 10;
// Max ascii char code.
@@ -4072,7 +3838,7 @@
static const unsigned kMaxAsciiCharCodeU = unibrow::Utf8::kMaxOneByteChar;
static const int kMaxUC16CharCode = 0xffff;
- // Minimum length for a cons or sliced string.
+ // Minimum length for a cons string.
static const int kMinNonFlatLength = 13;
// Mask constant for checking if a string has a computed hash code
@@ -4084,18 +3850,30 @@
static const int kIsArrayIndexMask = 1 << 1;
static const int kNofLengthBitFields = 2;
+ // Shift constant for retrieving the hash code from the hash field.
+ static const int kHashShift = kNofLengthBitFields;
+
// Array index strings this short can keep their index in the hash
// field.
static const int kMaxCachedArrayIndexLength = 7;
- // Shift constants for retriving length and hash code from
- // length/hash field.
- static const int kHashShift = kNofLengthBitFields;
- static const int kShortLengthShift = kHashShift + kShortStringTag;
- static const int kMediumLengthShift = kHashShift + kMediumStringTag;
- static const int kLongLengthShift = kHashShift + kLongStringTag;
- // Maximal string length that can be stored in the hash/length field.
- static const int kMaxLength = (1 << (32 - kLongLengthShift)) - 1;
+ // For strings which are array indexes, the string length is mixed into
+ // the hash, mainly to avoid a hash value of zero, which would be the case
+ // for the string '0'. 24 bits are used for the array index value.
+ static const int kArrayIndexHashLengthShift = 24 + kNofLengthBitFields;
+ static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;
+ static const int kArrayIndexValueBits =
+ kArrayIndexHashLengthShift - kHashShift;
+
+ // Value of empty hash field indicating that the hash is not computed.
+ static const int kEmptyHashField = 0;
+
+ // Maximal string length.
+ static const int kMaxLength = (1 << (32 - 2)) - 1;
+
+ // Max length for computing hash. For strings longer than this limit the
+ // string length is used as the hash value.
+ static const int kMaxHashCalcLength = 16383;
// Limit for truncation in short printing.
static const int kMaxShortPrintLength = 1024;
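Reading aid (not part of the patch): with kHashShift == 2, the constants
above lay the 32-bit hash field out as

    bits  0..1   flags: hash-computed, is-array-index
    bits  2..25  hash code, or the array index value itself
                 (kArrayIndexValueBits == 24)
    bits 26..31  string length, for array-index strings only
                 (lengths up to kMaxCachedArrayIndexLength == 7 fit easily)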
@@ -4141,12 +3919,6 @@
unsigned remaining;
};
- // NOTE: If you call StringInputBuffer routines on strings that are
- // too deeply nested trees of cons and slice strings, then this
- // routine will overflow the stack. Strings that are merely deeply
- // nested trees of cons strings do not have a problem apart from
- // performance.
-
static inline const unibrow::byte* ReadBlock(String* input,
ReadBlockBuffer* buffer,
unsigned* offset,
@@ -4331,56 +4103,6 @@
};
-// The SlicedString class describes string values that are slices of
-// some other string. SlicedStrings consist of a reference to an
-// underlying heap-allocated string value, a start index, and the
-// length field common to all strings.
-class SlicedString: public String {
- public:
- // The underlying string buffer.
- inline String* buffer();
- inline void set_buffer(String* buffer);
-
- // The start index of the slice.
- inline int start();
- inline void set_start(int start);
-
- // Dispatched behavior.
- uint16_t SlicedStringGet(int index);
-
- // Casting.
- static inline SlicedString* cast(Object* obj);
-
- // Garbage collection support.
- void SlicedStringIterateBody(ObjectVisitor* v);
-
- // Layout description
-#if V8_HOST_ARCH_64_BIT
- // Optimizations expect buffer to be located at same offset as a ConsString's
- // first substring. In 64 bit mode we have room for the start offset before
- // the buffer.
- static const int kStartOffset = String::kSize;
- static const int kBufferOffset = kStartOffset + kIntSize;
- static const int kSize = kBufferOffset + kPointerSize;
-#else
- static const int kBufferOffset = String::kSize;
- static const int kStartOffset = kBufferOffset + kPointerSize;
- static const int kSize = kStartOffset + kIntSize;
-#endif
-
- // Support for StringInputBuffer.
- inline const unibrow::byte* SlicedStringReadBlock(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
- inline void SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SlicedString);
-};
-
-
// The ExternalString class describes string values that are backed by
// a string resource that lies outside the V8 heap. ExternalStrings
// consist of the length field common to all strings, a pointer to the
@@ -4422,6 +4144,9 @@
// Casting.
static inline ExternalAsciiString* cast(Object* obj);
+ // Garbage collection support.
+ void ExternalAsciiStringIterateBody(ObjectVisitor* v);
+
// Support for StringInputBuffer.
const unibrow::byte* ExternalAsciiStringReadBlock(unsigned* remaining,
unsigned* offset,
@@ -4430,9 +4155,6 @@
unsigned* offset,
unsigned chars);
- // Identify the map for the external string/symbol with a particular length.
- static inline Map* StringMap(int length);
- static inline Map* SymbolMap(int length);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalAsciiString);
};
@@ -4457,14 +4179,14 @@
// Casting.
static inline ExternalTwoByteString* cast(Object* obj);
+ // Garbage collection support.
+ void ExternalTwoByteStringIterateBody(ObjectVisitor* v);
+
// Support for StringInputBuffer.
void ExternalTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
unsigned* offset_ptr,
unsigned chars);
- // Identify the map for the external string/symbol with a particular length.
- static inline Map* StringMap(int length);
- static inline Map* SymbolMap(int length);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString);
};
@@ -4708,6 +4430,7 @@
DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(name, Object)
DECL_ACCESSORS(flag, Smi)
+ DECL_ACCESSORS(load_stub_cache, Object)
inline bool all_can_read();
inline void set_all_can_read(bool value);
@@ -4733,7 +4456,8 @@
static const int kDataOffset = kSetterOffset + kPointerSize;
static const int kNameOffset = kDataOffset + kPointerSize;
static const int kFlagOffset = kNameOffset + kPointerSize;
- static const int kSize = kFlagOffset + kPointerSize;
+ static const int kLoadStubCacheOffset = kFlagOffset + kPointerSize;
+ static const int kSize = kLoadStubCacheOffset + kPointerSize;
private:
// Bit positions in flag.
@@ -5086,6 +4810,12 @@
// Visits a runtime entry in the instruction stream.
virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
+ // Visits the resource of an ASCII or two-byte string.
+ virtual void VisitExternalAsciiString(
+ v8::String::ExternalAsciiStringResource** resource) {}
+ virtual void VisitExternalTwoByteString(
+ v8::String::ExternalStringResource** resource) {}
+
// Visits a debug call target in the instruction stream.
virtual void VisitDebugTarget(RelocInfo* rinfo);
@@ -5105,6 +4835,8 @@
// Intended for serialization/deserialization checking: insert, or
// check for the presence of, a tag at this position in the stream.
virtual void Synchronize(const char* tag) {}
+#else
+ inline void Synchronize(const char* tag) {}
#endif
};
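A hedged sketch of the two new hooks (the subclass is hypothetical, and
VisitPointers is assumed to be the visitor's pure-virtual member):

    class ExternalStringRelinker : public ObjectVisitor {
     public:
      virtual void VisitPointers(Object** start, Object** end) {}
      virtual void VisitExternalAsciiString(
          v8::String::ExternalAsciiStringResource** resource) {
        // *resource lives outside the V8 heap; receiving the address of the
        // slot lets a GC or serializer pass rewrite it, not just read it.
      }
      virtual void VisitExternalTwoByteString(
          v8::String::ExternalStringResource** resource) {
        // Likewise for two-byte external strings.
      }
    };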
diff --git a/src/parser.cc b/src/parser.cc
index 02fcfdc..c37078c 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -676,17 +676,12 @@
int materialized_literal_count() { return materialized_literal_count_; }
void SetThisPropertyAssignmentInfo(
- bool only_this_property_assignments,
bool only_simple_this_property_assignments,
Handle<FixedArray> this_property_assignments) {
- only_this_property_assignments_ = only_this_property_assignments;
only_simple_this_property_assignments_ =
only_simple_this_property_assignments;
this_property_assignments_ = this_property_assignments;
}
- bool only_this_property_assignments() {
- return only_this_property_assignments_;
- }
bool only_simple_this_property_assignments() {
return only_simple_this_property_assignments_;
}
@@ -705,7 +700,6 @@
// Properties count estimation.
int expected_property_count_;
- bool only_this_property_assignments_;
bool only_simple_this_property_assignments_;
Handle<FixedArray> this_property_assignments_;
@@ -720,7 +714,6 @@
TemporaryScope::TemporaryScope(Parser* parser)
: materialized_literal_count_(0),
expected_property_count_(0),
- only_this_property_assignments_(false),
only_simple_this_property_assignments_(false),
this_property_assignments_(Factory::empty_fixed_array()),
parser_(parser),
@@ -1227,7 +1220,6 @@
body.elements(),
temp_scope.materialized_literal_count(),
temp_scope.expected_property_count(),
- temp_scope.only_this_property_assignments(),
temp_scope.only_simple_this_property_assignments(),
temp_scope.this_property_assignments(),
0,
@@ -1339,7 +1331,7 @@
// An InitializationBlockFinder finds and marks sequences of statements of the
-// form x.y.z.a = ...; x.y.z.b = ...; etc.
+// form expr.a = ...; expr.b = ...; etc.
class InitializationBlockFinder : public ParserFinder {
public:
InitializationBlockFinder()
@@ -1367,7 +1359,7 @@
private:
// Returns true if the expressions appear to denote the same object.
// In the context of initialization blocks, we only consider expressions
- // of the form 'x.y.z'.
+ // of the form 'expr.x' or 'expr["x"]'.
static bool SameObject(Expression* e1, Expression* e2) {
VariableProxy* v1 = e1->AsVariableProxy();
VariableProxy* v2 = e2->AsVariableProxy();
@@ -1441,16 +1433,15 @@
class ThisNamedPropertyAssigmentFinder : public ParserFinder {
public:
ThisNamedPropertyAssigmentFinder()
- : only_this_property_assignments_(true),
- only_simple_this_property_assignments_(true),
+ : only_simple_this_property_assignments_(true),
names_(NULL),
assigned_arguments_(NULL),
assigned_constants_(NULL) {}
void Update(Scope* scope, Statement* stat) {
- // Bail out if function already has non this property assignment
- // statements.
- if (!only_this_property_assignments_) {
+    // Bail out if the function already has property assignments that are
+    // not simple this property assignments.
+ if (!only_simple_this_property_assignments_) {
return;
}
@@ -1459,16 +1450,10 @@
if (IsThisPropertyAssignment(assignment)) {
HandleThisPropertyAssignment(scope, assignment);
} else {
- only_this_property_assignments_ = false;
only_simple_this_property_assignments_ = false;
}
}
- // Returns whether only statements of the form this.x = ...; was encountered.
- bool only_this_property_assignments() {
- return only_this_property_assignments_;
- }
-
// Returns whether only statements of the form this.x = y; where y is either a
  // constant or a function argument were encountered.
bool only_simple_this_property_assignments() {
@@ -1524,28 +1509,24 @@
// Constant assigned.
Literal* literal = assignment->value()->AsLiteral();
AssignmentFromConstant(key, literal->handle());
+ return;
} else if (assignment->value()->AsVariableProxy() != NULL) {
// Variable assigned.
Handle<String> name =
assignment->value()->AsVariableProxy()->name();
// Check whether the variable assigned matches an argument name.
- int index = -1;
for (int i = 0; i < scope->num_parameters(); i++) {
if (*scope->parameter(i)->name() == *name) {
// Assigned from function argument.
- index = i;
- break;
+ AssignmentFromParameter(key, i);
+ return;
}
}
- if (index != -1) {
- AssignmentFromParameter(key, index);
- } else {
- AssignmentFromSomethingElse(key);
- }
- } else {
- AssignmentFromSomethingElse(key);
}
}
+ // It is not a simple "this.x = value;" assignment with a constant
+ // or parameter value.
+ AssignmentFromSomethingElse();
}
void AssignmentFromParameter(Handle<String> name, int index) {
@@ -1562,12 +1543,7 @@
assigned_constants_->Add(value);
}
- void AssignmentFromSomethingElse(Handle<String> name) {
- EnsureAllocation();
- names_->Add(name);
- assigned_arguments_->Add(-1);
- assigned_constants_->Add(Factory::undefined_value());
-
+ void AssignmentFromSomethingElse() {
// The this assignment is not a simple one.
only_simple_this_property_assignments_ = false;
}
@@ -1582,7 +1558,6 @@
}
}
- bool only_this_property_assignments_;
bool only_simple_this_property_assignments_;
ZoneStringList* names_;
ZoneList<int>* assigned_arguments_;
@@ -1623,11 +1598,11 @@
// Propagate the collected information on this property assignments.
if (top_scope_->is_function_scope()) {
- if (this_property_assignment_finder.only_this_property_assignments()) {
+ bool only_simple_this_property_assignments =
+ this_property_assignment_finder.only_simple_this_property_assignments();
+ if (only_simple_this_property_assignments) {
temp_scope_->SetThisPropertyAssignmentInfo(
- this_property_assignment_finder.only_this_property_assignments(),
- this_property_assignment_finder.
- only_simple_this_property_assignments(),
+ only_simple_this_property_assignments,
this_property_assignment_finder.GetThisPropertyAssignments());
}
}
@@ -2567,6 +2542,12 @@
Statement* body = ParseStatement(NULL, CHECK_OK);
Expect(Token::WHILE, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
+
+ if (loop != NULL) {
+ int position = scanner().location().beg_pos;
+ loop->set_condition_position(position);
+ }
+
Expression* cond = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
@@ -3624,7 +3605,6 @@
int materialized_literal_count;
int expected_property_count;
- bool only_this_property_assignments;
bool only_simple_this_property_assignments;
Handle<FixedArray> this_property_assignments;
if (is_lazily_compiled && pre_data() != NULL) {
@@ -3634,15 +3614,12 @@
scanner_.SeekForward(end_pos);
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
- only_this_property_assignments = false;
only_simple_this_property_assignments = false;
this_property_assignments = Factory::empty_fixed_array();
} else {
ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
materialized_literal_count = temp_scope.materialized_literal_count();
expected_property_count = temp_scope.expected_property_count();
- only_this_property_assignments =
- temp_scope.only_this_property_assignments();
only_simple_this_property_assignments =
temp_scope.only_simple_this_property_assignments();
this_property_assignments = temp_scope.this_property_assignments();
@@ -3664,7 +3641,6 @@
body.elements(),
materialized_literal_count,
expected_property_count,
- only_this_property_assignments,
only_simple_this_property_assignments,
this_property_assignments,
num_parameters,
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 73d6eeb..353d165 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -84,8 +84,8 @@
}
-double OS::nan_value() {
- return NAN;
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+ return 0; // FreeBSD runs on anything.
}
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index fe4c31f..bfcd8fb 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -84,11 +84,68 @@
}
-double OS::nan_value() {
- return NAN;
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
+ // Here gcc is telling us that we are on an ARM and gcc is assuming that we
+ // have VFP3 instructions. If gcc can assume it then so can we.
+ return 1u << VFP3;
+#else
+ return 0; // Linux runs on anything.
+#endif
}
+#ifdef __arm__
+bool OS::ArmCpuHasFeature(CpuFeature feature) {
+ const char* search_string = NULL;
+ const char* file_name = "/proc/cpuinfo";
+ // Simple detection of VFP at runtime for Linux.
+ // It is based on /proc/cpuinfo, which reveals hardware configuration
+ // to user-space applications. According to ARM (mid 2009), no similar
+ // facility is universally available on the ARM architectures,
+  // so it's up to individual OSes to provide such a facility.
+ //
+  // This is written as a straightforward one-pass parser, without
+  // STL strings or ifstream, because on Linux it is reading from a
+  // (non-mmap-able) character special device.
+ switch (feature) {
+ case VFP3:
+ search_string = "vfp";
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ FILE* f = NULL;
+ const char* what = search_string;
+
+ if (NULL == (f = fopen(file_name, "r")))
+ return false;
+
+ int k;
+ while (EOF != (k = fgetc(f))) {
+ if (k == *what) {
+ ++what;
+ while ((*what != '\0') && (*what == fgetc(f))) {
+ ++what;
+ }
+ if (*what == '\0') {
+ fclose(f);
+ return true;
+ } else {
+ what = search_string;
+ }
+ }
+ }
+ fclose(f);
+
+ // Did not find string in the proc file.
+ return false;
+}
+#endif // def __arm__
+
+
int OS::ActivationFrameAlignment() {
#ifdef V8_TARGET_ARCH_ARM
// On EABI ARM targets this is required for fp correctness in the
@@ -232,7 +289,7 @@
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
- FILE *fp = fopen("/proc/self/maps", "r");
+ FILE* fp = fopen("/proc/self/maps", "r");
if (fp == NULL) return;
// Allocate enough room to be able to store a full file name.
@@ -603,7 +660,7 @@
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
uint32_t uc_flags;
- struct ucontext *uc_link;
+ struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
__sigset_t uc_sigmask;
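The cpuinfo scan above deliberately reads one character at a time, because /proc/cpuinfo cannot be mmapped and its size cannot be determined up front. For illustration only, here is a line-buffered sketch of the same check; FileContainsString, the 512-byte buffer and the example inputs are not part of the patch:

    #include <stdio.h>
    #include <string.h>

    // Sketch: report whether `needle` occurs anywhere in a cpuinfo-style
    // text file. fgets + strstr is enough here because /proc/cpuinfo is
    // line-oriented and its lines comfortably fit the buffer.
    static bool FileContainsString(const char* path, const char* needle) {
      FILE* f = fopen(path, "r");
      if (f == NULL) return false;
      char line[512];
      bool found = false;
      while (!found && fgets(line, sizeof(line), f) != NULL) {
        found = (strstr(line, needle) != NULL);
      }
      fclose(f);
      return found;
    }

    // FileContainsString("/proc/cpuinfo", "vfp") mirrors the question that
    // OS::ArmCpuHasFeature(VFP3) answers above.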
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 0b236a5..0d5be45 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -244,8 +244,11 @@
}
-double OS::nan_value() {
- return NAN;
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+  // Mac OS X requires all of these to install, so we can assume they are
+  // present. These constants correspond to feature bits reported by the
+  // CPUID instruction.
+ const uint64_t one = 1;
+ return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID);
}
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index 084880e..656c317 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -150,11 +150,22 @@
}
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+ return 0;
+}
+
+
double OS::nan_value() {
UNIMPLEMENTED();
return 0;
}
+
+bool OS::ArmCpuHasFeature(CpuFeature feature) {
+  UNIMPLEMENTED();
+  return false;
+}
+
+
bool OS::IsOutsideAllocatedSpace(void* address) {
UNIMPLEMENTED();
return false;
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
new file mode 100644
index 0000000..6d27304
--- /dev/null
+++ b/src/platform-openbsd.cc
@@ -0,0 +1,597 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform specific code for OpenBSD goes here. For the POSIX-compatible parts
+// the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <stdlib.h>
+
+#include <sys/types.h> // mmap & munmap
+#include <sys/mman.h> // mmap & munmap
+#include <sys/stat.h> // open
+#include <sys/fcntl.h> // open
+#include <unistd.h> // getpagesize
+#include <execinfo.h> // backtrace, backtrace_symbols
+#include <strings.h> // index
+#include <errno.h>
+#include <stdarg.h>
+#include <limits.h>
+
+#undef MAP_TYPE
+
+#include "v8.h"
+
+#include "platform.h"
+
+
+namespace v8 {
+namespace internal {
+
+// 0 is never a valid thread id on OpenBSD since tids and pids share a
+// name space and pid 0 is used to kill the group (see man 2 kill).
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+double ceiling(double x) {
+  // Match OS X behavior: ceil of a value in (-1, 0) must be -0.0.
+ if (-1.0 < x && x < 0.0) {
+ return -0.0;
+ } else {
+ return ceil(x);
+ }
+}
+
+
+void OS::Setup() {
+ // Seed the random number generator.
+ // Convert the current time to a 64-bit integer first, before converting it
+ // to an unsigned. Going directly can cause an overflow and the seed to be
+ // set to all ones. The seed will be identical for different instances that
+ // call this setup code within the same millisecond.
+ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+ srandom(static_cast<unsigned int>(seed));
+}
+
+
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+ return 0; // OpenBSD runs on anything.
+}
+
+
+int OS::ActivationFrameAlignment() {
+ // 16 byte alignment on OpenBSD
+ return 16;
+}
+
+
+// We keep the lowest and highest addresses mapped as a quick way of
+// determining that pointers are outside the heap (used mostly in assertions
+// and verification). The estimate is conservative, i.e., not all addresses in
+// 'allocated' space are actually allocated to our heap. The range is
+// [lowest, highest), inclusive on the low end and exclusive on the high end.
+static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
+static void* highest_ever_allocated = reinterpret_cast<void*>(0);
+
+
+static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ lowest_ever_allocated = Min(lowest_ever_allocated, address);
+ highest_ever_allocated =
+ Max(highest_ever_allocated,
+ reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
+}
+
+
+bool OS::IsOutsideAllocatedSpace(void* address) {
+ return address < lowest_ever_allocated || address >= highest_ever_allocated;
+}
+
+
+size_t OS::AllocateAlignment() {
+ return getpagesize();
+}
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool executable) {
+ const size_t msize = RoundUp(requested, getpagesize());
+ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
+ if (mbase == MAP_FAILED) {
+ LOG(StringEvent("OS::Allocate", "mmap failed"));
+ return NULL;
+ }
+ *allocated = msize;
+ UpdateAllocatedSpaceLimits(mbase, msize);
+ return mbase;
+}
+
+
+void OS::Free(void* buf, const size_t length) {
+ int result = munmap(buf, length);
+ USE(result);
+ ASSERT(result == 0);
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+ UNIMPLEMENTED();
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+ UNIMPLEMENTED();
+}
+
+#endif
+
+
+void OS::Sleep(int milliseconds) {
+ unsigned int ms = static_cast<unsigned int>(milliseconds);
+ usleep(1000 * ms);
+}
+
+
+void OS::Abort() {
+ // Redirect to std abort to signal abnormal program termination.
+ abort();
+}
+
+
+void OS::DebugBreak() {
+#if defined(__arm__) || defined(__thumb__)
+ asm("bkpt 0");
+#else
+ asm("int $3");
+#endif
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) { }
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) munmap(memory_, size_);
+ fclose(file_);
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+static unsigned StringToLong(char* buffer) {
+ return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
+}
+#endif
+
+
+void OS::LogSharedLibraryAddresses() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ static const int MAP_LENGTH = 1024;
+ int fd = open("/proc/self/maps", O_RDONLY);
+ if (fd < 0) return;
+ while (true) {
+ char addr_buffer[11];
+ addr_buffer[0] = '0';
+ addr_buffer[1] = 'x';
+ addr_buffer[10] = 0;
+ int result = read(fd, addr_buffer + 2, 8);
+ if (result < 8) break;
+ unsigned start = StringToLong(addr_buffer);
+ result = read(fd, addr_buffer + 2, 1);
+ if (result < 1) break;
+ if (addr_buffer[2] != '-') break;
+ result = read(fd, addr_buffer + 2, 8);
+ if (result < 8) break;
+ unsigned end = StringToLong(addr_buffer);
+ char buffer[MAP_LENGTH];
+ int bytes_read = -1;
+ do {
+ bytes_read++;
+ if (bytes_read >= MAP_LENGTH - 1)
+ break;
+ result = read(fd, buffer + bytes_read, 1);
+ if (result < 1) break;
+ } while (buffer[bytes_read] != '\n');
+ buffer[bytes_read] = 0;
+ // Ignore mappings that are not executable.
+ if (buffer[3] != 'x') continue;
+ char* start_of_path = index(buffer, '/');
+ // There may be no filename in this line. Skip to next.
+ if (start_of_path == NULL) continue;
+ buffer[bytes_read] = 0;
+ LOG(SharedLibraryEvent(start_of_path, start, end));
+ }
+ close(fd);
+#endif
+}
+
+
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+ UNIMPLEMENTED();
+ return 1;
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory(size_t size) {
+ address_ = mmap(NULL, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
+ size_ = size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != MAP_FAILED;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
+ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(address, size, prot,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ kMmapFd, kMmapFdOffset)) {
+ return false;
+ }
+
+ UpdateAllocatedSpaceLimits(address, size);
+ return true;
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return mmap(address, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset) != MAP_FAILED;
+}
+
+
+class ThreadHandle::PlatformData : public Malloced {
+ public:
+ explicit PlatformData(ThreadHandle::Kind kind) {
+ Initialize(kind);
+ }
+
+ void Initialize(ThreadHandle::Kind kind) {
+ switch (kind) {
+ case ThreadHandle::SELF: thread_ = pthread_self(); break;
+ case ThreadHandle::INVALID: thread_ = kNoThread; break;
+ }
+ }
+ pthread_t thread_; // Thread handle for pthread.
+};
+
+
+ThreadHandle::ThreadHandle(Kind kind) {
+ data_ = new PlatformData(kind);
+}
+
+
+void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
+ data_->Initialize(kind);
+}
+
+
+ThreadHandle::~ThreadHandle() {
+ delete data_;
+}
+
+
+bool ThreadHandle::IsSelf() const {
+ return pthread_equal(data_->thread_, pthread_self());
+}
+
+
+bool ThreadHandle::IsValid() const {
+ return data_->thread_ != kNoThread;
+}
+
+
+Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+}
+
+
+Thread::~Thread() {
+}
+
+
+static void* ThreadEntry(void* arg) {
+ Thread* thread = reinterpret_cast<Thread*>(arg);
+ // This is also initialized by the first argument to pthread_create() but we
+ // don't know which thread will run first (the original thread or the new
+ // one) so we initialize it here too.
+ thread->thread_handle_data()->thread_ = pthread_self();
+ ASSERT(thread->IsValid());
+ thread->Run();
+ return NULL;
+}
+
+
+void Thread::Start() {
+ pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
+ ASSERT(IsValid());
+}
+
+
+void Thread::Join() {
+ pthread_join(thread_handle_data()->thread_, NULL);
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+ pthread_key_t key;
+ int result = pthread_key_create(&key, NULL);
+ USE(result);
+ ASSERT(result == 0);
+ return static_cast<LocalStorageKey>(key);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ int result = pthread_key_delete(pthread_key);
+ USE(result);
+ ASSERT(result == 0);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ pthread_setspecific(pthread_key, value);
+}
+
+
+void Thread::YieldCPU() {
+ sched_yield();
+}
+
+
+class OpenBSDMutex : public Mutex {
+ public:
+
+ OpenBSDMutex() {
+ pthread_mutexattr_t attrs;
+ int result = pthread_mutexattr_init(&attrs);
+ ASSERT(result == 0);
+ result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
+ ASSERT(result == 0);
+ result = pthread_mutex_init(&mutex_, &attrs);
+ ASSERT(result == 0);
+ }
+
+ virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }
+
+ virtual int Lock() {
+ int result = pthread_mutex_lock(&mutex_);
+ return result;
+ }
+
+ virtual int Unlock() {
+ int result = pthread_mutex_unlock(&mutex_);
+ return result;
+ }
+
+ private:
+ pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
+};
+
+
+Mutex* OS::CreateMutex() {
+ return new OpenBSDMutex();
+}
+
+
+class OpenBSDSemaphore : public Semaphore {
+ public:
+ explicit OpenBSDSemaphore(int count) { sem_init(&sem_, 0, count); }
+ virtual ~OpenBSDSemaphore() { sem_destroy(&sem_); }
+
+ virtual void Wait();
+ virtual bool Wait(int timeout);
+ virtual void Signal() { sem_post(&sem_); }
+ private:
+ sem_t sem_;
+};
+
+
+void OpenBSDSemaphore::Wait() {
+ while (true) {
+ int result = sem_wait(&sem_);
+ if (result == 0) return; // Successfully got semaphore.
+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
+ }
+}
+
+
+bool OpenBSDSemaphore::Wait(int timeout) {
+ const long kOneSecondMicros = 1000000; // NOLINT
+
+ // Split timeout into second and nanosecond parts.
+ struct timeval delta;
+ delta.tv_usec = timeout % kOneSecondMicros;
+ delta.tv_sec = timeout / kOneSecondMicros;
+
+ struct timeval current_time;
+ // Get the current time.
+  if (gettimeofday(&current_time, NULL) == -1) {
+ return false;
+ }
+
+ // Calculate time for end of timeout.
+ struct timeval end_time;
+  timeradd(&current_time, &delta, &end_time);
+
+ struct timespec ts;
+ TIMEVAL_TO_TIMESPEC(&end_time, &ts);
+ while (true) {
+ int result = sem_trywait(&sem_);
+ if (result == 0) return true; // Successfully got semaphore.
+ if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
+ }
+}
+
+
+Semaphore* OS::CreateSemaphore(int count) {
+ return new OpenBSDSemaphore(count);
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+static Sampler* active_sampler_ = NULL;
+
+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+ USE(info);
+ if (signal != SIGPROF) return;
+ if (active_sampler_ == NULL) return;
+
+ TickSample sample;
+
+ // We always sample the VM state.
+ sample.state = Logger::state();
+
+ active_sampler_->Tick(&sample);
+}
+
+
+class Sampler::PlatformData : public Malloced {
+ public:
+ PlatformData() {
+ signal_handler_installed_ = false;
+ }
+
+ bool signal_handler_installed_;
+ struct sigaction old_signal_handler_;
+ struct itimerval old_timer_value_;
+};
+
+
+Sampler::Sampler(int interval, bool profiling)
+ : interval_(interval), profiling_(profiling), active_(false) {
+ data_ = new PlatformData();
+}
+
+
+Sampler::~Sampler() {
+ delete data_;
+}
+
+
+void Sampler::Start() {
+  // There can only be one active sampler at a time on POSIX
+ // platforms.
+ if (active_sampler_ != NULL) return;
+
+ // Request profiling signals.
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
+ data_->signal_handler_installed_ = true;
+
+ // Set the itimer to generate a tick for each interval.
+ itimerval itimer;
+ itimer.it_interval.tv_sec = interval_ / 1000;
+ itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
+ itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
+ itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
+ setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
+
+ // Set this sampler as the active sampler.
+ active_sampler_ = this;
+ active_ = true;
+}
+
+
+void Sampler::Stop() {
+ // Restore old signal handler
+ if (data_->signal_handler_installed_) {
+ setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
+ sigaction(SIGPROF, &data_->old_signal_handler_, 0);
+ data_->signal_handler_installed_ = false;
+ }
+
+ // This sampler is no longer the active sampler.
+ active_sampler_ = NULL;
+ active_ = false;
+}
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
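One detail of the new file worth calling out is the timeout handling in OpenBSDSemaphore::Wait(int): the relative timeout in microseconds is turned into an absolute deadline before the semaphore is polled. A condensed sketch of just that conversion, assuming the BSD timeradd() and TIMEVAL_TO_TIMESPEC macros from <sys/time.h>; DeadlineFromMicros is an illustrative name:

    #include <sys/time.h>
    #include <time.h>

    // Split a timeout in microseconds into seconds/microseconds, add it to
    // the current wall-clock time, and convert the result to the timespec
    // form that timed-wait primitives expect.
    static bool DeadlineFromMicros(long timeout_us, struct timespec* ts) {
      struct timeval delta;
      delta.tv_sec = timeout_us / 1000000;
      delta.tv_usec = timeout_us % 1000000;
      struct timeval now;
      if (gettimeofday(&now, NULL) == -1) return false;  // Clock unavailable.
      struct timeval end_time;
      timeradd(&now, &delta, &end_time);  // Absolute deadline as a timeval.
      TIMEVAL_TO_TIMESPEC(&end_time, ts);
      return true;
    }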
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 1e1245c..41e0e64 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -27,7 +27,7 @@
// Platform specific code for POSIX goes here. This is not a platform on its
// own but contains the parts which are the same across POSIX platforms Linux,
-// Mac OS and FreeBSD.
+// Mac OS, FreeBSD and OpenBSD.
#include <unistd.h>
#include <errno.h>
@@ -61,6 +61,13 @@
return fmod(x, y);
}
+
+double OS::nan_value() {
+ // NAN from math.h is defined in C99 and not in POSIX.
+ return NAN;
+}
+
+
// ----------------------------------------------------------------------------
// POSIX date/time support.
//
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 54d7b37..1be4b77 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -48,10 +48,10 @@
#ifndef NOMCX
#define NOMCX
#endif
-// Require Windows 2000 or higher (this is required for the IsDebuggerPresent
+// Require Windows XP or higher (this is required for the RtlCaptureContext
// function to be present).
#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x500
+#define _WIN32_WINNT 0x501
#endif
#include <windows.h>
@@ -839,7 +839,7 @@
size_t* allocated,
bool is_executable) {
// VirtualAlloc rounds allocated size to page size automatically.
- size_t msize = RoundUp(requested, GetPageSize());
+ size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
  // Windows XP SP2 allows Data Execution Prevention (DEP).
int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
@@ -852,7 +852,7 @@
ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
*allocated = msize;
- UpdateAllocatedSpaceLimits(mbase, msize);
+ UpdateAllocatedSpaceLimits(mbase, static_cast<int>(msize));
return mbase;
}
@@ -1208,22 +1208,7 @@
// Capture current context.
CONTEXT context;
- memset(&context, 0, sizeof(context));
- context.ContextFlags = CONTEXT_CONTROL;
- context.ContextFlags = CONTEXT_CONTROL;
-#ifdef _WIN64
- // TODO(X64): Implement context capture.
-#else
- __asm call x
- __asm x: pop eax
- __asm mov context.Eip, eax
- __asm mov context.Ebp, ebp
- __asm mov context.Esp, esp
- // NOTE: At some point, we could use RtlCaptureContext(&context) to
- // capture the context instead of inline assembler. However it is
- // only available on XP, Vista, Server 2003 and Server 2008 which
- // might not be sufficient.
-#endif
+ RtlCaptureContext(&context);
// Initialize the stack walking
STACKFRAME64 stack_frame;
@@ -1331,9 +1316,16 @@
#endif // __MINGW32__
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+ return 0; // Windows runs on anything.
+}
+
+
double OS::nan_value() {
#ifdef _MSC_VER
- static const __int64 nanval = 0xfff8000000000000;
+  // The quiet NaN with no payload (a.k.a. Indeterminate) has all the bits
+  // in the mask set, so the value equals the mask.
+ static const __int64 nanval = kQuietNaNMask;
return *reinterpret_cast<const double*>(&nanval);
#else // _MSC_VER
return NAN;
@@ -1374,7 +1366,7 @@
return false;
}
- UpdateAllocatedSpaceLimits(address, size);
+ UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
return true;
}
@@ -1702,7 +1694,9 @@
}
// Connect.
- status = connect(socket_, result->ai_addr, result->ai_addrlen);
+ status = connect(socket_,
+ result->ai_addr,
+ static_cast<int>(result->ai_addrlen));
freeaddrinfo(result);
return status == 0;
}
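The MSVC branch of OS::nan_value() now builds its NaN from kQuietNaNMask instead of a magic literal. A sketch of why that bit pattern is a NaN at all; the 0xFFF8000000000000 constant is what the comment in the hunk above implies the mask expands to, and memcpy replaces the pointer cast only to keep the sketch free of aliasing questions:

    #include <stdint.h>
    #include <string.h>

    // An IEEE 754 double is a quiet NaN when all exponent bits and the top
    // mantissa bit are set; with the sign bit also set and no payload this
    // is the x87 "Indeterminate" value.
    static double QuietNaN() {
      const uint64_t kBits = 0xFFF8000000000000ULL;
      double result;
      memcpy(&result, &kBits, sizeof(result));
      return result;
    }

    // QuietNaN() != QuietNaN() holds, as it must for any NaN.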
diff --git a/src/platform.h b/src/platform.h
index fefe4b8..75e557c 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -247,9 +247,20 @@
// for.
static void LogSharedLibraryAddresses();
+ // The return value indicates the CPU features we are sure of because of the
+ // OS. For example MacOSX doesn't run on any x86 CPUs that don't have SSE2
+ // instructions.
+  // This is a little messy because the interpretation depends on the
+  // combination of CPU and OS. The bits in the answer correspond to the bit
+  // positions indicated by the members of the CpuFeature enum from globals.h.
+ static uint64_t CpuFeaturesImpliedByPlatform();
+
// Returns the double constant NAN
static double nan_value();
+ // Support runtime detection of VFP3 on ARM CPUs.
+ static bool ArmCpuHasFeature(CpuFeature feature);
+
// Returns the activation frame alignment constraint or zero if
// the platform doesn't care. Guaranteed to be a power of two.
static int ActivationFrameAlignment();
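Together the two new hooks split feature detection into what the OS guarantees and what must be probed at runtime. A hedged sketch of how a caller might combine them, compiled against the declarations above inside v8::internal; ProbeCpuFeatures is a hypothetical name, and only the VFP3 probe shown in platform-linux.cc is assumed:

    #include <stdint.h>

    static uint64_t ProbeCpuFeatures() {
      // Start from the features the OS vouches for (e.g. SSE2 and friends
      // on Mac OS X, VFP3 when gcc targets VFP on Linux/ARM).
      uint64_t features = OS::CpuFeaturesImpliedByPlatform();
    #ifdef __arm__
      // Add features that can only be discovered at runtime.
      if (OS::ArmCpuHasFeature(VFP3)) {
        features |= static_cast<uint64_t>(1) << VFP3;
      }
    #endif
      return features;
    }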
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 10c1ea8..87da026 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -1339,9 +1339,6 @@
case Slot::LOOKUP:
AddAttribute("type", "LOOKUP");
break;
- case Slot::GLOBAL:
- AddAttribute("type", "GLOBAL");
- break;
}
AddAttribute("index", expr->index());
}
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 0d00cee..9ae19d7 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -30,13 +30,7 @@
#include "assembler.h"
#include "regexp-stack.h"
#include "regexp-macro-assembler.h"
-#if V8_TARGET_ARCH_ARM
-#include "arm/simulator-arm.h"
-#elif V8_TARGET_ARCH_IA32
-#include "ia32/simulator-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/simulator-x64.h"
-#endif
+#include "simulator.h"
namespace v8 {
namespace internal {
@@ -130,11 +124,6 @@
if (StringShape(subject_ptr).IsCons()) {
subject_ptr = ConsString::cast(subject_ptr)->first();
- } else if (StringShape(subject_ptr).IsSliced()) {
- SlicedString* slice = SlicedString::cast(subject_ptr);
- start_offset += slice->start();
- end_offset += slice->start();
- subject_ptr = slice->buffer();
}
// Ensure that an underlying string has the same ascii-ness.
ASSERT(subject_ptr->IsAsciiRepresentation() == is_ascii);
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index 26aab2c..aa01096 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -215,22 +215,6 @@
bool at_start);
};
-
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM.
-class RegExpCEntryStub: public CodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
- const char* GetName() { return "RegExpCEntryStub"; }
-};
-
#endif // V8_NATIVE_REGEXP
} } // namespace v8::internal
diff --git a/src/regexp-stack.cc b/src/regexp-stack.cc
index 87a674d..7696279 100644
--- a/src/regexp-stack.cc
+++ b/src/regexp-stack.cc
@@ -81,7 +81,7 @@
if (size > kMaximumStackSize) return NULL;
if (size < kMinimumStackSize) size = kMinimumStackSize;
if (thread_local_.memory_size_ < size) {
- Address new_memory = NewArray<byte>(size);
+ Address new_memory = NewArray<byte>(static_cast<int>(size));
if (thread_local_.memory_size_ > 0) {
// Copy original memory into top of new memory.
memcpy(reinterpret_cast<void*>(
diff --git a/src/regexp-stack.h b/src/regexp-stack.h
index 319ab28..fbaa6fb 100644
--- a/src/regexp-stack.h
+++ b/src/regexp-stack.h
@@ -68,7 +68,9 @@
static Address EnsureCapacity(size_t size);
// Thread local archiving.
- static size_t ArchiveSpacePerThread() { return sizeof(thread_local_); }
+ static int ArchiveSpacePerThread() {
+ return static_cast<int>(sizeof(thread_local_));
+ }
static char* ArchiveStack(char* to);
static char* RestoreStack(char* from);
static void FreeThreadResources() { thread_local_.Free(); }
diff --git a/src/runtime.cc b/src/runtime.cc
index 8fd62c9..b07361a 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -719,12 +719,15 @@
if (*initial_value != NULL) {
if (index >= 0) {
// The variable or constant context slot should always be in
- // the function context; not in any outer context nor in the
- // arguments object.
- ASSERT(holder.is_identical_to(context));
- if (((attributes & READ_ONLY) == 0) ||
- context->get(index)->IsTheHole()) {
- context->set(index, *initial_value);
+ // the function context or the arguments object.
+ if (holder->IsContext()) {
+ ASSERT(holder.is_identical_to(context));
+ if (((attributes & READ_ONLY) == 0) ||
+ context->get(index)->IsTheHole()) {
+ context->set(index, *initial_value);
+ }
+ } else {
+ Handle<JSObject>::cast(holder)->SetElement(index, *initial_value);
}
} else {
// Slow case: The property is not in the FixedArray part of the context.
@@ -788,51 +791,72 @@
// case of callbacks in the prototype chain (this rules out using
// SetProperty). We have IgnoreAttributesAndSetLocalProperty for
// this.
+ // Note that objects can have hidden prototypes, so we need to traverse
+ // the whole chain of hidden prototypes to do a 'local' lookup.
+ JSObject* real_holder = global;
LookupResult lookup;
- global->LocalLookup(*name, &lookup);
- if (!lookup.IsProperty()) {
- if (assign) {
- return global->IgnoreAttributesAndSetLocalProperty(*name,
- args[1],
- attributes);
+ while (true) {
+ real_holder->LocalLookup(*name, &lookup);
+ if (lookup.IsProperty()) {
+ // Determine if this is a redeclaration of something read-only.
+ if (lookup.IsReadOnly()) {
+        // If we found a read-only property on one of the hidden
+        // prototypes, just shadow it.
+ if (real_holder != Top::context()->global()) break;
+ return ThrowRedeclarationError("const", name);
+ }
+
+ // Determine if this is a redeclaration of an intercepted read-only
+ // property and figure out if the property exists at all.
+ bool found = true;
+ PropertyType type = lookup.type();
+ if (type == INTERCEPTOR) {
+ HandleScope handle_scope;
+ Handle<JSObject> holder(real_holder);
+ PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
+ real_holder = *holder;
+ if (intercepted == ABSENT) {
+ // The interceptor claims the property isn't there. We need to
+ // make sure to introduce it.
+ found = false;
+ } else if ((intercepted & READ_ONLY) != 0) {
+ // The property is present, but read-only. Since we're trying to
+          // overwrite it with a variable declaration, we must throw a
+          // re-declaration error. However, if we found a read-only property
+          // on one of the hidden prototypes, just shadow it.
+ if (real_holder != Top::context()->global()) break;
+ return ThrowRedeclarationError("const", name);
+ }
+ }
+
+ if (found && !assign) {
+ // The global property is there and we're not assigning any value
+ // to it. Just return.
+ return Heap::undefined_value();
+ }
+
+ // Assign the value (or undefined) to the property.
+ Object* value = (assign) ? args[1] : Heap::undefined_value();
+ return real_holder->SetProperty(&lookup, *name, value, attributes);
}
- return Heap::undefined_value();
+
+ Object* proto = real_holder->GetPrototype();
+ if (!proto->IsJSObject())
+ break;
+
+ if (!JSObject::cast(proto)->map()->is_hidden_prototype())
+ break;
+
+ real_holder = JSObject::cast(proto);
}
- // Determine if this is a redeclaration of something read-only.
- if (lookup.IsReadOnly()) {
- return ThrowRedeclarationError("const", name);
+ global = Top::context()->global();
+ if (assign) {
+ return global->IgnoreAttributesAndSetLocalProperty(*name,
+ args[1],
+ attributes);
}
-
- // Determine if this is a redeclaration of an intercepted read-only
- // property and figure out if the property exists at all.
- bool found = true;
- PropertyType type = lookup.type();
- if (type == INTERCEPTOR) {
- PropertyAttributes intercepted = global->GetPropertyAttribute(*name);
- if (intercepted == ABSENT) {
- // The interceptor claims the property isn't there. We need to
- // make sure to introduce it.
- found = false;
- } else if ((intercepted & READ_ONLY) != 0) {
- // The property is present, but read-only. Since we're trying to
- // overwrite it with a variable declaration we must throw a
- // re-declaration error.
- return ThrowRedeclarationError("const", name);
- }
- // Restore global object from context (in case of GC).
- global = Top::context()->global();
- }
-
- if (found && !assign) {
- // The global property is there and we're not assigning any value
- // to it. Just return.
- return Heap::undefined_value();
- }
-
- // Assign the value (or undefined) to the property.
- Object* value = (assign) ? args[1] : Heap::undefined_value();
- return global->SetProperty(&lookup, *name, value, attributes);
+ return Heap::undefined_value();
}
@@ -1274,7 +1298,9 @@
// Flatten the string. If someone wants to get a char at an index
// in a cons string, it is likely that more indices will be
// accessed.
- subject->TryFlattenIfNotFlat();
+ Object* flat = subject->TryFlatten();
+ if (flat->IsFailure()) return flat;
+ subject = String::cast(flat);
if (i >= static_cast<uint32_t>(subject->length())) {
return Heap::nan_value();
}
@@ -1357,8 +1383,9 @@
StringBuilderSubstringPosition::encode(from);
AddElement(Smi::FromInt(encoded_slice));
} else {
- Handle<String> slice = Factory::NewStringSlice(subject_, from, to);
- AddElement(*slice);
+ // Otherwise encode as two smis.
+ AddElement(Smi::FromInt(-length));
+ AddElement(Smi::FromInt(from));
}
IncrementCharacterCount(length);
}
@@ -1642,16 +1669,14 @@
capture_count,
subject_length);
}
- // Find substrings of replacement string and create them as String objects..
+ // Find substrings of replacement string and create them as String objects.
int substring_index = 0;
for (int i = 0, n = parts_.length(); i < n; i++) {
int tag = parts_[i].tag;
if (tag <= 0) { // A replacement string slice.
int from = -tag;
int to = parts_[i].data;
- replacement_substrings_.Add(Factory::NewStringSlice(replacement,
- from,
- to));
+ replacement_substrings_.Add(Factory::NewSubString(replacement, from, to));
parts_[i].tag = REPLACEMENT_SUBSTRING;
parts_[i].data = substring_index;
substring_index++;
@@ -1749,9 +1774,10 @@
// Index of end of last match.
int prev = 0;
- // Number of parts added by compiled replacement plus preceeding string
- // and possibly suffix after last match.
- const int parts_added_per_loop = compiled_replacement.parts() + 2;
+  // Number of parts added by compiled replacement plus preceding
+ // string and possibly suffix after last match. It is possible for
+ // all components to use two elements when encoded as two smis.
+ const int parts_added_per_loop = 2 * (compiled_replacement.parts() + 2);
bool matched = true;
do {
ASSERT(last_match_info_handle->HasFastElements());
@@ -2223,8 +2249,8 @@
if (pos == NULL) {
return -1;
}
- return reinterpret_cast<const char*>(pos) - ascii_vector.start()
- + start_index;
+ return static_cast<int>(reinterpret_cast<const char*>(pos)
+ - ascii_vector.start() + start_index);
}
return SingleCharIndexOf(sub->ToUC16Vector(), pat->Get(0), start_index);
}
@@ -2349,21 +2375,29 @@
}
-static Object* Runtime_StringSlice(Arguments args) {
+static Object* Runtime_SubString(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 3);
CONVERT_CHECKED(String, value, args[0]);
- CONVERT_DOUBLE_CHECKED(from_number, args[1]);
- CONVERT_DOUBLE_CHECKED(to_number, args[2]);
-
- int start = FastD2I(from_number);
- int end = FastD2I(to_number);
-
+ Object* from = args[1];
+ Object* to = args[2];
+ int start, end;
+ // We have a fast integer-only case here to avoid a conversion to double in
+ // the common case where from and to are Smis.
+ if (from->IsSmi() && to->IsSmi()) {
+ start = Smi::cast(from)->value();
+ end = Smi::cast(to)->value();
+ } else {
+ CONVERT_DOUBLE_CHECKED(from_number, from);
+ CONVERT_DOUBLE_CHECKED(to_number, to);
+ start = FastD2I(from_number);
+ end = FastD2I(to_number);
+ }
RUNTIME_ASSERT(end >= start);
RUNTIME_ASSERT(start >= 0);
RUNTIME_ASSERT(end <= value->length());
- return value->Slice(start, end);
+ return value->SubString(start, end);
}
@@ -2410,7 +2444,7 @@
for (int i = 0; i < matches ; i++) {
int from = offsets.at(i * 2);
int to = offsets.at(i * 2 + 1);
- elements->set(i, *Factory::NewStringSlice(subject, from, to));
+ elements->set(i, *Factory::NewSubString(subject, from, to));
}
Handle<JSArray> result = Factory::NewJSArrayWithElements(elements);
result->set_length(Smi::FromInt(matches));
@@ -3385,8 +3419,7 @@
NoHandleAllocation ha;
CONVERT_CHECKED(String, s, args[0]);
- CONVERT_DOUBLE_CHECKED(n, args[1]);
- int radix = FastD2I(n);
+ CONVERT_SMI_CHECKED(radix, args[1]);
s->TryFlattenIfNotFlat();
@@ -3611,7 +3644,7 @@
right--;
}
}
- return s->Slice(left, right);
+ return s->SubString(left, right);
}
bool Runtime::IsUpperCaseChar(uint16_t ch) {
@@ -3753,6 +3786,7 @@
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, str1, args[0]);
CONVERT_CHECKED(String, str2, args[1]);
+ Counters::string_add_runtime.Increment();
return Heap::AllocateConsString(str1, str2);
}
@@ -3766,9 +3800,21 @@
for (int i = 0; i < array_length; i++) {
Object* element = fixed_array->get(i);
if (element->IsSmi()) {
+ // Smi encoding of position and length.
int encoded_slice = Smi::cast(element)->value();
- int pos = StringBuilderSubstringPosition::decode(encoded_slice);
- int len = StringBuilderSubstringLength::decode(encoded_slice);
+ int pos;
+ int len;
+ if (encoded_slice > 0) {
+ // Position and length encoded in one smi.
+ pos = StringBuilderSubstringPosition::decode(encoded_slice);
+ len = StringBuilderSubstringLength::decode(encoded_slice);
+ } else {
+ // Position and length encoded in two smis.
+ Object* obj = fixed_array->get(++i);
+ ASSERT(obj->IsSmi());
+ pos = Smi::cast(obj)->value();
+ len = -encoded_slice;
+ }
String::WriteToFlat(special,
sink + position,
pos,
@@ -3789,6 +3835,10 @@
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSArray, array, args[0]);
CONVERT_CHECKED(String, special, args[1]);
+
+  // The encoding of slices in one or two smis below relies on this assumption.
+ ASSERT(Smi::kMaxValue >= String::kMaxLength);
+
int special_length = special->length();
Object* smi_array_length = array->length();
if (!smi_array_length->IsSmi()) {
@@ -3816,13 +3866,29 @@
for (int i = 0; i < array_length; i++) {
Object* elt = fixed_array->get(i);
if (elt->IsSmi()) {
+ // Smi encoding of position and length.
int len = Smi::cast(elt)->value();
- int pos = len >> 11;
- len &= 0x7ff;
- if (pos + len > special_length) {
- return Top::Throw(Heap::illegal_argument_symbol());
+ if (len > 0) {
+ // Position and length encoded in one smi.
+ int pos = len >> 11;
+ len &= 0x7ff;
+ if (pos + len > special_length) {
+ return Top::Throw(Heap::illegal_argument_symbol());
+ }
+ position += len;
+ } else {
+ // Position and length encoded in two smis.
+ position += (-len);
+ // Get the position and check that it is also a smi.
+ i++;
+ if (i >= array_length) {
+ return Top::Throw(Heap::illegal_argument_symbol());
+ }
+ Object* pos = fixed_array->get(i);
+ if (!pos->IsSmi()) {
+ return Top::Throw(Heap::illegal_argument_symbol());
+ }
}
- position += len;
} else if (elt->IsString()) {
String* element = String::cast(elt);
int element_length = element->length();
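The two decode loops above agree on one convention for slices of `special`: a single positive smi packs (position, length), and anything that does not fit is stored as a pair, -length followed by position. A sketch of the convention with plain ints standing in for smis; the 11-bit length field mirrors the `len >> 11` / `len & 0x7ff` decoding above, and the 30-bit smi payload range is an assumption:

    static const int kLengthBits = 11;
    static const int kMaxPackedLength = (1 << kLengthBits) - 1;  // 0x7ff
    static const int kMaxSmi = (1 << 30) - 1;  // Assumed smi payload range.

    // Returns the number of slots written (1 or 2). Requires len > 0 so the
    // sign of the first slot can serve as the tag.
    static int EncodeSlice(int pos, int len, int out[2]) {
      if (len <= kMaxPackedLength && pos <= (kMaxSmi >> kLengthBits)) {
        out[0] = (pos << kLengthBits) | len;  // One-smi encoding.
        return 1;
      }
      out[0] = -len;  // Two-smi encoding: negative length first...
      out[1] = pos;   // ...then the position.
      return 2;
    }

    // Returns the number of slots consumed, matching EncodeSlice.
    static int DecodeSlice(const int* in, int* pos, int* len) {
      if (in[0] > 0) {
        *pos = in[0] >> kLengthBits;
        *len = in[0] & kMaxPackedLength;
        return 1;
      }
      *len = -in[0];
      *pos = in[1];
      return 2;
    }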
@@ -4336,8 +4402,6 @@
Object* result = Heap::AllocateArgumentsObject(callee, length);
if (result->IsFailure()) return result;
- ASSERT(Heap::InNewSpace(result));
-
// Allocate the elements if needed.
if (length > 0) {
// Allocate the fixed array.
@@ -4350,8 +4414,7 @@
for (int i = 0; i < length; i++) {
array->set(i, *--parameters, mode);
}
- JSObject::cast(result)->set_elements(FixedArray::cast(obj),
- SKIP_WRITE_BARRIER);
+ JSObject::cast(result)->set_elements(FixedArray::cast(obj));
}
return result;
}
@@ -4797,6 +4860,12 @@
}
+static Object* Runtime_PromoteScheduledException(Arguments args) {
+ ASSERT_EQ(0, args.length());
+ return Top::PromoteScheduledException();
+}
+
+
static Object* Runtime_ThrowReferenceError(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 1);
@@ -4943,6 +5012,9 @@
PrintF("DebugPrint: ");
}
args[0]->Print();
+ if (args[0]->IsHeapObject()) {
+ HeapObject::cast(args[0])->map()->Print();
+ }
#else
// ShortPrint is available in release mode. Print is not.
args[0]->ShortPrint();
@@ -5964,14 +6036,33 @@
// Get the property names.
jsproto = obj;
+ int proto_with_hidden_properties = 0;
for (int i = 0; i < length; i++) {
jsproto->GetLocalPropertyNames(*names,
i == 0 ? 0 : local_property_count[i - 1]);
+ if (!GetHiddenProperties(jsproto, false)->IsUndefined()) {
+ proto_with_hidden_properties++;
+ }
if (i < length - 1) {
jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
}
}
+  // Filter out the name of the hidden properties object.
+ if (proto_with_hidden_properties > 0) {
+ Handle<FixedArray> old_names = names;
+ names = Factory::NewFixedArray(
+ names->length() - proto_with_hidden_properties);
+ int dest_pos = 0;
+ for (int i = 0; i < total_property_count; i++) {
+ Object* name = old_names->get(i);
+ if (name == Heap::hidden_symbol()) {
+ continue;
+ }
+ names->set(dest_pos++, name);
+ }
+ }
+
DeleteArray(local_property_count);
return *Factory::NewJSArrayWithElements(names);
}
@@ -6778,8 +6869,9 @@
// Get the stack walk text for this frame.
Handle<String> frame_text;
- if (strlen(frames[i].text) > 0) {
- Vector<const char> str(frames[i].text, strlen(frames[i].text));
+ int frame_text_length = StrLength(frames[i].text);
+ if (frame_text_length > 0) {
+ Vector<const char> str(frames[i].text, frame_text_length);
frame_text = Factory::NewStringFromAscii(str);
}
@@ -7246,7 +7338,7 @@
// function(arguments,__source__) {return eval(__source__);}
static const char* source_str =
"(function(arguments,__source__){return eval(__source__);})";
- static const int source_str_length = strlen(source_str);
+ static const int source_str_length = StrLength(source_str);
Handle<String> function_source =
Factory::NewStringFromAscii(Vector<const char>(source_str,
source_str_length));
@@ -7603,8 +7695,31 @@
CONVERT_CHECKED(JSFunction, f, args[0]);
return f->shared()->inferred_name();
}
+
#endif // ENABLE_DEBUGGER_SUPPORT
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+static Object* Runtime_ProfilerResume(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_CHECKED(Smi, smi_modules, args[0]);
+ v8::V8::ResumeProfilerEx(smi_modules->value());
+ return Heap::undefined_value();
+}
+
+
+static Object* Runtime_ProfilerPause(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_CHECKED(Smi, smi_modules, args[0]);
+ v8::V8::PauseProfilerEx(smi_modules->value());
+ return Heap::undefined_value();
+}
+
+#endif // ENABLE_LOGGING_AND_PROFILING
// Finds the script object from the script data. NOTE: This operation uses
// heap traversal to find the function generated for the source position
@@ -7711,7 +7826,7 @@
Object* fun = frame->function();
Address pc = frame->pc();
Address start = frame->code()->address();
- Smi* offset = Smi::FromInt(pc - start);
+ Smi* offset = Smi::FromInt(static_cast<int>(pc - start));
FixedArray* elements = FixedArray::cast(result->elements());
if (cursor + 2 < elements->length()) {
elements->set(cursor++, recv);
@@ -7758,6 +7873,13 @@
}
+static Object* Runtime_DeleteHandleScopeExtensions(Arguments args) {
+ ASSERT(args.length() == 0);
+ HandleScope::DeleteExtensions();
+ return Heap::undefined_value();
+}
+
+
#ifdef DEBUG
// ListNatives is ONLY used by the fuzz-natives.js in debug mode
// Exclude the code in release mode.
@@ -7770,7 +7892,8 @@
{ \
HandleScope inner; \
Handle<String> name = \
- Factory::NewStringFromAscii(Vector<const char>(#Name, strlen(#Name))); \
+ Factory::NewStringFromAscii( \
+ Vector<const char>(#Name, StrLength(#Name))); \
Handle<JSArray> pair = Factory::NewJSArray(0); \
SetElement(pair, 0, name); \
SetElement(pair, 1, Handle<Smi>(Smi::FromInt(argc))); \
diff --git a/src/runtime.h b/src/runtime.h
index 6b1ce48..8580233 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -149,7 +149,7 @@
F(StringIndexOf, 3, 1) \
F(StringLastIndexOf, 3, 1) \
F(StringLocaleCompare, 2, 1) \
- F(StringSlice, 3, 1) \
+ F(SubString, 3, 1) \
F(StringReplaceRegExpWithString, 4, 1) \
F(StringMatch, 3, 1) \
F(StringTrim, 3, 1) \
@@ -234,6 +234,7 @@
F(ReThrow, 1, 1) \
F(ThrowReferenceError, 1, 1) \
F(StackGuard, 1, 1) \
+ F(PromoteScheduledException, 0, 1) \
\
/* Contexts */ \
F(NewContext, 1, 1) \
@@ -263,6 +264,8 @@
F(Log, 2, 1) \
/* ES5 */ \
F(LocalKeys, 1, 1) \
+ /* Handle scopes */ \
+ F(DeleteHandleScopeExtensions, 0, 1) \
\
/* Pseudo functions - handled as macros by parser */ \
F(IS_VAR, 1, 1)
@@ -315,6 +318,14 @@
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
#endif
+#ifdef ENABLE_LOGGING_AND_PROFILING
+#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F) \
+ F(ProfilerResume, 1, 1) \
+ F(ProfilerPause, 1, 1)
+#else
+#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
+#endif
+
#ifdef DEBUG
#define RUNTIME_FUNCTION_LIST_DEBUG(F) \
/* Testing */ \
@@ -333,7 +344,8 @@
RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
RUNTIME_FUNCTION_LIST_DEBUG(F) \
- RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
+ RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \
+ RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)
// ----------------------------------------------------------------------------
// Runtime provides access to all C++ runtime functions.
diff --git a/src/runtime.js b/src/runtime.js
index 789bfdb..105749a 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -128,7 +128,10 @@
if (IS_STRING(a) && IS_STRING(b)) {
return %StringCompare(a, b);
} else {
- return %NumberCompare(%ToNumber(a), %ToNumber(b), ncr);
+ var a_number = %ToNumber(a);
+ var b_number = %ToNumber(b);
+ if (NUMBER_IS_NAN(a_number) || NUMBER_IS_NAN(b_number)) return ncr;
+ return %NumberCompare(a_number, b_number, ncr);
}
}
@@ -143,16 +146,16 @@
function ADD(x) {
// Fast case: Check for number operands and do the addition.
if (IS_NUMBER(this) && IS_NUMBER(x)) return %NumberAdd(this, x);
- if (IS_STRING(this) && IS_STRING(x)) return %StringAdd(this, x);
+ if (IS_STRING(this) && IS_STRING(x)) return %_StringAdd(this, x);
// Default implementation.
var a = %ToPrimitive(this, NO_HINT);
var b = %ToPrimitive(x, NO_HINT);
if (IS_STRING(a)) {
- return %StringAdd(a, %ToString(b));
+ return %_StringAdd(a, %ToString(b));
} else if (IS_STRING(b)) {
- return %StringAdd(%ToString(a), b);
+ return %_StringAdd(%ToString(a), b);
} else {
return %NumberAdd(%ToNumber(a), %ToNumber(b));
}
@@ -170,7 +173,7 @@
: %ToString(%ToPrimitive(y, NO_HINT));
}
}
- return %StringAdd(this, y);
+ return %_StringAdd(this, y);
}
@@ -186,7 +189,7 @@
: %ToString(%ToPrimitive(x, NO_HINT));
}
}
- return %StringAdd(x, y);
+ return %_StringAdd(x, y);
}
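The COMPARE change makes the NaN handling explicit: if either operand converts to NaN the operands are not comparable, and the caller-supplied ncr must come back before any numeric comparison runs. The same contract in C++ form; Compare and the result constants are illustrative, and `a != a` is the classic NaN test:

    enum { kLess = -1, kEqual = 0, kGreater = 1 };

    // ncr plays the role of COMPARE's third argument: the value to return
    // when the operands cannot be ordered.
    static int Compare(double a, double b, int ncr) {
      if (a != a || b != b) return ncr;  // NaN is unequal to itself.
      if (a < b) return kLess;
      if (a > b) return kGreater;
      return kEqual;
    }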
diff --git a/src/scanner.cc b/src/scanner.cc
index 3dae414..0d3b789 100644
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -49,17 +49,11 @@
// ----------------------------------------------------------------------------
// UTF8Buffer
-UTF8Buffer::UTF8Buffer() {
- static const int kInitialCapacity = 1 * KB;
- data_ = NewArray<char>(kInitialCapacity);
- limit_ = ComputeLimit(data_, kInitialCapacity);
- Reset();
- ASSERT(Capacity() == kInitialCapacity && pos() == 0);
-}
+UTF8Buffer::UTF8Buffer() : data_(NULL), limit_(NULL) { }
UTF8Buffer::~UTF8Buffer() {
- DeleteArray(data_);
+ if (data_ != NULL) DeleteArray(data_);
}
@@ -69,7 +63,7 @@
int old_capacity = Capacity();
int old_position = pos();
int new_capacity =
- Min(old_capacity * 2, old_capacity + kCapacityGrowthLimit);
+ Min(old_capacity * 3, old_capacity + kCapacityGrowthLimit);
char* new_data = NewArray<char>(new_capacity);
memcpy(new_data, data_, old_position);
DeleteArray(data_);
@@ -194,11 +188,142 @@
// ----------------------------------------------------------------------------
+// Keyword Matcher
+KeywordMatcher::FirstState KeywordMatcher::first_states_[] = {
+ { "break", KEYWORD_PREFIX, Token::BREAK },
+ { NULL, C, Token::ILLEGAL },
+ { NULL, D, Token::ILLEGAL },
+ { "else", KEYWORD_PREFIX, Token::ELSE },
+ { NULL, F, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, I, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, N, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { "return", KEYWORD_PREFIX, Token::RETURN },
+ { "switch", KEYWORD_PREFIX, Token::SWITCH },
+ { NULL, T, Token::ILLEGAL },
+ { NULL, UNMATCHABLE, Token::ILLEGAL },
+ { NULL, V, Token::ILLEGAL },
+ { NULL, W, Token::ILLEGAL }
+};
+
+
+void KeywordMatcher::Step(uc32 input) {
+ switch (state_) {
+ case INITIAL: {
+      // Matching the first character is the only state with significant fanout.
+ // Match only lower-case letters in range 'b'..'w'.
+ unsigned int offset = input - kFirstCharRangeMin;
+ if (offset < kFirstCharRangeLength) {
+ state_ = first_states_[offset].state;
+ if (state_ == KEYWORD_PREFIX) {
+ keyword_ = first_states_[offset].keyword;
+ counter_ = 1;
+ keyword_token_ = first_states_[offset].token;
+ }
+ return;
+ }
+ break;
+ }
+ case KEYWORD_PREFIX:
+ if (keyword_[counter_] == input) {
+ ASSERT_NE(input, '\0');
+ counter_++;
+ if (keyword_[counter_] == '\0') {
+ state_ = KEYWORD_MATCHED;
+ token_ = keyword_token_;
+ }
+ return;
+ }
+ break;
+ case KEYWORD_MATCHED:
+ token_ = Token::IDENTIFIER;
+ break;
+ case C:
+ if (MatchState(input, 'a', CA)) return;
+ if (MatchState(input, 'o', CO)) return;
+ break;
+ case CA:
+ if (MatchKeywordStart(input, "case", 2, Token::CASE)) return;
+ if (MatchKeywordStart(input, "catch", 2, Token::CATCH)) return;
+ break;
+ case CO:
+ if (MatchState(input, 'n', CON)) return;
+ break;
+ case CON:
+ if (MatchKeywordStart(input, "const", 3, Token::CONST)) return;
+ if (MatchKeywordStart(input, "continue", 3, Token::CONTINUE)) return;
+ break;
+ case D:
+ if (MatchState(input, 'e', DE)) return;
+ if (MatchKeyword(input, 'o', KEYWORD_MATCHED, Token::DO)) return;
+ break;
+ case DE:
+ if (MatchKeywordStart(input, "debugger", 2, Token::DEBUGGER)) return;
+ if (MatchKeywordStart(input, "default", 2, Token::DEFAULT)) return;
+ if (MatchKeywordStart(input, "delete", 2, Token::DELETE)) return;
+ break;
+ case F:
+ if (MatchKeywordStart(input, "false", 1, Token::FALSE_LITERAL)) return;
+ if (MatchKeywordStart(input, "finally", 1, Token::FINALLY)) return;
+ if (MatchKeywordStart(input, "for", 1, Token::FOR)) return;
+ if (MatchKeywordStart(input, "function", 1, Token::FUNCTION)) return;
+ break;
+ case I:
+ if (MatchKeyword(input, 'f', KEYWORD_MATCHED, Token::IF)) return;
+ if (MatchKeyword(input, 'n', IN, Token::IN)) return;
+ break;
+ case IN:
+ token_ = Token::IDENTIFIER;
+ if (MatchKeywordStart(input, "instanceof", 2, Token::INSTANCEOF)) {
+ return;
+ }
+ break;
+ case N:
+ if (MatchKeywordStart(input, "native", 1, Token::NATIVE)) return;
+ if (MatchKeywordStart(input, "new", 1, Token::NEW)) return;
+ if (MatchKeywordStart(input, "null", 1, Token::NULL_LITERAL)) return;
+ break;
+ case T:
+ if (MatchState(input, 'h', TH)) return;
+ if (MatchState(input, 'r', TR)) return;
+ if (MatchKeywordStart(input, "typeof", 1, Token::TYPEOF)) return;
+ break;
+ case TH:
+ if (MatchKeywordStart(input, "this", 2, Token::THIS)) return;
+ if (MatchKeywordStart(input, "throw", 2, Token::THROW)) return;
+ break;
+ case TR:
+ if (MatchKeywordStart(input, "true", 2, Token::TRUE_LITERAL)) return;
+ if (MatchKeyword(input, 'y', KEYWORD_MATCHED, Token::TRY)) return;
+ break;
+ case V:
+ if (MatchKeywordStart(input, "var", 1, Token::VAR)) return;
+ if (MatchKeywordStart(input, "void", 1, Token::VOID)) return;
+ break;
+ case W:
+ if (MatchKeywordStart(input, "while", 1, Token::WHILE)) return;
+ if (MatchKeywordStart(input, "with", 1, Token::WITH)) return;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ // On fallthrough, it's a failure.
+ state_ = UNMATCHABLE;
+}
+
+
+// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner(bool pre) : stack_overflow_(false), is_pre_parsing_(pre) {
- Token::Initialize();
-}
+Scanner::Scanner(bool pre) : stack_overflow_(false), is_pre_parsing_(pre) { }
void Scanner::Init(Handle<String> source, unibrow::CharacterStream* stream,
@@ -215,12 +340,11 @@
position_ = position;
- // Reset literals buffer
- literals_.Reset();
-
// Set c0_ (one character ahead)
ASSERT(kCharacterLookaheadBufferSize == 1);
Advance();
+  // Initialize current_ to not refer to a literal buffer.
+ current_.literal_buffer = NULL;
// Skip initial whitespace allowing HTML comment ends just like
// after a newline and scan first token.
@@ -253,17 +377,23 @@
void Scanner::StartLiteral() {
- next_.literal_pos = literals_.pos();
+ // Use the first buffer unless it's currently in use by the current_ token.
+ // In most cases we won't have two literals/identifiers in a row, so
+ // the second buffer won't be used very often and is unlikely to grow much.
+ UTF8Buffer* free_buffer =
+ (current_.literal_buffer != &literal_buffer_1_) ? &literal_buffer_1_
+ : &literal_buffer_2_;
+ next_.literal_buffer = free_buffer;
+ free_buffer->Reset();
}
void Scanner::AddChar(uc32 c) {
- literals_.AddChar(c);
+ next_.literal_buffer->AddChar(c);
}
void Scanner::TerminateLiteral() {
- next_.literal_end = literals_.pos();
AddChar(0);
}
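
(Editorial sketch of the scheme introduced in the hunk above: StartLiteral() now picks
whichever of the two buffers is not referenced by current_, so the current token's
literal stays valid while the next token is scanned. This is a minimal stand-in with
simplified types — std::string instead of UTF8Buffer; the names are illustrative, not
V8's.)

    #include <cassert>
    #include <string>

    // Two literal buffers shared between the current and next token.
    struct TokenDesc {
      std::string* literal_buffer = nullptr;  // null for non-literal tokens
    };

    struct MiniScanner {
      std::string buffer1, buffer2;
      TokenDesc current, next;

      // Pick whichever buffer the current token is not using.
      void StartLiteral() {
        std::string* free_buffer =
            (current.literal_buffer != &buffer1) ? &buffer1 : &buffer2;
        free_buffer->clear();
        next.literal_buffer = free_buffer;
      }

      void AddChar(char c) { next.literal_buffer->push_back(c); }

      // Promote next to current; its buffer stays valid until two tokens later.
      void Advance() { current = next; next.literal_buffer = nullptr; }
    };

    int main() {
      MiniScanner s;
      s.StartLiteral(); s.AddChar('f'); s.AddChar('o'); s.AddChar('o');
      s.Advance();                       // "foo" is now the current literal
      s.StartLiteral(); s.AddChar('x');  // scanning the next literal...
      assert(*s.current.literal_buffer == "foo");  // ...does not clobber it
      return 0;
    }
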
@@ -383,6 +513,7 @@
void Scanner::Scan() {
+ next_.literal_buffer = NULL;
Token::Value token;
has_line_terminator_before_next_ = false;
do {
@@ -855,48 +986,40 @@
Token::Value Scanner::ScanIdentifier() {
ASSERT(kIsIdentifierStart.get(c0_));
- bool has_escapes = false;
StartLiteral();
+ KeywordMatcher keyword_match;
+
// Scan identifier start character.
if (c0_ == '\\') {
- has_escapes = true;
uc32 c = ScanIdentifierUnicodeEscape();
// Only allow legal identifier start characters.
if (!kIsIdentifierStart.get(c)) return Token::ILLEGAL;
AddChar(c);
+ keyword_match.Fail();
} else {
AddChar(c0_);
+ keyword_match.AddChar(c0_);
Advance();
}
// Scan the rest of the identifier characters.
while (kIsIdentifierPart.get(c0_)) {
if (c0_ == '\\') {
- has_escapes = true;
uc32 c = ScanIdentifierUnicodeEscape();
// Only allow legal identifier part characters.
if (!kIsIdentifierPart.get(c)) return Token::ILLEGAL;
AddChar(c);
+ keyword_match.Fail();
} else {
AddChar(c0_);
+ keyword_match.AddChar(c0_);
Advance();
}
}
TerminateLiteral();
- // We don't have any 1-letter keywords (this is probably a common case).
- if ((next_.literal_end - next_.literal_pos) == 1) {
- return Token::IDENTIFIER;
- }
-
- // If the identifier contains unicode escapes, it must not be
- // resolved to a keyword.
- if (has_escapes) {
- return Token::IDENTIFIER;
- }
-
- return Token::Lookup(&literals_.data()[next_.literal_pos]);
+ return keyword_match.token();
}
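
(Editorial sketch of the driving pattern used by the new ScanIdentifier above: feed
the matcher one character at a time, call Fail() on unicode escapes, and read token()
at the end. MiniMatcher is a deliberately simplified, hypothetical stand-in for
KeywordMatcher that recognizes only "if" and "in".)

    #include <cstdio>

    enum Token { IDENTIFIER, IF, IN };

    // Simplified stand-in for KeywordMatcher; anything it cannot match
    // degrades to IDENTIFIER, exactly like the real class.
    struct MiniMatcher {
      int pos = 0;
      Token token = IDENTIFIER;
      bool dead = false;

      void AddChar(char c) {
        if (dead) return;
        if (pos == 0 && c == 'i') { pos = 1; return; }
        if (pos == 1 && c == 'f') { pos = 2; token = IF; return; }
        if (pos == 1 && c == 'n') { pos = 2; token = IN; return; }
        Fail();  // any other character kills the match
      }
      void Fail() { dead = true; token = IDENTIFIER; }
    };

    Token Scan(const char* id, bool has_escape) {
      MiniMatcher m;
      for (const char* p = id; *p; p++) {
        if (has_escape) m.Fail();  // escaped identifiers are never keywords
        else m.AddChar(*p);
      }
      return m.token;
    }

    int main() {
      printf("%d %d %d\n", Scan("if", false),     // IF
                           Scan("in", false),     // IN
                           Scan("info", false));  // IDENTIFIER
      return 0;
    }
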
diff --git a/src/scanner.h b/src/scanner.h
index a201d0e..9d7b34e 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -41,6 +41,7 @@
~UTF8Buffer();
void AddChar(uc32 c) {
+ ASSERT_NOT_NULL(data_);
if (cursor_ <= limit_ &&
static_cast<unsigned>(c) <= unibrow::Utf8::kMaxOneByteChar) {
*cursor_++ = static_cast<char>(c);
@@ -49,17 +50,30 @@
}
}
- void Reset() { cursor_ = data_; }
- int pos() const { return cursor_ - data_; }
+ void Reset() {
+ if (data_ == NULL) {
+ data_ = NewArray<char>(kInitialCapacity);
+ limit_ = ComputeLimit(data_, kInitialCapacity);
+ }
+ cursor_ = data_;
+ }
+
+ int pos() const {
+ ASSERT_NOT_NULL(data_);
+ return static_cast<int>(cursor_ - data_);
+ }
+
char* data() const { return data_; }
private:
+ static const int kInitialCapacity = 256;
char* data_;
char* cursor_;
char* limit_;
int Capacity() const {
- return (limit_ - data_) + unibrow::Utf8::kMaxEncodedSize;
+ ASSERT_NOT_NULL(data_);
+ return static_cast<int>(limit_ - data_) + unibrow::Utf8::kMaxEncodedSize;
}
static char* ComputeLimit(char* data, int capacity) {
@@ -123,6 +137,121 @@
};
+class KeywordMatcher {
+// Incrementally recognize keywords.
+//
+// Recognized keywords:
+// break case catch const* continue debugger* default delete do else
+// finally false for function if in instanceof native* new null
+// return switch this throw true try typeof var void while with
+//
+// *: Actually "future reserved keywords". These are the only ones we
+// recognize; the remaining ones are allowed as identifiers.
+ public:
+ KeywordMatcher() : state_(INITIAL), token_(Token::IDENTIFIER) {}
+
+ Token::Value token() { return token_; }
+
+ inline void AddChar(uc32 input) {
+ if (state_ != UNMATCHABLE) {
+ Step(input);
+ }
+ }
+
+ void Fail() {
+ token_ = Token::IDENTIFIER;
+ state_ = UNMATCHABLE;
+ }
+
+ private:
+ enum State {
+ UNMATCHABLE,
+ INITIAL,
+ KEYWORD_PREFIX,
+ KEYWORD_MATCHED,
+ C,
+ CA,
+ CO,
+ CON,
+ D,
+ DE,
+ F,
+ I,
+ IN,
+ N,
+ T,
+ TH,
+ TR,
+ V,
+ W
+ };
+
+ struct FirstState {
+ const char* keyword;
+ State state;
+ Token::Value token;
+ };
+
+ // Range of possible first characters of a keyword.
+ static const unsigned int kFirstCharRangeMin = 'b';
+ static const unsigned int kFirstCharRangeMax = 'w';
+ static const unsigned int kFirstCharRangeLength =
+ kFirstCharRangeMax - kFirstCharRangeMin + 1;
+ // State map for first keyword character range.
+ static FirstState first_states_[kFirstCharRangeLength];
+
+ // Current state.
+ State state_;
+ // Token for currently added characters.
+ Token::Value token_;
+
+ // Matching a specific keyword string (there is only one possible valid
+ // keyword with the current prefix).
+ const char* keyword_;
+ int counter_;
+ Token::Value keyword_token_;
+
+ // If input equals keyword's character at position, continue matching keyword
+ // from that position.
+ inline bool MatchKeywordStart(uc32 input,
+ const char* keyword,
+ int position,
+ Token::Value token_if_match) {
+ if (input == keyword[position]) {
+ state_ = KEYWORD_PREFIX;
+ this->keyword_ = keyword;
+ this->counter_ = position + 1;
+ this->keyword_token_ = token_if_match;
+ return true;
+ }
+ return false;
+ }
+
+ // If input equals match character, transition to new state and return true.
+ inline bool MatchState(uc32 input, char match, State new_state) {
+ if (input == match) {
+ state_ = new_state;
+ return true;
+ }
+ return false;
+ }
+
+ inline bool MatchKeyword(uc32 input,
+ char match,
+ State new_state,
+ Token::Value keyword_token) {
+ if (input == match) { // Matched the complete keyword.
+ state_ = new_state;
+ token_ = keyword_token;
+ return true;
+ }
+ return false;
+ }
+
+ void Step(uc32 input);
+};
+
+
class Scanner {
public:
@@ -163,26 +292,30 @@
// token returned by Next()). The string is 0-terminated and in
// UTF-8 format; they may contain 0-characters. Literal strings are
// collected for identifiers, strings, and numbers.
+ // These functions only give the correct result if the literal
+ // was scanned between calls to StartLiteral() and TerminateLiteral().
const char* literal_string() const {
- return &literals_.data()[current_.literal_pos];
+ return current_.literal_buffer->data();
}
int literal_length() const {
- return current_.literal_end - current_.literal_pos;
- }
-
- Vector<const char> next_literal() const {
- return Vector<const char>(next_literal_string(), next_literal_length());
+ // Excluding terminal '\0' added by TerminateLiteral().
+ return current_.literal_buffer->pos() - 1;
}
// Returns the literal string for the next token (the token that
// would be returned if Next() were called).
const char* next_literal_string() const {
- return &literals_.data()[next_.literal_pos];
+ return next_.literal_buffer->data();
}
// Returns the length of the next token (that would be returned if
// Next() were called).
int next_literal_length() const {
- return next_.literal_end - next_.literal_pos;
+ return next_.literal_buffer->pos() - 1;
+ }
+
+ Vector<const char> next_literal() const {
+ return Vector<const char>(next_literal_string(),
+ next_literal_length());
}
// Scans the input as a regular expression pattern, previous
@@ -224,7 +357,8 @@
// Buffer to hold literal values (identifiers, strings, numbers)
// using 0-terminated UTF-8 encoding.
- UTF8Buffer literals_;
+ UTF8Buffer literal_buffer_1_;
+ UTF8Buffer literal_buffer_2_;
bool stack_overflow_;
static StaticResource<Utf8Decoder> utf8_decoder_;
@@ -236,7 +370,7 @@
struct TokenDesc {
Token::Value token;
Location location;
- int literal_pos, literal_end;
+ UTF8Buffer* literal_buffer;
};
TokenDesc current_; // desc for current token (as returned by Next())
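
(Editorial sketch of the lazy allocation added to UTF8Buffer above: data_ stays NULL
until the first Reset(), so a scanner that never collects a literal never pays for a
buffer, and pos() counts the terminating '\0' that TerminateLiteral() appends. The
sketch is simplified — fixed capacity, no growth logic, illustrative names.)

    #include <cassert>
    #include <cstdlib>

    class LazyBuffer {
     public:
      static const int kInitialCapacity = 256;
      LazyBuffer() : data_(nullptr), cursor_(nullptr) {}
      ~LazyBuffer() { std::free(data_); }

      void Reset() {
        if (data_ == nullptr) {  // first use: allocate on demand
          data_ = static_cast<char*>(std::malloc(kInitialCapacity));
          assert(data_ != nullptr);
        }
        cursor_ = data_;
      }

      void AddChar(char c) {
        assert(data_ != nullptr);          // mirrors ASSERT_NOT_NULL(data_)
        assert(pos() < kInitialCapacity);  // sketch only: no growth
        *cursor_++ = c;
      }

      int pos() const { return static_cast<int>(cursor_ - data_); }

     private:
      char* data_;
      char* cursor_;
    };

    int main() {
      LazyBuffer b;        // no allocation yet
      b.Reset();           // allocates on first Reset()
      b.AddChar('a'); b.AddChar('\0');
      assert(b.pos() == 2);  // like literal_length() == pos() - 1 for "a"
      return 0;
    }
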
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index 8a237fd..8b989d7 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -100,8 +100,7 @@
break;
case Slot::LOOKUP:
- case Slot::GLOBAL:
- // these are currently not used
+ // This is currently not used.
UNREACHABLE();
break;
}
@@ -419,7 +418,7 @@
Object** p0 = StackSlotEntriesAddr(code) + 1;
Object** p = p0;
while (*p != NULL) {
- if (*p == name) return p - p0;
+ if (*p == name) return static_cast<int>(p - p0);
p++;
}
}
@@ -450,7 +449,7 @@
ReadInt(p + 1, &v);
Variable::Mode mode_value = static_cast<Variable::Mode>(v);
if (mode != NULL) *mode = mode_value;
- result = ((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
+ result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
ContextSlotCache::Update(code, name, mode_value, result);
return result;
}
@@ -482,7 +481,7 @@
p = p0 + n;
while (p > p0) {
p--;
- if (*p == name) return p - p0;
+ if (*p == name) return static_cast<int>(p - p0);
}
}
return -1;
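
(Editorial note on the scopeinfo.cc hunks above: `p - p0` has type ptrdiff_t, which is
64 bits wide on LP64 targets, so returning it as int needs an explicit narrowing cast
to avoid implicit-truncation warnings. A tiny standalone illustration:)

    #include <cstdio>
    #include <cstddef>

    int main() {
      int slots[8];
      int* p0 = slots;
      int* p = slots + 3;
      ptrdiff_t diff = p - p0;             // 64-bit on LP64 targets
      int index = static_cast<int>(diff);  // explicit narrowing, as in the hunks
      printf("index = %d (sizeof(ptrdiff_t) = %zu)\n", index, sizeof(diff));
      return 0;
    }
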
diff --git a/src/scopes.cc b/src/scopes.cc
index 25873fa..7da06cd 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -42,7 +42,7 @@
/* nothing to do */
virtual ~ZoneAllocator() {}
- virtual void* New(size_t size) { return Zone::New(size); }
+ virtual void* New(size_t size) { return Zone::New(static_cast<int>(size)); }
/* ignored - Zone is freed in one fell swoop */
virtual void Delete(void* p) {}
@@ -540,11 +540,11 @@
// Lookup a variable starting with this scope. The result is either
-// the statically resolved (local!) variable belonging to an outer scope,
-// or NULL. It may be NULL because a) we couldn't find a variable, or b)
-// because the variable is just a guess (and may be shadowed by another
-// variable that is introduced dynamically via an 'eval' call or a 'with'
-// statement).
+// the statically resolved variable belonging to an outer scope, or
+// NULL. It may be NULL because a) we couldn't find a variable, or b)
+// because the variable is just a guess (and may be shadowed by
+// another variable that is introduced dynamically via an 'eval' call
+// or a 'with' statement).
Variable* Scope::LookupRecursive(Handle<String> name,
bool inner_lookup,
Variable** invalidated_local) {
@@ -598,9 +598,11 @@
if (inner_lookup)
var->is_accessed_from_inner_scope_ = true;
- // If the variable we have found is just a guess, invalidate the result.
+ // If the variable we have found is just a guess, invalidate the
+ // result. If the found variable is local, record that fact so we
+ // can generate fast code to get it if it is not shadowed by eval.
if (guess) {
- *invalidated_local = var;
+ if (!var->is_global()) *invalidated_local = var;
var = NULL;
}
diff --git a/src/serialize.cc b/src/serialize.cc
index 6ff1d7f..899e2e7 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -39,352 +39,73 @@
#include "stub-cache.h"
#include "v8threads.h"
#include "top.h"
+#include "bootstrapper.h"
namespace v8 {
namespace internal {
-// 32-bit encoding: a RelativeAddress must be able to fit in a
-// pointer: it is encoded as an Address with (from LS to MS bits):
-// - 2 bits identifying this as a HeapObject.
-// - 4 bits to encode the AllocationSpace (including special values for
-// code and fixed arrays in LO space)
-// - 27 bits identifying a word in the space, in one of three formats:
-// - paged spaces: 16 bits of page number, 11 bits of word offset in page
-// - NEW space: 27 bits of word offset
-// - LO space: 27 bits of page number
-
-const int kSpaceShift = kHeapObjectTagSize;
-const int kSpaceBits = 4;
-const int kSpaceMask = (1 << kSpaceBits) - 1;
-
-const int kOffsetShift = kSpaceShift + kSpaceBits;
-const int kOffsetBits = 11;
-const int kOffsetMask = (1 << kOffsetBits) - 1;
-
-const int kPageShift = kOffsetShift + kOffsetBits;
-const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize);
-const int kPageMask = (1 << kPageBits) - 1;
-
-const int kPageAndOffsetShift = kOffsetShift;
-const int kPageAndOffsetBits = kPageBits + kOffsetBits;
-const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1;
-
-// These values are special allocation space tags used for
-// serialization.
-// Mark the pages executable on platforms that support it.
-const int kLargeCode = LAST_SPACE + 1;
-// Allocate extra remembered-set bits.
-const int kLargeFixedArray = LAST_SPACE + 2;
-
-
-static inline AllocationSpace GetSpace(Address addr) {
- const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
- int space_number = (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
- if (space_number > LAST_SPACE) space_number = LO_SPACE;
- return static_cast<AllocationSpace>(space_number);
-}
-
-
-static inline bool IsLargeExecutableObject(Address addr) {
- const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
- const int space_number =
- (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
- return (space_number == kLargeCode);
-}
-
-
-static inline bool IsLargeFixedArray(Address addr) {
- const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
- const int space_number =
- (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
- return (space_number == kLargeFixedArray);
-}
-
-
-static inline int PageIndex(Address addr) {
- const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
- return static_cast<int>(encoded >> kPageShift) & kPageMask;
-}
-
-
-static inline int PageOffset(Address addr) {
- const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
- const int offset = static_cast<int>(encoded >> kOffsetShift) & kOffsetMask;
- return offset << kObjectAlignmentBits;
-}
-
-
-static inline int NewSpaceOffset(Address addr) {
- const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
- const int page_offset =
- static_cast<int>(encoded >> kPageAndOffsetShift) & kPageAndOffsetMask;
- return page_offset << kObjectAlignmentBits;
-}
-
-
-static inline int LargeObjectIndex(Address addr) {
- const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
- return static_cast<int>(encoded >> kPageAndOffsetShift) & kPageAndOffsetMask;
-}
-
-
-// A RelativeAddress encodes a heap address that is independent of
-// the actual memory addresses in real heap. The general case (for the
-// OLD, CODE and MAP spaces) is as a (space id, page number, page offset)
-// triple. The NEW space has page number == 0, because there are no
-// pages. The LARGE_OBJECT space has page offset = 0, since there is
-// exactly one object per page. RelativeAddresses are encodable as
-// Addresses, so that they can replace the map() pointers of
-// HeapObjects. The encoded Addresses are also encoded as HeapObjects
-// and allow for marking (is_marked() see mark(), clear_mark()...) as
-// used by the Mark-Compact collector.
-
-class RelativeAddress {
+// Mapping objects to their location after deserialization.
+// This is used while building the snapshot, but not at runtime by V8.
+class SerializationAddressMapper {
public:
- RelativeAddress(AllocationSpace space,
- int page_index,
- int page_offset)
- : space_(space), page_index_(page_index), page_offset_(page_offset) {
- // Assert that the space encoding (plus the two pseudo-spaces for
- // special large objects) fits in the available bits.
- ASSERT(((LAST_SPACE + 2) & ~kSpaceMask) == 0);
- ASSERT(space <= LAST_SPACE && space >= 0);
+ static bool IsMapped(HeapObject* obj) {
+ EnsureMapExists();
+ return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
}
- // Return the encoding of 'this' as an Address. Decode with constructor.
- Address Encode() const;
-
- AllocationSpace space() const {
- if (space_ > LAST_SPACE) return LO_SPACE;
- return static_cast<AllocationSpace>(space_);
- }
- int page_index() const { return page_index_; }
- int page_offset() const { return page_offset_; }
-
- bool in_paged_space() const {
- return space_ == CODE_SPACE ||
- space_ == OLD_POINTER_SPACE ||
- space_ == OLD_DATA_SPACE ||
- space_ == MAP_SPACE ||
- space_ == CELL_SPACE;
+ static int MappedTo(HeapObject* obj) {
+ ASSERT(IsMapped(obj));
+ return reinterpret_cast<intptr_t>(serialization_map_->Lookup(Key(obj),
+ Hash(obj),
+ false)->value);
}
- void next_address(int offset) { page_offset_ += offset; }
- void next_page(int init_offset = 0) {
- page_index_++;
- page_offset_ = init_offset;
+ static void Map(HeapObject* obj, int to) {
+ EnsureMapExists();
+ ASSERT(!IsMapped(obj));
+ HashMap::Entry* entry =
+ serialization_map_->Lookup(Key(obj), Hash(obj), true);
+ entry->value = Value(to);
}
-#ifdef DEBUG
- void Verify();
-#endif
-
- void set_to_large_code_object() {
- ASSERT(space_ == LO_SPACE);
- space_ = kLargeCode;
+ static void Zap() {
+ if (serialization_map_ != NULL) {
+ delete serialization_map_;
+ }
+ serialization_map_ = NULL;
}
- void set_to_large_fixed_array() {
- ASSERT(space_ == LO_SPACE);
- space_ = kLargeFixedArray;
- }
-
private:
- int space_;
- int page_index_;
- int page_offset_;
-};
-
-
-Address RelativeAddress::Encode() const {
- ASSERT(page_index_ >= 0);
- int word_offset = 0;
- int result = 0;
- switch (space_) {
- case MAP_SPACE:
- case CELL_SPACE:
- case OLD_POINTER_SPACE:
- case OLD_DATA_SPACE:
- case CODE_SPACE:
- ASSERT_EQ(0, page_index_ & ~kPageMask);
- word_offset = page_offset_ >> kObjectAlignmentBits;
- ASSERT_EQ(0, word_offset & ~kOffsetMask);
- result = (page_index_ << kPageShift) | (word_offset << kOffsetShift);
- break;
- case NEW_SPACE:
- ASSERT_EQ(0, page_index_);
- word_offset = page_offset_ >> kObjectAlignmentBits;
- ASSERT_EQ(0, word_offset & ~kPageAndOffsetMask);
- result = word_offset << kPageAndOffsetShift;
- break;
- case LO_SPACE:
- case kLargeCode:
- case kLargeFixedArray:
- ASSERT_EQ(0, page_offset_);
- ASSERT_EQ(0, page_index_ & ~kPageAndOffsetMask);
- result = page_index_ << kPageAndOffsetShift;
- break;
+ static bool SerializationMatchFun(void* key1, void* key2) {
+ return key1 == key2;
}
- // OR in AllocationSpace and kHeapObjectTag
- ASSERT_EQ(0, space_ & ~kSpaceMask);
- result |= (space_ << kSpaceShift) | kHeapObjectTag;
- return reinterpret_cast<Address>(result);
-}
-
-#ifdef DEBUG
-void RelativeAddress::Verify() {
- ASSERT(page_offset_ >= 0 && page_index_ >= 0);
- switch (space_) {
- case MAP_SPACE:
- case CELL_SPACE:
- case OLD_POINTER_SPACE:
- case OLD_DATA_SPACE:
- case CODE_SPACE:
- ASSERT(Page::kObjectStartOffset <= page_offset_ &&
- page_offset_ <= Page::kPageSize);
- break;
- case NEW_SPACE:
- ASSERT(page_index_ == 0);
- break;
- case LO_SPACE:
- case kLargeCode:
- case kLargeFixedArray:
- ASSERT(page_offset_ == 0);
- break;
+ static uint32_t Hash(HeapObject* obj) {
+ return reinterpret_cast<intptr_t>(obj->address());
}
-}
-#endif
-enum GCTreatment {
- DataObject, // Object that cannot contain a reference to new space.
- PointerObject, // Object that can contain a reference to new space.
- CodeObject // Object that contains executable code.
-};
-
-// A SimulatedHeapSpace simulates the allocation of objects in a page in
-// the heap. It uses linear allocation - that is, it doesn't simulate the
-// use of a free list. This simulated
-// allocation must exactly match that done by Heap.
-
-class SimulatedHeapSpace {
- public:
- // The default constructor initializes to an invalid state.
- SimulatedHeapSpace(): current_(LAST_SPACE, -1, -1) {}
-
- // Sets 'this' to the first address in 'space' that would be
- // returned by allocation in an empty heap.
- void InitEmptyHeap(AllocationSpace space);
-
- // Sets 'this' to the next address in 'space' that would be returned
- // by allocation in the current heap. Intended only for testing
- // serialization and deserialization in the current address space.
- void InitCurrentHeap(AllocationSpace space);
-
- // Returns the RelativeAddress where the next
- // object of 'size' bytes will be allocated, and updates 'this' to
- // point to the next free address beyond that object.
- RelativeAddress Allocate(int size, GCTreatment special_gc_treatment);
-
- private:
- RelativeAddress current_;
-};
-
-
-void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
- switch (space) {
- case MAP_SPACE:
- case CELL_SPACE:
- case OLD_POINTER_SPACE:
- case OLD_DATA_SPACE:
- case CODE_SPACE:
- current_ = RelativeAddress(space, 0, Page::kObjectStartOffset);
- break;
- case NEW_SPACE:
- case LO_SPACE:
- current_ = RelativeAddress(space, 0, 0);
- break;
+ static void* Key(HeapObject* obj) {
+ return reinterpret_cast<void*>(obj->address());
}
-}
+ static void* Value(int v) {
+ return reinterpret_cast<void*>(v);
+ }
-void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) {
- switch (space) {
- case MAP_SPACE:
- case CELL_SPACE:
- case OLD_POINTER_SPACE:
- case OLD_DATA_SPACE:
- case CODE_SPACE: {
- PagedSpace* ps;
- if (space == MAP_SPACE) {
- ps = Heap::map_space();
- } else if (space == CELL_SPACE) {
- ps = Heap::cell_space();
- } else if (space == OLD_POINTER_SPACE) {
- ps = Heap::old_pointer_space();
- } else if (space == OLD_DATA_SPACE) {
- ps = Heap::old_data_space();
- } else {
- ASSERT(space == CODE_SPACE);
- ps = Heap::code_space();
- }
- Address top = ps->top();
- Page* top_page = Page::FromAllocationTop(top);
- int page_index = 0;
- PageIterator it(ps, PageIterator::PAGES_IN_USE);
- while (it.has_next()) {
- if (it.next() == top_page) break;
- page_index++;
- }
- current_ = RelativeAddress(space,
- page_index,
- top_page->Offset(top));
- break;
+ static void EnsureMapExists() {
+ if (serialization_map_ == NULL) {
+ serialization_map_ = new HashMap(&SerializationMatchFun);
}
- case NEW_SPACE:
- current_ = RelativeAddress(space,
- 0,
- Heap::NewSpaceTop() - Heap::NewSpaceStart());
- break;
- case LO_SPACE:
- int page_index = 0;
- for (LargeObjectIterator it(Heap::lo_space()); it.has_next(); it.next()) {
- page_index++;
- }
- current_ = RelativeAddress(space, page_index, 0);
- break;
}
-}
+
+ static HashMap* serialization_map_;
+};
-RelativeAddress SimulatedHeapSpace::Allocate(int size,
- GCTreatment special_gc_treatment) {
-#ifdef DEBUG
- current_.Verify();
-#endif
- int alloc_size = OBJECT_SIZE_ALIGN(size);
- if (current_.in_paged_space() &&
- current_.page_offset() + alloc_size > Page::kPageSize) {
- ASSERT(alloc_size <= Page::kMaxHeapObjectSize);
- current_.next_page(Page::kObjectStartOffset);
- }
- RelativeAddress result = current_;
- if (current_.space() == LO_SPACE) {
- current_.next_page();
- if (special_gc_treatment == CodeObject) {
- result.set_to_large_code_object();
- } else if (special_gc_treatment == PointerObject) {
- result.set_to_large_fixed_array();
- }
- } else {
- current_.next_address(alloc_size);
- }
-#ifdef DEBUG
- current_.Verify();
- result.Verify();
-#endif
- return result;
-}
+HashMap* SerializationAddressMapper::serialization_map_ = NULL;
+
+
+
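(Editorial sketch of the pattern SerializationAddressMapper uses above: the map is
keyed by the object's current address, and the assigned serialization offset is
smuggled through the entry's void* value slot. This stand-in uses std::unordered_map
instead of V8's HashMap, whose Lookup(key, hash, insert) interface differs.)

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    // Map live object addresses to their serialized location.
    static std::unordered_map<void*, void*> g_map;

    static void* Value(int v) {
      return reinterpret_cast<void*>(static_cast<intptr_t>(v));
    }

    bool IsMapped(void* address) { return g_map.count(address) != 0; }

    int MappedTo(void* address) {
      assert(IsMapped(address));
      return static_cast<int>(reinterpret_cast<intptr_t>(g_map[address]));
    }

    void Map(void* address, int to) {
      assert(!IsMapped(address));
      g_map[address] = Value(to);  // int stored in the void* value slot
    }

    int main() {
      int dummy;  // stands in for a HeapObject
      Map(&dummy, 42);
      assert(IsMapped(&dummy) && MappedTo(&dummy) == 42);
      return 0;
    }
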
// -----------------------------------------------------------------------------
// Coding of external references.
@@ -489,12 +210,12 @@
TypeCode type,
uint16_t id,
const char* name) {
- CHECK_NE(NULL, address);
+ ASSERT_NE(NULL, address);
ExternalReferenceEntry entry;
entry.address = address;
entry.code = EncodeExternal(type, id);
entry.name = name;
- CHECK_NE(0, entry.code);
+ ASSERT_NE(0, entry.code);
refs_.Add(entry);
if (id > max_id_[type]) max_id_[type] = id;
}
@@ -575,7 +296,7 @@
Debug::k_debug_break_return_address << kDebugIdShift,
"Debug::debug_break_return_address()");
const char* debug_register_format = "Debug::register_address(%i)";
- size_t dr_format_length = strlen(debug_register_format);
+ int dr_format_length = StrLength(debug_register_format);
for (int i = 0; i < kNumJSCallerSaved; ++i) {
Vector<char> name = Vector<char>::New(dr_format_length + 1);
OS::SNPrintF(name, debug_register_format, i);
@@ -623,11 +344,11 @@
#undef C
};
- size_t top_format_length = strlen(top_address_format) - 2;
+ int top_format_length = StrLength(top_address_format) - 2;
for (uint16_t i = 0; i < Top::k_top_address_count; ++i) {
const char* address_name = AddressNames[i];
Vector<char> name =
- Vector<char>::New(top_format_length + strlen(address_name) + 1);
+ Vector<char>::New(top_format_length + StrLength(address_name) + 1);
const char* chars = name.start();
OS::SNPrintF(name, top_address_format, address_name);
Add(Top::get_address_from_id((Top::AddressId)i), TOP_ADDRESS, i, chars);
@@ -688,76 +409,80 @@
UNCLASSIFIED,
3,
"Heap::roots_address()");
- Add(ExternalReference::address_of_stack_guard_limit().address(),
+ Add(ExternalReference::address_of_stack_limit().address(),
UNCLASSIFIED,
4,
"StackGuard::address_of_jslimit()");
- Add(ExternalReference::address_of_regexp_stack_limit().address(),
+ Add(ExternalReference::address_of_real_stack_limit().address(),
UNCLASSIFIED,
5,
+ "StackGuard::address_of_real_jslimit()");
+ Add(ExternalReference::address_of_regexp_stack_limit().address(),
+ UNCLASSIFIED,
+ 6,
"RegExpStack::limit_address()");
Add(ExternalReference::new_space_start().address(),
UNCLASSIFIED,
- 6,
+ 7,
"Heap::NewSpaceStart()");
Add(ExternalReference::heap_always_allocate_scope_depth().address(),
UNCLASSIFIED,
- 7,
+ 8,
"Heap::always_allocate_scope_depth()");
Add(ExternalReference::new_space_allocation_limit_address().address(),
UNCLASSIFIED,
- 8,
+ 9,
"Heap::NewSpaceAllocationLimitAddress()");
Add(ExternalReference::new_space_allocation_top_address().address(),
UNCLASSIFIED,
- 9,
+ 10,
"Heap::NewSpaceAllocationTopAddress()");
#ifdef ENABLE_DEBUGGER_SUPPORT
Add(ExternalReference::debug_break().address(),
UNCLASSIFIED,
- 10,
+ 11,
"Debug::Break()");
Add(ExternalReference::debug_step_in_fp_address().address(),
UNCLASSIFIED,
- 11,
+ 12,
"Debug::step_in_fp_addr()");
#endif
Add(ExternalReference::double_fp_operation(Token::ADD).address(),
UNCLASSIFIED,
- 12,
+ 13,
"add_two_doubles");
Add(ExternalReference::double_fp_operation(Token::SUB).address(),
UNCLASSIFIED,
- 13,
+ 14,
"sub_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MUL).address(),
UNCLASSIFIED,
- 14,
+ 15,
"mul_two_doubles");
Add(ExternalReference::double_fp_operation(Token::DIV).address(),
UNCLASSIFIED,
- 15,
+ 16,
"div_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MOD).address(),
UNCLASSIFIED,
- 16,
+ 17,
"mod_two_doubles");
Add(ExternalReference::compare_doubles().address(),
UNCLASSIFIED,
- 17,
+ 18,
"compare_doubles");
#ifdef V8_NATIVE_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
UNCLASSIFIED,
- 18,
+ 19,
"NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
Add(ExternalReference::re_check_stack_guard_state().address(),
UNCLASSIFIED,
- 19,
+ 20,
"RegExpMacroAssembler*::CheckStackGuardState()");
Add(ExternalReference::re_grow_stack().address(),
UNCLASSIFIED,
- 20,
+ 21,
"NativeRegExpMacroAssembler::GrowStack()");
#endif
}
@@ -823,920 +548,674 @@
}
-//------------------------------------------------------------------------------
-// Implementation of Serializer
-
-
-// Helper class to write the bytes of the serialized heap.
-
-class SnapshotWriter {
- public:
- SnapshotWriter() {
- len_ = 0;
- max_ = 8 << 10; // 8K initial size
- str_ = NewArray<byte>(max_);
- }
-
- ~SnapshotWriter() {
- DeleteArray(str_);
- }
-
- void GetBytes(byte** str, int* len) {
- *str = NewArray<byte>(len_);
- memcpy(*str, str_, len_);
- *len = len_;
- }
-
- void Reserve(int bytes, int pos);
-
- void PutC(char c) {
- InsertC(c, len_);
- }
-
- void PutInt(int i) {
- InsertInt(i, len_);
- }
-
- void PutAddress(Address p) {
- PutBytes(reinterpret_cast<byte*>(&p), sizeof(p));
- }
-
- void PutBytes(const byte* a, int size) {
- InsertBytes(a, len_, size);
- }
-
- void PutString(const char* s) {
- InsertString(s, len_);
- }
-
- int InsertC(char c, int pos) {
- Reserve(1, pos);
- str_[pos] = c;
- len_++;
- return pos + 1;
- }
-
- int InsertInt(int i, int pos) {
- return InsertBytes(reinterpret_cast<byte*>(&i), pos, sizeof(i));
- }
-
- int InsertBytes(const byte* a, int pos, int size) {
- Reserve(size, pos);
- memcpy(&str_[pos], a, size);
- len_ += size;
- return pos + size;
- }
-
- int InsertString(const char* s, int pos);
-
- int length() { return len_; }
-
- Address position() { return reinterpret_cast<Address>(&str_[len_]); }
-
- private:
- byte* str_; // the snapshot
- int len_; // the current length of str_
- int max_; // the allocated size of str_
-};
-
-
-void SnapshotWriter::Reserve(int bytes, int pos) {
- CHECK(0 <= pos && pos <= len_);
- while (len_ + bytes >= max_) {
- max_ *= 2;
- byte* old = str_;
- str_ = NewArray<byte>(max_);
- memcpy(str_, old, len_);
- DeleteArray(old);
- }
- if (pos < len_) {
- byte* old = str_;
- str_ = NewArray<byte>(max_);
- memcpy(str_, old, pos);
- memcpy(str_ + pos + bytes, old + pos, len_ - pos);
- DeleteArray(old);
- }
-}
-
-int SnapshotWriter::InsertString(const char* s, int pos) {
- int size = strlen(s);
- pos = InsertC('[', pos);
- pos = InsertInt(size, pos);
- pos = InsertC(']', pos);
- return InsertBytes(reinterpret_cast<const byte*>(s), pos, size);
-}
-
-
-class ReferenceUpdater: public ObjectVisitor {
- public:
- ReferenceUpdater(HeapObject* obj, Serializer* serializer)
- : obj_address_(obj->address()),
- serializer_(serializer),
- reference_encoder_(serializer->reference_encoder_),
- offsets_(8),
- addresses_(8),
- offsets_32_bit_(0),
- data_32_bit_(0) {
- }
-
- virtual void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; ++p) {
- if ((*p)->IsHeapObject()) {
- offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
- Address a = serializer_->GetSavedAddress(HeapObject::cast(*p));
- addresses_.Add(a);
- }
- }
- }
-
- virtual void VisitCodeTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Address encoded_target = serializer_->GetSavedAddress(target);
- // All calls and jumps are to code objects that encode into 32 bits.
- offsets_32_bit_.Add(rinfo->target_address_address() - obj_address_);
- uint32_t small_target =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(encoded_target));
- ASSERT(reinterpret_cast<uintptr_t>(encoded_target) == small_target);
- data_32_bit_.Add(small_target);
- }
-
-
- virtual void VisitExternalReferences(Address* start, Address* end) {
- for (Address* p = start; p < end; ++p) {
- uint32_t code = reference_encoder_->Encode(*p);
- CHECK(*p == NULL ? code == 0 : code != 0);
- offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
- addresses_.Add(reinterpret_cast<Address>(code));
- }
- }
-
- virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
- Address target = rinfo->target_address();
- uint32_t encoding = reference_encoder_->Encode(target);
- CHECK(target == NULL ? encoding == 0 : encoding != 0);
- offsets_.Add(rinfo->target_address_address() - obj_address_);
- addresses_.Add(reinterpret_cast<Address>(encoding));
- }
-
- void Update(Address start_address) {
- for (int i = 0; i < offsets_.length(); i++) {
- memcpy(start_address + offsets_[i], &addresses_[i], sizeof(Address));
- }
- for (int i = 0; i < offsets_32_bit_.length(); i++) {
- memcpy(start_address + offsets_32_bit_[i], &data_32_bit_[i],
- sizeof(uint32_t));
- }
- }
-
- private:
- Address obj_address_;
- Serializer* serializer_;
- ExternalReferenceEncoder* reference_encoder_;
- List<int> offsets_;
- List<Address> addresses_;
- // Some updates are 32-bit even on a 64-bit platform.
- // We keep a separate list of them on 64-bit platforms.
- List<int> offsets_32_bit_;
- List<uint32_t> data_32_bit_;
-};
-
-
-// Helper functions for a map of encoded heap object addresses.
-static uint32_t HeapObjectHash(HeapObject* key) {
- uint32_t low32bits = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key));
- return low32bits >> 2;
-}
-
-
-static bool MatchHeapObject(void* key1, void* key2) {
- return key1 == key2;
-}
-
-
-Serializer::Serializer()
- : global_handles_(4),
- saved_addresses_(MatchHeapObject) {
- root_ = true;
- roots_ = 0;
- objects_ = 0;
- reference_encoder_ = NULL;
- writer_ = new SnapshotWriter();
- for (int i = 0; i <= LAST_SPACE; i++) {
- allocator_[i] = new SimulatedHeapSpace();
- }
-}
-
-
-Serializer::~Serializer() {
- for (int i = 0; i <= LAST_SPACE; i++) {
- delete allocator_[i];
- }
- if (reference_encoder_) delete reference_encoder_;
- delete writer_;
-}
-
-
bool Serializer::serialization_enabled_ = false;
+bool Serializer::too_late_to_enable_now_ = false;
+Deserializer::Deserializer(SnapshotByteSource* source)
+ : source_(source),
+ external_reference_decoder_(NULL) {
+}
+
+
+// This routine both allocates a new object, and also keeps
+// track of where objects have been allocated so that we can
+// fix back references when deserializing.
+Address Deserializer::Allocate(int space_index, Space* space, int size) {
+ Address address;
+ if (!SpaceIsLarge(space_index)) {
+ ASSERT(!SpaceIsPaged(space_index) ||
+ size <= Page::kPageSize - Page::kObjectStartOffset);
+ Object* new_allocation;
+ if (space_index == NEW_SPACE) {
+ new_allocation = reinterpret_cast<NewSpace*>(space)->AllocateRaw(size);
+ } else {
+ new_allocation = reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
+ }
+ HeapObject* new_object = HeapObject::cast(new_allocation);
+ ASSERT(!new_object->IsFailure());
+ address = new_object->address();
+ high_water_[space_index] = address + size;
+ } else {
+ ASSERT(SpaceIsLarge(space_index));
+ ASSERT(size > Page::kPageSize - Page::kObjectStartOffset);
+ LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
+ Object* new_allocation;
+ if (space_index == kLargeData) {
+ new_allocation = lo_space->AllocateRaw(size);
+ } else if (space_index == kLargeFixedArray) {
+ new_allocation = lo_space->AllocateRawFixedArray(size);
+ } else {
+ ASSERT_EQ(kLargeCode, space_index);
+ new_allocation = lo_space->AllocateRawCode(size);
+ }
+ ASSERT(!new_allocation->IsFailure());
+ HeapObject* new_object = HeapObject::cast(new_allocation);
+ // Record all large objects in the same space.
+ address = new_object->address();
+ high_water_[LO_SPACE] = address + size;
+ }
+ last_object_address_ = address;
+ return address;
+}
+
+
+// This returns the address of an object that has been described in the
+// snapshot as being offset bytes back in a particular space.
+HeapObject* Deserializer::GetAddressFromEnd(int space) {
+ int offset = source_->GetInt();
+ ASSERT(!SpaceIsLarge(space));
+ offset <<= kObjectAlignmentBits;
+ return HeapObject::FromAddress(high_water_[space] - offset);
+}
+
+
+// This returns the address of an object that has been described in the
+// snapshot as being offset bytes into a particular space.
+HeapObject* Deserializer::GetAddressFromStart(int space) {
+ int offset = source_->GetInt();
+ if (SpaceIsLarge(space)) {
+ // Large spaces have one object per 'page'.
+ return HeapObject::FromAddress(pages_[LO_SPACE][offset]);
+ }
+ offset <<= kObjectAlignmentBits;
+ if (space == NEW_SPACE) {
+ // New space has only one space - numbered 0.
+ return HeapObject::FromAddress(pages_[space][0] + offset);
+ }
+ ASSERT(SpaceIsPaged(space));
+ int page_of_pointee = offset >> Page::kPageSizeBits;
+ Address object_address = pages_[space][page_of_pointee] +
+ (offset & Page::kPageAlignmentMask);
+ return HeapObject::FromAddress(object_address);
+}
+
+
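(Editorial sketch of the from-start decoding above for paged spaces: the encoded
offset is un-shifted by the alignment bits, the high bits select a recorded page
start, and the low bits index into that page. The constants here are illustrative,
not V8's actual values.)

    #include <cassert>
    #include <vector>

    typedef unsigned char* Address;

    const int kObjectAlignmentBits = 2;  // pretend 4-byte alignment
    const int kPageSizeBits = 13;        // pretend 8K pages
    const int kPageAlignmentMask = (1 << kPageSizeBits) - 1;

    // 'encoded' is the byte offset of the object within the space,
    // shifted right by the alignment bits (as the snapshot stores it).
    Address FromStart(const std::vector<Address>& pages, int encoded) {
      int offset = encoded << kObjectAlignmentBits;
      int page_of_pointee = offset >> kPageSizeBits;
      return pages[page_of_pointee] + (offset & kPageAlignmentMask);
    }

    int main() {
      unsigned char page0[1 << kPageSizeBits];
      unsigned char page1[1 << kPageSizeBits];
      std::vector<Address> pages;
      pages.push_back(page0);
      pages.push_back(page1);
      // An object 16 bytes into the second page: byte offset 8192 + 16.
      int encoded = ((1 << kPageSizeBits) + 16) >> kObjectAlignmentBits;
      assert(FromStart(pages, encoded) == page1 + 16);
      return 0;
    }
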
+void Deserializer::Deserialize() {
+ // Don't GC while deserializing - just expand the heap.
+ AlwaysAllocateScope always_allocate;
+ // Don't use the free lists while deserializing.
+ LinearAllocationScope allocate_linearly;
+ // No active threads.
+ ASSERT_EQ(NULL, ThreadState::FirstInUse());
+ // No active handles.
+ ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
+ ASSERT_EQ(NULL, external_reference_decoder_);
+ external_reference_decoder_ = new ExternalReferenceDecoder();
+ Heap::IterateRoots(this, VISIT_ONLY_STRONG);
+ ASSERT(source_->AtEOF());
+ delete external_reference_decoder_;
+ external_reference_decoder_ = NULL;
+}
+
+
+// This is called on the roots. It is the driver of the deserialization
+// process. It is also called on the body of each function.
+void Deserializer::VisitPointers(Object** start, Object** end) {
+ // The space must be new space. Any other space would cause ReadChunk to try
+ // to update the remembered set using NULL as the address.
+ ReadChunk(start, end, NEW_SPACE, NULL);
+}
+
+
+// This routine allocates a new object and writes it into the pointer
+// provided before reading the object's body.
+// The reason for this eager write-back is that otherwise the object is
+// written very late, which means the ByteArray map is not set up by the
+// time we need to use it to mark the space at the end of a page free (by
+// making it into a byte array).
+void Deserializer::ReadObject(int space_number,
+ Space* space,
+ Object** write_back) {
+ int size = source_->GetInt() << kObjectAlignmentBits;
+ Address address = Allocate(space_number, space, size);
+ *write_back = HeapObject::FromAddress(address);
+ Object** current = reinterpret_cast<Object**>(address);
+ Object** limit = current + (size >> kPointerSizeLog2);
+ ReadChunk(current, limit, space_number, address);
+}
+
+
+#define ONE_CASE_PER_SPACE(base_tag) \
+ case (base_tag) + NEW_SPACE: /* NOLINT */ \
+ case (base_tag) + OLD_POINTER_SPACE: /* NOLINT */ \
+ case (base_tag) + OLD_DATA_SPACE: /* NOLINT */ \
+ case (base_tag) + CODE_SPACE: /* NOLINT */ \
+ case (base_tag) + MAP_SPACE: /* NOLINT */ \
+ case (base_tag) + CELL_SPACE: /* NOLINT */ \
+ case (base_tag) + kLargeData: /* NOLINT */ \
+ case (base_tag) + kLargeCode: /* NOLINT */ \
+ case (base_tag) + kLargeFixedArray: /* NOLINT */
+
+
+void Deserializer::ReadChunk(Object** current,
+ Object** limit,
+ int space,
+ Address address) {
+ while (current < limit) {
+ int data = source_->Get();
+ switch (data) {
+#define RAW_CASE(index, size) \
+ case RAW_DATA_SERIALIZATION + index: { \
+ byte* raw_data_out = reinterpret_cast<byte*>(current); \
+ source_->CopyRaw(raw_data_out, size); \
+ current = reinterpret_cast<Object**>(raw_data_out + size); \
+ break; \
+ }
+ COMMON_RAW_LENGTHS(RAW_CASE)
+#undef RAW_CASE
+ case RAW_DATA_SERIALIZATION: {
+ int size = source_->GetInt();
+ byte* raw_data_out = reinterpret_cast<byte*>(current);
+ source_->CopyRaw(raw_data_out, size);
+ current = reinterpret_cast<Object**>(raw_data_out + size);
+ break;
+ }
+ case OBJECT_SERIALIZATION + NEW_SPACE: {
+ ReadObject(NEW_SPACE, Heap::new_space(), current);
+ if (space != NEW_SPACE) {
+ Heap::RecordWrite(address, static_cast<int>(
+ reinterpret_cast<Address>(current) - address));
+ }
+ current++;
+ break;
+ }
+ case OBJECT_SERIALIZATION + OLD_DATA_SPACE:
+ ReadObject(OLD_DATA_SPACE, Heap::old_data_space(), current++);
+ break;
+ case OBJECT_SERIALIZATION + OLD_POINTER_SPACE:
+ ReadObject(OLD_POINTER_SPACE, Heap::old_pointer_space(), current++);
+ break;
+ case OBJECT_SERIALIZATION + MAP_SPACE:
+ ReadObject(MAP_SPACE, Heap::map_space(), current++);
+ break;
+ case OBJECT_SERIALIZATION + CODE_SPACE:
+ ReadObject(CODE_SPACE, Heap::code_space(), current++);
+ LOG(LogCodeObject(current[-1]));
+ break;
+ case OBJECT_SERIALIZATION + CELL_SPACE:
+ ReadObject(CELL_SPACE, Heap::cell_space(), current++);
+ break;
+ case OBJECT_SERIALIZATION + kLargeData:
+ ReadObject(kLargeData, Heap::lo_space(), current++);
+ break;
+ case OBJECT_SERIALIZATION + kLargeCode:
+ ReadObject(kLargeCode, Heap::lo_space(), current++);
+ LOG(LogCodeObject(current[-1]));
+ break;
+ case OBJECT_SERIALIZATION + kLargeFixedArray:
+ ReadObject(kLargeFixedArray, Heap::lo_space(), current++);
+ break;
+ case CODE_OBJECT_SERIALIZATION + kLargeCode: {
+ Object* new_code_object = NULL;
+ ReadObject(kLargeCode, Heap::lo_space(), &new_code_object);
+ Code* code_object = reinterpret_cast<Code*>(new_code_object);
+ LOG(LogCodeObject(code_object));
+ // Setting a branch/call to another code object from code.
+ Address location_of_branch_data = reinterpret_cast<Address>(current);
+ Assembler::set_target_at(location_of_branch_data,
+ code_object->instruction_start());
+ location_of_branch_data += Assembler::kCallTargetSize;
+ current = reinterpret_cast<Object**>(location_of_branch_data);
+ break;
+ }
+ case CODE_OBJECT_SERIALIZATION + CODE_SPACE: {
+ Object* new_code_object = NULL;
+ ReadObject(CODE_SPACE, Heap::code_space(), &new_code_object);
+ Code* code_object = reinterpret_cast<Code*>(new_code_object);
+ LOG(LogCodeObject(code_object));
+ // Setting a branch/call to another code object from code.
+ Address location_of_branch_data = reinterpret_cast<Address>(current);
+ Assembler::set_target_at(location_of_branch_data,
+ code_object->instruction_start());
+ location_of_branch_data += Assembler::kCallTargetSize;
+ current = reinterpret_cast<Object**>(location_of_branch_data);
+ break;
+ }
+ ONE_CASE_PER_SPACE(BACKREF_SERIALIZATION) {
+ // Write a backreference to an object we unpacked earlier.
+ int backref_space = (data & kSpaceMask);
+ if (backref_space == NEW_SPACE && space != NEW_SPACE) {
+ Heap::RecordWrite(address, static_cast<int>(
+ reinterpret_cast<Address>(current) - address));
+ }
+ *current++ = GetAddressFromEnd(backref_space);
+ break;
+ }
+ ONE_CASE_PER_SPACE(REFERENCE_SERIALIZATION) {
+ // Write a reference to an object we unpacked earlier.
+ int reference_space = (data & kSpaceMask);
+ if (reference_space == NEW_SPACE && space != NEW_SPACE) {
+ Heap::RecordWrite(address, static_cast<int>(
+ reinterpret_cast<Address>(current) - address));
+ }
+ *current++ = GetAddressFromStart(reference_space);
+ break;
+ }
+#define COMMON_REFS_CASE(index, reference_space, address) \
+ case REFERENCE_SERIALIZATION + index: { \
+ ASSERT(SpaceIsPaged(reference_space)); \
+ Address object_address = \
+ pages_[reference_space][0] + (address << kObjectAlignmentBits); \
+ *current++ = HeapObject::FromAddress(object_address); \
+ break; \
+ }
+ COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
+#undef COMMON_REFS_CASE
+ ONE_CASE_PER_SPACE(CODE_BACKREF_SERIALIZATION) {
+ int backref_space = (data & kSpaceMask);
+ // Can't use Code::cast because heap is not set up yet and assertions
+ // will fail.
+ Code* code_object =
+ reinterpret_cast<Code*>(GetAddressFromEnd(backref_space));
+ // Setting a branch/call to previously decoded code object from code.
+ Address location_of_branch_data = reinterpret_cast<Address>(current);
+ Assembler::set_target_at(location_of_branch_data,
+ code_object->instruction_start());
+ location_of_branch_data += Assembler::kCallTargetSize;
+ current = reinterpret_cast<Object**>(location_of_branch_data);
+ break;
+ }
+ ONE_CASE_PER_SPACE(CODE_REFERENCE_SERIALIZATION) {
+ int backref_space = (data & kSpaceMask);
+ // Can't use Code::cast because heap is not set up yet and assertions
+ // will fail.
+ Code* code_object =
+ reinterpret_cast<Code*>(GetAddressFromStart(backref_space));
+ // Setting a branch/call to previously decoded code object from code.
+ Address location_of_branch_data = reinterpret_cast<Address>(current);
+ Assembler::set_target_at(location_of_branch_data,
+ code_object->instruction_start());
+ location_of_branch_data += Assembler::kCallTargetSize;
+ current = reinterpret_cast<Object**>(location_of_branch_data);
+ break;
+ }
+ case EXTERNAL_REFERENCE_SERIALIZATION: {
+ int reference_id = source_->GetInt();
+ Address address = external_reference_decoder_->Decode(reference_id);
+ *current++ = reinterpret_cast<Object*>(address);
+ break;
+ }
+ case EXTERNAL_BRANCH_TARGET_SERIALIZATION: {
+ int reference_id = source_->GetInt();
+ Address address = external_reference_decoder_->Decode(reference_id);
+ Address location_of_branch_data = reinterpret_cast<Address>(current);
+ Assembler::set_external_target_at(location_of_branch_data, address);
+ location_of_branch_data += Assembler::kExternalTargetSize;
+ current = reinterpret_cast<Object**>(location_of_branch_data);
+ break;
+ }
+ case START_NEW_PAGE_SERIALIZATION: {
+ int space = source_->Get();
+ pages_[space].Add(last_object_address_);
+ break;
+ }
+ case NATIVES_STRING_RESOURCE: {
+ int index = source_->Get();
+ Vector<const char> source_vector = Natives::GetScriptSource(index);
+ NativesExternalStringResource* resource =
+ new NativesExternalStringResource(source_vector.start());
+ *current++ = reinterpret_cast<Object*>(resource);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+ ASSERT_EQ(current, limit);
+}
+
+
+void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
+ const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7;
+ for (int shift = max_shift; shift > 0; shift -= 7) {
+ if (integer >= static_cast<uintptr_t>(1u) << shift) {
+ Put(((integer >> shift) & 0x7f) | 0x80, "IntPart");
+ }
+ }
+ PutSection(integer & 0x7f, "IntLastPart");
+}
+
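(Editorial sketch of the integer encoding PutInt emits above: big-endian base-128,
with the high bit set on every byte except the last. The decoder below is an assumed
mirror image — the snapshot source's actual GetInt is not shown in this patch.)

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Encode, mirroring SnapshotByteSink::PutInt above.
    void PutInt(std::vector<uint8_t>* out, uintptr_t integer) {
      const int max_shift = ((sizeof(uintptr_t) * 8) / 7) * 7;
      for (int shift = max_shift; shift > 0; shift -= 7) {
        if (integer >= static_cast<uintptr_t>(1u) << shift) {
          out->push_back(((integer >> shift) & 0x7f) | 0x80);
        }
      }
      out->push_back(integer & 0x7f);
    }

    // Decode: shift in 7 bits per byte until the continuation bit is clear.
    uintptr_t GetInt(const uint8_t** p) {
      uintptr_t value = 0;
      uint8_t b;
      do {
        b = *(*p)++;
        value = (value << 7) | (b & 0x7f);
      } while (b & 0x80);
      return value;
    }

    int main() {
      std::vector<uint8_t> bytes;
      PutInt(&bytes, 0);
      PutInt(&bytes, 127);     // one byte
      PutInt(&bytes, 128);     // two bytes: 0x81 0x00
      PutInt(&bytes, 300000);  // three bytes
      const uint8_t* p = bytes.data();
      assert(GetInt(&p) == 0);
      assert(GetInt(&p) == 127);
      assert(GetInt(&p) == 128);
      assert(GetInt(&p) == 300000);
      return 0;
    }
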
#ifdef DEBUG
-static const int kMaxTagLength = 32;
+
+void Deserializer::Synchronize(const char* tag) {
+ int data = source_->Get();
+ // If this assert fails, the number of GC roots differed between
+ // serialization and deserialization.
+ ASSERT_EQ(SYNCHRONIZE, data);
+ do {
+ int character = source_->Get();
+ if (character == 0) break;
+ if (FLAG_debug_serialization) {
+ PrintF("%c", character);
+ }
+ } while (true);
+ if (FLAG_debug_serialization) {
+ PrintF("\n");
+ }
+}
+
void Serializer::Synchronize(const char* tag) {
- if (FLAG_debug_serialization) {
- int length = strlen(tag);
- ASSERT(length <= kMaxTagLength);
- writer_->PutC('S');
- writer_->PutInt(length);
- writer_->PutBytes(reinterpret_cast<const byte*>(tag), length);
- }
+ sink_->Put(SYNCHRONIZE, tag);
+ int character;
+ do {
+ character = *tag++;
+ sink_->PutSection(character, "TagCharacter");
+ } while (character != 0);
}
+
#endif
-
-void Serializer::InitializeAllocators() {
+Serializer::Serializer(SnapshotByteSink* sink)
+ : sink_(sink),
+ current_root_index_(0),
+ external_reference_encoder_(NULL) {
for (int i = 0; i <= LAST_SPACE; i++) {
- allocator_[i]->InitEmptyHeap(static_cast<AllocationSpace>(i));
+ fullness_[i] = 0;
}
}
-bool Serializer::IsVisited(HeapObject* obj) {
- HashMap::Entry* entry =
- saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
- return entry != NULL;
-}
-
-
-Address Serializer::GetSavedAddress(HeapObject* obj) {
- HashMap::Entry* entry =
- saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
- ASSERT(entry != NULL);
- return reinterpret_cast<Address>(entry->value);
-}
-
-
-void Serializer::SaveAddress(HeapObject* obj, Address addr) {
- HashMap::Entry* entry =
- saved_addresses_.Lookup(obj, HeapObjectHash(obj), true);
- entry->value = addr;
-}
-
-
void Serializer::Serialize() {
// No active threads.
CHECK_EQ(NULL, ThreadState::FirstInUse());
// No active or weak handles.
CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
- // We need a counter function during serialization to resolve the
- // references to counters in the code on the heap.
- CHECK(StatsTable::HasCounterFunction());
- CHECK(enabled());
- InitializeAllocators();
- reference_encoder_ = new ExternalReferenceEncoder();
- PutHeader();
- Heap::IterateRoots(this);
- PutLog();
- PutContextStack();
- Disable();
+ CHECK_EQ(NULL, external_reference_encoder_);
+ // We don't support serializing installed extensions.
+ for (RegisteredExtension* ext = RegisteredExtension::first_extension();
+ ext != NULL;
+ ext = ext->next()) {
+ CHECK_NE(v8::INSTALLED, ext->state());
+ }
+ external_reference_encoder_ = new ExternalReferenceEncoder();
+ Heap::IterateRoots(this, VISIT_ONLY_STRONG);
+ delete external_reference_encoder_;
+ external_reference_encoder_ = NULL;
+ SerializationAddressMapper::Zap();
}
-void Serializer::Finalize(byte** str, int* len) {
- writer_->GetBytes(str, len);
-}
-
-
-// Serialize objects by writing them into the stream.
-
void Serializer::VisitPointers(Object** start, Object** end) {
- bool root = root_;
- root_ = false;
- for (Object** p = start; p < end; ++p) {
- bool serialized;
- Address a = Encode(*p, &serialized);
- if (root) {
- roots_++;
- // If the object was not just serialized,
- // write its encoded address instead.
- if (!serialized) PutEncodedAddress(a);
- }
- }
- root_ = root;
-}
-
-
-void Serializer::VisitCodeTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- bool serialized;
- Encode(target, &serialized);
-}
-
-
-class GlobalHandlesRetriever: public ObjectVisitor {
- public:
- explicit GlobalHandlesRetriever(List<Object**>* handles)
- : global_handles_(handles) {}
-
- virtual void VisitPointers(Object** start, Object** end) {
- for (; start != end; ++start) {
- global_handles_->Add(start);
- }
- }
-
- private:
- List<Object**>* global_handles_;
-};
-
-
-void Serializer::PutFlags() {
- writer_->PutC('F');
- List<const char*>* argv = FlagList::argv();
- writer_->PutInt(argv->length());
- writer_->PutC('[');
- for (int i = 0; i < argv->length(); i++) {
- if (i > 0) writer_->PutC('|');
- writer_->PutString((*argv)[i]);
- DeleteArray((*argv)[i]);
- }
- writer_->PutC(']');
- flags_end_ = writer_->length();
- delete argv;
-}
-
-
-void Serializer::PutHeader() {
- PutFlags();
- writer_->PutC('D');
-#ifdef DEBUG
- writer_->PutC(FLAG_debug_serialization ? '1' : '0');
-#else
- writer_->PutC('0');
-#endif
-#ifdef V8_NATIVE_REGEXP
- writer_->PutC('N');
-#else // Interpreted regexp
- writer_->PutC('I');
-#endif
- // Write sizes of paged memory spaces. Allocate extra space for the old
- // and code spaces, because objects in new space will be promoted to them.
- writer_->PutC('S');
- writer_->PutC('[');
- writer_->PutInt(Heap::old_pointer_space()->Size() +
- Heap::new_space()->Size());
- writer_->PutC('|');
- writer_->PutInt(Heap::old_data_space()->Size() + Heap::new_space()->Size());
- writer_->PutC('|');
- writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size());
- writer_->PutC('|');
- writer_->PutInt(Heap::map_space()->Size());
- writer_->PutC('|');
- writer_->PutInt(Heap::cell_space()->Size());
- writer_->PutC(']');
- // Write global handles.
- writer_->PutC('G');
- writer_->PutC('[');
- GlobalHandlesRetriever ghr(&global_handles_);
- GlobalHandles::IterateRoots(&ghr);
- for (int i = 0; i < global_handles_.length(); i++) {
- writer_->PutC('N');
- }
- writer_->PutC(']');
-}
-
-
-void Serializer::PutLog() {
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (FLAG_log_code) {
- Logger::TearDown();
- int pos = writer_->InsertC('L', flags_end_);
- bool exists;
- Vector<const char> log = ReadFile(FLAG_logfile, &exists);
- writer_->InsertString(log.start(), pos);
- log.Dispose();
- }
-#endif
-}
-
-
-static int IndexOf(const List<Object**>& list, Object** element) {
- for (int i = 0; i < list.length(); i++) {
- if (list[i] == element) return i;
- }
- return -1;
-}
-
-
-void Serializer::PutGlobalHandleStack(const List<Handle<Object> >& stack) {
- writer_->PutC('[');
- writer_->PutInt(stack.length());
- for (int i = stack.length() - 1; i >= 0; i--) {
- writer_->PutC('|');
- int gh_index = IndexOf(global_handles_, stack[i].location());
- CHECK_GE(gh_index, 0);
- writer_->PutInt(gh_index);
- }
- writer_->PutC(']');
-}
-
-
-void Serializer::PutContextStack() {
- List<Context*> contexts(2);
- while (HandleScopeImplementer::instance()->HasSavedContexts()) {
- Context* context =
- HandleScopeImplementer::instance()->RestoreContext();
- contexts.Add(context);
- }
- for (int i = contexts.length() - 1; i >= 0; i--) {
- HandleScopeImplementer::instance()->SaveContext(contexts[i]);
- }
- writer_->PutC('C');
- writer_->PutC('[');
- writer_->PutInt(contexts.length());
- if (!contexts.is_empty()) {
- Object** start = reinterpret_cast<Object**>(&contexts.first());
- VisitPointers(start, start + contexts.length());
- }
- writer_->PutC(']');
-}
-
-void Serializer::PutEncodedAddress(Address addr) {
- writer_->PutC('P');
- writer_->PutAddress(addr);
-}
-
-
-Address Serializer::Encode(Object* o, bool* serialized) {
- *serialized = false;
- if (o->IsSmi()) {
- return reinterpret_cast<Address>(o);
- } else {
- HeapObject* obj = HeapObject::cast(o);
- if (IsVisited(obj)) {
- return GetSavedAddress(obj);
+ for (Object** current = start; current < end; current++) {
+ if ((*current)->IsSmi()) {
+ sink_->Put(RAW_DATA_SERIALIZATION, "RawData");
+ sink_->PutInt(kPointerSize, "length");
+ for (int i = 0; i < kPointerSize; i++) {
+ sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
+ }
} else {
- // First visit: serialize the object.
- *serialized = true;
- return PutObject(obj);
+ SerializeObject(*current, TAGGED_REPRESENTATION);
}
}
}
-Address Serializer::PutObject(HeapObject* obj) {
- Map* map = obj->map();
- InstanceType type = map->instance_type();
- int size = obj->SizeFromMap(map);
-
- // Simulate the allocation of obj to predict where it will be
- // allocated during deserialization.
- Address addr = Allocate(obj).Encode();
-
- SaveAddress(obj, addr);
-
- if (type == CODE_TYPE) {
- LOG(CodeMoveEvent(obj->address(), addr));
- }
-
- // Write out the object prologue: type, size, and simulated address of obj.
- writer_->PutC('[');
- CHECK_EQ(0, static_cast<int>(size & kObjectAlignmentMask));
- writer_->PutInt(type);
- writer_->PutInt(size >> kObjectAlignmentBits);
- PutEncodedAddress(addr); // encodes AllocationSpace
-
- // Visit all the pointers in the object other than the map. This
- // will recursively serialize any as-yet-unvisited objects.
- obj->Iterate(this);
-
- // Mark end of recursively embedded objects, start of object body.
- writer_->PutC('|');
- // Write out the raw contents of the object. No compression, but
- // fast to deserialize.
- writer_->PutBytes(obj->address(), size);
- // Update pointers and external references in the written object.
- ReferenceUpdater updater(obj, this);
- obj->Iterate(&updater);
- updater.Update(writer_->position() - size);
-
-#ifdef DEBUG
- if (FLAG_debug_serialization) {
- // Write out the object epilogue to catch synchronization errors.
- PutEncodedAddress(addr);
- writer_->PutC(']');
- }
-#endif
-
- objects_++;
- return addr;
-}
-
-
-RelativeAddress Serializer::Allocate(HeapObject* obj) {
- // Find out which AllocationSpace 'obj' is in.
- AllocationSpace s;
- bool found = false;
- for (int i = FIRST_SPACE; !found && i <= LAST_SPACE; i++) {
- s = static_cast<AllocationSpace>(i);
- found = Heap::InSpace(obj, s);
- }
- CHECK(found);
- int size = obj->Size();
- if (s == NEW_SPACE) {
- if (size > Heap::MaxObjectSizeInPagedSpace()) {
- s = LO_SPACE;
- } else {
- OldSpace* space = Heap::TargetSpace(obj);
- ASSERT(space == Heap::old_pointer_space() ||
- space == Heap::old_data_space());
- s = (space == Heap::old_pointer_space()) ?
- OLD_POINTER_SPACE :
- OLD_DATA_SPACE;
+void Serializer::SerializeObject(
+ Object* o,
+ ReferenceRepresentation reference_representation) {
+ CHECK(o->IsHeapObject());
+ HeapObject* heap_object = HeapObject::cast(o);
+ if (SerializationAddressMapper::IsMapped(heap_object)) {
+ int space = SpaceOfAlreadySerializedObject(heap_object);
+ int address = SerializationAddressMapper::MappedTo(heap_object);
+ int offset = CurrentAllocationAddress(space) - address;
+ bool from_start = true;
+ if (SpaceIsPaged(space)) {
+ if ((CurrentAllocationAddress(space) >> Page::kPageSizeBits) ==
+ (address >> Page::kPageSizeBits)) {
+ from_start = false;
+ address = offset;
+ }
+ } else if (space == NEW_SPACE) {
+ if (offset < address) {
+ from_start = false;
+ address = offset;
+ }
}
- }
- GCTreatment gc_treatment = DataObject;
- if (obj->IsFixedArray()) gc_treatment = PointerObject;
- else if (obj->IsCode()) gc_treatment = CodeObject;
- return allocator_[s]->Allocate(size, gc_treatment);
-}
-
-
-//------------------------------------------------------------------------------
-// Implementation of Deserializer
-
-
-static const int kInitArraySize = 32;
-
-
-Deserializer::Deserializer(const byte* str, int len)
- : reader_(str, len),
- map_pages_(kInitArraySize),
- cell_pages_(kInitArraySize),
- old_pointer_pages_(kInitArraySize),
- old_data_pages_(kInitArraySize),
- code_pages_(kInitArraySize),
- large_objects_(kInitArraySize),
- global_handles_(4) {
- root_ = true;
- roots_ = 0;
- objects_ = 0;
- reference_decoder_ = NULL;
-#ifdef DEBUG
- expect_debug_information_ = false;
-#endif
-}
-
-
-Deserializer::~Deserializer() {
- if (reference_decoder_) delete reference_decoder_;
-}
-
-
-void Deserializer::ExpectEncodedAddress(Address expected) {
- Address a = GetEncodedAddress();
- USE(a);
- ASSERT(a == expected);
-}
-
-
-#ifdef DEBUG
-void Deserializer::Synchronize(const char* tag) {
- if (expect_debug_information_) {
- char buf[kMaxTagLength];
- reader_.ExpectC('S');
- int length = reader_.GetInt();
- ASSERT(length <= kMaxTagLength);
- reader_.GetBytes(reinterpret_cast<Address>(buf), length);
- ASSERT_EQ(strlen(tag), length);
- ASSERT(strncmp(tag, buf, length) == 0);
- }
-}
-#endif
-
-
-void Deserializer::Deserialize() {
- // No active threads.
- ASSERT_EQ(NULL, ThreadState::FirstInUse());
- // No active handles.
- ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
- reference_decoder_ = new ExternalReferenceDecoder();
- // By setting linear allocation only, we forbid the use of free list
- // allocation which is not predicted by SimulatedAddress.
- GetHeader();
- Heap::IterateRoots(this);
- GetContextStack();
-}
-
-
-void Deserializer::VisitPointers(Object** start, Object** end) {
- bool root = root_;
- root_ = false;
- for (Object** p = start; p < end; ++p) {
- if (root) {
- roots_++;
- // Read the next object or pointer from the stream
- // pointer in the stream.
- int c = reader_.GetC();
- if (c == '[') {
- *p = GetObject(); // embedded object
+ // If we are actually dealing with real offsets (and not a numbering of
+ // all objects) then we should shift out the bits that are always 0.
+ if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
+ if (reference_representation == CODE_TARGET_REPRESENTATION) {
+ if (from_start) {
+ sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer");
+ sink_->PutInt(address, "address");
} else {
- ASSERT(c == 'P'); // pointer to previously serialized object
- *p = Resolve(reader_.GetAddress());
+ sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer");
+ sink_->PutInt(address, "address");
}
} else {
- // A pointer internal to a HeapObject that we've already
- // read: resolve it to a true address (or Smi)
- *p = Resolve(reinterpret_cast<Address>(*p));
- }
- }
- root_ = root;
-}
-
-
-void Deserializer::VisitCodeTarget(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- // On all platforms, the encoded code object address is only 32 bits.
- Address encoded_address = reinterpret_cast<Address>(Memory::uint32_at(
- reinterpret_cast<Address>(rinfo->target_object_address())));
- Code* target_object = reinterpret_cast<Code*>(Resolve(encoded_address));
- rinfo->set_target_address(target_object->instruction_start());
-}
-
-
-void Deserializer::VisitExternalReferences(Address* start, Address* end) {
- for (Address* p = start; p < end; ++p) {
- uint32_t code = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*p));
- *p = reference_decoder_->Decode(code);
- }
-}
-
-
-void Deserializer::VisitRuntimeEntry(RelocInfo* rinfo) {
- uint32_t* pc = reinterpret_cast<uint32_t*>(rinfo->target_address_address());
- uint32_t encoding = *pc;
- Address target = reference_decoder_->Decode(encoding);
- rinfo->set_target_address(target);
-}
-
-
-void Deserializer::GetFlags() {
- reader_.ExpectC('F');
- int argc = reader_.GetInt() + 1;
- char** argv = NewArray<char*>(argc);
- reader_.ExpectC('[');
- for (int i = 1; i < argc; i++) {
- if (i > 1) reader_.ExpectC('|');
- argv[i] = reader_.GetString();
- }
- reader_.ExpectC(']');
- has_log_ = false;
- for (int i = 1; i < argc; i++) {
- if (strcmp("--log_code", argv[i]) == 0) {
- has_log_ = true;
- } else if (strcmp("--nouse_ic", argv[i]) == 0) {
- FLAG_use_ic = false;
- } else if (strcmp("--debug_code", argv[i]) == 0) {
- FLAG_debug_code = true;
- } else if (strcmp("--nolazy", argv[i]) == 0) {
- FLAG_lazy = false;
- }
- DeleteArray(argv[i]);
- }
-
- DeleteArray(argv);
-}
-
-
-void Deserializer::GetLog() {
- if (has_log_) {
- reader_.ExpectC('L');
- char* snapshot_log = reader_.GetString();
-#ifdef ENABLE_LOGGING_AND_PROFILING
- if (FLAG_log_code) {
- LOG(Preamble(snapshot_log));
- }
-#endif
- DeleteArray(snapshot_log);
- }
-}
-
-
-static void InitPagedSpace(PagedSpace* space,
- int capacity,
- List<Page*>* page_list) {
- if (!space->EnsureCapacity(capacity)) {
- V8::FatalProcessOutOfMemory("InitPagedSpace");
- }
- PageIterator it(space, PageIterator::ALL_PAGES);
- while (it.has_next()) page_list->Add(it.next());
-}
-
-
-void Deserializer::GetHeader() {
- reader_.ExpectC('D');
-#ifdef DEBUG
- expect_debug_information_ = reader_.GetC() == '1';
-#else
- // In release mode, don't attempt to read a snapshot containing
- // synchronization tags.
- if (reader_.GetC() != '0') FATAL("Snapshot contains synchronization tags.");
-#endif
-#ifdef V8_NATIVE_REGEXP
- reader_.ExpectC('N');
-#else // Interpreted regexp.
- reader_.ExpectC('I');
-#endif
- // Ensure sufficient capacity in paged memory spaces to avoid growth
- // during deserialization.
- reader_.ExpectC('S');
- reader_.ExpectC('[');
- InitPagedSpace(Heap::old_pointer_space(),
- reader_.GetInt(),
- &old_pointer_pages_);
- reader_.ExpectC('|');
- InitPagedSpace(Heap::old_data_space(), reader_.GetInt(), &old_data_pages_);
- reader_.ExpectC('|');
- InitPagedSpace(Heap::code_space(), reader_.GetInt(), &code_pages_);
- reader_.ExpectC('|');
- InitPagedSpace(Heap::map_space(), reader_.GetInt(), &map_pages_);
- reader_.ExpectC('|');
- InitPagedSpace(Heap::cell_space(), reader_.GetInt(), &cell_pages_);
- reader_.ExpectC(']');
- // Create placeholders for global handles later to be fill during
- // IterateRoots.
- reader_.ExpectC('G');
- reader_.ExpectC('[');
- int c = reader_.GetC();
- while (c != ']') {
- ASSERT(c == 'N');
- global_handles_.Add(GlobalHandles::Create(NULL).location());
- c = reader_.GetC();
- }
-}
-
-
-void Deserializer::GetGlobalHandleStack(List<Handle<Object> >* stack) {
- reader_.ExpectC('[');
- int length = reader_.GetInt();
- for (int i = 0; i < length; i++) {
- reader_.ExpectC('|');
- int gh_index = reader_.GetInt();
- stack->Add(global_handles_[gh_index]);
- }
- reader_.ExpectC(']');
-}
-
-
-void Deserializer::GetContextStack() {
- reader_.ExpectC('C');
- CHECK_EQ(reader_.GetC(), '[');
- int count = reader_.GetInt();
- List<Context*> entered_contexts(count);
- if (count > 0) {
- Object** start = reinterpret_cast<Object**>(&entered_contexts.first());
- VisitPointers(start, start + count);
- }
- reader_.ExpectC(']');
- for (int i = 0; i < count; i++) {
- HandleScopeImplementer::instance()->SaveContext(entered_contexts[i]);
- }
-}
-
-
-Address Deserializer::GetEncodedAddress() {
- reader_.ExpectC('P');
- return reader_.GetAddress();
-}
-
-
-Object* Deserializer::GetObject() {
- // Read the prologue: type, size and encoded address.
- InstanceType type = static_cast<InstanceType>(reader_.GetInt());
- int size = reader_.GetInt() << kObjectAlignmentBits;
- Address a = GetEncodedAddress();
-
- // Get a raw object of the right size in the right space.
- AllocationSpace space = GetSpace(a);
- Object* o;
- if (IsLargeExecutableObject(a)) {
- o = Heap::lo_space()->AllocateRawCode(size);
- } else if (IsLargeFixedArray(a)) {
- o = Heap::lo_space()->AllocateRawFixedArray(size);
- } else {
- AllocationSpace retry_space = (space == NEW_SPACE)
- ? Heap::TargetSpaceId(type)
- : space;
- o = Heap::AllocateRaw(size, space, retry_space);
- }
- ASSERT(!o->IsFailure());
- // Check that the simulation of heap allocation was correct.
- ASSERT(o == Resolve(a));
-
- // Read any recursively embedded objects.
- int c = reader_.GetC();
- while (c == '[') {
- GetObject();
- c = reader_.GetC();
- }
- ASSERT(c == '|');
-
- HeapObject* obj = reinterpret_cast<HeapObject*>(o);
- // Read the uninterpreted contents of the object after the map
- reader_.GetBytes(obj->address(), size);
-#ifdef DEBUG
- if (expect_debug_information_) {
- // Read in the epilogue to check that we're still synchronized
- ExpectEncodedAddress(a);
- reader_.ExpectC(']');
- }
-#endif
-
- // Resolve the encoded pointers we just read in.
- // Same as obj->Iterate(this), but doesn't rely on the map pointer being set.
- VisitPointer(reinterpret_cast<Object**>(obj->address()));
- obj->IterateBody(type, size, this);
-
- if (type == CODE_TYPE) {
- LOG(CodeMoveEvent(a, obj->address()));
- }
- objects_++;
- return o;
-}
-
-
-static inline Object* ResolvePaged(int page_index,
- int page_offset,
- PagedSpace* space,
- List<Page*>* page_list) {
- ASSERT(page_index < page_list->length());
- Address address = (*page_list)[page_index]->OffsetToAddress(page_offset);
- return HeapObject::FromAddress(address);
-}
-
-
-template<typename T>
-void ConcatReversed(List<T>* target, const List<T>& source) {
- for (int i = source.length() - 1; i >= 0; i--) {
- target->Add(source[i]);
- }
-}
-
-
-Object* Deserializer::Resolve(Address encoded) {
- Object* o = reinterpret_cast<Object*>(encoded);
- if (o->IsSmi()) return o;
-
- // Encoded addresses of HeapObjects always have 'HeapObject' tags.
- ASSERT(o->IsHeapObject());
- switch (GetSpace(encoded)) {
- // For Map space and Old space, we cache the known Pages in map_pages,
- // old_pointer_pages and old_data_pages. Even though MapSpace keeps a list
- // of page addresses, we don't rely on it since GetObject uses AllocateRaw,
- // and that appears not to update the page list.
- case MAP_SPACE:
- return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
- Heap::map_space(), &map_pages_);
- case CELL_SPACE:
- return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
- Heap::cell_space(), &cell_pages_);
- case OLD_POINTER_SPACE:
- return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
- Heap::old_pointer_space(), &old_pointer_pages_);
- case OLD_DATA_SPACE:
- return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
- Heap::old_data_space(), &old_data_pages_);
- case CODE_SPACE:
- return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
- Heap::code_space(), &code_pages_);
- case NEW_SPACE:
- return HeapObject::FromAddress(Heap::NewSpaceStart() +
- NewSpaceOffset(encoded));
- case LO_SPACE:
- // Cache the known large_objects, allocated one per 'page'
- int index = LargeObjectIndex(encoded);
- if (index >= large_objects_.length()) {
- int new_object_count =
- Heap::lo_space()->PageCount() - large_objects_.length();
- List<Object*> new_objects(new_object_count);
- LargeObjectIterator it(Heap::lo_space());
- for (int i = 0; i < new_object_count; i++) {
- new_objects.Add(it.next());
+ CHECK_EQ(TAGGED_REPRESENTATION, reference_representation);
+ if (from_start) {
+#define COMMON_REFS_CASE(tag, common_space, common_offset) \
+ if (space == common_space && address == common_offset) { \
+ sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer"); \
+ } else /* NOLINT */
+ COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
+#undef COMMON_REFS_CASE
+ { /* NOLINT */
+ sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
+ sink_->PutInt(address, "address");
}
-#ifdef DEBUG
- for (int i = large_objects_.length() - 1; i >= 0; i--) {
- ASSERT(it.next() == large_objects_[i]);
- }
-#endif
- ConcatReversed(&large_objects_, new_objects);
- ASSERT(index < large_objects_.length());
+ } else {
+ sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer");
+ sink_->PutInt(address, "address");
}
- return large_objects_[index]; // s.page_offset() is ignored.
+ }
+ } else {
+ // Object has not yet been serialized. Serialize it here.
+ ObjectSerializer serializer(this,
+ heap_object,
+ sink_,
+ reference_representation);
+ serializer.Serialize();
+ }
+}
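
For orientation, here is the arithmetic the branch above performs for an
already-serialized object in a paged space. This is a standalone sketch, not
V8 code; the 8 KB page size and 4-byte object alignment are assumptions
typical of 32-bit builds of this era, and both vary by configuration.

    #include <cstdio>

    int main() {
      const int kPageSizeBits = 13;        // assumed: 8 KB pages
      const int kObjectAlignmentBits = 2;  // assumed: 4-byte alignment

      int current = 0x4080;  // simulated allocation top of the space
      int address = 0x4010;  // simulated address of the serialized object
      int offset = current - address;

      // Same page iff the addresses agree above the page-offset bits.
      bool from_start =
          (current >> kPageSizeBits) != (address >> kPageSizeBits);
      int payload = from_start ? address : offset;
      payload >>= kObjectAlignmentBits;  // drop the always-zero low bits
      std::printf("%s, payload %d\n",
                  from_start ? "REFERENCE" : "BACKREF",
                  payload);  // prints "BACKREF, payload 28"
      return 0;
    }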
+
+
+void Serializer::ObjectSerializer::Serialize() {
+ int space = Serializer::SpaceOfObject(object_);
+ int size = object_->Size();
+
+ if (reference_representation_ == TAGGED_REPRESENTATION) {
+ sink_->Put(OBJECT_SERIALIZATION + space, "ObjectSerialization");
+ } else {
+ CHECK_EQ(CODE_TARGET_REPRESENTATION, reference_representation_);
+ sink_->Put(CODE_OBJECT_SERIALIZATION + space, "ObjectSerialization");
+ }
+ sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
+
+ // Mark this object as already serialized.
+ bool start_new_page;
+ SerializationAddressMapper::Map(
+ object_,
+ serializer_->Allocate(space, size, &start_new_page));
+ if (start_new_page) {
+ sink_->Put(START_NEW_PAGE_SERIALIZATION, "NewPage");
+ sink_->PutSection(space, "NewPageSpace");
+ }
+
+ // Serialize the map (first word of the object).
+ serializer_->SerializeObject(object_->map(), TAGGED_REPRESENTATION);
+
+ // Serialize the rest of the object.
+ CHECK_EQ(0, bytes_processed_so_far_);
+ bytes_processed_so_far_ = kPointerSize;
+ object_->IterateBody(object_->map()->instance_type(), size, this);
+ OutputRawData(object_->address() + size);
+}
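
Taken together, Serialize() above emits one self-describing record per
object. A sketch of the record shape (annotations only; the numeric tag
values come from the serialize.h hunk further down):

    // tag:   OBJECT_SERIALIZATION + space (or the CODE_OBJECT_ variant)
    // size:  object size in words, as a variable-length int
    // page:  START_NEW_PAGE_SERIALIZATION + space byte pair, emitted only
    //        when Allocate() opened a fresh simulated page
    // map:   a nested record or back-reference for object_->map()
    // body:  pointer records and raw data, interleaved up to object end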
+
+
+void Serializer::ObjectSerializer::VisitPointers(Object** start,
+ Object** end) {
+ Object** current = start;
+ while (current < end) {
+ while (current < end && (*current)->IsSmi()) current++;
+ if (current < end) OutputRawData(reinterpret_cast<Address>(current));
+
+ while (current < end && !(*current)->IsSmi()) {
+ serializer_->SerializeObject(*current, TAGGED_REPRESENTATION);
+ bytes_processed_so_far_ += kPointerSize;
+ current++;
+ }
+ }
+}
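
The loop above partitions the slots into alternating runs: consecutive smis
are left for OutputRawData() to copy verbatim, and each heap pointer gets its
own record. A toy standalone model of that partitioning (not V8 code; the
low bit stands in for the smi/heap-object tag):

    #include <cstdio>

    int main() {
      int slots[] = {2, 4, 7, 9, 6, 3};  // low bit 0: "smi", 1: "pointer"
      const int n = sizeof(slots) / sizeof(slots[0]);
      int i = 0;
      while (i < n) {
        int run_start = i;
        while (i < n && (slots[i] & 1) == 0) i++;  // skip a run of "smis"
        if (i > run_start) std::printf("raw: %d words\n", i - run_start);
        while (i < n && (slots[i] & 1) != 0) {     // a run of "pointers"
          std::printf("serialize slot %d\n", slots[i]);
          i++;
        }
      }
      return 0;
    }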
+
+
+void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
+ Address* end) {
+ Address references_start = reinterpret_cast<Address>(start);
+ OutputRawData(references_start);
+
+ for (Address* current = start; current < end; current++) {
+ sink_->Put(EXTERNAL_REFERENCE_SERIALIZATION, "ExternalReference");
+ int reference_id = serializer_->EncodeExternalReference(*current);
+ sink_->PutInt(reference_id, "reference id");
+ }
+ bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize);
+}
+
+
+void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
+ Address target_start = rinfo->target_address_address();
+ OutputRawData(target_start);
+ Address target = rinfo->target_address();
+ uint32_t encoding = serializer_->EncodeExternalReference(target);
+ CHECK(target == NULL ? encoding == 0 : encoding != 0);
+ sink_->Put(EXTERNAL_BRANCH_TARGET_SERIALIZATION, "ExternalReference");
+ sink_->PutInt(encoding, "reference id");
+ bytes_processed_so_far_ += Assembler::kExternalTargetSize;
+}
+
+
+void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
+ CHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Address target_start = rinfo->target_address_address();
+ OutputRawData(target_start);
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ serializer_->SerializeObject(target, CODE_TARGET_REPRESENTATION);
+ bytes_processed_so_far_ += Assembler::kCallTargetSize;
+}
+
+
+void Serializer::ObjectSerializer::VisitExternalAsciiString(
+ v8::String::ExternalAsciiStringResource** resource_pointer) {
+ Address references_start = reinterpret_cast<Address>(resource_pointer);
+ OutputRawData(references_start);
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ Object* source = Heap::natives_source_cache()->get(i);
+ if (!source->IsUndefined()) {
+ ExternalAsciiString* string = ExternalAsciiString::cast(source);
+ typedef v8::String::ExternalAsciiStringResource Resource;
+ Resource* resource = string->resource();
+ if (resource == *resource_pointer) {
+ sink_->Put(NATIVES_STRING_RESOURCE, "NativesStringResource");
+ sink_->PutSection(i, "NativesStringResourceEnd");
+ bytes_processed_so_far_ += sizeof(resource);
+ return;
+ }
+ }
+ }
+ // One of the strings in the natives cache should match the resource. We
+ // can't serialize any other kinds of external strings.
+ UNREACHABLE();
+}
+
+
+void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
+ Address object_start = object_->address();
+ int up_to_offset = static_cast<int>(up_to - object_start);
+ int skipped = up_to_offset - bytes_processed_so_far_;
+ // This assert will fail if the reloc info gives us the
+ // target_address_address locations in non-ascending order. Luckily that
+ // doesn't happen.
+ ASSERT(skipped >= 0);
+ if (skipped != 0) {
+ Address base = object_start + bytes_processed_so_far_;
+#define RAW_CASE(index, length) \
+ if (skipped == length) { \
+ sink_->PutSection(RAW_DATA_SERIALIZATION + index, "RawDataFixed"); \
+ } else /* NOLINT */
+ COMMON_RAW_LENGTHS(RAW_CASE)
+#undef RAW_CASE
+ { /* NOLINT */
+ sink_->Put(RAW_DATA_SERIALIZATION, "RawData");
+ sink_->PutInt(skipped, "length");
+ }
+ for (int i = 0; i < skipped; i++) {
+ unsigned int data = base[i];
+ sink_->PutSection(data, "Byte");
+ }
+ bytes_processed_so_far_ += skipped;
+ }
+}
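
For reference, the COMMON_RAW_LENGTHS(RAW_CASE) cascade above expands to a
plain if/else chain; with the length table from the serialize.h hunk below,
the first and last cases look like this:

    if (skipped == 1) {
      sink_->PutSection(RAW_DATA_SERIALIZATION + 1, "RawDataFixed");
    } /* ... cases for lengths 2-8 and 12, 16, ..., 32 elided ... */
    else if (skipped == 36) {
      sink_->PutSection(RAW_DATA_SERIALIZATION + 15, "RawDataFixed");
    } else {
      sink_->Put(RAW_DATA_SERIALIZATION, "RawData");
      sink_->PutInt(skipped, "length");
    }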
+
+
+int Serializer::SpaceOfObject(HeapObject* object) {
+ for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
+ AllocationSpace s = static_cast<AllocationSpace>(i);
+ if (Heap::InSpace(object, s)) {
+ if (i == LO_SPACE) {
+ if (object->IsCode()) {
+ return kLargeCode;
+ } else if (object->IsFixedArray()) {
+ return kLargeFixedArray;
+ } else {
+ return kLargeData;
+ }
+ }
+ return i;
+ }
}
UNREACHABLE();
- return NULL;
+ return 0;
+}
+
+
+int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) {
+ for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
+ AllocationSpace s = static_cast<AllocationSpace>(i);
+ if (Heap::InSpace(object, s)) {
+ return i;
+ }
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
+int Serializer::Allocate(int space, int size, bool* new_page) {
+ CHECK(space >= 0 && space < kNumberOfSpaces);
+ if (SpaceIsLarge(space)) {
+ // In large object space we merely number the objects sequentially instead
+ // of simulating an allocation address.
+ *new_page = true;
+ return fullness_[LO_SPACE]++;
+ }
+ *new_page = false;
+ if (fullness_[space] == 0) {
+ *new_page = true;
+ }
+ if (SpaceIsPaged(space)) {
+ // Paged spaces are a little special. We encode their addresses as if the
+ // pages were all contiguous and each page were filled up in the range
+ // 0 - Page::kObjectAreaSize. In practice the pages may not be contiguous
+ // and allocation does not start at offset 0 in the page, but this scheme
+ // means the deserializer can get the page number quickly by shifting the
+ // serialized address.
+ CHECK(IsPowerOf2(Page::kPageSize));
+ int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
+ CHECK(size <= Page::kObjectAreaSize);
+ if (used_in_this_page + size > Page::kObjectAreaSize) {
+ *new_page = true;
+ fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
+ }
+ }
+ int allocation_address = fullness_[space];
+ fullness_[space] = allocation_address + size;
+ return allocation_address;
}
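
The paged-space bookkeeping above can be exercised in isolation. A minimal
standalone model (the 8 KB page size and the object-area size are assumed
values; the real constants come from spaces.h and differ per build):

    #include <cstdio>

    int main() {
      const int kPageSize = 8192;        // assumed
      const int kObjectAreaSize = 8000;  // assumed: page minus header
      int fullness = 7996;               // simulated bytes used so far
      int size = 64;                     // next object, already aligned

      int used_in_this_page = fullness & (kPageSize - 1);
      bool new_page = used_in_this_page + size > kObjectAreaSize;
      if (new_page) {
        // RoundUp: skip the unusable page tail, start a fresh page.
        fullness = (fullness + kPageSize - 1) & ~(kPageSize - 1);
      }
      int address = fullness;
      fullness += size;
      // Prints "address 8192 (page 1), new_page 1": the deserializer
      // recovers the page number with a shift by kPageSizeBits.
      std::printf("address %d (page %d), new_page %d\n",
                  address, address / kPageSize, new_page);
      return 0;
    }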
diff --git a/src/serialize.h b/src/serialize.h
index c901480..96bd751 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -108,237 +108,290 @@
};
-// A Serializer recursively visits objects to construct a serialized
-// representation of the Heap stored in a string. Serialization is
-// destructive. We use a similar mechanism to the GC to ensure that
-// each object is visited once, namely, we modify the map pointer of
-// each visited object to contain the relative address in the
-// appropriate space where that object will be allocated when the heap
-// is deserialized.
-
-
-// Helper classes defined in serialize.cc.
-class RelativeAddress;
-class SimulatedHeapSpace;
-class SnapshotWriter;
-class ReferenceUpdater;
-
-
-class Serializer: public ObjectVisitor {
+class SnapshotByteSource {
public:
- Serializer();
+ SnapshotByteSource(const byte* array, int length)
+ : data_(array), length_(length), position_(0) { }
- virtual ~Serializer();
+ bool HasMore() { return position_ < length_; }
- // Serialize the current state of the heap. This operation destroys the
- // heap contents and the contents of the roots into the heap.
- void Serialize();
-
- // Returns the serialized buffer. Ownership is transferred to the
- // caller. Only the destructor and getters may be called after this call.
- void Finalize(byte** str, int* len);
-
- int roots() { return roots_; }
- int objects() { return objects_; }
-
-#ifdef DEBUG
- // insert "tag" into the serialized stream
- virtual void Synchronize(const char* tag);
-#endif
-
- static bool enabled() { return serialization_enabled_; }
-
- static void Enable() { serialization_enabled_ = true; }
- static void Disable() { serialization_enabled_ = false; }
-
- private:
- friend class ReferenceUpdater;
-
- virtual void VisitPointers(Object** start, Object** end);
- virtual void VisitCodeTarget(RelocInfo* rinfo);
- bool IsVisited(HeapObject* obj);
-
- Address GetSavedAddress(HeapObject* obj);
-
- void SaveAddress(HeapObject* obj, Address addr);
-
- void PutEncodedAddress(Address addr);
- // Write the global flags into the file.
- void PutFlags();
- // Write global information into the header of the file.
- void PutHeader();
- // Write the contents of the log into the file.
- void PutLog();
- // Serialize 'obj', and return its encoded RelativeAddress.
- Address PutObject(HeapObject* obj);
- // Write a stack of handles to the file bottom first.
- void PutGlobalHandleStack(const List<Handle<Object> >& stack);
- // Write the context stack into the file.
- void PutContextStack();
-
- // Return the encoded RelativeAddress where this object will be
- // allocated on deserialization. On the first visit of 'o',
- // serialize its contents. On return, *serialized will be true iff
- // 'o' has just been serialized.
- Address Encode(Object* o, bool* serialized);
-
- // Simulate the allocation of 'obj', returning the address where it will
- // be allocated on deserialization
- RelativeAddress Allocate(HeapObject* obj);
-
- void InitializeAllocators();
-
- SnapshotWriter* writer_;
- bool root_; // serializing a root?
- int roots_; // number of roots visited
- int objects_; // number of objects serialized
-
- static bool serialization_enabled_;
-
- int flags_end_; // The position right after the flags.
-
- // An array of per-space SimulatedHeapSpaces used as memory allocators.
- SimulatedHeapSpace* allocator_[LAST_SPACE+1];
- // A list of global handles at serialization time.
- List<Object**> global_handles_;
-
- ExternalReferenceEncoder* reference_encoder_;
-
- HashMap saved_addresses_;
-
- DISALLOW_COPY_AND_ASSIGN(Serializer);
-};
-
-// Helper class to read the bytes of the serialized heap.
-
-class SnapshotReader {
- public:
- SnapshotReader(const byte* str, int len): str_(str), end_(str + len) {}
-
- void ExpectC(char expected) {
- int c = GetC();
- USE(c);
- ASSERT(c == expected);
+ int Get() {
+ ASSERT(position_ < length_);
+ return data_[position_++];
}
- int GetC() {
- if (str_ >= end_) return EOF;
- return *str_++;
+ void CopyRaw(byte* to, int number_of_bytes) {
+ memcpy(to, data_ + position_, number_of_bytes);
+ position_ += number_of_bytes;
}
int GetInt() {
- int result;
- GetBytes(reinterpret_cast<Address>(&result), sizeof(result));
- return result;
+ // The first iteration is unrolled to fast-path the really small ints.
+ int snapshot_byte = Get();
+ if ((snapshot_byte & 0x80) == 0) {
+ return snapshot_byte;
+ }
+ int accumulator = (snapshot_byte & 0x7f) << 7;
+ while (true) {
+ snapshot_byte = Get();
+ if ((snapshot_byte & 0x80) == 0) {
+ return accumulator | snapshot_byte;
+ }
+ accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7;
+ }
+ UNREACHABLE();
+ return accumulator;
}
- Address GetAddress() {
- Address result;
- GetBytes(reinterpret_cast<Address>(&result), sizeof(result));
- return result;
- }
-
- void GetBytes(Address a, int size) {
- ASSERT(str_ + size <= end_);
- memcpy(a, str_, size);
- str_ += size;
- }
-
- char* GetString() {
- ExpectC('[');
- int size = GetInt();
- ExpectC(']');
- char* s = NewArray<char>(size + 1);
- GetBytes(reinterpret_cast<Address>(s), size);
- s[size] = 0;
- return s;
+ bool AtEOF() {
+ return position_ == length_;
}
private:
- const byte* str_;
- const byte* end_;
+ const byte* data_;
+ int length_;
+ int position_;
};
+// It is very common to have a reference to the object at word 10 in space 2,
+// the object at word 5 in space 2 and the object at word 28 in space 4. This
+// only works for objects in the first page of a space.
+#define COMMON_REFERENCE_PATTERNS(f) \
+ f(kNumberOfSpaces, 2, 10) \
+ f(kNumberOfSpaces + 1, 2, 5) \
+ f(kNumberOfSpaces + 2, 4, 28) \
+ f(kNumberOfSpaces + 3, 2, 21) \
+ f(kNumberOfSpaces + 4, 2, 98) \
+ f(kNumberOfSpaces + 5, 2, 67) \
+ f(kNumberOfSpaces + 6, 4, 132)
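
Each row feeds the COMMON_REFS_CASE macro used in serialize.cc above. The
first row, f(kNumberOfSpaces, 2, 10), expands to a one-byte fast path:

    if (space == 2 && address == 10) {
      // One byte instead of a tag byte plus a varint for this common case.
      sink_->PutSection(kNumberOfSpaces + REFERENCE_SERIALIZATION, "RefSer");
    } else /* next pattern, ending in the generic two-part encoding */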
+
+#define COMMON_RAW_LENGTHS(f) \
+ f(1, 1) \
+ f(2, 2) \
+ f(3, 3) \
+ f(4, 4) \
+ f(5, 5) \
+ f(6, 6) \
+ f(7, 7) \
+ f(8, 8) \
+ f(9, 12) \
+ f(10, 16) \
+ f(11, 20) \
+ f(12, 24) \
+ f(13, 28) \
+ f(14, 32) \
+ f(15, 36)
+
+// The SerDes class is the common superclass of Serializer and Deserializer;
+// it holds the constants and methods the two share.
+class SerDes: public ObjectVisitor {
+ protected:
+ enum DataType {
+ RAW_DATA_SERIALIZATION = 0,
+ // And 15 common raw lengths.
+ OBJECT_SERIALIZATION = 16,
+ // One variant per space.
+ CODE_OBJECT_SERIALIZATION = 25,
+ // One per space (only code spaces in use).
+ EXTERNAL_REFERENCE_SERIALIZATION = 34,
+ EXTERNAL_BRANCH_TARGET_SERIALIZATION = 35,
+ SYNCHRONIZE = 36,
+ START_NEW_PAGE_SERIALIZATION = 37,
+ NATIVES_STRING_RESOURCE = 38,
+ // Free: 39-47.
+ BACKREF_SERIALIZATION = 48,
+ // One per space, must be kSpaceMask aligned.
+ // Free: 57-63.
+ REFERENCE_SERIALIZATION = 64,
+ // One per space and common references. Must be kSpaceMask aligned.
+ CODE_BACKREF_SERIALIZATION = 80,
+ // One per space, must be kSpaceMask aligned.
+ // Free: 89-95.
+ CODE_REFERENCE_SERIALIZATION = 96
+ // One per space, must be kSpaceMask aligned.
+ // Free: 105-255.
+ };
+ static const int kLargeData = LAST_SPACE;
+ static const int kLargeCode = kLargeData + 1;
+ static const int kLargeFixedArray = kLargeCode + 1;
+ static const int kNumberOfSpaces = kLargeFixedArray + 1;
+
+ // A bitmask for getting the space out of an instruction.
+ static const int kSpaceMask = 15;
+
+ static inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
+ static inline bool SpaceIsPaged(int space) {
+ return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
+ }
+};
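
Because each *_SERIALIZATION family above starts on a kSpaceMask-aligned
value, an instruction byte splits cleanly into family and space bits. A
sketch of the arithmetic (the actual dispatch lives in the deserializer,
which this hunk does not show):

    int tag = REFERENCE_SERIALIZATION + 2;  // a reference into space 2
    int space = tag & kSpaceMask;           // == 2
    int family = tag & ~kSpaceMask;         // == REFERENCE_SERIALIZATION (64)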
+
+
// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
-
-class Deserializer: public ObjectVisitor {
+class Deserializer: public SerDes {
public:
- // Create a deserializer. The snapshot is held in str and has size len.
- Deserializer(const byte* str, int len);
+ // Create a deserializer from a snapshot byte source.
+ explicit Deserializer(SnapshotByteSource* source);
- virtual ~Deserializer();
-
- // Read the flags from the header of the file, and set those that
- // should be inherited from the snapshot.
- void GetFlags();
-
- // Read saved profiling information from the file and log it if required.
- void GetLog();
+ virtual ~Deserializer() { }
// Deserialize the snapshot into an empty heap.
void Deserialize();
-
- int roots() { return roots_; }
- int objects() { return objects_; }
-
#ifdef DEBUG
- // Check for the presence of "tag" in the serialized stream
virtual void Synchronize(const char* tag);
#endif
private:
virtual void VisitPointers(Object** start, Object** end);
- virtual void VisitCodeTarget(RelocInfo* rinfo);
- virtual void VisitExternalReferences(Address* start, Address* end);
- virtual void VisitRuntimeEntry(RelocInfo* rinfo);
- Address GetEncodedAddress();
+ virtual void VisitExternalReferences(Address* start, Address* end) {
+ UNREACHABLE();
+ }
- // Read other global information (except flags) from the header of the file.
- void GetHeader();
- // Read a stack of handles from the file bottom first.
- void GetGlobalHandleStack(List<Handle<Object> >* stack);
- // Read the context stack from the file.
- void GetContextStack();
+ virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
+ UNREACHABLE();
+ }
- Object* GetObject();
+ void ReadChunk(Object** start, Object** end, int space, Address address);
+ HeapObject* GetAddressFromStart(int space);
+ inline HeapObject* GetAddressFromEnd(int space);
+ Address Allocate(int space_number, Space* space, int size);
+ void ReadObject(int space_number, Space* space, Object** write_back);
- // Get the encoded address. In debug mode we make sure
- // it matches the given expectations.
- void ExpectEncodedAddress(Address expected);
+ // Keep track of the pages in the paged spaces.
+ // (In large object space we are keeping track of individual objects
+ // rather than pages.) In new space we just need the address of the
+ // first object and the others will flow from that.
+ List<Address> pages_[SerDes::kNumberOfSpaces];
- // Given an encoded address (the result of
- // RelativeAddress::Encode), return the object to which it points,
- // which will be either an Smi or a HeapObject in the current heap.
- Object* Resolve(Address encoded_address);
-
- SnapshotReader reader_;
- bool root_; // Deserializing a root?
- int roots_; // number of roots visited
- int objects_; // number of objects serialized
-
- bool has_log_; // The file has log information.
-
- // Resolve caches the following:
- List<Page*> map_pages_; // All pages in the map space.
- List<Page*> cell_pages_; // All pages in the cell space.
- List<Page*> old_pointer_pages_; // All pages in the old pointer space.
- List<Page*> old_data_pages_; // All pages in the old data space.
- List<Page*> code_pages_; // All pages in the code space.
- List<Object*> large_objects_; // All known large objects.
- // A list of global handles at deserialization time.
- List<Object**> global_handles_;
-
- ExternalReferenceDecoder* reference_decoder_;
-
-#ifdef DEBUG
- bool expect_debug_information_;
-#endif
+ SnapshotByteSource* source_;
+ ExternalReferenceDecoder* external_reference_decoder_;
+ // This is the address of the next object that will be allocated in each
+ // space. It is used to calculate the addresses of back-references.
+ Address high_water_[LAST_SPACE + 1];
+ // This is the address of the most recent object that was allocated. It
+ // is used to set the location of the new page when we encounter a
+ // START_NEW_PAGE_SERIALIZATION tag.
+ Address last_object_address_;
DISALLOW_COPY_AND_ASSIGN(Deserializer);
};
+
+class SnapshotByteSink {
+ public:
+ virtual ~SnapshotByteSink() { }
+ virtual void Put(int byte, const char* description) = 0;
+ virtual void PutSection(int byte, const char* description) {
+ Put(byte, description);
+ }
+ void PutInt(uintptr_t integer, const char* description);
+};
+
+
+class Serializer : public SerDes {
+ public:
+ explicit Serializer(SnapshotByteSink* sink);
+ // Serialize the current state of the heap. This operation destroys the
+ // heap contents.
+ void Serialize();
+ void VisitPointers(Object** start, Object** end);
+
+ static void Enable() {
+ if (!serialization_enabled_) {
+ ASSERT(!too_late_to_enable_now_);
+ }
+ serialization_enabled_ = true;
+ }
+
+ static void Disable() { serialization_enabled_ = false; }
+ // Call this when you have made use of the fact that there is no serialization
+ // going on.
+ static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
+ static bool enabled() { return serialization_enabled_; }
+#ifdef DEBUG
+ virtual void Synchronize(const char* tag);
+#endif
+
+ private:
+ enum ReferenceRepresentation {
+ TAGGED_REPRESENTATION, // A tagged object reference.
+ CODE_TARGET_REPRESENTATION // A reference to first instruction in target.
+ };
+ class ObjectSerializer : public ObjectVisitor {
+ public:
+ ObjectSerializer(Serializer* serializer,
+ Object* o,
+ SnapshotByteSink* sink,
+ ReferenceRepresentation representation)
+ : serializer_(serializer),
+ object_(HeapObject::cast(o)),
+ sink_(sink),
+ reference_representation_(representation),
+ bytes_processed_so_far_(0) { }
+ void Serialize();
+ void VisitPointers(Object** start, Object** end);
+ void VisitExternalReferences(Address* start, Address* end);
+ void VisitCodeTarget(RelocInfo* target);
+ void VisitRuntimeEntry(RelocInfo* reloc);
+ // Used for serializing the external strings that hold the natives source.
+ void VisitExternalAsciiString(
+ v8::String::ExternalAsciiStringResource** resource);
+ // We can't serialize a heap with external two-byte strings.
+ void VisitExternalTwoByteString(
+ v8::String::ExternalStringResource** resource) {
+ UNREACHABLE();
+ }
+
+ private:
+ void OutputRawData(Address up_to);
+
+ Serializer* serializer_;
+ HeapObject* object_;
+ SnapshotByteSink* sink_;
+ ReferenceRepresentation reference_representation_;
+ int bytes_processed_so_far_;
+ };
+
+ void SerializeObject(Object* o, ReferenceRepresentation representation);
+ void InitializeAllocators();
+ // This will return the space for an object. If the object is in large
+ // object space it may return kLargeCode or kLargeFixedArray in order
+ // to indicate to the deserializer what kind of large object allocation
+ // to make.
+ static int SpaceOfObject(HeapObject* object);
+ // This just returns the space of the object. It will return LO_SPACE
+ // for all large objects since you can't check the type of the object
+ // once the map has been used for the serialization address.
+ static int SpaceOfAlreadySerializedObject(HeapObject* object);
+ int Allocate(int space, int size, bool* new_page_started);
+ int CurrentAllocationAddress(int space) {
+ if (SpaceIsLarge(space)) space = LO_SPACE;
+ return fullness_[space];
+ }
+ int EncodeExternalReference(Address addr) {
+ return external_reference_encoder_->Encode(addr);
+ }
+
+ // Keep track of the fullness of each space in order to generate
+ // relative addresses for back references. Large objects are
+ // just numbered sequentially since relative addresses make no
+ // sense in large object space.
+ int fullness_[LAST_SPACE + 1];
+ SnapshotByteSink* sink_;
+ int current_root_index_;
+ ExternalReferenceEncoder* external_reference_encoder_;
+ static bool serialization_enabled_;
+ // Did we already make use of the fact that serialization was not enabled?
+ static bool too_late_to_enable_now_;
+
+ friend class ObjectSerializer;
+ friend class Deserializer;
+
+ DISALLOW_COPY_AND_ASSIGN(Serializer);
+};
+
} } // namespace v8::internal
#endif // V8_SERIALIZE_H_
diff --git a/src/location.h b/src/simulator.h
similarity index 71%
rename from src/location.h
rename to src/simulator.h
index 9702ce4..6f8cd5a 100644
--- a/src/location.h
+++ b/src/simulator.h
@@ -25,33 +25,17 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_LOCATION_H_
-#define V8_LOCATION_H_
+#ifndef V8_SIMULATOR_H_
+#define V8_SIMULATOR_H_
-#include "utils.h"
+#if V8_TARGET_ARCH_IA32
+#include "ia32/simulator-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/simulator-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/simulator-arm.h"
+#else
+#error Unsupported target architecture.
+#endif
-namespace v8 {
-namespace internal {
-
-class Location BASE_EMBEDDED {
- public:
- static Location Temporary() { return Location(TEMP); }
- static Location Nowhere() { return Location(NOWHERE); }
- static Location Constant() { return Location(CONSTANT); }
-
- bool is_temporary() { return type_ == TEMP; }
- bool is_nowhere() { return type_ == NOWHERE; }
- bool is_constant() { return type_ == CONSTANT; }
-
- private:
- enum Type { TEMP, NOWHERE, CONSTANT };
-
- explicit Location(Type type) : type_(type) {}
-
- Type type_;
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_LOCATION_H_
+#endif // V8_SIMULATOR_H_
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
index 9c66a50..c01baad 100644
--- a/src/snapshot-common.cc
+++ b/src/snapshot-common.cc
@@ -32,14 +32,15 @@
#include "api.h"
#include "serialize.h"
#include "snapshot.h"
+#include "platform.h"
namespace v8 {
namespace internal {
bool Snapshot::Deserialize(const byte* content, int len) {
- Deserializer des(content, len);
- des.GetFlags();
- return V8::Initialize(&des);
+ SnapshotByteSource source(content, len);
+ Deserializer deserializer(&source);
+ return V8::Initialize(&deserializer);
}
@@ -48,28 +49,49 @@
int len;
byte* str = ReadBytes(snapshot_file, &len);
if (!str) return false;
- bool result = Deserialize(str, len);
+ Deserialize(str, len);
DeleteArray(str);
- return result;
+ return true;
} else if (size_ > 0) {
- return Deserialize(data_, size_);
+ Deserialize(data_, size_);
+ return true;
}
return false;
}
+class FileByteSink : public SnapshotByteSink {
+ public:
+ explicit FileByteSink(const char* snapshot_file) {
+ fp_ = OS::FOpen(snapshot_file, "wb");
+ if (fp_ == NULL) {
+ PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
+ exit(1);
+ }
+ }
+ virtual ~FileByteSink() {
+ if (fp_ != NULL) {
+ fclose(fp_);
+ }
+ }
+ virtual void Put(int byte, const char* description) {
+ if (fp_ != NULL) {
+ fputc(byte, fp_);
+ }
+ }
+
+ private:
+ FILE* fp_;
+};
+
+
bool Snapshot::WriteToFile(const char* snapshot_file) {
- Serializer ser;
+ FileByteSink file(snapshot_file);
+ Serializer ser(&file);
ser.Serialize();
- byte* str;
- int len;
- ser.Finalize(&str, &len);
-
- int written = WriteBytes(snapshot_file, str, len);
-
- DeleteArray(str);
- return written == len;
+ return true;
}
+
} } // namespace v8::internal
diff --git a/src/spaces.cc b/src/spaces.cc
index bd58742..f3b6b9f 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -354,7 +354,7 @@
} else {
mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
}
- int alloced = *allocated;
+ int alloced = static_cast<int>(*allocated);
size_ += alloced;
Counters::memory_allocated.Increment(alloced);
return mem;
@@ -367,8 +367,8 @@
} else {
OS::Free(mem, length);
}
- Counters::memory_allocated.Decrement(length);
- size_ -= length;
+ Counters::memory_allocated.Decrement(static_cast<int>(length));
+ size_ -= static_cast<int>(length);
ASSERT(size_ >= 0);
}
@@ -387,7 +387,7 @@
// We are sure that we have mapped a block of requested addresses.
ASSERT(initial_chunk_->size() == requested);
LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested));
- size_ += requested;
+ size_ += static_cast<int>(requested);
return initial_chunk_->address();
}
@@ -397,8 +397,8 @@
// and the last page ends on the last page-aligned address before
// start+size. Page::kPageSize is a power of two so we can divide by
// shifting.
- return (RoundDown(start + size, Page::kPageSize)
- - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits;
+ return static_cast<int>((RoundDown(start + size, Page::kPageSize)
+ - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits);
}
@@ -412,7 +412,7 @@
if (size_ + static_cast<int>(chunk_size) > capacity_) {
// Request as many pages as we can.
chunk_size = capacity_ - size_;
- requested_pages = chunk_size >> Page::kPageSizeBits;
+ requested_pages = static_cast<int>(chunk_size >> Page::kPageSizeBits);
if (requested_pages <= 0) return Page::FromAddress(NULL);
}
@@ -445,7 +445,7 @@
if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
return Page::FromAddress(NULL);
}
- Counters::memory_allocated.Increment(size);
+ Counters::memory_allocated.Increment(static_cast<int>(size));
// So long as we correctly overestimated the number of chunks we should not
// run out of chunk ids.
@@ -466,7 +466,7 @@
ASSERT(InInitialChunk(start + size - 1));
if (!initial_chunk_->Commit(start, size, executable)) return false;
- Counters::memory_allocated.Increment(size);
+ Counters::memory_allocated.Increment(static_cast<int>(size));
return true;
}
@@ -478,7 +478,7 @@
ASSERT(InInitialChunk(start + size - 1));
if (!initial_chunk_->Uncommit(start, size)) return false;
- Counters::memory_allocated.Decrement(size);
+ Counters::memory_allocated.Decrement(static_cast<int>(size));
return true;
}
@@ -558,7 +558,7 @@
// TODO(1240712): VirtualMemory::Uncommit has a return value which
// is ignored here.
initial_chunk_->Uncommit(c.address(), c.size());
- Counters::memory_allocated.Decrement(c.size());
+ Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
} else {
LOG(DeleteEvent("PagedChunk", c.address()));
FreeRawMemory(c.address(), c.size());
@@ -1096,7 +1096,8 @@
void NewSpace::Shrink() {
int new_capacity = Max(InitialCapacity(), 2 * Size());
- int rounded_new_capacity = RoundUp(new_capacity, OS::AllocateAlignment());
+ int rounded_new_capacity =
+ RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
if (rounded_new_capacity < Capacity() &&
to_space_.ShrinkTo(rounded_new_capacity)) {
// Only shrink from space if we managed to shrink to space.
@@ -1234,7 +1235,7 @@
bool SemiSpace::Grow() {
// Double the semispace size but only up to maximum capacity.
int maximum_extra = maximum_capacity_ - capacity_;
- int extra = Min(RoundUp(capacity_, OS::AllocateAlignment()),
+ int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
maximum_extra);
if (!MemoryAllocator::CommitBlock(high(), extra, executable())) {
return false;
@@ -1527,7 +1528,9 @@
// correct size.
if (size_in_bytes > ByteArray::kAlignedSize) {
set_map(Heap::raw_unchecked_byte_array_map());
- ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
+ // Can't use ByteArray::cast because it fails during deserialization.
+ ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
+ this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
} else if (size_in_bytes == kPointerSize) {
set_map(Heap::raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
@@ -1535,7 +1538,8 @@
} else {
UNREACHABLE();
}
- ASSERT(Size() == size_in_bytes);
+ // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
+ // deserialization because the byte array map has not been set up yet.
}
@@ -1794,12 +1798,14 @@
while (it.has_next()) {
Page* p = it.next();
// Space below the relocation pointer is allocated.
- computed_size += p->mc_relocation_top - p->ObjectAreaStart();
+ computed_size +=
+ static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
if (it.has_next()) {
// Free the space at the top of the page. We cannot use
// p->mc_relocation_top after the call to Free (because Free will clear
// remembered set bits).
- int extra_size = p->ObjectAreaEnd() - p->mc_relocation_top;
+ int extra_size =
+ static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
if (extra_size > 0) {
int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
// The bytes we have just "freed" to add to the free list were
@@ -1828,13 +1834,16 @@
return AllocateInNextPage(current_page, size_in_bytes);
}
- // There is no next page in this space. Try free list allocation.
- int wasted_bytes;
- Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
- accounting_stats_.WasteBytes(wasted_bytes);
- if (!result->IsFailure()) {
- accounting_stats_.AllocateBytes(size_in_bytes);
- return HeapObject::cast(result);
+ // There is no next page in this space. Try free list allocation unless that
+ // is currently forbidden.
+ if (!Heap::linear_allocation()) {
+ int wasted_bytes;
+ Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
+ accounting_stats_.WasteBytes(wasted_bytes);
+ if (!result->IsFailure()) {
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ return HeapObject::cast(result);
+ }
}
// Free list allocation failed and there is no next page. Fail if we have
@@ -1862,7 +1871,8 @@
int size_in_bytes) {
ASSERT(current_page->next_page()->is_valid());
// Add the block at the top of this page to the free list.
- int free_size = current_page->ObjectAreaEnd() - allocation_info_.top;
+ int free_size =
+ static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
if (free_size > 0) {
int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
accounting_stats_.WasteBytes(wasted_bytes);
@@ -1962,7 +1972,7 @@
if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
const char* const txt =
reinterpret_cast<const char*>(it->rinfo()->data());
- flat_delta += it->rinfo()->pc() - prev_pc;
+ flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
if (txt[0] == ']') break; // End of nested comment
// A new comment
CollectCommentStatistics(it);
@@ -1990,7 +2000,7 @@
const byte* prev_pc = code->instruction_start();
while (!it.done()) {
if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
- delta += it.rinfo()->pc() - prev_pc;
+ delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
CollectCommentStatistics(&it);
prev_pc = it.rinfo()->pc();
}
@@ -1999,7 +2009,7 @@
ASSERT(code->instruction_start() <= prev_pc &&
prev_pc <= code->relocation_start());
- delta += code->relocation_start() - prev_pc;
+ delta += static_cast<int>(code->relocation_start() - prev_pc);
EnterComment("NoComment", delta);
}
}
@@ -2028,7 +2038,8 @@
int rset = Memory::int_at(rset_addr);
if (rset != 0) {
// Bits were set
- int intoff = rset_addr - p->address() - Page::kRSetOffset;
+ int intoff =
+ static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
int bitoff = 0;
for (; bitoff < kBitsPerInt; ++bitoff) {
if ((rset & (1 << bitoff)) != 0) {
@@ -2205,9 +2216,10 @@
while (it.has_next()) {
Page* page = it.next();
Address page_top = page->AllocationTop();
- computed_size += page_top - page->ObjectAreaStart();
+ computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
if (it.has_next()) {
- accounting_stats_.WasteBytes(page->ObjectAreaEnd() - page_top);
+ accounting_stats_.WasteBytes(
+ static_cast<int>(page->ObjectAreaEnd() - page_top));
}
}
@@ -2230,10 +2242,10 @@
return AllocateInNextPage(current_page, size_in_bytes);
}
- // There is no next page in this space. Try free list allocation.
- // The fixed space free list implicitly assumes that all free blocks
- // are of the fixed size.
- if (size_in_bytes == object_size_in_bytes_) {
+ // There is no next page in this space. Try free list allocation unless
+ // that is currently forbidden. The fixed space free list implicitly assumes
+ // that all free blocks are of the fixed size.
+ if (!Heap::linear_allocation()) {
Object* result = free_list_.Allocate();
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
@@ -2293,7 +2305,8 @@
int rset = Memory::int_at(rset_addr);
if (rset != 0) {
// Bits were set
- int intoff = rset_addr - p->address() - Page::kRSetOffset;
+ int intoff =
+ static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
int bitoff = 0;
for (; bitoff < kBitsPerInt; ++bitoff) {
if ((rset & (1 << bitoff)) != 0) {
@@ -2414,7 +2427,7 @@
int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
- int os_alignment = OS::AllocateAlignment();
+ int os_alignment = static_cast<int>(OS::AllocateAlignment());
if (os_alignment < Page::kPageSize)
size_in_bytes += (Page::kPageSize - os_alignment);
return size_in_bytes + Page::kObjectStartOffset;
@@ -2493,7 +2506,7 @@
return Failure::RetryAfterGC(requested_size, identity());
}
- size_ += chunk_size;
+ size_ += static_cast<int>(chunk_size);
page_count_++;
chunk->set_next(first_chunk_);
chunk->set_size(chunk_size);
@@ -2644,7 +2657,7 @@
if (object->IsCode()) {
LOG(CodeDeleteEvent(object->address()));
}
- size_ -= chunk_size;
+ size_ -= static_cast<int>(chunk_size);
page_count_--;
MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
LOG(DeleteEvent("LargeObjectChunk", chunk_address));
diff --git a/src/spaces.h b/src/spaces.h
index 9e1d873..75b992f 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -172,7 +172,7 @@
// Returns the offset of a given address to this page.
INLINE(int Offset(Address a)) {
- int offset = a - address();
+ int offset = static_cast<int>(a - address());
ASSERT_PAGE_OFFSET(offset);
return offset;
}
@@ -1116,7 +1116,9 @@
}
// The offset of an address from the beginning of the space.
- int SpaceOffsetForAddress(Address addr) { return addr - low(); }
+ int SpaceOffsetForAddress(Address addr) {
+ return static_cast<int>(addr - low());
+ }
// If we don't have this here then SemiSpace will be abstract. However
// it should never be called.
@@ -1255,7 +1257,7 @@
}
// Return the allocated bytes in the active semispace.
- virtual int Size() { return top() - bottom(); }
+ virtual int Size() { return static_cast<int>(top() - bottom()); }
// Return the current capacity of a semispace.
int Capacity() {
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 8c62a45..d1859a2 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -188,7 +188,7 @@
void StringStream::PrintObject(Object* o) {
o->ShortPrint(this);
if (o->IsString()) {
- if (String::cast(o)->length() <= String::kMaxMediumStringSize) {
+ if (String::cast(o)->length() <= String::kMaxShortPrintLength) {
return;
}
} else if (o->IsNumber() || o->IsOddball()) {
diff --git a/src/string.js b/src/string.js
index d2d6e96..4f9957a 100644
--- a/src/string.js
+++ b/src/string.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -180,7 +180,7 @@
}
return %CharFromCode(char_code);
}
- return %StringSlice(string, start, end);
+ return %SubString(string, start, end);
}
@@ -380,12 +380,19 @@
// Unfortunately, that means this code is nearly duplicated, here and in
// jsregexp.cc.
if (regexp.global) {
+ var numberOfCaptures = NUMBER_OF_CAPTURES(matchInfo) >> 1;
var previous = 0;
do {
- result.addSpecialSlice(previous, matchInfo[CAPTURE0]);
var startOfMatch = matchInfo[CAPTURE0];
+ result.addSpecialSlice(previous, startOfMatch);
previous = matchInfo[CAPTURE1];
- result.add(ApplyReplacementFunction(replace, matchInfo, subject));
+ if (numberOfCaptures == 1) {
+ var match = SubString(subject, startOfMatch, previous);
+ // Don't call directly to avoid exposing the built-in global object.
+ result.add(replace.call(null, match, startOfMatch, subject));
+ } else {
+ result.add(ApplyReplacementFunction(replace, matchInfo, subject));
+ }
// Can't use matchInfo any more from here, since the function could
// overwrite it.
// Continue with the next match.
@@ -810,10 +817,13 @@
var len = end - start;
if (len == 0) return;
var elements = this.elements;
- if (start >= 0 && len >= 0 && start < 0x80000 && len < 0x800) {
+ if (start < 0x80000 && len < 0x800) {
elements[elements.length] = (start << 11) + len;
} else {
- elements[elements.length] = SubString(this.special_string, start, end);
+ // 0 < len <= String::kMaxLength and Smi::kMaxValue >= String::kMaxLength,
+ // so -len is a smi.
+ elements[elements.length] = -len;
+ elements[elements.length] = start;
}
}
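
A worked example of the packed-slice encoding above, written as a C++ sketch
of the same arithmetic (in the JavaScript, the guards start < 0x80000 and
len < 0x800 keep the packed value within smi range):

    #include <cstdio>

    int main() {
      int start = 1000, len = 42;        // both within the guard limits
      int packed = (start << 11) + len;  // one array slot: 2048042
      std::printf("start %d, len %d\n", packed >> 11, packed & 0x7ff);
      // Larger slices use two slots instead: -len (the negative smi marks
      // this case), then start.
      return 0;
    }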
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index e10dc61..51d9ddb 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -735,16 +735,24 @@
Object* LoadCallbackProperty(Arguments args) {
+ ASSERT(args[0]->IsJSObject());
+ ASSERT(args[1]->IsJSObject());
AccessorInfo* callback = AccessorInfo::cast(args[2]);
Address getter_address = v8::ToCData<Address>(callback->getter());
v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
ASSERT(fun != NULL);
- v8::AccessorInfo info(args.arguments());
+ CustomArguments custom_args(callback->data(),
+ JSObject::cast(args[0]),
+ JSObject::cast(args[1]));
+ v8::AccessorInfo info(custom_args.end());
HandleScope scope;
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ state.set_external_callback(getter_address);
+#endif
result = fun(v8::Utils::ToLocal(args.at<String>(4)), info);
}
RETURN_IF_SCHEDULED_EXCEPTION();
@@ -768,6 +776,9 @@
{
// Leaving JavaScript.
VMState state(EXTERNAL);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ state.set_external_callback(setter_address);
+#endif
fun(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
}
RETURN_IF_SCHEDULED_EXCEPTION();
diff --git a/src/stub-cache.h b/src/stub-cache.h
index e268920..788c532 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -226,9 +226,9 @@
// hash code would effectively throw away two bits of the hash
// code.
ASSERT(kHeapObjectTagSize == String::kHashShift);
- // Compute the hash of the name (use entire length field).
+ // Compute the hash of the name (use entire hash field).
ASSERT(name->HasHashCode());
- uint32_t field = name->length_field();
+ uint32_t field = name->hash_field();
// Using only the low bits in 64-bit mode is unlikely to increase the
// risk of collision even if the heap is spread over an area larger than
// 4Gb (and not at all if it isn't).
diff --git a/src/third_party/valgrind/valgrind.h b/src/third_party/valgrind/valgrind.h
index 47f369b..a94dc58 100644
--- a/src/third_party/valgrind/valgrind.h
+++ b/src/third_party/valgrind/valgrind.h
@@ -74,6 +74,7 @@
#define __VALGRIND_H
#include <stdarg.h>
+#include <stdint.h>
/* Nb: this file might be included in a file compiled with -ansi. So
we can't use C++ style "//" comments nor the "asm" keyword (instead
@@ -232,7 +233,7 @@
typedef
struct {
- unsigned long long int nraddr; /* where's the code? */
+ uint64_t nraddr; /* where's the code? */
}
OrigFn;
@@ -243,14 +244,14 @@
#define VALGRIND_DO_CLIENT_REQUEST( \
_zzq_rlval, _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
- { volatile unsigned long long int _zzq_args[6]; \
- volatile unsigned long long int _zzq_result; \
- _zzq_args[0] = (unsigned long long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ { volatile uint64_t _zzq_args[6]; \
+ volatile uint64_t _zzq_result; \
+ _zzq_args[0] = (uint64_t)(_zzq_request); \
+ _zzq_args[1] = (uint64_t)(_zzq_arg1); \
+ _zzq_args[2] = (uint64_t)(_zzq_arg2); \
+ _zzq_args[3] = (uint64_t)(_zzq_arg3); \
+ _zzq_args[4] = (uint64_t)(_zzq_arg4); \
+ _zzq_args[5] = (uint64_t)(_zzq_arg5); \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %RDX = client_request ( %RAX ) */ \
"xchgq %%rbx,%%rbx" \
@@ -263,7 +264,7 @@
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- volatile unsigned long long int __addr; \
+ volatile uint64_t __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %RAX = guest_NRADDR */ \
"xchgq %%rcx,%%rcx" \
@@ -346,8 +347,8 @@
typedef
struct {
- unsigned long long int nraddr; /* where's the code? */
- unsigned long long int r2; /* what tocptr do we need? */
+ uint64_t nraddr; /* where's the code? */
+ uint64_t r2; /* what tocptr do we need? */
}
OrigFn;
@@ -359,15 +360,15 @@
_zzq_rlval, _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
- { unsigned long long int _zzq_args[6]; \
- register unsigned long long int _zzq_result __asm__("r3"); \
- register unsigned long long int* _zzq_ptr __asm__("r4"); \
- _zzq_args[0] = (unsigned long long int)(_zzq_request); \
- _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
- _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ { uint64_t _zzq_args[6]; \
+ register uint64_t _zzq_result __asm__("r3"); \
+ register uint64_t* _zzq_ptr __asm__("r4"); \
+ _zzq_args[0] = (uint64_t)(_zzq_request); \
+ _zzq_args[1] = (uint64_t)(_zzq_arg1); \
+ _zzq_args[2] = (uint64_t)(_zzq_arg2); \
+ _zzq_args[3] = (uint64_t)(_zzq_arg3); \
+ _zzq_args[4] = (uint64_t)(_zzq_arg4); \
+ _zzq_args[5] = (uint64_t)(_zzq_arg5); \
_zzq_ptr = _zzq_args; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = client_request ( %R4 ) */ \
@@ -380,7 +381,7 @@
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- register unsigned long long int __addr __asm__("r3"); \
+ register uint64_t __addr __asm__("r3"); \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2" \
@@ -484,8 +485,8 @@
typedef
struct {
- unsigned long long int nraddr; /* where's the code? */
- unsigned long long int r2; /* what tocptr do we need? */
+ uint64_t nraddr; /* where's the code? */
+ uint64_t r2; /* what tocptr do we need? */
}
OrigFn;
@@ -497,9 +498,9 @@
_zzq_rlval, _zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
- { unsigned long long int _zzq_args[7]; \
- register unsigned long long int _zzq_result; \
- register unsigned long long int* _zzq_ptr; \
+ { uint64_t _zzq_args[7]; \
+ register uint64_t _zzq_result; \
+ register uint64_t* _zzq_ptr; \
_zzq_args[0] = (unsigned int long long)(_zzq_request); \
_zzq_args[1] = (unsigned int long long)(_zzq_arg1); \
_zzq_args[2] = (unsigned int long long)(_zzq_arg2); \
@@ -522,7 +523,7 @@
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
- register unsigned long long int __addr; \
+ register uint64_t __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2\n\t" \
diff --git a/src/token.cc b/src/token.cc
index bb42cea..0a4ad4c 100644
--- a/src/token.cc
+++ b/src/token.cc
@@ -55,109 +55,4 @@
#undef T
-// A perfect (0 collision) hash table of keyword token values.
-
-// larger N will reduce the number of collisions (power of 2 for fast %)
-const unsigned int N = 128;
-// make this small since we have <= 256 tokens
-static uint8_t Hashtable[N];
-static bool IsInitialized = false;
-
-
-static unsigned int Hash(const char* s) {
- // The following constants have been found using trial-and-error. If the
- // keyword set changes, they may have to be recomputed (make them flags
- // and play with the flag values). Increasing N is the simplest way to
- // reduce the number of collisions.
-
- // we must use at least 4 or more chars ('const' and 'continue' share
- // 'con')
- const unsigned int L = 5;
- // smaller S tend to reduce the number of collisions
- const unsigned int S = 4;
- // make this a prime, or at least an odd number
- const unsigned int M = 3;
-
- unsigned int h = 0;
- for (unsigned int i = 0; s[i] != '\0' && i < L; i++) {
- h += (h << S) + s[i];
- }
- // unsigned int % by a power of 2 (otherwise this will not be a bit mask)
- return h * M % N;
-}
-
-
-Token::Value Token::Lookup(const char* str) {
- ASSERT(IsInitialized);
- Value k = static_cast<Value>(Hashtable[Hash(str)]);
- const char* s = string_[k];
- ASSERT(s != NULL || k == IDENTIFIER);
- if (s == NULL || strcmp(s, str) == 0) {
- return k;
- }
- return IDENTIFIER;
-}
-
-
-#ifdef DEBUG
-// We need this function because C++ doesn't allow the expression
-// NULL == NULL, which is a result of macro expansion below. What
-// the hell?
-static bool IsNull(const char* s) {
- return s == NULL;
-}
-#endif
-
-
-void Token::Initialize() {
- if (IsInitialized) return;
-
- // A list of all keywords, terminated by ILLEGAL.
-#define T(name, string, precedence) name,
- static Value keyword[] = {
- TOKEN_LIST(IGNORE_TOKEN, T, IGNORE_TOKEN)
- ILLEGAL
- };
-#undef T
-
- // Assert that the keyword array contains the 25 keywords, 3 future
- // reserved words (const, debugger, and native), and the 3 named literals
- // defined by ECMA-262 standard.
- ASSERT(ARRAY_SIZE(keyword) == 25 + 3 + 3 + 1); // +1 for ILLEGAL sentinel
-
- // Initialize Hashtable.
- ASSERT(NUM_TOKENS <= 256); // Hashtable contains uint8_t elements
- for (unsigned int i = 0; i < N; i++) {
- Hashtable[i] = IDENTIFIER;
- }
-
- // Insert all keywords into Hashtable.
- int collisions = 0;
- for (int i = 0; keyword[i] != ILLEGAL; i++) {
- Value k = keyword[i];
- unsigned int h = Hash(string_[k]);
- if (Hashtable[h] != IDENTIFIER) collisions++;
- Hashtable[h] = k;
- }
-
- if (collisions > 0) {
- PrintF("%d collisions in keyword hashtable\n", collisions);
- FATAL("Fix keyword lookup!");
- }
-
- IsInitialized = true;
-
- // Verify hash table.
-#define T(name, string, precedence) \
- ASSERT(IsNull(string) || Lookup(string) == IDENTIFIER);
-
-#define K(name, string, precedence) \
- ASSERT(Lookup(string) == name);
-
- TOKEN_LIST(T, K, IGNORE_TOKEN)
-
-#undef K
-#undef T
-}
-
} } // namespace v8::internal
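
The deleted lookup relied on a small multiplicative hash over at most the first five characters, verified at startup to be collision-free for the fixed keyword set; keyword recognition presumably moves into the scanner with this release. A standalone sketch of the deleted scheme, with the hash constants copied from the removed code and a deliberately abbreviated keyword list:

    #include <cassert>
    #include <cstring>

    static const unsigned kN = 128;  // table size; power of 2 for a fast %
    static const char* table[kN];    // a NULL slot means "identifier"

    static unsigned Hash(const char* s) {
      const unsigned kL = 5;  // hash at most the first five characters
      const unsigned kS = 4;
      const unsigned kM = 3;
      unsigned h = 0;
      for (unsigned i = 0; s[i] != '\0' && i < kL; i++) h += (h << kS) + s[i];
      return h * kM % kN;
    }

    static void Init() {
      static const char* keywords[] = { "const", "continue", "typeof" };
      for (unsigned i = 0; i < sizeof(keywords) / sizeof(*keywords); i++) {
        unsigned h = Hash(keywords[i]);
        assert(table[h] == NULL);  // the removed code counted collisions
        table[h] = keywords[i];
      }
    }

    static bool IsKeyword(const char* str) {
      const char* s = table[Hash(str)];
      return s != NULL && strcmp(s, str) == 0;
    }
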
diff --git a/src/token.h b/src/token.h
index 4d4df63..a60704c 100644
--- a/src/token.h
+++ b/src/token.h
@@ -260,15 +260,6 @@
return precedence_[tok];
}
- // Returns the keyword value if str is a keyword;
- // returns IDENTIFIER otherwise. The class must
- // have been initialized.
- static Value Lookup(const char* str);
-
- // Must be called once to initialize the class.
- // Multiple calls are ignored.
- static void Initialize();
-
private:
#ifdef DEBUG
static const char* name_[NUM_TOKENS];
diff --git a/src/top.cc b/src/top.cc
index bb2dea4..0274838 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -31,8 +31,9 @@
#include "bootstrapper.h"
#include "debug.h"
#include "execution.h"
-#include "string-stream.h"
#include "platform.h"
+#include "simulator.h"
+#include "string-stream.h"
namespace v8 {
namespace internal {
@@ -50,6 +51,30 @@
NULL
};
+
+v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
+ return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
+}
+
+
+void ThreadLocalTop::Initialize() {
+ c_entry_fp_ = 0;
+ handler_ = 0;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ js_entry_sp_ = 0;
+#endif
+ stack_is_cooked_ = false;
+ try_catch_handler_address_ = NULL;
+ context_ = NULL;
+ int id = ThreadManager::CurrentId();
+ thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id;
+ external_caught_exception_ = false;
+ failed_access_check_callback_ = NULL;
+ save_context_ = NULL;
+ catcher_ = NULL;
+}
+
+
Address Top::get_address_from_id(Top::AddressId id) {
return top_addresses[id];
}
@@ -70,9 +95,9 @@
v->VisitPointer(bit_cast<Object**, Context**>(&(thread->context_)));
v->VisitPointer(&(thread->scheduled_exception_));
- for (v8::TryCatch* block = thread->try_catch_handler_;
+ for (v8::TryCatch* block = thread->TryCatchHandler();
block != NULL;
- block = block->next_) {
+ block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
v->VisitPointer(bit_cast<Object**, void**>(&(block->exception_)));
v->VisitPointer(bit_cast<Object**, void**>(&(block->message_)));
}
@@ -91,23 +116,10 @@
void Top::InitializeThreadLocal() {
- thread_local_.c_entry_fp_ = 0;
- thread_local_.handler_ = 0;
-#ifdef ENABLE_LOGGING_AND_PROFILING
- thread_local_.js_entry_sp_ = 0;
-#endif
- thread_local_.stack_is_cooked_ = false;
- thread_local_.try_catch_handler_ = NULL;
- thread_local_.context_ = NULL;
- int id = ThreadManager::CurrentId();
- thread_local_.thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id;
- thread_local_.external_caught_exception_ = false;
- thread_local_.failed_access_check_callback_ = NULL;
+ thread_local_.Initialize();
clear_pending_exception();
clear_pending_message();
clear_scheduled_exception();
- thread_local_.save_context_ = NULL;
- thread_local_.catcher_ = NULL;
}
@@ -254,46 +266,24 @@
}
-// There are cases where the C stack is separated from JS stack (ARM simulator).
-// To figure out the order of top-most JS try-catch handler and the top-most C
-// try-catch handler, the C try-catch handler keeps a reference to the top-most
-// JS try_catch handler when it was created.
-//
-// Here is a picture to explain the idea:
-// Top::thread_local_.handler_ Top::thread_local_.try_catch_handler_
-//
-// | |
-// v v
-//
-// | JS handler | | C try_catch handler |
-// | next |--+ +-------- | js_handler_ |
-// | | | next_ |--+
-// | | |
-// | JS handler |--+ <---------+ |
-// | next |
-//
-// If the top-most JS try-catch handler is not equal to
-// Top::thread_local_.try_catch_handler_.js_handler_, it means the JS handler
-// is on the top. Otherwise, it means the C try-catch handler is on the top.
-//
void Top::RegisterTryCatchHandler(v8::TryCatch* that) {
- StackHandler* handler =
- reinterpret_cast<StackHandler*>(thread_local_.handler_);
-
- // Find the top-most try-catch handler.
- while (handler != NULL && !handler->is_try_catch()) {
- handler = handler->next();
- }
-
- that->js_handler_ = handler; // casted to void*
- thread_local_.try_catch_handler_ = that;
+ // The ARM simulator has a separate JS stack. We therefore register
+ // the C++ try catch handler with the simulator and get back an
+ // address that can be used for comparisons with addresses into the
+ // JS stack. When running without the simulator, the address
+ // returned will be the address of the C++ try catch handler itself.
+ Address address = reinterpret_cast<Address>(
+ SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that)));
+ thread_local_.set_try_catch_handler_address(address);
}
void Top::UnregisterTryCatchHandler(v8::TryCatch* that) {
- ASSERT(thread_local_.try_catch_handler_ == that);
- thread_local_.try_catch_handler_ = that->next_;
+ ASSERT(thread_local_.TryCatchHandler() == that);
+ thread_local_.set_try_catch_handler_address(
+ reinterpret_cast<Address>(that->next_));
thread_local_.catcher_ = NULL;
+ SimulatorStack::UnregisterCTryCatch();
}
@@ -725,20 +715,18 @@
// Get the address of the external handler so we can compare the address to
// determine which one is closer to the top of the stack.
- v8::TryCatch* try_catch = thread_local_.try_catch_handler_;
+ Address external_handler_address = thread_local_.try_catch_handler_address();
// The exception has been externally caught if and only if there is
// an external handler which is on top of the top-most try-catch
// handler.
- //
- // See comments in RegisterTryCatchHandler for details.
- *is_caught_externally = try_catch != NULL &&
- (handler == NULL || handler == try_catch->js_handler_ ||
+ *is_caught_externally = external_handler_address != NULL &&
+ (handler == NULL || handler->address() > external_handler_address ||
!catchable_by_javascript);
if (*is_caught_externally) {
// Only report the exception if the external handler is verbose.
- return thread_local_.try_catch_handler_->is_verbose_;
+ return thread_local_.TryCatchHandler()->is_verbose_;
} else {
// Report the exception if it isn't caught by JavaScript code.
return handler == NULL;
@@ -775,7 +763,7 @@
MessageLocation potential_computed_location;
bool try_catch_needs_message =
is_caught_externally &&
- thread_local_.try_catch_handler_->capture_message_;
+ thread_local_.TryCatchHandler()->capture_message_;
if (report_exception || try_catch_needs_message) {
if (location == NULL) {
// If no location was specified we use a computed one instead
@@ -806,7 +794,7 @@
}
if (is_caught_externally) {
- thread_local_.catcher_ = thread_local_.try_catch_handler_;
+ thread_local_.catcher_ = thread_local_.TryCatchHandler();
}
// NOTE: Notifying the debugger or generating the message
@@ -830,15 +818,15 @@
} else if (thread_local_.pending_exception_ ==
Heap::termination_exception()) {
if (external_caught) {
- thread_local_.try_catch_handler_->can_continue_ = false;
- thread_local_.try_catch_handler_->exception_ = Heap::null_value();
+ thread_local_.TryCatchHandler()->can_continue_ = false;
+ thread_local_.TryCatchHandler()->exception_ = Heap::null_value();
}
} else {
Handle<Object> exception(pending_exception());
thread_local_.external_caught_exception_ = false;
if (external_caught) {
- thread_local_.try_catch_handler_->can_continue_ = true;
- thread_local_.try_catch_handler_->exception_ =
+ thread_local_.TryCatchHandler()->can_continue_ = true;
+ thread_local_.TryCatchHandler()->exception_ =
thread_local_.pending_exception_;
if (!thread_local_.pending_message_obj_->IsTheHole()) {
try_catch_handler()->message_ = thread_local_.pending_message_obj_;
@@ -892,9 +880,9 @@
// If the exception is externally caught, clear it if there are no
// JavaScript frames on the way to the C++ frame that has the
// external handler.
- ASSERT(thread_local_.try_catch_handler_ != NULL);
+ ASSERT(thread_local_.try_catch_handler_address() != NULL);
Address external_handler_address =
- reinterpret_cast<Address>(thread_local_.try_catch_handler_);
+ thread_local_.try_catch_handler_address();
JavaScriptFrameIterator it;
if (it.done() || (it.frame()->sp() > external_handler_address)) {
clear_exception = true;
@@ -941,6 +929,19 @@
Handle<Context> Top::GetCallingGlobalContext() {
JavaScriptFrameIterator it;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (Debug::InDebugger()) {
+ while (!it.done()) {
+ JavaScriptFrame* frame = it.frame();
+ Context* context = Context::cast(frame->context());
+ if (context->global_context() == *Debug::debug_context()) {
+ it.Advance();
+ } else {
+ break;
+ }
+ }
+ }
+#endif // ENABLE_DEBUGGER_SUPPORT
if (it.done()) return Handle<Context>::null();
JavaScriptFrame* frame = it.frame();
Context* context = Context::cast(frame->context());
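
The rewritten handler bookkeeping turns the "which handler is on top" question into a plain address comparison: on a downward-growing stack the most recently pushed handler has the smallest address, so the external C++ handler is on top exactly when every JS handler sits at a higher address. A hedged sketch of the predicate (the helper and types are illustrative, not V8 internals):

    typedef unsigned char* Address;

    // Mirrors the comparison in Top::ShouldReportException after this patch.
    static bool ExternalHandlerIsOnTop(Address js_handler,
                                       Address external_handler) {
      if (external_handler == 0) return false;  // no C++ try/catch registered
      if (js_handler == 0) return true;         // only the C++ handler exists
      return js_handler > external_handler;     // the JS handler is deeper
    }
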
diff --git a/src/top.h b/src/top.h
index ae94f08..8780844 100644
--- a/src/top.h
+++ b/src/top.h
@@ -43,6 +43,41 @@
class ThreadLocalTop BASE_EMBEDDED {
public:
+ // Initialize the thread data.
+ void Initialize();
+
+ // Get the top C++ try catch handler or NULL if none are registered.
+ //
+ // This method is not guaranteed to return an address that can be
+ // used for comparison with addresses into the JS stack. If such an
+ // address is needed, use try_catch_handler_address.
+ v8::TryCatch* TryCatchHandler();
+
+ // Get the address of the top C++ try catch handler or NULL if
+ // none are registered.
+ //
+ // This method always returns an address that can be compared to
+ // pointers into the JavaScript stack. When running on actual
+ // hardware, try_catch_handler_address and TryCatchHandler return
+ // the same pointer. When running on a simulator with a separate JS
+ // stack, try_catch_handler_address returns a JS stack address that
+ // corresponds to the place on the JS stack where the C++ handler
+ // would have been if the stack were not separate.
+ inline Address try_catch_handler_address() {
+ return try_catch_handler_address_;
+ }
+
+ // Set the address of the top C++ try catch handler.
+ inline void set_try_catch_handler_address(Address address) {
+ try_catch_handler_address_ = address;
+ }
+
+ void Free() {
+ ASSERT(!has_pending_message_);
+ ASSERT(!external_caught_exception_);
+ ASSERT(try_catch_handler_address_ == NULL);
+ }
+
// The context where the current execution method is created and for variable
// lookups.
Context* context_;
@@ -59,7 +94,6 @@
// unify them later.
Object* scheduled_exception_;
bool external_caught_exception_;
- v8::TryCatch* try_catch_handler_;
SaveContext* save_context_;
v8::TryCatch* catcher_;
@@ -79,14 +113,11 @@
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
- void Free() {
- ASSERT(!has_pending_message_);
- ASSERT(!external_caught_exception_);
- ASSERT(try_catch_handler_ == NULL);
- }
+ private:
+ Address try_catch_handler_address_;
};
-#define TOP_ADDRESS_LIST(C) \
+#define TOP_ADDRESS_LIST(C) \
C(handler_address) \
C(c_entry_fp_address) \
C(context_address) \
@@ -157,7 +188,10 @@
thread_local_.pending_message_script_ = NULL;
}
static v8::TryCatch* try_catch_handler() {
- return thread_local_.try_catch_handler_;
+ return thread_local_.TryCatchHandler();
+ }
+ static Address try_catch_handler_address() {
+ return thread_local_.try_catch_handler_address();
}
// This method is called by the api after operations that may throw
// exceptions. If an exception was thrown and not handled by an external
@@ -170,6 +204,10 @@
return &thread_local_.external_caught_exception_;
}
+ static Object** scheduled_exception_address() {
+ return &thread_local_.scheduled_exception_;
+ }
+
static Object* scheduled_exception() {
ASSERT(has_scheduled_exception());
return thread_local_.scheduled_exception_;
@@ -185,7 +223,7 @@
thread_local_.external_caught_exception_ =
has_pending_exception() &&
(thread_local_.catcher_ != NULL) &&
- (thread_local_.try_catch_handler_ == thread_local_.catcher_);
+ (try_catch_handler() == thread_local_.catcher_);
}
// Tells whether the current context has experienced an out of memory
diff --git a/src/utils.cc b/src/utils.cc
index 3c684b8..08ee16f 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -129,7 +129,7 @@
}
return NULL;
}
- int len = strlen(line_buf);
+ int len = StrLength(line_buf);
if (len > 1 &&
line_buf[len - 2] == '\\' &&
line_buf[len - 1] == '\n') {
@@ -184,7 +184,7 @@
char* result = NewArray<char>(*size + extra_space);
for (int i = 0; i < *size;) {
- int read = fread(&result[i], 1, *size - i, file);
+ int read = static_cast<int>(fread(&result[i], 1, *size - i, file));
if (read <= 0) {
fclose(file);
DeleteArray(result);
@@ -221,7 +221,7 @@
int WriteCharsToFile(const char* str, int size, FILE* f) {
int total = 0;
while (total < size) {
- int write = fwrite(str, 1, size - total, f);
+ int write = static_cast<int>(fwrite(str, 1, size - total, f));
if (write == 0) {
return total;
}
@@ -265,7 +265,7 @@
void StringBuilder::AddString(const char* s) {
- AddSubstring(s, strlen(s));
+ AddSubstring(s, StrLength(s));
}
@@ -309,4 +309,13 @@
return buffer_.start();
}
+
+int TenToThe(int exponent) {
+ ASSERT(exponent <= 9);
+ ASSERT(exponent >= 1);
+ int answer = 10;
+ for (int i = 1; i < exponent; i++) answer *= 10;
+ return answer;
+}
+
} } // namespace v8::internal
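
TenToThe gives callers an integer power of ten without pulling in pow(); the asserts bound the exponent to [1, 9] so the result always fits in 31 bits. A usage sketch (these call sites are assumptions, not part of this patch):

    int micros_per_second = TenToThe(6);   // 1000000
    int hundredths = 12345 % TenToThe(2);  // 45
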
diff --git a/src/utils.h b/src/utils.h
index f4a0598..0fd24ec 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -66,7 +66,7 @@
// integral types.
template <typename T>
static inline T AddressFrom(intptr_t x) {
- return static_cast<T>(0) + x;
+ return static_cast<T>(static_cast<T>(0) + x);
}
@@ -137,6 +137,13 @@
}
+inline int StrLength(const char* string) {
+ size_t length = strlen(string);
+ ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
+ return static_cast<int>(length);
+}
+
+
// ----------------------------------------------------------------------------
// BitField is a help template for encoding and decode bitfield with
// unsigned content.
@@ -449,15 +456,15 @@
inline Vector<const char> CStrVector(const char* data) {
- return Vector<const char>(data, static_cast<int>(strlen(data)));
+ return Vector<const char>(data, StrLength(data));
}
inline Vector<char> MutableCStrVector(char* data) {
- return Vector<char>(data, static_cast<int>(strlen(data)));
+ return Vector<char>(data, StrLength(data));
}
inline Vector<char> MutableCStrVector(char* data, int max) {
- int length = static_cast<int>(strlen(data));
+ int length = StrLength(data);
return Vector<char>(data, (length < max) ? length : max);
}
@@ -577,6 +584,9 @@
}
+// Calculate 10^exponent.
+int TenToThe(int exponent);
+
} } // namespace v8::internal
#endif // V8_UTILS_H_
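
StrLength centralizes the size_t-to-int narrowing that CStrVector and friends previously open-coded with bare static_casts. A standalone sketch of the helper's contract, assuming assert() in place of V8's ASSERT:

    #include <cassert>
    #include <cstring>

    // strlen() returns size_t, but V8's Vector<T> carries int lengths.
    static int StrLength(const char* string) {
      size_t length = strlen(string);
      // Fires only if the length does not round-trip through int, i.e. the
      // string is longer than INT_MAX on a 64-bit build.
      assert(length == static_cast<size_t>(static_cast<int>(length)));
      return static_cast<int>(length);
    }
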
diff --git a/src/v8-counters.h b/src/v8-counters.h
index b3f29f5..d6f53fa 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -153,8 +153,9 @@
SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
SC(compute_entry_frame, V8.ComputeEntryFrame) \
SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
- SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs)
-
+ SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs) \
+ SC(string_add_runtime, V8.StringAddRuntime) \
+ SC(string_add_native, V8.StringAddNative)
// This file contains all the v8 counters that are in use.
class Counters : AllStatic {
diff --git a/src/v8.cc b/src/v8.cc
index 3c70ee9..3bec827 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -30,12 +30,10 @@
#include "bootstrapper.h"
#include "debug.h"
#include "serialize.h"
+#include "simulator.h"
#include "stub-cache.h"
#include "oprofile-agent.h"
-
-#if V8_TARGET_ARCH_ARM
-#include "arm/simulator-arm.h"
-#endif
+#include "log.h"
namespace v8 {
namespace internal {
@@ -61,7 +59,6 @@
// Enable logging before setting up the heap
Logger::Setup();
- if (des) des->GetLog();
// Setup the platform OS support.
OS::Setup();
@@ -108,7 +105,7 @@
// Deserializing may put strange things in the root array's copy of the
// stack guard.
- Heap::SetStackLimit(StackGuard::jslimit());
+ Heap::SetStackLimits();
// Setup the CPU support. Must be done after heap setup and after
// any deserialization because we have to have the initial heap
@@ -117,6 +114,11 @@
OProfileAgent::Initialize();
+ if (FLAG_log_code) {
+ HandleScope scope;
+ LOG(LogCompiledFunctions());
+ }
+
return true;
}
diff --git a/src/v8.h b/src/v8.h
index 106ae61..b3624c5 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -72,6 +72,8 @@
namespace v8 {
namespace internal {
+class Deserializer;
+
class V8 : public AllStatic {
public:
// Global actions.
diff --git a/src/v8natives.js b/src/v8natives.js
index 2fecee8..8f9adcb 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -95,8 +95,8 @@
// they make parseInt on a string 1.4% slower (274ns vs 270ns).
if (%_IsSmi(string)) return string;
if (IS_NUMBER(string) &&
- ((string < -0.01 && -1e9 < string) ||
- (0.01 < string && string < 1e9))) {
+ ((0.01 < string && string < 1e9) ||
+ (-1e9 < string && string < -0.01))) {
// Truncate number.
return string | 0;
}
@@ -196,10 +196,7 @@
// ECMA-262 - 15.2.4.2
function ObjectToString() {
- var c = %_ClassOf(this);
- // Hide Arguments from the outside.
- if (c === 'Arguments') c = 'Object';
- return "[object " + c + "]";
+ return "[object " + %_ClassOf(this) + "]";
}
diff --git a/src/version.cc b/src/version.cc
index 54e688b..3611d44 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -32,9 +32,9 @@
// These macros define the version number for the current version.
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
-#define MAJOR_VERSION 1
-#define MINOR_VERSION 3
-#define BUILD_NUMBER 17
+#define MAJOR_VERSION 2
+#define MINOR_VERSION 0
+#define BUILD_NUMBER 4
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION true
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 8f078ff..9c7f9b6 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -176,7 +176,7 @@
void Assembler::set_target_address_at(Address pc, Address target) {
- Memory::int32_at(pc) = target - pc - 4;
+ Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
CPU::FlushICache(pc, sizeof(int32_t));
}
@@ -191,13 +191,13 @@
void RelocInfo::apply(intptr_t delta) {
if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
- Memory::Address_at(pc_) += delta;
+ Memory::Address_at(pc_) += static_cast<int32_t>(delta);
} else if (IsCodeTarget(rmode_)) {
- Memory::int32_at(pc_) -= delta;
+ Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
} else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
// Special handling of js_return when a break point is set (call
// instruction has been inserted).
- Memory::int32_at(pc_ + 1) -= delta; // relocate entry
+ Memory::int32_at(pc_ + 1) -= static_cast<int32_t>(delta); // relocate entry
}
}
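
The targets patched here are stored as 32-bit displacements even though x64 code lives at 64-bit addresses, so relocation deltas are deliberately truncated; the new casts make that narrowing explicit. A sketch of the invariant, assuming (as V8 does on x64) that code objects only move within a range where the delta fits in 32 bits; the helper name is hypothetical:

    #include <cassert>
    #include <cstdint>

    static int32_t AdjustDisplacement(int32_t displacement, intptr_t delta) {
      // The adjustment is only correct if the move distance survives the
      // round trip through int32_t.
      assert(delta == static_cast<intptr_t>(static_cast<int32_t>(delta)));
      return displacement - static_cast<int32_t>(delta);
    }
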
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 61e8753..2d524ea 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -80,11 +80,15 @@
// fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
uint64_t CpuFeatures::supported_ = kDefaultCpuFeatures;
uint64_t CpuFeatures::enabled_ = 0;
+uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
void CpuFeatures::Probe() {
ASSERT(Heap::HasBeenSetup());
ASSERT(supported_ == kDefaultCpuFeatures);
- if (Serializer::enabled()) return; // No features if we might serialize.
+ if (Serializer::enabled()) {
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ return; // No features if we might serialize.
+ }
Assembler assm(NULL, 0);
Label cpuid, done;
@@ -160,6 +164,11 @@
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
supported_ = probe();
+ found_by_runtime_probing_ = supported_;
+ found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
+ uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
+ supported_ |= os_guarantees;
+ found_by_runtime_probing_ &= ~os_guarantees;
// SSE2 and CMOV must be available on an X64 CPU.
ASSERT(IsSupported(CPUID));
ASSERT(IsSupported(SSE2));
@@ -337,7 +346,8 @@
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
ASSERT(desc->instr_size > 0); // Zero-size code objects upset the system.
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->reloc_size =
+ static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
desc->origin = this;
Counters::reloc_info_size.Increment(desc->reloc_size);
@@ -400,7 +410,8 @@
// setup new buffer
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
- desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
+ desc.reloc_size =
+ static_cast<int>((buffer_ + buffer_size_) - (reloc_info_writer.pos()));
// Clear the buffer in debug mode. Use 'int3' instructions to make
// sure to get into problems if we ever run uninitialized code.
@@ -887,7 +898,7 @@
void Assembler::cpuid() {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
+ ASSERT(CpuFeatures::IsEnabled(CPUID));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x0F);
@@ -2045,7 +2056,7 @@
void Assembler::fisttp_s(const Operand& adr) {
- ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3));
+ ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
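
The new found_by_runtime_probing_ mask separates features proven only by executing CPUID from features the platform guarantees, and only the latter are safe to rely on while building a portable snapshot (which is what the Serializer assert added to CpuFeatures::Scope enforces). A standalone sketch of the set algebra; the bit values are illustrative:

    #include <cstdint>

    struct FeatureState {
      uint64_t supported;
      uint64_t found_by_runtime_probing;
    };

    static FeatureState Classify(uint64_t cpuid_bits,
                                 uint64_t os_guarantees,
                                 uint64_t defaults) {
      FeatureState s;
      s.supported = cpuid_bits | os_guarantees;
      // Bits known only from probing: off-limits when serializing.
      s.found_by_runtime_probing = (cpuid_bits & ~defaults) & ~os_guarantees;
      return s;
    }
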
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 4f514f2..fa7d33b 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -37,6 +37,8 @@
#ifndef V8_X64_ASSEMBLER_X64_H_
#define V8_X64_ASSEMBLER_X64_H_
+#include "serialize.h"
+
namespace v8 {
namespace internal {
@@ -362,20 +364,11 @@
// }
class CpuFeatures : public AllStatic {
public:
- // Feature flags bit positions. They are mostly based on the CPUID spec.
- // (We assign CPUID itself to one of the currently reserved bits --
- // feel free to change this if needed.)
- enum Feature { SSE3 = 32,
- SSE2 = 26,
- CMOV = 15,
- RDTSC = 4,
- CPUID = 10,
- SAHF = 0};
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe();
// Check whether a feature is supported by the target CPU.
- static bool IsSupported(Feature f) {
+ static bool IsSupported(CpuFeature f) {
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
@@ -384,33 +377,35 @@
return (supported_ & (V8_UINT64_C(1) << f)) != 0;
}
// Check whether a feature is currently enabled.
- static bool IsEnabled(Feature f) {
+ static bool IsEnabled(CpuFeature f) {
return (enabled_ & (V8_UINT64_C(1) << f)) != 0;
}
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
- explicit Scope(Feature f) {
+ explicit Scope(CpuFeature f) {
+ uint64_t mask = (V8_UINT64_C(1) << f);
ASSERT(CpuFeatures::IsSupported(f));
+ ASSERT(!Serializer::enabled() || (found_by_runtime_probing_ & mask) == 0);
old_enabled_ = CpuFeatures::enabled_;
- CpuFeatures::enabled_ |= (V8_UINT64_C(1) << f);
+ CpuFeatures::enabled_ |= mask;
}
~Scope() { CpuFeatures::enabled_ = old_enabled_; }
private:
uint64_t old_enabled_;
#else
public:
- explicit Scope(Feature f) {}
+ explicit Scope(CpuFeature f) {}
#endif
};
private:
// Safe defaults include SSE2 and CMOV for X64. They are always available,
// if anyone checks, but they shouldn't need to check.
- static const uint64_t kDefaultCpuFeatures =
- (1 << CpuFeatures::SSE2 | 1 << CpuFeatures::CMOV);
+ static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV);
static uint64_t supported_;
static uint64_t enabled_;
+ static uint64_t found_by_runtime_probing_;
};
@@ -458,7 +453,25 @@
// the relative displacements stored in the code.
static inline Address target_address_at(Address pc);
static inline void set_target_address_at(Address pc, Address target);
+
+ // This sets the branch destination (which is in the instruction on x64).
+ // This is for calls and branches within generated code.
+ inline static void set_target_at(Address instruction_payload,
+ Address target) {
+ set_target_address_at(instruction_payload, target);
+ }
+
+ // This sets the branch destination (which is a load instruction on x64).
+ // This is for calls and branches to runtime code.
+ inline static void set_external_target_at(Address instruction_payload,
+ Address target) {
+ *reinterpret_cast<Address*>(instruction_payload) = target;
+ }
+
inline Handle<Object> code_target_object_handle_at(Address pc);
+ // Number of bytes taken up by the branch target in the code.
+ static const int kCallTargetSize = 4; // Use 32-bit displacement.
+ static const int kExternalTargetSize = 8; // Use 64-bit absolute.
// Distance between the address of the code target in the call instruction
// and the return address pushed on the stack.
static const int kCallTargetAddressOffset = 4; // Use 32-bit displacement.
@@ -469,6 +482,12 @@
static const int kPatchReturnSequenceAddressOffset = 13 - 4;
// TODO(X64): Rename this, removing the "Real", after changing the above.
static const int kRealPatchReturnSequenceAddressOffset = 2;
+
+ // The x64 JS return sequence is padded with int3 to make it large
+ // enough to hold a call instruction when the debugger patches it.
+ static const int kCallInstructionLength = 13;
+ static const int kJSReturnSequenceLength = 13;
+
// ---------------------------------------------------------------------------
// Code generation
//
@@ -829,12 +848,12 @@
}
// Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sar(Register dst) {
+ void sar_cl(Register dst) {
shift(dst, 0x7);
}
// Shifts dst right, duplicating sign bit, by cl % 64 bits.
- void sarl(Register dst) {
+ void sarl_cl(Register dst) {
shift_32(dst, 0x7);
}
@@ -842,11 +861,11 @@
shift(dst, shift_amount, 0x4);
}
- void shl(Register dst) {
+ void shl_cl(Register dst) {
shift(dst, 0x4);
}
- void shll(Register dst) {
+ void shll_cl(Register dst) {
shift_32(dst, 0x4);
}
@@ -858,11 +877,11 @@
shift(dst, shift_amount, 0x5);
}
- void shr(Register dst) {
+ void shr_cl(Register dst) {
shift(dst, 0x5);
}
- void shrl(Register dst) {
+ void shrl_cl(Register dst) {
shift_32(dst, 0x5);
}
@@ -920,7 +939,11 @@
void testq(Register dst, Immediate mask);
void xor_(Register dst, Register src) {
- arithmetic_op(0x33, dst, src);
+ if (dst.code() == src.code()) {
+ arithmetic_op_32(0x33, dst, src);
+ } else {
+ arithmetic_op(0x33, dst, src);
+ }
}
void xorl(Register dst, Register src) {
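
Routing xor_(r, r) through the 32-bit arithmetic_op_32 exploits the x64 rule that a 32-bit register write zero-extends into all 64 bits, so the common register-zeroing idiom keeps its semantics while dropping the REX.W prefix byte:

    // Both sequences clear all 64 bits of rax on x64:
    //   31 c0      xorl %eax, %eax   // 2 bytes, what the patch now emits
    //   48 31 c0   xorq %rax, %rax   // 3 bytes (REX.W prefix)
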
@@ -1109,7 +1132,7 @@
void RecordStatementPosition(int pos);
void WriteRecordedPositions();
- int pc_offset() const { return pc_ - buffer_; }
+ int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
int current_statement_position() const { return current_statement_position_; }
int current_position() const { return current_position_; }
@@ -1121,7 +1144,9 @@
}
// Get the number of bytes available in the buffer.
- inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+ inline int available_space() const {
+ return static_cast<int>(reloc_info_writer.pos() - pc_);
+ }
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
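
With the feature enum hoisted out into the shared CpuFeature type, the usage pattern from the comment above the class is unchanged: check support, then open a Scope to enable the instruction for the emitted sequence. A hedged sketch; the emitted body is illustrative:

    void EmitWithSse3(Assembler* assm) {
      if (CpuFeatures::IsSupported(SSE3)) {
        CpuFeatures::Scope enable(SSE3);  // in DEBUG also asserts snapshot safety
        assm->fisttp_s(Operand(rsp, 0));  // SSE3-only instruction
      } else {
        // ... emit a fallback sequence that avoids SSE3 ...
      }
    }
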
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 01992ce..f444d2c 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -246,6 +246,8 @@
const int kGlobalIndex =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ movq(rbx, FieldOperand(rsi, kGlobalIndex));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+ __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
__ bind(&patch_receiver);
@@ -318,47 +320,28 @@
__ push(Operand(rbp, kArgumentsOffset));
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
- if (FLAG_check_stack) {
- // We need to catch preemptions right here, otherwise an unlucky preemption
- // could show up as a failed apply.
- Label retry_preemption;
- Label no_preemption;
- __ bind(&retry_preemption);
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ movq(kScratchRegister, stack_guard_limit);
- __ movq(rcx, rsp);
- __ subq(rcx, Operand(kScratchRegister, 0));
- // rcx contains the difference between the stack limit and the stack top.
- // We use it below to check that there is enough room for the arguments.
- __ j(above, &no_preemption);
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+ __ movq(rcx, rsp);
+ // Make rcx the space we have left. The stack might already have
+ // overflowed here, which will cause rcx to become negative.
+ __ subq(rcx, kScratchRegister);
+ // Make rdx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmpq(rcx, rdx);
+ __ j(greater, &okay); // Signed comparison.
- // Preemption!
- // Because runtime functions always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack.
- __ push(rax);
- __ Push(Smi::FromInt(0));
-
- // Do call to runtime routine.
- __ CallRuntime(Runtime::kStackGuard, 1);
- __ pop(rax);
- __ jmp(&retry_preemption);
-
- __ bind(&no_preemption);
-
- Label okay;
- // Make rdx the space we need for the array when it is unrolled onto the
- // stack.
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
- __ cmpq(rcx, rdx);
- __ j(greater, &okay);
-
- // Too bad: Out of stack space.
- __ push(Operand(rbp, kFunctionOffset));
- __ push(rax);
- __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- }
+ // Out of stack space.
+ __ push(Operand(rbp, kFunctionOffset));
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+ __ bind(&okay);
+ // End of stack check.
// Push current index and limit.
const int kLimitOffset =
@@ -400,6 +383,8 @@
const int kGlobalOffset =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
__ movq(rbx, FieldOperand(rsi, kGlobalOffset));
+ __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+ __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
__ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
// Push the receiver.
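
The new apply() stack check replaces the preemption retry loop with a single signed comparison against the real stack limit: compute the space left (already negative if the stack has overflowed) and the space the unrolled arguments need, and take the APPLY_OVERFLOW path unless the former exceeds the latter. The same arithmetic in plain C++; the names are illustrative, the generated code keeps these values in rcx and rdx:

    #include <cstdint>

    static bool ArgumentsFitOnStack(uintptr_t rsp,
                                    uintptr_t real_stack_limit,
                                    intptr_t argc,
                                    int pointer_size_log2) {
      // May be negative on an already-overflowed stack, hence the signed
      // comparison (the generated code branches on 'greater').
      intptr_t space_left = static_cast<intptr_t>(rsp - real_stack_limit);
      intptr_t space_needed = argc << pointer_size_log2;
      return space_left > space_needed;
    }
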
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 0029b74..36f0e63 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "parser.h"
@@ -74,7 +75,6 @@
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
- typeof_state_(NOT_INSIDE_TYPEOF),
destination_(NULL),
previous_(NULL) {
owner_->set_state(this);
@@ -82,10 +82,8 @@
CodeGenState::CodeGenState(CodeGenerator* owner,
- TypeofState typeof_state,
ControlDestination* destination)
: owner_(owner),
- typeof_state_(typeof_state),
destination_(destination),
previous_(owner->state()) {
owner_->set_state(this);
@@ -507,13 +505,13 @@
// Add padding that will be overwritten by a debugger breakpoint.
// frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
// with length 7 (3 + 1 + 3).
- const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
+ const int kPadding = Assembler::kJSReturnSequenceLength - 7;
for (int i = 0; i < kPadding; ++i) {
masm_->int3();
}
// Check that the size of the code used for returning matches what is
// expected by the debugger.
- ASSERT_EQ(Debug::kX64JSReturnSequenceLength,
+ ASSERT_EQ(Assembler::kJSReturnSequenceLength,
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
DeleteFrame();
@@ -643,27 +641,6 @@
}
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#ifdef DEBUG
- void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
-#endif
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
void CodeGenerator::CallApplyLazy(Property* apply,
Expression* receiver,
VariableProxy* arguments,
@@ -676,7 +653,7 @@
// Load the apply function onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
Reference ref(this, apply);
- ref.GetValue(NOT_INSIDE_TYPEOF);
+ ref.GetValue();
ASSERT(ref.type() == Reference::NAMED);
// Load the receiver and the existing arguments object onto the
@@ -852,12 +829,10 @@
void CodeGenerator::CheckStack() {
- if (FLAG_check_stack) {
- DeferredStackCheck* deferred = new DeferredStackCheck;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- deferred->Branch(below);
- deferred->BindExit();
- }
+ DeferredStackCheck* deferred = new DeferredStackCheck;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ deferred->Branch(below);
+ deferred->BindExit();
}
@@ -1003,7 +978,7 @@
JumpTarget then;
JumpTarget else_;
ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.false_was_fall_through()) {
// The else target was bound, so we compile the else part first.
@@ -1030,7 +1005,7 @@
ASSERT(!has_else_stm);
JumpTarget then;
ControlDestination dest(&then, &exit, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.false_was_fall_through()) {
// The exit label was bound. We may have dangling jumps to the
@@ -1050,7 +1025,7 @@
ASSERT(!has_then_stm);
JumpTarget else_;
ControlDestination dest(&exit, &else_, false);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.true_was_fall_through()) {
// The exit label was bound. We may have dangling jumps to the
@@ -1072,7 +1047,7 @@
// or control flow effect). LoadCondition is called without
// forcing control flow.
ControlDestination dest(&exit, &exit, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
+ LoadCondition(node->condition(), &dest, false);
if (!dest.is_used()) {
// We got a value on the frame rather than (or in addition to)
// control flow.
@@ -1343,8 +1318,10 @@
node->continue_target()->Bind();
}
if (has_valid_frame()) {
+ Comment cmnt(masm_, "[ DoWhileCondition");
+ CodeForDoWhileConditionPosition(node);
ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
}
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
@@ -1401,7 +1378,7 @@
// Compile the test with the body as the true target and preferred
// fall-through and with the break target as the false target.
ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
if (dest.false_was_fall_through()) {
// If we got the break target as fall-through, the test may have
@@ -1448,7 +1425,7 @@
// The break target is the fall-through (body is a backward
// jump from here and thus an invalid fall-through).
ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
}
} else {
// If we have chosen not to recompile the test at the
@@ -1540,7 +1517,7 @@
// Compile the test with the body as the true target and preferred
// fall-through and with the break target as the false target.
ControlDestination dest(&body, node->break_target(), true);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
if (dest.false_was_fall_through()) {
// If we got the break target as fall-through, the test may have
@@ -1610,7 +1587,7 @@
// The break target is the fall-through (body is a backward
// jump from here).
ControlDestination dest(&body, node->break_target(), false);
- LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->cond(), &dest, true);
}
} else {
// Otherwise, jump back to the test at the top.
@@ -1685,8 +1662,54 @@
jsobject.Bind();
// Get the set of properties (as a FixedArray or Map).
// rax: value to be iterated over
- frame_->EmitPush(rax); // push the object being iterated over (slot 4)
+ frame_->EmitPush(rax); // Push the object being iterated over.
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ JumpTarget call_runtime;
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+ JumpTarget check_prototype;
+ JumpTarget use_cache;
+ __ movq(rcx, rax);
+ loop.Bind();
+ // Check that there are no elements.
+ __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
+ __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
+ call_runtime.Branch(not_equal);
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in rbx for the subsequent
+ // prototype load.
+ __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
+ __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
+ call_runtime.Branch(equal);
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors. This is the case if the next enumeration index
+ // field does not contain a smi.
+ __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
+ is_smi = masm_->CheckSmi(rdx);
+ call_runtime.Branch(is_smi);
+ // For all objects but the receiver, check that the cache is empty.
+ __ cmpq(rcx, rax);
+ check_prototype.Branch(equal);
+ __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
+ call_runtime.Branch(not_equal);
+ check_prototype.Bind();
+ // Load the prototype from the map and loop if non-null.
+ __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ __ CompareRoot(rcx, Heap::kNullValueRootIndex);
+ loop.Branch(not_equal);
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ use_cache.Jump();
+
+ call_runtime.Bind();
+ // Call the runtime to get the property names for the object.
frame_->EmitPush(rax); // push the Object (slot 4) for the runtime call
frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
@@ -1699,8 +1722,11 @@
__ CompareRoot(rcx, Heap::kMetaMapRootIndex);
fixed_array.Branch(not_equal);
+ use_cache.Bind();
// Get enum cache
- // rax: map (result from call to Runtime::kGetPropertyNamesFast)
+ // rax: map (either the result of a call to
+ // Runtime::kGetPropertyNamesFast or fetched directly from
+ // the object)
__ movq(rcx, rax);
__ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
// Get the bridge array held in the enumeration index field.
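
The generated fast path accepts the enum cache only if every object on the prototype chain has no elements, has non-empty instance descriptors that carry an enum cache, and, for every object but the receiver, has an empty cache. The same walk as pseudocode, using stand-in types rather than real V8 internals:

    struct Obj {
      bool has_elements;
      bool descriptors_empty;
      bool has_enum_cache;   // the enumeration index field is not a smi
      bool enum_cache_empty;
      const Obj* prototype;  // 0 terminates the chain
    };

    static bool CanUseEnumCache(const Obj* receiver) {
      for (const Obj* o = receiver; o != 0; o = o->prototype) {
        if (o->has_elements) return false;       // elements defeat the cache
        if (o->descriptors_empty) return false;  // no place for a cache
        if (!o->has_enum_cache) return false;
        if (o != receiver && !o->enum_cache_empty) return false;
      }
      return true;  // iterate via the receiver map's cached enum keys
    }
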
@@ -2190,7 +2216,8 @@
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(node, script_, this);
// Check for stack-overflow exception.
if (HasStackOverflow()) return;
InstantiateBoilerplate(boilerplate);
@@ -2210,25 +2237,25 @@
JumpTarget else_;
JumpTarget exit;
ControlDestination dest(&then, &else_, true);
- LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
+ LoadCondition(node->condition(), &dest, true);
if (dest.false_was_fall_through()) {
// The else target was bound, so we compile the else part first.
- Load(node->else_expression(), typeof_state());
+ Load(node->else_expression());
if (then.is_linked()) {
exit.Jump();
then.Bind();
- Load(node->then_expression(), typeof_state());
+ Load(node->then_expression());
}
} else {
// The then target was bound, so we compile the then part first.
- Load(node->then_expression(), typeof_state());
+ Load(node->then_expression());
if (else_.is_linked()) {
exit.Jump();
else_.Bind();
- Load(node->else_expression(), typeof_state());
+ Load(node->else_expression());
}
}
@@ -2238,7 +2265,7 @@
void CodeGenerator::VisitSlot(Slot* node) {
Comment cmnt(masm_, "[ Slot");
- LoadFromSlotCheckForArguments(node, typeof_state());
+ LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
}
@@ -2251,7 +2278,7 @@
} else {
ASSERT(var->is_global());
Reference ref(this, node);
- ref.GetValue(typeof_state());
+ ref.GetValue();
}
}
@@ -2642,9 +2669,9 @@
// the target, with an implicit promise that it will be written to again
// before it is read.
if (literal != NULL || (right_var != NULL && right_var != var)) {
- target.TakeValue(NOT_INSIDE_TYPEOF);
+ target.TakeValue();
} else {
- target.GetValue(NOT_INSIDE_TYPEOF);
+ target.GetValue();
}
Load(node->value());
GenericBinaryOperation(node->binary_op(),
@@ -2692,7 +2719,7 @@
void CodeGenerator::VisitProperty(Property* node) {
Comment cmnt(masm_, "[ Property");
Reference property(this, node);
- property.GetValue(typeof_state());
+ property.GetValue();
}
@@ -2878,7 +2905,7 @@
// Load the function to call from the property through a reference.
Reference ref(this, property);
- ref.GetValue(NOT_INSIDE_TYPEOF);
+ ref.GetValue();
// Pass receiver to called function.
if (property->is_synthetic()) {
@@ -2984,9 +3011,6 @@
void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
- // Note that because of NOT and an optimization in comparison of a typeof
- // expression to a literal string, this function can fail to leave a value
- // on top of the frame or in the cc register.
Comment cmnt(masm_, "[ UnaryOperation");
Token::Value op = node->op();
@@ -2995,7 +3019,7 @@
// Swap the true and false targets but keep the same actual label
// as the fall through.
destination()->Invert();
- LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
+ LoadCondition(node->expression(), destination(), true);
// Swap the labels back.
destination()->Invert();
@@ -3235,7 +3259,7 @@
if (!is_postfix) frame_->Push(Smi::FromInt(0));
return;
}
- target.TakeValue(NOT_INSIDE_TYPEOF);
+ target.TakeValue();
Result new_value = frame_->Pop();
new_value.ToRegister();
@@ -3293,9 +3317,6 @@
// TODO(X64): This code was copied verbatim from codegen-ia32.
// Either find a reason to change it or move it to a shared location.
- // Note that due to an optimization in comparison operations (typeof
- // compared to a string literal), we can evaluate a binary expression such
- // as AND or OR and not leave a value on the frame or in the cc register.
Comment cmnt(masm_, "[ BinaryOperation");
Token::Value op = node->op();
@@ -3311,7 +3332,7 @@
if (op == Token::AND) {
JumpTarget is_true;
ControlDestination dest(&is_true, destination()->false_target(), true);
- LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+ LoadCondition(node->left(), &dest, false);
if (dest.false_was_fall_through()) {
// The current false target was used as the fall-through. If
@@ -3330,7 +3351,7 @@
is_true.Bind();
// The left subexpression compiled to control flow, so the
// right one is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have actually just jumped to or bound the current false
// target but the current control destination is not marked as
@@ -3341,7 +3362,7 @@
} else if (dest.is_used()) {
// The left subexpression compiled to control flow (and is_true
// was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have a materialized value on the frame, so we exit with
@@ -3374,7 +3395,7 @@
} else if (op == Token::OR) {
JumpTarget is_false;
ControlDestination dest(destination()->true_target(), &is_false, false);
- LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
+ LoadCondition(node->left(), &dest, false);
if (dest.true_was_fall_through()) {
// The current true target was used as the fall-through. If
@@ -3393,7 +3414,7 @@
is_false.Bind();
// The left subexpression compiled to control flow, so the
// right one is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have just jumped to or bound the current true target but
// the current control destination is not marked as used.
@@ -3403,7 +3424,7 @@
} else if (dest.is_used()) {
// The left subexpression compiled to control flow (and is_false
// was just bound), so the right is free to do so as well.
- LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
+ LoadCondition(node->right(), destination(), false);
} else {
// We have a materialized value on the frame, so we exit with
@@ -3525,6 +3546,9 @@
destination()->false_target()->Branch(is_smi);
frame_->Spill(answer.reg());
__ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
+ destination()->true_target()->Branch(equal);
+ // Regular expressions are callable so typeof == 'function'.
+ __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
answer.Unuse();
destination()->Split(equal);
@@ -3534,9 +3558,11 @@
__ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
destination()->true_target()->Branch(equal);
+ // Regular expressions are typeof == 'function', not 'object'.
+ __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
+ destination()->false_target()->Branch(equal);
+
// It can be an undetectable object.
- __ movq(kScratchRegister,
- FieldOperand(answer.reg(), HeapObject::kMapOffset));
__ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
destination()->false_target()->Branch(not_zero);
@@ -3639,6 +3665,48 @@
}
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
+
+ __ Move(kScratchRegister, Factory::null_value());
+ __ cmpq(obj.reg(), kScratchRegister);
+ destination()->true_target()->Branch(equal);
+
+ __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined when tested with typeof.
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ destination()->false_target()->Branch(not_zero);
+ __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
+ destination()->false_target()->Branch(less);
+ __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ obj.Unuse();
+ destination()->Split(less_equal);
+}
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+ // This generates a fast version of:
+ // (%_ClassOf(arg) === 'Function')
+ ASSERT(args->length() == 1);
+ Load(args->at(0));
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ Condition is_smi = masm_->CheckSmi(obj.reg());
+ destination()->false_target()->Branch(is_smi);
+ __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
+ obj.Unuse();
+ destination()->Split(equal);
+}
+
+
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
@@ -3681,7 +3749,6 @@
Label slow_case;
Label end;
Label not_a_flat_string;
- Label a_cons_string;
Label try_again_with_new_string;
Label ascii_string;
Label got_char_code;
@@ -3749,30 +3816,19 @@
__ testb(rcx, Immediate(kIsNotStringMask));
__ j(not_zero, &slow_case);
- // Here we make assumptions about the tag values and the shifts needed.
- // See the comment in objects.h.
- ASSERT(kLongStringTag == 0);
- ASSERT(kMediumStringTag + String::kLongLengthShift ==
- String::kMediumLengthShift);
- ASSERT(kShortStringTag + String::kLongLengthShift ==
- String::kShortLengthShift);
- __ and_(rcx, Immediate(kStringSizeMask));
- __ addq(rcx, Immediate(String::kLongLengthShift));
- // Fetch the length field into the temporary register.
- __ movl(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
- __ shrl(temp.reg()); // The shift amount in ecx is implicit operand.
// Check for index out of range.
- __ cmpl(index.reg(), temp.reg());
+ __ cmpl(index.reg(), FieldOperand(object.reg(), String::kLengthOffset));
__ j(greater_equal, &slow_case);
// Reload the instance type (into the temp register this time)..
__ movq(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
__ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
// We need special handling for non-flat strings.
- ASSERT(kSeqStringTag == 0);
+ ASSERT_EQ(0, kSeqStringTag);
__ testb(temp.reg(), Immediate(kStringRepresentationMask));
__ j(not_zero, &not_a_flat_string);
// Check for 1-byte or 2-byte string.
+ ASSERT_EQ(0, kTwoByteStringTag);
__ testb(temp.reg(), Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_string);
@@ -3799,21 +3855,16 @@
__ bind(&not_a_flat_string);
__ and_(temp.reg(), Immediate(kStringRepresentationMask));
__ cmpb(temp.reg(), Immediate(kConsStringTag));
- __ j(equal, &a_cons_string);
- __ cmpb(temp.reg(), Immediate(kSlicedStringTag));
__ j(not_equal, &slow_case);
- // SlicedString.
- // Add the offset to the index and trigger the slow case on overflow.
- __ addl(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
- __ j(overflow, &slow_case);
- // Getting the underlying string is done by running the cons string code.
-
// ConsString.
- __ bind(&a_cons_string);
- // Get the first of the two strings. Both sliced and cons strings
- // store their source string at the same offset.
- ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
+ // Check that the right hand side is the empty string (i.e. if this is really a
+ // flat string in a cons string). If that is not the case we would rather go
+ // to the runtime system now, to flatten the string.
+ __ movq(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
+ __ CompareRoot(temp.reg(), Heap::kEmptyStringRootIndex);
+ __ j(not_equal, &slow_case);
+ // Get the first of the two strings.
__ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
__ jmp(&try_again_with_new_string);
@@ -3994,6 +4045,17 @@
}
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ Result answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
+ frame_->Push(&answer);
+}
+
+
void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
JumpTarget leave, null, function, non_function_constructor;
@@ -4124,18 +4186,17 @@
// -----------------------------------------------------------------------------
// CodeGenerator implementation of Expressions
-void CodeGenerator::LoadAndSpill(Expression* expression,
- TypeofState typeof_state) {
+void CodeGenerator::LoadAndSpill(Expression* expression) {
// TODO(x64): No architecture specific code. Move to shared location.
ASSERT(in_spilled_code());
set_in_spilled_code(false);
- Load(expression, typeof_state);
+ Load(expression);
frame_->SpillAll();
set_in_spilled_code(true);
}
-void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
@@ -4143,7 +4204,7 @@
JumpTarget true_target;
JumpTarget false_target;
ControlDestination dest(&true_target, &false_target, true);
- LoadCondition(x, typeof_state, &dest, false);
+ LoadCondition(expr, &dest, false);
if (dest.false_was_fall_through()) {
// The false target was just bound.
@@ -4203,13 +4264,12 @@
// partially compiled) into control flow to the control destination.
// If force_control is true, control flow is forced.
void CodeGenerator::LoadCondition(Expression* x,
- TypeofState typeof_state,
ControlDestination* dest,
bool force_control) {
ASSERT(!in_spilled_code());
int original_height = frame_->height();
- { CodeGenState new_state(this, typeof_state, dest);
+ { CodeGenState new_state(this, dest);
Visit(x);
// If we hit a stack overflow, we may not have actually visited
@@ -4837,23 +4897,25 @@
}
-// TODO(1241834): Get rid of this function in favor of just using Load, now
-// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
-// variables w/o reference errors elsewhere.
-void CodeGenerator::LoadTypeofExpression(Expression* x) {
- Variable* variable = x->AsVariableProxy()->AsVariable();
+void CodeGenerator::LoadTypeofExpression(Expression* expr) {
+ // Special handling of identifiers as subexpressions of typeof.
+ Variable* variable = expr->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
- // NOTE: This is somewhat nasty. We force the compiler to load
- // the variable as if through '<global>.<variable>' to make sure we
- // do not get reference errors.
+ // For a global variable we build the property reference
+ // <global>.<variable> and perform a (regular non-contextual) property
+ // load to make sure we do not get reference errors.
Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
Literal key(variable->name());
- // TODO(1241834): Fetch the position from the variable instead of using
- // no position.
Property property(&global, &key, RelocInfo::kNoPosition);
- Load(&property);
+ Reference ref(this, &property);
+ ref.GetValue();
+ } else if (variable != NULL && variable->slot() != NULL) {
+ // For a variable that rewrites to a slot, we signal it is the immediate
+ // subexpression of a typeof.
+ LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
} else {
- Load(x, INSIDE_TYPEOF);
+ // Anything else can be handled normally.
+ Load(expr);
}
}
@@ -5057,10 +5119,8 @@
void DeferredInlineBinaryOperation::Generate() {
- __ push(left_);
- __ push(right_);
- GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
- __ CallStub(&stub);
+ GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, left_, right_);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
@@ -5089,16 +5149,16 @@
// Bit operations always assume they likely operate on Smis. Still only
// generate the inline Smi check code if this operation is part of a loop.
flags = (loop_nesting() > 0)
- ? SMI_CODE_INLINED
- : SMI_CODE_IN_STUB;
+ ? NO_SMI_CODE_IN_STUB
+ : NO_GENERIC_BINARY_FLAGS;
break;
default:
// By default only inline the Smi check code for likely smis if this
// operation is part of a loop.
flags = ((loop_nesting() > 0) && type->IsLikelySmi())
- ? SMI_CODE_INLINED
- : SMI_CODE_IN_STUB;
+ ? NO_SMI_CODE_IN_STUB
+ : NO_GENERIC_BINARY_FLAGS;
break;
}
@@ -5157,7 +5217,7 @@
return;
}
- if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
+ if ((flags & NO_SMI_CODE_IN_STUB) != 0 && !generate_no_smi_code) {
LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
} else {
frame_->Push(&left);
@@ -5166,7 +5226,7 @@
// that does not check for the fast smi case.
// The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
if (generate_no_smi_code) {
- flags = SMI_CODE_INLINED;
+ flags = NO_SMI_CODE_IN_STUB;
}
GenericBinaryOpStub stub(op, overwrite_mode, flags);
Result answer = frame_->CallStub(&stub, 2);
@@ -5221,41 +5281,33 @@
void DeferredInlineSmiAdd::Generate() {
- __ push(dst_);
- __ Push(value_);
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, dst_, value_);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
void DeferredInlineSmiAddReversed::Generate() {
- __ Push(value_);
- __ push(dst_);
- GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
+ GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, value_, dst_);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
void DeferredInlineSmiSub::Generate() {
- __ push(dst_);
- __ Push(value_);
- GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
- __ CallStub(&igostub);
+ GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+ igostub.GenerateCall(masm_, dst_, value_);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
void DeferredInlineSmiOperation::Generate() {
- __ push(src_);
- __ Push(value_);
// For mod we don't generate all the Smi code inline.
GenericBinaryOpStub stub(
op_,
overwrite_mode_,
- (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
- __ CallStub(&stub);
+ (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
+ stub.GenerateCall(masm_, src_, value_);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
@@ -5758,7 +5810,7 @@
}
-void Reference::GetValue(TypeofState typeof_state) {
+void Reference::GetValue() {
ASSERT(!cgen_->in_spilled_code());
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
@@ -5775,17 +5827,11 @@
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
- cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
+ cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
break;
}
case NAMED: {
- // TODO(1241834): Make sure that it is safe to ignore the
- // distinction between expressions in a typeof and not in a
- // typeof. If there is a chance that reference errors can be
- // thrown below, we must distinguish between the two kinds of
- // loads (typeof expression loads must not throw a reference
- // error).
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
@@ -5867,8 +5913,6 @@
}
case KEYED: {
- // TODO(1241834): Make sure that this it is safe to ignore the
- // distinction between expressions in a typeof and not in a typeof.
Comment cmnt(masm, "[ Load from keyed Property");
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
@@ -5990,7 +6034,7 @@
}
-void Reference::TakeValue(TypeofState typeof_state) {
+void Reference::TakeValue() {
// TODO(X64): This function is completely architecture independent. Move
// it somewhere shared.
@@ -5999,7 +6043,7 @@
ASSERT(!cgen_->in_spilled_code());
ASSERT(!is_illegal());
if (type_ != SLOT) {
- GetValue(typeof_state);
+ GetValue();
return;
}
@@ -6009,7 +6053,7 @@
slot->type() == Slot::CONTEXT ||
slot->var()->mode() == Variable::CONST ||
slot->is_arguments()) {
- GetValue(typeof_state);
+ GetValue();
return;
}
@@ -6179,11 +6223,8 @@
// String value => false iff empty.
__ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
 __ j(above_equal, &not_string);
- __ and_(rcx, Immediate(kStringSizeMask));
- __ cmpq(rcx, Immediate(kShortStringTag));
- __ j(not_equal, &true_result); // Empty string is always short.
__ movl(rdx, FieldOperand(rax, String::kLengthOffset));
- __ shr(rdx, Immediate(String::kShortLengthShift));
+ __ testl(rdx, rdx);
__ j(zero, &false_result);
__ jmp(&true_result);
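The removed instructions reflect that string lengths are no longer encoded with a size-class tag (hence the old kShortStringTag check): truthiness is now just a nonzero length test. A minimal sketch of the rule, assuming the new plain 32-bit length field:

    // ToBoolean for a string: true iff the length field is nonzero.
    bool StringToBoolean(String* str) {
      return str->length() != 0;
    }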
@@ -6379,19 +6420,18 @@
// not NaN.
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
- // Read double representation into rax.
- __ movq(rbx, V8_UINT64_C(0x7ff0000000000000), RelocInfo::NONE);
- __ movq(rax, FieldOperand(rdx, HeapNumber::kValueOffset));
- // Test that exponent bits are all set.
- __ or_(rbx, rax);
- __ cmpq(rbx, rax);
- __ j(not_equal, &return_equal);
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ shl(rax, Immediate(12));
- // If all bits in the mantissa are zero the number is Infinity, and
- // we return zero. Otherwise it is a NaN, and we return non-zero.
- // We cannot just return rax because only eax is tested on return.
- __ setcc(not_zero, rax);
+ // We only allow QNaNs, which have bit 51 set (which also rules out
+ // the value being Infinity).
+
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+ __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
+ __ xorl(rax, rax);
+ __ addl(rdx, rdx); // Shift value and mask so mask applies to top bits.
+ __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
+ __ setcc(above_equal, rax);
__ ret(0);
 __ bind(&not_identical);
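For reference, a standalone C++ sketch of the bit test the new code performs, assuming IEEE-754 doubles; the mask value shown is an assumption matching the comment (exponent bits 52..62 plus quiet bit 51, as seen from the high 32-bit word):

    #include <cstdint>
    #include <cstring>

    bool IsQuietNaN(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      // High word: sign in bit 31, exponent in bits 20..30, top
      // mantissa bit (bit 51 of the double) in bit 19.
      uint32_t high = static_cast<uint32_t>(bits >> 32);
      const uint32_t kQuietNaNHighBitsMask = 0xFFFu << 19;  // assumed value
      // Shift out the sign bit; a QNaN then compares >= the shifted mask,
      // while Infinity (quiet bit clear) compares below it.
      return (high << 1) >= (kQuietNaNHighBitsMask << 1);
    }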
@@ -6614,11 +6654,11 @@
__ jmp(&loop);
__ bind(&is_instance);
- __ xor_(rax, rax);
+ __ xorl(rax, rax);
__ ret(2 * kPointerSize);
__ bind(&is_not_instance);
- __ Move(rax, Smi::FromInt(1));
+ __ movl(rax, Immediate(1));
__ ret(2 * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
@@ -6784,7 +6824,7 @@
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
- StackFrame::Type frame_type,
+ ExitFrame::Mode mode,
bool do_gc,
bool always_allocate_scope) {
// rax: result parameter for PerformGC, if any.
@@ -6854,7 +6894,9 @@
// If return value is on the stack, pop it to registers.
if (result_size_ > 1) {
ASSERT_EQ(2, result_size_);
- // Position above 4 argument mirrors and arguments object.
+ // Read result values stored on the stack. The result is stored
+ // above the four argument mirror slots and the two
+ // Arguments object slots.
__ movq(rax, Operand(rsp, 6 * kPointerSize));
__ movq(rdx, Operand(rsp, 7 * kPointerSize));
}
@@ -6865,7 +6907,7 @@
__ j(zero, &failure_returned);
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(frame_type, result_size_);
+ __ LeaveExitFrame(mode, result_size_);
__ ret(0);
// Handling of failure.
@@ -6995,12 +7037,12 @@
// this by performing a garbage collection and retrying the
// builtin once.
- StackFrame::Type frame_type = is_debug_break ?
- StackFrame::EXIT_DEBUG :
- StackFrame::EXIT;
+ ExitFrame::Mode mode = is_debug_break ?
+ ExitFrame::MODE_DEBUG :
+ ExitFrame::MODE_NORMAL;
// Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(frame_type, result_size_);
+ __ EnterExitFrame(mode, result_size_);
// rax: Holds the context at this point, but should not be used.
// On entry to code generated by GenerateCore, it must hold
@@ -7023,7 +7065,7 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
false,
false);
@@ -7032,7 +7074,7 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
false);
@@ -7043,7 +7085,7 @@
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
- frame_type,
+ mode,
true,
true);
@@ -7058,6 +7100,11 @@
}
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+ UNREACHABLE();
+}
+
+
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, exit;
#ifdef ENABLE_LOGGING_AND_PROFILING
@@ -7340,6 +7387,127 @@
}
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ push(right);
+ } else {
+ // The register calling convention places left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (!(left.is(left_arg) && right.is(right_arg))) {
+ if (left.is(right_arg) && right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ SetArgsReversed();
+ } else {
+ __ xchg(left, right);
+ }
+ } else if (left.is(left_arg)) {
+ __ movq(right_arg, right);
+ } else if (left.is(right_arg)) {
+ if (IsOperationCommutative()) {
+ __ movq(left_arg, right);
+ SetArgsReversed();
+ } else {
+ // Order of moves is important to avoid destroying the left argument.
+ __ movq(left_arg, left);
+ __ movq(right_arg, right);
+ }
+ } else if (right.is(left_arg)) {
+ if (IsOperationCommutative()) {
+ __ movq(right_arg, left);
+ SetArgsReversed();
+ } else {
+ // Order of moves is important to avoid destroying the right argument.
+ __ movq(right_arg, right);
+ __ movq(left_arg, left);
+ }
+ } else if (right.is(right_arg)) {
+ __ movq(left_arg, left);
+ } else {
+ // Order of moves is not important.
+ __ movq(left_arg, left);
+ __ movq(right_arg, right);
+ }
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Register left,
+ Smi* right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ push(left);
+ __ Push(right);
+ } else {
+ // The register calling convention places left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (left.is(left_arg)) {
+ __ Move(right_arg, right);
+ } else if (left.is(right_arg) && IsOperationCommutative()) {
+ __ Move(left_arg, right);
+ SetArgsReversed();
+ } else {
+ __ movq(left_arg, left);
+ __ Move(right_arg, right);
+ }
+
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+ MacroAssembler* masm,
+ Smi* left,
+ Register right) {
+ if (!ArgsInRegistersSupported()) {
+ // Pass arguments on the stack.
+ __ Push(left);
+ __ push(right);
+ } else {
+ // The register calling convention places left in rdx and right in rax.
+ Register left_arg = rdx;
+ Register right_arg = rax;
+ if (right.is(right_arg)) {
+ __ Move(left_arg, left);
+ } else if (right.is(left_arg) && IsOperationCommutative()) {
+ __ Move(right_arg, left);
+ SetArgsReversed();
+ } else {
+ __ Move(left_arg, left);
+ __ movq(right_arg, right);
+ }
+ // Update flags to indicate that arguments are in registers.
+ SetArgsInRegisters();
+ __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+ }
+
+ // Call the stub.
+ __ CallStub(this);
+}
+
+
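The deferred-code rewrites earlier in this patch all reduce to the same pattern; a condensed sketch of a call site, using the member names from the deferred classes above:

    // Slow case of an inlined binary op: GenerateCall takes over argument
    // placement, moving left_/right_ into rdx/rax when the register
    // convention applies, or pushing them on the stack otherwise.
    GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, NO_SMI_CODE_IN_STUB);
    stub.GenerateCall(masm_, left_, right_);
    if (!dst_.is(rax)) __ movq(dst_, rax);  // the result always arrives in rax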
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform fast-case smi code for the operation (rax <op> rbx) and
// leave result in register rax.
@@ -7412,22 +7580,21 @@
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label call_runtime;
- if (flags_ == SMI_CODE_IN_STUB) {
+ if (HasSmiCodeInStub()) {
// The fast case smi code wasn't inlined in the stub caller
// code. Generate it here to speed up common operations.
Label slow;
__ movq(rbx, Operand(rsp, 1 * kPointerSize)); // get y
__ movq(rax, Operand(rsp, 2 * kPointerSize)); // get x
GenerateSmiCode(masm, &slow);
- __ ret(2 * kPointerSize); // remove both operands
+ GenerateReturn(masm);
// Too bad. The fast case smi code didn't succeed.
__ bind(&slow);
}
- // Setup registers.
- __ movq(rax, Operand(rsp, 1 * kPointerSize)); // get y
- __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // get x
+ // Make sure the arguments are in rdx and rax.
+ GenerateLoadArguments(masm);
// Floating point case.
switch (op_) {
@@ -7451,7 +7618,10 @@
__ JumpIfNotSmi(rax, &skip_allocation);
// Fall through!
case NO_OVERWRITE:
- __ AllocateHeapNumber(rax, rcx, &call_runtime);
+ // Allocate a heap number for the result. Keep rax and rdx intact
+ // for the possible runtime call.
+ __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+ __ movq(rax, rbx);
__ bind(&skip_allocation);
break;
default: UNREACHABLE();
@@ -7467,7 +7637,7 @@
default: UNREACHABLE();
}
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
- __ ret(2 * kPointerSize);
+ GenerateReturn(masm);
}
case Token::MOD: {
// For MOD we go directly to runtime in the non-smi case.
@@ -7492,7 +7662,7 @@
if (use_sse3_) {
// Truncate the operands to 32-bit integers and check for
// exceptions in doing so.
- CpuFeatures::Scope scope(CpuFeatures::SSE3);
+ CpuFeatures::Scope scope(SSE3);
__ fisttp_s(Operand(rsp, 0 * kPointerSize));
__ fisttp_s(Operand(rsp, 1 * kPointerSize));
__ fnstsw_ax();
@@ -7521,9 +7691,9 @@
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
case Token::BIT_XOR: __ xorl(rax, rcx); break;
- case Token::SAR: __ sarl(rax); break;
- case Token::SHL: __ shll(rax); break;
- case Token::SHR: __ shrl(rax); break;
+ case Token::SAR: __ sarl_cl(rax); break;
+ case Token::SHL: __ shll_cl(rax); break;
+ case Token::SHR: __ shrl_cl(rax); break;
default: UNREACHABLE();
}
if (op_ == Token::SHR) {
@@ -7535,7 +7705,7 @@
__ JumpIfNotValidSmiValue(rax, &non_smi_result);
// Tag smi result, if possible, and return.
__ Integer32ToSmi(rax, rax);
- __ ret(2 * kPointerSize);
+ GenerateReturn(masm);
// All ops except SHR return a signed int32 that we load in a HeapNumber.
if (op_ != Token::SHR && non_smi_result.is_linked()) {
@@ -7561,7 +7731,7 @@
__ movq(Operand(rsp, 1 * kPointerSize), rbx);
__ fild_s(Operand(rsp, 1 * kPointerSize));
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ ret(2 * kPointerSize);
+ GenerateReturn(masm);
}
// Clear the FPU exception flag and reset the stack before calling
@@ -7592,12 +7762,62 @@
}
// If all else fails, use the runtime system to get the correct
- // result.
+ // result. If arguments were passed in registers, now place them on the
+ // stack in the correct order below the return address.
__ bind(&call_runtime);
+ if (HasArgumentsInRegisters()) {
+ __ pop(rcx);
+ if (HasArgumentsReversed()) {
+ __ push(rax);
+ __ push(rdx);
+ } else {
+ __ push(rdx);
+ __ push(rax);
+ }
+ __ push(rcx);
+ }
switch (op_) {
- case Token::ADD:
+ case Token::ADD: {
+ // Test for string arguments before calling runtime.
+ Label not_strings, both_strings, not_string1, string1;
+ Condition is_smi;
+ Result answer;
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // First argument.
+ __ movq(rax, Operand(rsp, 1 * kPointerSize)); // Second argument.
+ is_smi = masm->CheckSmi(rdx);
+ __ j(is_smi, &not_string1);
+ __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rdx);
+ __ j(above_equal, &not_string1);
+
+ // First argument is a string, test second.
+ is_smi = masm->CheckSmi(rax);
+ __ j(is_smi, &string1);
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
+ __ j(above_equal, &string1);
+
+ // First and second argument are strings.
+ Runtime::Function* f = Runtime::FunctionForId(Runtime::kStringAdd);
+ __ TailCallRuntime(ExternalReference(f), 2, f->result_size);
+
+ // Only first argument is a string.
+ __ bind(&string1);
+ __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+ // First argument was not a string, test second.
+ __ bind(&not_string1);
+ is_smi = masm->CheckSmi(rax);
+ __ j(is_smi, &not_strings);
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
+ __ j(above_equal, &not_strings);
+
+ // Only second argument is a string.
+ __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+ __ bind(&not_strings);
+ // Neither argument is a string.
__ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
break;
+ }
case Token::SUB:
__ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
break;
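The ADD slow path above is a four-way dispatch on the operand types; a sketch with hypothetical helper names standing in for the runtime and builtin calls:

    // Pseudo-C++ for the string checks preceding Builtins::ADD.
    // All helper functions here are illustrative names only.
    Object* AddSlowPath(Object* left, Object* right) {
      if (left->IsString() && right->IsString())
        return RuntimeStringAdd(left, right);   // Runtime::kStringAdd (tail call)
      if (left->IsString())
        return StringAddLeft(left, right);      // Builtins::STRING_ADD_LEFT
      if (right->IsString())
        return StringAddRight(left, right);     // Builtins::STRING_ADD_RIGHT
      return GenericAdd(left, right);           // Builtins::ADD
    }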
@@ -7634,6 +7854,26 @@
}
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+ // If arguments are not passed in registers, read them from the stack.
+ if (!HasArgumentsInRegisters()) {
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ }
+}
+
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+ // If arguments are not passed in registers, remove them from the stack before
+ // returning.
+ if (!HasArgumentsInRegisters()) {
+ __ ret(2 * kPointerSize); // Remove both operands
+ } else {
+ __ ret(0);
+ }
+}
+
+
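A quick worked example of the cleanup rule with kPointerSize == 8: on the stack path the two operands sit below the return address, so the stub exits with `ret 16`; on the register path nothing of the caller's is left on the stack, so it exits with `ret 0`.

    // Stack path (operands pushed left, then right):
    //   rsp + 0  : return address
    //   rsp + 8  : right operand  -> loaded into rax
    //   rsp + 16 : left operand   -> loaded into rdx
    //   exit: ret 2 * kPointerSize == ret 16
    // Register path: exit: ret 0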
int CompareStub::MinorKey() {
// Encode the two parameters in a unique 16 bit value.
ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
@@ -7653,7 +7893,7 @@
&actual_size,
true));
CHECK(buffer);
- Assembler masm(buffer, actual_size);
+ Assembler masm(buffer, static_cast<int>(actual_size));
// Generated code is put into a fixed, unmovable, buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
// (e.g. the JavaScript nan-object).
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 56b88b7..8539884 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -77,12 +77,12 @@
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is left in place with its value above it.
- void GetValue(TypeofState typeof_state);
+ void GetValue();
// Like GetValue except that the slot is expected to be written to before
// being read from again. Thae value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
- void TakeValue(TypeofState typeof_state);
+ void TakeValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
@@ -241,28 +241,20 @@
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
- // state. The new state may or may not be inside a typeof, and has its
- // own control destination.
- CodeGenState(CodeGenerator* owner,
- TypeofState typeof_state,
- ControlDestination* destination);
+ // state. The new state has its own control destination.
+ CodeGenState(CodeGenerator* owner, ControlDestination* destination);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
// Accessors for the state.
- TypeofState typeof_state() const { return typeof_state_; }
ControlDestination* destination() const { return destination_; }
private:
// The owning code generator.
CodeGenerator* owner_;
- // A flag indicating whether we are compiling the immediate subexpression
- // of a typeof expression.
- TypeofState typeof_state_;
-
// A control destination in case the expression has a control-flow
// effect.
ControlDestination* destination_;
@@ -307,17 +299,12 @@
static bool ShouldGenerateLog(Expression* type);
#endif
- static void SetFunctionInfo(Handle<JSFunction> fun,
- FunctionLiteral* lit,
- bool is_toplevel,
- Handle<Script> script);
-
static void RecordPositions(MacroAssembler* masm, int pos);
// Accessors
MacroAssembler* masm() { return masm_; }
-
VirtualFrame* frame() const { return frame_; }
+ Handle<Script> script() { return script_; }
bool has_valid_frame() const { return frame_ != NULL; }
@@ -353,7 +340,6 @@
bool is_eval() { return is_eval_; }
// State
- TypeofState typeof_state() const { return state_->typeof_state(); }
ControlDestination* destination() const { return state_->destination(); }
// Track loop nesting level.
@@ -414,18 +400,16 @@
}
void LoadCondition(Expression* x,
- TypeofState typeof_state,
ControlDestination* destination,
bool force_control);
- void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ void Load(Expression* expr);
void LoadGlobal();
void LoadGlobalReceiver();
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
- void LoadAndSpill(Expression* expression,
- TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ void LoadAndSpill(Expression* expression);
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
@@ -511,8 +495,6 @@
static bool PatchInlineRuntimeEntry(Handle<String> name,
const InlineRuntimeLUT& new_entry,
InlineRuntimeLUT* old_entry);
- static Handle<Code> ComputeLazyCompile(int argc);
- Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
@@ -528,6 +510,8 @@
void GenerateIsSmi(ZoneList<Expression*>* args);
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
+ void GenerateIsObject(ZoneList<Expression*>* args);
+ void GenerateIsFunction(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -560,6 +544,9 @@
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
+ // Fast support for StringAdd.
+ void GenerateStringAdd(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -574,6 +561,7 @@
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Statement* node);
+ void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
@@ -633,6 +621,25 @@
// times by generated code to perform common tasks, often the slow
// case of a JavaScript operation. They are all subclasses of CodeStub,
// which is declared in code-stubs.h.
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop)
+ : argc_(argc), in_loop_(in_loop) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+
+#ifdef DEBUG
+ void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
+#endif
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() { return argc_; }
+ InLoopFlag InLoop() { return in_loop_; }
+};
class ToBooleanStub: public CodeStub {
@@ -647,11 +654,10 @@
};
-// Flag that indicates whether or not the code that handles smi arguments
-// should be placed in the stub, inlined, or omitted entirely.
+// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
- SMI_CODE_IN_STUB,
- SMI_CODE_INLINED
+ NO_GENERIC_BINARY_FLAGS = 0,
+ NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
};
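With the enum now a bitmask rather than a pair of mutually exclusive values, callers test membership with `&` instead of `==`, as the codegen hunks earlier in this patch do:

    // Old: if (flags == SMI_CODE_INLINED) ...
    // New: the flag means "the caller emitted the smi fast case inline,
    // so the stub may omit it".
    GenericBinaryFlags flags = NO_SMI_CODE_IN_STUB;
    if ((flags & NO_SMI_CODE_IN_STUB) != 0) {
      // Skip generating the smi fast path inside the stub.
    }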
@@ -660,45 +666,82 @@
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
GenericBinaryFlags flags)
- : op_(op), mode_(mode), flags_(flags) {
- use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
+ : op_(op),
+ mode_(mode),
+ flags_(flags),
+ args_in_registers_(false),
+ args_reversed_(false) {
+ use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+ // Generate code to call the stub with the supplied arguments. This will add
+ // code at the call site to prepare arguments either in registers or on the
+ // stack together with the actual call.
+ void GenerateCall(MacroAssembler* masm, Register left, Register right);
+ void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+ void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
private:
Token::Value op_;
OverwriteMode mode_;
GenericBinaryFlags flags_;
+ bool args_in_registers_; // Arguments passed in registers, not on the stack.
+ bool args_reversed_; // Left and right arguments are swapped.
bool use_sse3_;
const char* GetName();
#ifdef DEBUG
void Print() {
- PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+ PrintF("GenericBinaryOpStub (op %s), "
+ "(mode %d, flags %d, registers %d, reversed %d)\n",
Token::String(op_),
static_cast<int>(mode_),
- static_cast<int>(flags_));
+ static_cast<int>(flags_),
+ static_cast<int>(args_in_registers_),
+ static_cast<int>(args_reversed_));
}
#endif
- // Minor key encoding in 16 bits FSOOOOOOOOOOOOMM.
+ // Minor key encoding in 16 bits FRASOOOOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 12> {};
- class SSE3Bits: public BitField<bool, 14, 1> {};
+ class OpBits: public BitField<Token::Value, 2, 10> {};
+ class SSE3Bits: public BitField<bool, 12, 1> {};
+ class ArgsInRegistersBits: public BitField<bool, 13, 1> {};
+ class ArgsReversedBits: public BitField<bool, 14, 1> {};
class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_)
- | SSE3Bits::encode(use_sse3_);
+ | ModeBits::encode(mode_)
+ | FlagBits::encode(flags_)
+ | SSE3Bits::encode(use_sse3_)
+ | ArgsInRegistersBits::encode(args_in_registers_)
+ | ArgsReversedBits::encode(args_reversed_);
}
+
void Generate(MacroAssembler* masm);
+ void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+
+ bool ArgsInRegistersSupported() {
+ return ((op_ == Token::ADD) || (op_ == Token::SUB)
+ || (op_ == Token::MUL) || (op_ == Token::DIV))
+ && flags_ != NO_SMI_CODE_IN_STUB;
+ }
+ bool IsOperationCommutative() {
+ return (op_ == Token::ADD) || (op_ == Token::MUL);
+ }
+
+ void SetArgsInRegisters() { args_in_registers_ = true; }
+ void SetArgsReversed() { args_reversed_ = true; }
+ bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
+ bool HasArgumentsInRegisters() { return args_in_registers_; }
+ bool HasArgumentsReversed() { return args_reversed_; }
};
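For reference, the FRASOOOOOOOOOOMM layout spelled out; the BitField templates above reduce to these shifts (a sketch, not the actual encoder):

    //   bits 0..1  M  OverwriteMode
    //   bits 2..11 O  Token::Value op (10 bits)
    //   bit  12    S  use_sse3_
    //   bit  13    A  args_in_registers_
    //   bit  14    R  args_reversed_
    //   bit  15    F  GenericBinaryFlags (NO_SMI_CODE_IN_STUB)
    int MinorKeySketch(int mode, int op, bool sse3,
                       bool in_regs, bool reversed, int flags) {
      return (mode << 0) | (op << 2) | (sse3 << 12)
           | (in_regs << 13) | (reversed << 14) | (flags << 15);
    }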
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index 8df0ab7..cc20c58 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -27,6 +27,10 @@
// CPU specific code for x64 independent of OS goes here.
+#ifdef __GNUC__
+#include "third_party/valgrind/valgrind.h"
+#endif
+
#include "v8.h"
#include "cpu.h"
@@ -49,6 +53,15 @@
// If flushing of the instruction cache becomes necessary Windows has the
// API function FlushInstructionCache.
+
+ // By default, valgrind only checks the stack for writes that might need to
+ // invalidate already cached translated code. This leads to random
+ // instability because code patches or moves can go unnoticed. One
+ // solution is to run valgrind with --smc-check=all, but this comes at a big
+ // performance cost. We can notify valgrind to invalidate its cache.
+#ifdef VALGRIND_DISCARD_TRANSLATIONS
+ VALGRIND_DISCARD_TRANSLATIONS(start, size);
+#endif
}
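A minimal usage sketch of why the hook matters: any code that rewrites already-executed instructions must flush, and under valgrind the macro is the only part of FlushICache that does anything on x64. The helper below is hypothetical; `byte` is V8's byte typedef.

    #include <cstring>

    // Hypothetical helper: patch generated code in place, then flush.
    void PatchCode(byte* pc, const byte* new_bytes, size_t size) {
      memcpy(pc, new_bytes, size);
      CPU::FlushICache(pc, size);  // no-op on real x64 hardware; tells
                                   // valgrind to discard stale translations
    }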
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 49240b4..bc88d46 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -181,7 +181,7 @@
void BreakLocationIterator::ClearDebugBreakAtReturn() {
rinfo()->PatchCode(original_rinfo()->pc(),
- Debug::kX64JSReturnSequenceLength);
+ Assembler::kJSReturnSequenceLength);
}
@@ -191,9 +191,10 @@
void BreakLocationIterator::SetDebugBreakAtReturn() {
- ASSERT(Debug::kX64JSReturnSequenceLength >= Debug::kX64CallInstructionLength);
+ ASSERT(Assembler::kJSReturnSequenceLength >=
+ Assembler::kCallInstructionLength);
rinfo()->PatchCodeWithCall(Debug::debug_break_return()->entry(),
- Debug::kX64JSReturnSequenceLength - Debug::kX64CallInstructionLength);
+ Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 19bcf66..0b43e76 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -218,7 +218,7 @@
OperandType op_order = bm[i].op_order_;
id->op_order_ =
static_cast<OperandType>(op_order & ~BYTE_SIZE_OPERAND_FLAG);
- assert(id->type == NO_INSTR); // Information not already entered
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered
id->type = type;
id->byte_size_operation = ((op_order & BYTE_SIZE_OPERAND_FLAG) != 0);
}
@@ -232,7 +232,7 @@
const char* mnem) {
for (byte b = start; b <= end; b++) {
InstructionDesc* id = &instructions_[b];
- assert(id->type == NO_INSTR); // Information already entered
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered
id->mnem = mnem;
id->type = type;
id->byte_size_operation = byte_size;
@@ -243,7 +243,7 @@
void InstructionTable::AddJumpConditionalShort() {
for (byte b = 0x70; b <= 0x7F; b++) {
InstructionDesc* id = &instructions_[b];
- assert(id->type == NO_INSTR); // Information already entered
+ ASSERT_EQ(NO_INSTR, id->type); // Information not already entered
id->mnem = NULL; // Computed depending on condition code.
id->type = JUMP_CONDITIONAL_SHORT_INSTR;
}
@@ -393,6 +393,7 @@
RegisterNameMapping register_name);
int PrintRightOperand(byte* modrmp);
int PrintRightByteOperand(byte* modrmp);
+ int PrintRightXMMOperand(byte* modrmp);
int PrintOperands(const char* mnem,
OperandType op_order,
byte* data);
@@ -400,13 +401,15 @@
int PrintImmediateOp(byte* data);
const char* TwoByteMnemonic(byte opcode);
int TwoByteOpcodeInstruction(byte* data);
- int F7Instruction(byte* data);
+ int F6F7Instruction(byte* data);
int ShiftInstruction(byte* data);
int JumpShort(byte* data);
int JumpConditional(byte* data);
int JumpConditionalShort(byte* data);
int SetCC(byte* data);
int FPUInstruction(byte* data);
+ int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
+ int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
void AppendToBuffer(const char* format, ...);
void UnimplementedInstruction() {
@@ -568,6 +571,12 @@
}
+int DisassemblerX64::PrintRightXMMOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp,
+ &DisassemblerX64::NameOfXMMRegister);
+}
+
+
// Returns number of bytes used including the current *data.
// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
int DisassemblerX64::PrintOperands(const char* mnem,
@@ -648,8 +657,8 @@
// Returns number of bytes used, including *data.
-int DisassemblerX64::F7Instruction(byte* data) {
- assert(*data == 0xF7);
+int DisassemblerX64::F6F7Instruction(byte* data) {
+ ASSERT(*data == 0xF7 || *data == 0xF6);
byte modrm = *(data + 1);
int mod, regop, rm;
 get_modrm(modrm, &mod, &regop, &rm);
@@ -676,19 +685,12 @@
operand_size_code(),
NameOfCPURegister(rm));
return 2;
- } else if (mod == 3 && regop == 0) {
- int32_t imm = *reinterpret_cast<int32_t*>(data + 2);
- AppendToBuffer("test%c %s,0x%x",
- operand_size_code(),
- NameOfCPURegister(rm),
- imm);
- return 6;
} else if (regop == 0) {
AppendToBuffer("test%c ", operand_size_code());
- int count = PrintRightOperand(data + 1);
- int32_t imm = *reinterpret_cast<int32_t*>(data + 1 + count);
- AppendToBuffer(",0x%x", imm);
- return 1 + count + 4 /*int32_t*/;
+ int count = PrintRightOperand(data + 1); // Use name of 64-bit register.
+ AppendToBuffer(",0x");
+ count += PrintImmediate(data + 1 + count, operand_size());
+ return 1 + count;
} else {
UnimplementedInstruction();
return 2;
@@ -739,7 +741,7 @@
UnimplementedInstruction();
return num_bytes;
}
- assert(mnem != NULL);
+ ASSERT_NE(NULL, mnem);
if (op == 0xD0) {
imm8 = 1;
} else if (op == 0xC0) {
@@ -762,7 +764,7 @@
// Returns number of bytes used, including *data.
int DisassemblerX64::JumpShort(byte* data) {
- assert(*data == 0xEB);
+ ASSERT_EQ(0xEB, *data);
byte b = *(data + 1);
byte* dest = data + static_cast<int8_t>(b) + 2;
AppendToBuffer("jmp %s", NameOfAddress(dest));
@@ -772,7 +774,7 @@
// Returns number of bytes used, including *data.
int DisassemblerX64::JumpConditional(byte* data) {
- assert(*data == 0x0F);
+ ASSERT_EQ(0x0F, *data);
byte cond = *(data + 1) & 0x0F;
byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6;
const char* mnem = conditional_code_suffix[cond];
@@ -794,7 +796,7 @@
// Returns number of bytes used, including *data.
int DisassemblerX64::SetCC(byte* data) {
- assert(*data == 0x0F);
+ ASSERT_EQ(0x0F, *data);
byte cond = *(data + 1) & 0x0F;
const char* mnem = conditional_code_suffix[cond];
AppendToBuffer("set%s%c ", mnem, operand_size_code());
@@ -805,168 +807,170 @@
// Returns number of bytes used, including *data.
int DisassemblerX64::FPUInstruction(byte* data) {
- byte b1 = *data;
- byte b2 = *(data + 1);
- if (b1 == 0xD9) {
- const char* mnem = NULL;
- switch (b2) {
- case 0xE0:
- mnem = "fchs";
- break;
- case 0xE1:
- mnem = "fabs";
- break;
- case 0xE4:
- mnem = "ftst";
- break;
- case 0xF5:
- mnem = "fprem1";
- break;
- case 0xF7:
- mnem = "fincstp";
- break;
- case 0xE8:
- mnem = "fld1";
- break;
- case 0xEE:
- mnem = "fldz";
- break;
- case 0xF8:
- mnem = "fprem";
- break;
- }
- if (mnem != NULL) {
- AppendToBuffer("%s", mnem);
- return 2;
- } else if ((b2 & 0xF8) == 0xC8) {
- AppendToBuffer("fxch st%d", b2 & 0x7);
- return 2;
- } else {
- int mod, regop, rm;
 - get_modrm(*(data + 1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case 0:
- mnem = "fld_s";
- break;
- case 3:
- mnem = "fstp_s";
- break;
- default:
- UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- }
- } else if (b1 == 0xDD) {
- int mod, regop, rm;
 - get_modrm(*(data + 1), &mod, &regop, &rm);
- if (mod == 3) {
- switch (regop) {
- case 0:
- AppendToBuffer("ffree st%d", rm & 7);
- break;
- case 2:
- AppendToBuffer("fstp st%d", rm & 7);
- break;
- default:
- UnimplementedInstruction();
- break;
- }
- return 2;
- } else {
- const char* mnem = "?";
- switch (regop) {
- case 0:
- mnem = "fld_d";
- break;
- case 3:
- mnem = "fstp_d";
- break;
- default:
- UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- }
- } else if (b1 == 0xDB) {
- int mod, regop, rm;
 - get_modrm(*(data + 1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case 0:
- mnem = "fild_s";
- break;
- case 2:
- mnem = "fist_s";
- break;
- case 3:
- mnem = "fistp_s";
- break;
- default:
- UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- } else if (b1 == 0xDF) {
- if (b2 == 0xE0) {
- AppendToBuffer("fnstsw_ax");
- return 2;
- }
- int mod, regop, rm;
 - get_modrm(*(data + 1), &mod, &regop, &rm);
- const char* mnem = "?";
- switch (regop) {
- case 5:
- mnem = "fild_d";
- break;
- case 7:
- mnem = "fistp_d";
- break;
- default:
- UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data + 1);
- return count + 1;
- } else if (b1 == 0xDC || b1 == 0xDE) {
- bool is_pop = (b1 == 0xDE);
- if (is_pop && b2 == 0xD9) {
- AppendToBuffer("fcompp");
- return 2;
- }
- const char* mnem = "FP0xDC";
- switch (b2 & 0xF8) {
- case 0xC0:
- mnem = "fadd";
- break;
- case 0xE8:
- mnem = "fsub";
- break;
- case 0xC8:
- mnem = "fmul";
- break;
- case 0xF8:
- mnem = "fdiv";
- break;
- default:
- UnimplementedInstruction();
- }
- AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
- return 2;
- } else if (b1 == 0xDA && b2 == 0xE9) {
- const char* mnem = "fucompp";
- AppendToBuffer("%s", mnem);
- return 2;
+ byte escape_opcode = *data;
+ ASSERT_EQ(0xD8, escape_opcode & 0xF8);
+ byte modrm_byte = *(data+1);
+
+ if (modrm_byte >= 0xC0) {
+ return RegisterFPUInstruction(escape_opcode, modrm_byte);
+ } else {
+ return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
}
- AppendToBuffer("Unknown FP instruction");
+}
+
+int DisassemblerX64::MemoryFPUInstruction(int escape_opcode,
+ int modrm_byte,
+ byte* modrm_start) {
+ const char* mnem = "?";
+ int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
+ switch (escape_opcode) {
+ case 0xD9: switch (regop) {
+ case 0: mnem = "fld_s"; break;
+ case 3: mnem = "fstp_s"; break;
+ case 7: mnem = "fstcw"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDB: switch (regop) {
+ case 0: mnem = "fild_s"; break;
+ case 1: mnem = "fisttp_s"; break;
+ case 2: mnem = "fist_s"; break;
+ case 3: mnem = "fistp_s"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDD: switch (regop) {
+ case 0: mnem = "fld_d"; break;
+ case 3: mnem = "fstp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDF: switch (regop) {
+ case 5: mnem = "fild_d"; break;
+ case 7: mnem = "fistp_d"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(modrm_start);
+ return count + 1;
+}
+
+int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
+ byte modrm_byte) {
+ bool has_register = false; // Is the FPU register encoded in modrm_byte?
+ const char* mnem = "?";
+
+ switch (escape_opcode) {
+ case 0xD8:
+ UnimplementedInstruction();
+ break;
+
+ case 0xD9:
+ switch (modrm_byte & 0xF8) {
+ case 0xC8:
+ mnem = "fxch";
+ has_register = true;
+ break;
+ default:
+ switch (modrm_byte) {
+ case 0xE0: mnem = "fchs"; break;
+ case 0xE1: mnem = "fabs"; break;
+ case 0xE4: mnem = "ftst"; break;
+ case 0xE8: mnem = "fld1"; break;
+ case 0xEE: mnem = "fldz"; break;
+ case 0xF5: mnem = "fprem1"; break;
+ case 0xF7: mnem = "fincstp"; break;
+ case 0xF8: mnem = "fprem"; break;
+ case 0xFE: mnem = "fsin"; break;
+ case 0xFF: mnem = "fcos"; break;
+ default: UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0xDA:
+ if (modrm_byte == 0xE9) {
+ mnem = "fucompp";
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDB:
+ if ((modrm_byte & 0xF8) == 0xE8) {
+ mnem = "fucomi";
+ has_register = true;
+ } else if (modrm_byte == 0xE2) {
+ mnem = "fclex";
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDC:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "fadd"; break;
+ case 0xE8: mnem = "fsub"; break;
+ case 0xC8: mnem = "fmul"; break;
+ case 0xF8: mnem = "fdiv"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDD:
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "ffree"; break;
+ case 0xD8: mnem = "fstp"; break;
+ default: UnimplementedInstruction();
+ }
+ break;
+
+ case 0xDE:
+ if (modrm_byte == 0xD9) {
+ mnem = "fcompp";
+ } else {
+ has_register = true;
+ switch (modrm_byte & 0xF8) {
+ case 0xC0: mnem = "faddp"; break;
+ case 0xE8: mnem = "fsubp"; break;
+ case 0xC8: mnem = "fmulp"; break;
+ case 0xF8: mnem = "fdivp"; break;
+ default: UnimplementedInstruction();
+ }
+ }
+ break;
+
+ case 0xDF:
+ if (modrm_byte == 0xE0) {
+ mnem = "fnstsw_ax";
+ } else if ((modrm_byte & 0xF8) == 0xE8) {
+ mnem = "fucomip";
+ has_register = true;
+ }
+ break;
+
+ default: UnimplementedInstruction();
+ }
+
+ if (has_register) {
+ AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
+ } else {
+ AppendToBuffer("%s", mnem);
+ }
return 2;
}
+
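The register/memory split relies on the ModR/M layout; a sketch of the decoding (mirroring get_modrm) and why `>= 0xC0` means a register operand:

    // ModR/M byte: mod (2 bits) | reg/op (3 bits) | r/m (3 bits).
    int mod   = (modrm_byte >> 6) & 0x3;
    int regop = (modrm_byte >> 3) & 0x7;
    int rm    = modrm_byte & 0x7;
    // mod == 3 selects the register form, and mod == 3 holds exactly
    // when modrm_byte >= 0xC0, which is the dispatch test above.
    bool register_form = (modrm_byte >= 0xC0);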
// Handle all two-byte opcodes, which start with 0x0F.
// These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix.
// We do not use any three-byte opcodes, which start with 0x0F38 or 0x0F3A.
@@ -1045,13 +1049,13 @@
int mod, regop, rm;
 get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
+ current += PrintRightOperand(current);
} else if ((opcode & 0xF8) == 0x58) {
// XMM arithmetic. Mnemonic was retrieved at the start of this function.
int mod, regop, rm;
 get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,%s", mnemonic, NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
+ AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
} else {
UnimplementedInstruction();
}
@@ -1060,12 +1064,12 @@
// CVTTSS2SI: Convert scalar single-precision FP to dword integer.
// Assert that mod is not 3, so source is memory, not an XMM register.
- ASSERT((*current & 0xC0) != 0xC0);
+ ASSERT_NE(0xC0, *current & 0xC0);
current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
} else {
UnimplementedInstruction();
}
- return current - data;
+ return static_cast<int>(current - data);
}
@@ -1236,18 +1240,6 @@
break;
}
- case 0xF6: {
- int mod, regop, rm;
- get_modrm(*(data + 1), &mod, ®op, &rm);
- if (mod == 3 && regop == 0) {
- AppendToBuffer("testb %s,%d", NameOfCPURegister(rm), *(data + 2));
- } else {
- UnimplementedInstruction();
- }
- data += 3;
- break;
- }
-
case 0x81: // fall through
case 0x83: // 0x81 with sign extension bit set
data += PrintImmediateOp(data);
@@ -1344,7 +1336,7 @@
case 0x95:
case 0x96:
case 0x97: {
- int reg = (current & 0x7) | (rex_b() ? 8 : 0);
+ int reg = (*data & 0x7) | (rex_b() ? 8 : 0);
if (reg == 0) {
AppendToBuffer("nop"); // Common name for xchg rax,rax.
} else {
@@ -1352,8 +1344,9 @@
operand_size_code(),
NameOfCPURegister(reg));
}
+ data++;
}
-
+ break;
case 0xFE: {
data++;
@@ -1465,8 +1458,10 @@
data += JumpShort(data);
break;
+ case 0xF6:
+ byte_size_operand_ = true; // fall through
case 0xF7:
- data += F7Instruction(data);
+ data += F6F7Instruction(data);
break;
default:
@@ -1479,7 +1474,7 @@
tmp_buffer_[tmp_buffer_pos_] = '\0';
}
- int instr_len = data - instr;
+ int instr_len = static_cast<int>(data - instr);
ASSERT(instr_len > 0); // Ensure progress.
int outp = 0;
@@ -1591,7 +1586,7 @@
for (byte* bp = prev_pc; bp < pc; bp++) {
fprintf(f, "%02x", *bp);
}
- for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
+ for (int i = 6 - static_cast<int>(pc - prev_pc); i >= 0; i--) {
fprintf(f, " ");
}
fprintf(f, " %s\n", buffer.start());
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/fast-codegen-x64.cc
index 46d8dc4..333a47d 100644
--- a/src/x64/fast-codegen-x64.cc
+++ b/src/x64/fast-codegen-x64.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "codegen-inl.h"
+#include "compiler.h"
#include "debug.h"
#include "fast-codegen.h"
#include "parser.h"
@@ -61,11 +62,76 @@
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = fun->scope()->num_stack_slots();
- for (int i = 0; i < locals_count; i++) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ if (locals_count <= 1) {
+ if (locals_count > 0) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ }
+ } else {
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ for (int i = 0; i < locals_count; i++) {
+ __ push(rdx);
+ }
}
}
+ bool function_in_register = true;
+
+ // Possibly allocate a local context.
+ if (fun->scope()->num_heap_slots() > 0) {
+ Comment cmnt(masm_, "[ Allocate local context");
+ // Argument to NewContext is the function, which is still in rdi.
+ __ push(rdi);
+ __ CallRuntime(Runtime::kNewContext, 1);
+ function_in_register = false;
+ // Context is returned in both rax and rsi. It replaces the context
+ // passed to us. It's saved in the stack and kept live in rsi.
+ __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+
+ // Copy any necessary parameters into the context.
+ int num_parameters = fun->scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = fun->scope()->parameter(i)->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ movq(rax, Operand(rbp, parameter_offset));
+ // Store it in the context.
+ __ movq(Operand(rsi, Context::SlotOffset(slot->index())), rax);
+ }
+ }
+ }
+
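A worked example of the parameter offset computation above, assuming kPointerSize == 8 and StandardFrameConstants::kCallerSPOffset == 16 (return address plus saved rbp):

    // parameter_offset(i) =
    //     kCallerSPOffset + (num_parameters - 1 - i) * kPointerSize
    // With num_parameters == 2:
    //   i == 0 -> 16 + 1 * 8 = rbp + 24  (first parameter, deepest)
    //   i == 1 -> 16 + 0 * 8 = rbp + 16  (last parameter, nearest caller SP)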
+ // Possibly allocate an arguments object.
+ Variable* arguments = fun->scope()->arguments()->AsVariable();
+ if (arguments != NULL) {
+ // Arguments object must be allocated after the context object, in
+ // case the "arguments" or ".arguments" variables are in the context.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (function_in_register) {
+ __ push(rdi);
+ } else {
+ __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ // The receiver is just before the parameters on the caller's stack.
+ __ lea(rdx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
+ fun->num_parameters() * kPointerSize));
+ __ push(rdx);
+ __ Push(Smi::FromInt(fun->num_parameters()));
+ // Arguments to ArgumentsAccessStub:
+ // function, receiver address, parameter count.
+ // The stub will rewrite receiver and parameter count if the previous
+ // stack frame was an arguments adapter frame.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ CallStub(&stub);
+ // Store new arguments object in both "arguments" and ".arguments" slots.
+ __ movq(rcx, rax);
+ Move(arguments->slot(), rax, rbx, rdx);
+ Slot* dot_arguments_slot =
+ fun->scope()->arguments_shadow()->AsVariable()->slot();
+ Move(dot_arguments_slot, rcx, rbx, rdx);
+ }
+
{ Comment cmnt(masm_, "[ Stack check");
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
@@ -84,38 +150,371 @@
}
{ Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
VisitStatements(fun->body());
+ ASSERT(loop_depth() == 0);
}
{ Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the
- // body.
+ // Emit a 'return undefined' in case control fell off the end of the body.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- SetReturnPosition(fun);
+ EmitReturnSequence(function_->end_position());
+ }
+}
+
+
+void FastCodeGenerator::EmitReturnSequence(int position) {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ } else {
+ __ bind(&return_label_);
if (FLAG_trace) {
__ push(rax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
+#ifdef DEBUG
+ // Add a label for checking the size of the code used for returning.
+ Label check_exit_codesize;
+ masm_->bind(&check_exit_codesize);
+#endif
+ CodeGenerator::RecordPositions(masm_, position);
__ RecordJSReturn();
-
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
__ movq(rsp, rbp);
__ pop(rbp);
- __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+ __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Add padding that will be overwritten by a debugger breakpoint. We
// have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
// (3 + 1 + 3).
- const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
+ const int kPadding = Assembler::kJSReturnSequenceLength - 7;
for (int i = 0; i < kPadding; ++i) {
masm_->int3();
}
+ // Check that the size of the code used for returning matches what is
+ // expected by the debugger.
+ ASSERT_EQ(Assembler::kJSReturnSequenceLength,
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
}
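The padding arithmetic, spelled out: the emitted epilogue is `movq rsp, rbp` (3 bytes) + `pop rbp` (1 byte) + `ret k` (3 bytes) = 7 bytes, and assuming the x64 value of Assembler::kJSReturnSequenceLength is 13 (room for the patched movq/call pair):

    // kPadding = kJSReturnSequenceLength - 7 = 13 - 7 = 6 int3 bytes
    // The ASSERT_EQ above then verifies the generated size matches.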
+void FastCodeGenerator::Move(Expression::Context context, Register source) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ push(source);
+ break;
+ case Expression::kTest:
+ TestAndBranch(source, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ push(source);
+ TestAndBranch(source, true_label_, &discard);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ push(source);
+ TestAndBranch(source, &discard, false_label_);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
+ }
+}
+
+
+template <>
+Operand FastCodeGenerator::CreateSlotOperand<Operand>(Slot* source,
+ Register scratch) {
+ switch (source->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return Operand(rbp, SlotOffset(source));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(source->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, source->index());
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ // Fall-through.
+ default:
+ UNREACHABLE();
+ return Operand(rax, 0); // Dead code to make the compiler happy.
+ }
+}
+
+
+void FastCodeGenerator::Move(Register dst, Slot* source) {
+ Operand location = CreateSlotOperand<Operand>(source, dst);
+ __ movq(dst, location);
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context,
+ Slot* source,
+ Register scratch) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue: {
+ Operand location = CreateSlotOperand<Operand>(source, scratch);
+ __ push(location);
+ break;
+ }
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest: // Fall through.
+ case Expression::kTestValue:
+ Move(scratch, source);
+ Move(context, scratch);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Expression::Context context, Literal* expr) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ Push(expr->handle());
+ break;
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest: // Fall through.
+ case Expression::kTestValue:
+ __ Move(rax, expr->handle());
+ Move(context, rax);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Move(Slot* dst,
+ Register src,
+ Register scratch1,
+ Register scratch2) {
+ switch (dst->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ __ movq(Operand(rbp, SlotOffset(dst)), src);
+ break;
+ case Slot::CONTEXT: {
+ ASSERT(!src.is(scratch1));
+ ASSERT(!src.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
+ int context_chain_length =
+ function_->scope()->ContextChainLength(dst->var()->scope());
+ __ LoadContext(scratch1, context_chain_length);
+ __ movq(Operand(scratch1, Context::SlotOffset(dst->index())), src);
+ int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
+ __ RecordWrite(scratch1, offset, src, scratch2);
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FastCodeGenerator::DropAndMove(Expression::Context context,
+ Register source,
+ int drop_count) {
+ ASSERT(drop_count > 0);
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ __ addq(rsp, Immediate(drop_count * kPointerSize));
+ break;
+ case Expression::kValue:
+ if (drop_count > 1) {
+ __ addq(rsp, Immediate((drop_count - 1) * kPointerSize));
+ }
+ __ movq(Operand(rsp, 0), source);
+ break;
+ case Expression::kTest:
+ ASSERT(!source.is(rsp));
+ __ addq(rsp, Immediate(drop_count * kPointerSize));
+ TestAndBranch(source, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (drop_count > 1) {
+ __ addq(rsp, Immediate((drop_count - 1) * kPointerSize));
+ }
+ __ movq(Operand(rsp, 0), source);
+ TestAndBranch(source, true_label_, &discard);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ movq(Operand(rsp, 0), source);
+ TestAndBranch(source, &discard, false_label_);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
+ }
+}
+
+
+void FastCodeGenerator::TestAndBranch(Register source,
+ Label* true_label,
+ Label* false_label) {
+ ASSERT_NE(NULL, true_label);
+ ASSERT_NE(NULL, false_label);
+ // Use the shared ToBoolean stub to compile the value in the register into
+ // control flow to the code generator's true and false labels. Perform
+ // the fast checks assumed by the stub.
+
+ // The undefined value is false.
+ __ CompareRoot(source, Heap::kUndefinedValueRootIndex);
+ __ j(equal, false_label);
+ __ CompareRoot(source, Heap::kTrueValueRootIndex); // True is true.
+ __ j(equal, true_label);
+ __ CompareRoot(source, Heap::kFalseValueRootIndex); // False is false.
+ __ j(equal, false_label);
+ ASSERT_EQ(0, kSmiTag);
+ __ SmiCompare(source, Smi::FromInt(0)); // The smi zero is false.
+ __ j(equal, false_label);
+ Condition is_smi = masm_->CheckSmi(source); // All other smis are true.
+ __ j(is_smi, true_label);
+
+ // Call the stub for all other cases.
+ __ push(source);
+ ToBooleanStub stub;
+ __ CallStub(&stub);
+ __ testq(rax, rax); // The stub returns nonzero for true.
+ __ j(not_zero, true_label);
+ __ jmp(false_label);
+}
+
+
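A plain C++ sketch of the fast-path ladder TestAndBranch emits before falling back to the stub (the helper name and bool-out signature are illustrative):

    // Returns true when 'value' was classified without the stub;
    // *result then holds its boolean value.
    bool FastToBoolean(Object* value, bool* result) {
      if (value->IsUndefined())     { *result = false; return true; }
      if (value->IsTrue())          { *result = true;  return true; }
      if (value->IsFalse())         { *result = false; return true; }
      if (value == Smi::FromInt(0)) { *result = false; return true; }
      if (value->IsSmi())           { *result = true;  return true; }
      return false;  // everything else goes through ToBooleanStub
    }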
+void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+ Comment cmnt(masm_, "[ Declaration");
+ Variable* var = decl->proxy()->var();
+ ASSERT(var != NULL); // Must have been resolved.
+ Slot* slot = var->slot();
+ Property* prop = var->AsProperty();
+
+ if (slot != NULL) {
+ switch (slot->type()) {
+ case Slot::PARAMETER: // Fall through.
+ case Slot::LOCAL:
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+ __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(Operand(rbp, SlotOffset(var->slot())));
+ }
+ break;
+
+ case Slot::CONTEXT:
+ // The variable in the decl always resides in the current context.
+ ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+ if (FLAG_debug_code) {
+ // Check if we have the correct context pointer.
+ __ movq(rbx,
+ CodeGenerator::ContextOperand(rsi, Context::FCONTEXT_INDEX));
+ __ cmpq(rbx, rsi);
+ __ Check(equal, "Unexpected declaration in current context.");
+ }
+ if (decl->mode() == Variable::CONST) {
+ __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+ __ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
+ kScratchRegister);
+ // No write barrier since the hole value is in old space.
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ __ pop(rax);
+ __ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax);
+ int offset = Context::SlotOffset(slot->index());
+ __ RecordWrite(rsi, offset, rax, rcx);
+ }
+ break;
+
+ case Slot::LOOKUP: {
+ __ push(rsi);
+ __ Push(var->name());
+ // Declaration nodes are always introduced in one of two modes.
+ ASSERT(decl->mode() == Variable::VAR ||
+ decl->mode() == Variable::CONST);
+ PropertyAttributes attr =
+ (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+ __ Push(Smi::FromInt(attr));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (decl->mode() == Variable::CONST) {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ } else if (decl->fun() != NULL) {
+ Visit(decl->fun());
+ } else {
+ __ Push(Smi::FromInt(0)); // no initial value!
+ }
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ break;
+ }
+ }
+
+ } else if (prop != NULL) {
+ if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+ // We are declaring a function or constant that rewrites to a
+ // property. Use (keyed) IC to set the initial value.
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->key()->context());
+ Visit(prop->key());
+
+ if (decl->fun() != NULL) {
+ ASSERT_EQ(Expression::kValue, decl->fun()->context());
+ Visit(decl->fun());
+ __ pop(rax);
+ } else {
+ __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
+ }
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+
+ // Absence of a test rax instruction following the call
+ // indicates that none of the store was inlined.
+
+ // Value in rax is ignored (declarations are statements). Receiver
+ // and key on stack are discarded.
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ }
+ }
+}
+
+
void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(rsi); // The context is the first argument.
@@ -126,56 +525,17 @@
}
-void FastCodeGenerator::VisitBlock(Block* stmt) {
- Comment cmnt(masm_, "[ Block");
- SetStatementPosition(stmt);
- VisitStatements(stmt->statements());
-}
-
-
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- Comment cmnt(masm_, "[ ExpressionStatement");
- SetStatementPosition(stmt);
- Visit(stmt->expression());
-}
-
-
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
- SetStatementPosition(stmt);
Expression* expr = stmt->expression();
- Visit(expr);
-
- // Complete the statement based on the location of the subexpression.
- Location source = expr->location();
- ASSERT(!source.is_nowhere());
- if (source.is_temporary()) {
- __ pop(rax);
- } else {
- ASSERT(source.is_constant());
- ASSERT(expr->AsLiteral() != NULL);
+ if (expr->AsLiteral() != NULL) {
__ Move(rax, expr->AsLiteral()->handle());
+ } else {
+ Visit(expr);
+ ASSERT_EQ(Expression::kValue, expr->context());
+ __ pop(rax);
}
- if (FLAG_trace) {
- __ push(rax);
- __ CallRuntime(Runtime::kTraceExit, 1);
- }
-
- __ RecordJSReturn();
- // Do not use the leave instruction here because it is too short to
- // patch with the code required by the debugger.
- __ movq(rsp, rbp);
- __ pop(rbp);
- __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
-#ifdef ENABLE_DEBUGGER_SUPPORT
- // Add padding that will be overwritten by a debugger breakpoint. We
- // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
- // (3 + 1 + 3).
- const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
- for (int i = 0; i < kPadding; ++i) {
- masm_->int3();
- }
-#endif
+ EmitReturnSequence(stmt->statement_pos());
}
@@ -183,7 +543,8 @@
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
- Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+ Handle<JSFunction> boilerplate =
+ Compiler::BuildBoilerplate(expr, script_, this);
if (HasStackOverflow()) return;
ASSERT(boilerplate->IsBoilerplate());
@@ -192,12 +553,7 @@
__ push(rsi);
__ Push(boilerplate);
__ CallRuntime(Runtime::kNewClosure, 2);
-
- if (expr->location().is_temporary()) {
- __ push(rax);
- } else {
- ASSERT(expr->location().is_nowhere());
- }
+ Move(expr->context(), rax);
}
@@ -205,6 +561,7 @@
Comment cmnt(masm_, "[ VariableProxy");
Expression* rewrite = expr->var()->rewrite();
if (rewrite == NULL) {
+ ASSERT(expr->var()->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
@@ -212,33 +569,73 @@
__ Move(rcx, expr->name());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-
// A test rax instruction following the call is used by the IC to
// indicate that the inobject property case was inlined. Ensure there
// is no test rax instruction here.
- if (expr->location().is_temporary()) {
- // Replace the global object with the result.
- __ movq(Operand(rsp, 0), rax);
- } else {
- ASSERT(expr->location().is_nowhere());
- __ addq(rsp, Immediate(kPointerSize));
- }
+ __ nop();
- } else {
- Comment cmnt(masm_, "Stack slot");
+ DropAndMove(expr->context(), rax);
+ } else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
- ASSERT(slot != NULL);
- if (expr->location().is_temporary()) {
- __ push(Operand(rbp, SlotOffset(slot)));
- } else {
- ASSERT(expr->location().is_nowhere());
+ if (FLAG_debug_code) {
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER: {
+ Comment cmnt(masm_, "Stack slot");
+ break;
+ }
+ case Slot::CONTEXT: {
+ Comment cmnt(masm_, "Context slot");
+ break;
+ }
+ case Slot::LOOKUP:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+ Move(expr->context(), slot, rax);
+ } else {
+ // A variable has been rewritten into an explicit access to
+ // an object property.
+ Property* property = rewrite->AsProperty();
+ ASSERT_NOT_NULL(property);
+
+    // Currently the only parameter expressions that can occur
+    // have the form "slot[literal]".
+
+ // Check that the object is in a slot.
+ Variable* object = property->obj()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(object);
+ Slot* object_slot = object->slot();
+ ASSERT_NOT_NULL(object_slot);
+
+ // Load the object.
+ Move(Expression::kValue, object_slot, rax);
+
+ // Check that the key is a smi.
+ Literal* key_literal = property->key()->AsLiteral();
+ ASSERT_NOT_NULL(key_literal);
+ ASSERT(key_literal->handle()->IsSmi());
+
+ // Load the key.
+ Move(Expression::kValue, key_literal);
+
+ // Do a KEYED property load.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Notice: We must not have a "test rax, ..." instruction after
+ // the call. It is treated specially by the LoadIC code.
+
+ // Drop key and object left on the stack by IC, and push the result.
+ DropAndMove(expr->context(), rax, 2);
}
}
void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExp Literal");
+ Comment cmnt(masm_, "[ RegExpLiteral");
Label done;
// Registers will be used as follows:
// rdi = JS function.
@@ -260,10 +657,126 @@
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
// Label done:
__ bind(&done);
- if (expr->location().is_temporary()) {
- __ push(rax);
+ Move(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+ Label boilerplate_exists;
+
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ int literal_offset =
+ FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ __ movq(rax, FieldOperand(rbx, literal_offset));
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &boilerplate_exists);
+ // Create boilerplate if it does not exist.
+ // Literal array (0).
+ __ push(rbx);
+ // Literal index (1).
+ __ Push(Smi::FromInt(expr->literal_index()));
+ // Constant properties (2).
+ __ Push(expr->constant_properties());
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ bind(&boilerplate_exists);
+ // rax contains boilerplate.
+ // Clone boilerplate.
+ __ push(rax);
+ if (expr->depth() == 1) {
+ __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
} else {
- ASSERT(expr->location().is_nowhere());
+ __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ }
+
+  // If result_saved == true, the result is on top of the stack and
+  // in rax. If result_saved == false, the result is in rax only.
+ bool result_saved = false;
+
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ Expression* value = property->value();
+ if (!result_saved) {
+ __ push(rax); // Save result on the stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL: // fall through
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+ case ObjectLiteral::Property::COMPUTED:
+ if (key->handle()->IsSymbol()) {
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ pop(rax);
+ __ Move(rcx, key->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // StoreIC leaves the receiver on the stack.
+ __ movq(rax, Operand(rsp, 0)); // Restore result back into rax.
+ break;
+ }
+ // fall through
+ case ObjectLiteral::Property::PROTOTYPE:
+ __ push(rax);
+ Visit(key);
+ ASSERT_EQ(Expression::kValue, key->context());
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ CallRuntime(Runtime::kSetProperty, 3);
+ __ movq(rax, Operand(rsp, 0)); // Restore result into rax.
+ break;
+ case ObjectLiteral::Property::SETTER: // fall through
+ case ObjectLiteral::Property::GETTER:
+ __ push(rax);
+ Visit(key);
+ ASSERT_EQ(Expression::kValue, key->context());
+ __ Push(property->kind() == ObjectLiteral::Property::SETTER ?
+ Smi::FromInt(1) :
+ Smi::FromInt(0));
+ Visit(value);
+ ASSERT_EQ(Expression::kValue, value->context());
+ __ CallRuntime(Runtime::kDefineAccessor, 4);
+ __ movq(rax, Operand(rsp, 0)); // Restore result into rax.
+ break;
+ default: UNREACHABLE();
+ }
+ }
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ if (result_saved) __ addq(rsp, Immediate(kPointerSize));
+ break;
+ case Expression::kValue:
+ if (!result_saved) __ push(rax);
+ break;
+ case Expression::kTest:
+ if (result_saved) __ pop(rax);
+ TestAndBranch(rax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (!result_saved) __ push(rax);
+ TestAndBranch(rax, true_label_, &discard);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (!result_saved) __ push(rax);
+ TestAndBranch(rax, &discard, false_label_);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
}
}
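
The five-way switch on expr->context() above recurs for nearly every
expression in this file. A self-contained sketch of what each context
implies for the computed value (illustrative names, not V8 API):

  enum Context { kEffect, kValue, kTest, kValueTest, kTestValue };
  // Returns whether the value survives when the given branch is taken.
  static bool ValueSurvives(Context context, bool true_branch_taken) {
    switch (context) {
      case kEffect:    return false;  // value always discarded
      case kValue:     return true;   // value always left on the stack
      case kTest:      return false;  // only the branch matters
      case kValueTest: return true_branch_taken;   // kept on the true path
      case kTestValue: return !true_branch_taken;  // kept on the false path
    }
    return false;
  }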
@@ -316,7 +829,7 @@
result_saved = true;
}
Visit(subexpr);
- ASSERT(subexpr->location().is_temporary());
+ ASSERT_EQ(Expression::kValue, subexpr->context());
// Store the subexpression value in the array's elements.
__ pop(rax); // Subexpression value.
@@ -329,231 +842,849 @@
__ RecordWrite(rbx, offset, rax, rcx);
}
- Location destination = expr->location();
- if (destination.is_nowhere() && result_saved) {
- __ addq(rsp, Immediate(kPointerSize));
- } else if (destination.is_temporary() && !result_saved) {
- __ push(rax);
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ if (result_saved) __ addq(rsp, Immediate(kPointerSize));
+ break;
+ case Expression::kValue:
+ if (!result_saved) __ push(rax);
+ break;
+ case Expression::kTest:
+ if (result_saved) __ pop(rax);
+ TestAndBranch(rax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ if (!result_saved) __ push(rax);
+ TestAndBranch(rax, true_label_, &discard);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ if (!result_saved) __ push(rax);
+ TestAndBranch(rax, &discard, false_label_);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
}
}
-void FastCodeGenerator::VisitAssignment(Assignment* expr) {
- Comment cmnt(masm_, "[ Assignment");
- ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
- Expression* rhs = expr->value();
- Visit(rhs);
-
- // Left-hand side can only be a global or a (parameter or local) slot.
+void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
-
- // Complete the assignment based on the location of the right-hand-side
- // value and the desired location of the assignment value.
- Location destination = expr->location();
- Location source = rhs->location();
- ASSERT(!destination.is_constant());
- ASSERT(!source.is_nowhere());
-
if (var->is_global()) {
- // Assignment to a global variable, use inline caching. Right-hand-side
- // value is passed in rax, variable name in rcx, and the global object
- // on the stack.
- if (source.is_temporary()) {
- __ pop(rax);
- } else {
- ASSERT(source.is_constant());
- ASSERT(rhs->AsLiteral() != NULL);
- __ Move(rax, rhs->AsLiteral()->handle());
- }
+ // Assignment to a global variable. Use inline caching for the
+ // assignment. Right-hand-side value is passed in rax, variable name in
+ // rcx, and the global object on the stack.
+ __ pop(rax);
__ Move(rcx, var->name());
__ push(CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Overwrite the global object on the stack with the result if needed.
- if (destination.is_temporary()) {
- __ movq(Operand(rsp, 0), rax);
- } else {
- __ addq(rsp, Immediate(kPointerSize));
- }
- } else {
- if (source.is_temporary()) {
- if (destination.is_temporary()) {
- // Case 'temp1 <- (var = temp0)'. Preserve right-hand-side temporary
- // on the stack.
- __ movq(kScratchRegister, Operand(rsp, 0));
- __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
- } else {
- ASSERT(destination.is_nowhere());
- // Case 'var = temp'. Discard right-hand-side temporary.
- __ pop(Operand(rbp, SlotOffset(var->slot())));
+ DropAndMove(expr->context(), rax);
+
+ } else if (var->slot()) {
+ Slot* slot = var->slot();
+    ASSERT_NOT_NULL(slot);  // Rewrites to properties are not handled here.
+ switch (slot->type()) {
+ case Slot::LOCAL:
+ case Slot::PARAMETER: {
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Perform assignment and discard value.
+ __ pop(Operand(rbp, SlotOffset(var->slot())));
+ break;
+ case Expression::kValue:
+ // Perform assignment and preserve value.
+ __ movq(rax, Operand(rsp, 0));
+ __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+ break;
+ case Expression::kTest:
+ // Perform assignment and test (and discard) value.
+ __ pop(rax);
+ __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+ TestAndBranch(rax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ movq(rax, Operand(rsp, 0));
+ __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+ TestAndBranch(rax, true_label_, &discard);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ movq(rax, Operand(rsp, 0));
+ __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+ TestAndBranch(rax, &discard, false_label_);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
+ }
+ break;
}
- } else {
- ASSERT(source.is_constant());
- ASSERT(rhs->AsLiteral() != NULL);
- // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
- // discarded result. Always perform the assignment.
- __ Move(kScratchRegister, rhs->AsLiteral()->handle());
- __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
- if (destination.is_temporary()) {
- // Case 'temp <- (var = constant)'. Save result.
- __ push(kScratchRegister);
+
+ case Slot::CONTEXT: {
+ int chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ if (chain_length > 0) {
+ // Move up the context chain to the context containing the slot.
+ __ movq(rax,
+ Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ // Load the function context (which is the incoming, outer context).
+ __ movq(rax, FieldOperand(rax, JSFunction::kContextOffset));
+ for (int i = 1; i < chain_length; i++) {
+ __ movq(rax,
+ Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ movq(rax, FieldOperand(rax, JSFunction::kContextOffset));
+ }
+ } else { // Slot is in the current context. Generate optimized code.
+ __ movq(rax, rsi); // RecordWrite destroys the object register.
+ }
+ if (FLAG_debug_code) {
+ __ cmpq(rax,
+ Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ __ Check(equal, "Context Slot chain length wrong.");
+ }
+ __ pop(rcx);
+ __ movq(Operand(rax, Context::SlotOffset(slot->index())), rcx);
+
+ // RecordWrite may destroy all its register arguments.
+ if (expr->context() == Expression::kValue) {
+ __ push(rcx);
+ } else if (expr->context() != Expression::kEffect) {
+ __ movq(rdx, rcx);
+ }
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ RecordWrite(rax, offset, rcx, rbx);
+ if (expr->context() != Expression::kEffect &&
+ expr->context() != Expression::kValue) {
+ Move(expr->context(), rdx);
+ }
+ break;
}
+
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ break;
}
}
}
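
The Slot::CONTEXT case above walks the static context chain: each hop loads
the closure stored in the current context and then that closure's context.
A stand-alone model of the walk, with stand-in types:

  struct Context;
  struct Closure { Context* context; };
  struct Context { Closure* closure; };
  static Context* WalkUp(Context* current, int chain_length) {
    for (int i = 0; i < chain_length; i++) {
      current = current->closure->context;  // one hop up the chain
    }
    return current;
  }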
-void FastCodeGenerator::VisitCall(Call* expr) {
- Expression* fun = expr->expression();
- ZoneList<Expression*>* args = expr->arguments();
- Variable* var = fun->AsVariableProxy()->AsVariable();
- ASSERT(var != NULL && !var->is_this() && var->is_global());
- ASSERT(!var->is_possibly_eval());
+void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ ASSERT(prop != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
- __ Push(var->name());
- // Push global object (receiver).
- __ push(CodeGenerator::GlobalObject());
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+ __ push(Operand(rsp, kPointerSize)); // Receiver is under value.
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ __ pop(rax);
+ __ Move(rcx, prop->key()->AsLiteral()->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(rax); // Result of assignment, saved even if not needed.
+ __ push(Operand(rsp, kPointerSize)); // Receiver is under value.
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(rax);
+ }
+
+ DropAndMove(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+
+ // If the assignment starts a block of assignments to the same object,
+ // change to slow case to avoid the quadratic behavior of repeatedly
+ // adding fast properties.
+ if (expr->starts_initialization_block()) {
+    // Receiver is under the key and value.
+ __ push(Operand(rsp, 2 * kPointerSize));
+ __ CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ __ pop(rax);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+
+ // If the assignment ends an initialization block, revert to fast case.
+ if (expr->ends_initialization_block()) {
+ __ push(rax); // Result of assignment, saved even if not needed.
+    // Receiver is under the key and value.
+ __ push(Operand(rsp, 2 * kPointerSize));
+ __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ pop(rax);
+ }
+
+ // Receiver and key are still on stack.
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ Move(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ Expression* key = expr->key();
+ uint32_t dummy;
+
+ // Record the source position for the property load.
+ SetSourcePosition(expr->position());
+
+ // Evaluate receiver.
+ Visit(expr->obj());
+
+ if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
+ !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
+ // Do a NAMED property load.
+ // The IC expects the property name in rcx and the receiver on the stack.
+ __ Move(rcx, key->AsLiteral()->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+    // By emitting a nop we make sure that there is no "test rax,..."
+    // instruction after the call, since such an instruction is treated
+    // specially by the LoadIC code.
+ __ nop();
+ } else {
+ // Do a KEYED property load.
+ Visit(expr->key());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Notice: We must not have a "test rax, ..." instruction after
+ // the call. It is treated specially by the LoadIC code.
+
+ // Drop key left on the stack by IC.
+ __ addq(rsp, Immediate(kPointerSize));
+ }
+ DropAndMove(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
+ // Code common for calls using the IC.
+ ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Visit(args->at(i));
- ASSERT(!args->at(i)->location().is_nowhere());
- if (args->at(i)->location().is_constant()) {
- ASSERT(args->at(i)->AsLiteral() != NULL);
- __ Push(args->at(i)->AsLiteral()->handle());
- }
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
}
- // Record source position for debugger
+ // Record source position for debugger.
SetSourcePosition(expr->position());
// Call the IC initialization code.
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
- __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ __ call(ic, reloc_info);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- if (expr->location().is_temporary()) {
- __ movq(Operand(rsp, 0), rax);
- } else {
- ASSERT(expr->location().is_nowhere());
- __ addq(rsp, Immediate(kPointerSize));
+ DropAndMove(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+ // Code common for calls using the call stub.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
}
+ // Record source position for debugger.
+ SetSourcePosition(expr->position());
+ CallFunctionStub stub(arg_count, NOT_IN_LOOP);
+ __ CallStub(&stub);
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+ Comment cmnt(masm_, "[ Call");
+ Expression* fun = expr->expression();
+ Variable* var = fun->AsVariableProxy()->AsVariable();
+
+ if (var != NULL && var->is_possibly_eval()) {
+ // Call to the identifier 'eval'.
+ UNREACHABLE();
+ } else if (var != NULL && !var->is_this() && var->is_global()) {
+ // Call to a global variable.
+ __ Push(var->name());
+ // Push global object as receiver for the call IC lookup.
+ __ push(CodeGenerator::GlobalObject());
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // Call to a lookup slot.
+ UNREACHABLE();
+ } else if (fun->AsProperty() != NULL) {
+ // Call to an object property.
+ Property* prop = fun->AsProperty();
+ Literal* key = prop->key()->AsLiteral();
+ if (key != NULL && key->handle()->IsSymbol()) {
+ // Call to a named property, use call IC.
+ __ Push(key->handle());
+ Visit(prop->obj());
+ EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
+ } else {
+ // Call to a keyed property, use keyed load IC followed by function
+ // call.
+ Visit(prop->obj());
+ Visit(prop->key());
+ // Record source code position for IC call.
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+      // By emitting a nop we make sure that there is no "test rax,..."
+      // instruction after the call, since such an instruction is treated
+      // specially by the LoadIC code.
+ __ nop();
+ // Drop key left on the stack by IC.
+ __ addq(rsp, Immediate(kPointerSize));
+ // Pop receiver.
+ __ pop(rbx);
+ // Push result (function).
+ __ push(rax);
+ // Push receiver object on stack.
+ if (prop->is_synthetic()) {
+ __ push(CodeGenerator::GlobalObject());
+ } else {
+ __ push(rbx);
+ }
+ EmitCallWithStub(expr);
+ }
+ } else {
+ // Call to some other expression. If the expression is an anonymous
+ // function literal not called in a loop, mark it as one that should
+ // also use the fast code generator.
+ FunctionLiteral* lit = fun->AsFunctionLiteral();
+ if (lit != NULL &&
+ lit->name()->Equals(Heap::empty_string()) &&
+ loop_depth() == 0) {
+ lit->set_try_fast_codegen(true);
+ }
+ Visit(fun);
+ // Load global receiver object.
+ __ movq(rbx, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+ // Emit function call.
+ EmitCallWithStub(expr);
+ }
+}
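
In miniature, VisitCall picks a calling strategy from the shape of the
callee expression. A sketch with illustrative names (not V8's API):

  enum CallKind { GLOBAL_IC, NAMED_IC, KEYED_LOAD_THEN_STUB, STUB_CALL };
  static CallKind Classify(bool global_var, bool named_prop, bool keyed_prop) {
    if (global_var) return GLOBAL_IC;             // call IC, global receiver
    if (named_prop) return NAMED_IC;              // call IC, object receiver
    if (keyed_prop) return KEYED_LOAD_THEN_STUB;  // keyed load IC, then stub
    return STUB_CALL;                             // arbitrary callee, stub
  }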
+
+
+void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+ // Push function on the stack.
+ Visit(expr->expression());
+ ASSERT_EQ(Expression::kValue, expr->expression()->context());
+  // In value context the function is already on the stack; nothing to do.
+
+ // Push global object (receiver).
+ __ push(CodeGenerator::GlobalObject());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Visit(args->at(i));
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
+    // Arguments are evaluated in value context, so each result is
+    // already on the stack; nothing more to do here.
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetSourcePosition(expr->position());
+
+ // Load function, arg_count into rdi and rax.
+ __ Set(rax, arg_count);
+  // The function is at rsp[(arg_count + 1) * kPointerSize].
+ __ movq(rdi, Operand(rsp, rax, times_pointer_size, kPointerSize));
+
+ Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+ __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+ // Replace function on TOS with result in rax, or pop it.
+ DropAndMove(expr->context(), rax);
}
void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
- Runtime::Function* function = expr->function();
- ASSERT(function != NULL);
+ if (expr->is_jsruntime()) {
+ // Prepare for calling JS runtime function.
+ __ Push(expr->name());
+ __ movq(rax, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
+ }
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Visit(args->at(i));
- ASSERT(!args->at(i)->location().is_nowhere());
- if (args->at(i)->location().is_constant()) {
- ASSERT(args->at(i)->AsLiteral() != NULL);
- __ Push(args->at(i)->AsLiteral()->handle());
- } else {
- ASSERT(args->at(i)->location().is_temporary());
- // If location is temporary, it is already on the stack,
- // so nothing to do here.
- }
+ ASSERT_EQ(Expression::kValue, args->at(i)->context());
}
- __ CallRuntime(function, arg_count);
- if (expr->location().is_temporary()) {
- __ push(rax);
+ if (expr->is_jsruntime()) {
+ // Call the JS runtime function.
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+ NOT_IN_LOOP);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ DropAndMove(expr->context(), rax);
} else {
- ASSERT(expr->location().is_nowhere());
+ __ CallRuntime(expr->function(), arg_count);
+ Move(expr->context(), rax);
+ }
+}
+
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ Comment cmnt(masm_, "[ CountOperation");
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ ASSERT(proxy->AsVariable() != NULL);
+ ASSERT(proxy->AsVariable()->is_global());
+
+ Visit(proxy);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kValue: // Fall through
+ case Expression::kTest: // Fall through
+ case Expression::kTestValue: // Fall through
+ case Expression::kValueTest:
+ // Duplicate the result on the stack.
+ __ push(rax);
+ break;
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ }
+ // Call runtime for +1/-1.
+ __ push(rax);
+ __ Push(Smi::FromInt(1));
+ if (expr->op() == Token::INC) {
+ __ CallRuntime(Runtime::kNumberAdd, 2);
+ } else {
+ __ CallRuntime(Runtime::kNumberSub, 2);
+ }
+ // Call Store IC.
+ __ Move(rcx, proxy->AsVariable()->name());
+ __ push(CodeGenerator::GlobalObject());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+  // Drop the receiver that the store IC leaves on the stack.
+ __ addq(rsp, Immediate(kPointerSize));
+
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect: // Fall through
+ case Expression::kValue:
+      // Do nothing. The result is either on the stack (value context)
+      // or already discarded (effect context).
+ break;
+ case Expression::kTest:
+ __ pop(rax);
+ TestAndBranch(rax, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ movq(rax, Operand(rsp, 0));
+ TestAndBranch(rax, true_label_, &discard);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ movq(rax, Operand(rsp, 0));
+ TestAndBranch(rax, &discard, false_label_);
+ __ bind(&discard);
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(true_label_);
+ break;
+ }
+ }
+}
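
Duplicating the ToNumber result above is what gives the count operation its
value: the old number is what a value context observes, while the
incremented number is stored back. A plain C++ model, assuming the postfix
case, which matches the old value being the one kept above:

  static double PostfixIncrement(double* slot) {
    double old_value = *slot;  // the copy duplicated on the stack above
    *slot = old_value + 1;     // the Runtime::kNumberAdd path
    return old_value;          // what a value context sees
  }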
+
+
+void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ Visit(expr->expression());
+ ASSERT_EQ(Expression::kEffect, expr->expression()->context());
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ break;
+ case Expression::kTestValue:
+ // Value is false so it's needed.
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ // Fall through.
+ case Expression::kTest: // Fall through.
+ case Expression::kValueTest:
+ __ jmp(false_label_);
+ break;
+ }
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ ASSERT_EQ(Expression::kTest, expr->expression()->context());
+
+ Label push_true;
+ Label push_false;
+ Label done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ true_label_ = &push_false;
+ false_label_ = &push_true;
+ Visit(expr->expression());
+ __ bind(&push_true);
+ __ PushRoot(Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&push_false);
+ __ PushRoot(Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ break;
+
+ case Expression::kEffect:
+ true_label_ = &done;
+ false_label_ = &done;
+ Visit(expr->expression());
+ __ bind(&done);
+ break;
+
+ case Expression::kTest:
+ true_label_ = saved_false;
+ false_label_ = saved_true;
+ Visit(expr->expression());
+ break;
+
+ case Expression::kValueTest:
+ true_label_ = saved_false;
+ false_label_ = &push_true;
+ Visit(expr->expression());
+ __ bind(&push_true);
+ __ PushRoot(Heap::kTrueValueRootIndex);
+ __ jmp(saved_true);
+ break;
+
+ case Expression::kTestValue:
+ true_label_ = &push_false;
+ false_label_ = saved_true;
+ Visit(expr->expression());
+ __ bind(&push_false);
+ __ PushRoot(Heap::kFalseValueRootIndex);
+ __ jmp(saved_false);
+ break;
+ }
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ ASSERT_EQ(Expression::kValue, expr->expression()->context());
+
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy != NULL &&
+ !proxy->var()->is_this() &&
+ proxy->var()->is_global()) {
+ Comment cmnt(masm_, "Global variable");
+ __ push(CodeGenerator::GlobalObject());
+ __ Move(rcx, proxy->name());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Use a regular load, not a contextual load, to avoid a reference
+ // error.
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ movq(Operand(rsp, 0), rax);
+ } else if (proxy != NULL &&
+ proxy->var()->slot() != NULL &&
+ proxy->var()->slot()->type() == Slot::LOOKUP) {
+ __ push(rsi);
+ __ Push(proxy->name());
+ __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ push(rax);
+ } else {
+ // This expression cannot throw a reference error at the top level.
+ Visit(expr->expression());
+ }
+
+ __ CallRuntime(Runtime::kTypeof, 1);
+ Move(expr->context(), rax);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
}
}
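
The Token::NOT case above never materializes a boolean in a pure test
context; it simply compiles its operand with the branch targets swapped.
A miniature of the idea (illustrative types):

  struct TestLabels { void* if_true; void* if_false; };
  static TestLabels Negate(TestLabels labels) {
    // Compiling !e in a test context is compiling e with swapped targets.
    TestLabels swapped = { labels.if_false, labels.if_true };
    return swapped;
  }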
void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
- // Compile a short-circuited boolean or operation in a non-test
- // context.
- ASSERT(expr->op() == Token::OR);
- // Compile (e0 || e1) as if it were
- // (let (temp = e0) temp ? temp : e1).
+ Comment cmnt(masm_, "[ BinaryOperation");
+ switch (expr->op()) {
+ case Token::COMMA:
+ ASSERT_EQ(Expression::kEffect, expr->left()->context());
+ ASSERT_EQ(expr->context(), expr->right()->context());
+ Visit(expr->left());
+ Visit(expr->right());
+ break;
- Label eval_right, done;
- Location destination = expr->location();
- ASSERT(!destination.is_constant());
+ case Token::OR:
+ case Token::AND:
+ EmitLogicalOperation(expr);
+ break;
- Expression* left = expr->left();
- Location left_source = left->location();
- ASSERT(!left_source.is_nowhere());
+ case Token::ADD:
+ case Token::SUB:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ ASSERT_EQ(Expression::kValue, expr->left()->context());
+ ASSERT_EQ(Expression::kValue, expr->right()->context());
- Expression* right = expr->right();
- Location right_source = right->location();
- ASSERT(!right_source.is_nowhere());
+ Visit(expr->left());
+ Visit(expr->right());
+ GenericBinaryOpStub stub(expr->op(),
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS);
+ __ CallStub(&stub);
+ Move(expr->context(), rax);
- Visit(left);
- // Use the shared ToBoolean stub to find the boolean value of the
- // left-hand subexpression. Load the value into rax to perform some
- // inlined checks assumed by the stub.
- if (left_source.is_temporary()) {
- if (destination.is_temporary()) {
- // Copy the left-hand value into rax because we may need it as the
- // final result.
- __ movq(rax, Operand(rsp, 0));
- } else {
- // Pop the left-hand value into rax because we will not need it as the
- // final result.
- __ pop(rax);
+ break;
}
- } else {
- // Load the left-hand value into rax. Put it on the stack if we may
- // need it.
- ASSERT(left->AsLiteral() != NULL);
- __ Move(rax, left->AsLiteral()->handle());
- if (destination.is_temporary()) __ push(rax);
+ default:
+ UNREACHABLE();
}
- // The left-hand value is in rax. It is also on the stack iff the
- // destination location is temporary.
-
- // Perform fast checks assumed by the stub.
- // The undefined value is false.
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(equal, &eval_right);
- __ CompareRoot(rax, Heap::kTrueValueRootIndex); // True is true.
- __ j(equal, &done);
- __ CompareRoot(rax, Heap::kFalseValueRootIndex); // False is false.
- __ j(equal, &eval_right);
- ASSERT(kSmiTag == 0);
- __ SmiCompare(rax, Smi::FromInt(0)); // The smi zero is false.
- __ j(equal, &eval_right);
- Condition is_smi = masm_->CheckSmi(rax); // All other smis are true.
- __ j(is_smi, &done);
-
- // Call the stub for all other cases.
- __ push(rax);
- ToBooleanStub stub;
- __ CallStub(&stub);
- __ testq(rax, rax); // The stub returns nonzero for true.
- __ j(not_zero, &done);
-
- __ bind(&eval_right);
- // Discard the left-hand value if present on the stack.
- if (destination.is_temporary()) {
- __ addq(rsp, Immediate(kPointerSize));
- }
- Visit(right);
-
- // Save or discard the right-hand value as needed.
- if (destination.is_temporary() && right_source.is_constant()) {
- ASSERT(right->AsLiteral() != NULL);
- __ Push(right->AsLiteral()->handle());
- } else if (destination.is_nowhere() && right_source.is_temporary()) {
- __ addq(rsp, Immediate(kPointerSize));
- }
-
- __ bind(&done);
}
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ ASSERT_EQ(Expression::kValue, expr->left()->context());
+ ASSERT_EQ(Expression::kValue, expr->right()->context());
+ Visit(expr->left());
+ Visit(expr->right());
+
+ // Convert current context to test context: Pre-test code.
+ Label push_true;
+ Label push_false;
+ Label done;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ true_label_ = &push_true;
+ false_label_ = &push_false;
+ break;
+
+ case Expression::kEffect:
+ true_label_ = &done;
+ false_label_ = &done;
+ break;
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ true_label_ = &push_true;
+ break;
+
+ case Expression::kTestValue:
+ false_label_ = &push_false;
+ break;
+ }
+ // Convert current context to test context: End pre-test code.
+
+ switch (expr->op()) {
+ case Token::IN: {
+ __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+ __ j(equal, true_label_);
+ __ jmp(false_label_);
+ break;
+ }
+
+ case Token::INSTANCEOF: {
+ InstanceofStub stub;
+ __ CallStub(&stub);
+ __ testq(rax, rax);
+ __ j(zero, true_label_); // The stub returns 0 for true.
+ __ jmp(false_label_);
+ break;
+ }
+
+ default: {
+ Condition cc = no_condition;
+ bool strict = false;
+ switch (expr->op()) {
+ case Token::EQ_STRICT:
+ strict = true;
+ // Fall through
+ case Token::EQ:
+ cc = equal;
+ __ pop(rax);
+ __ pop(rdx);
+ break;
+ case Token::LT:
+ cc = less;
+ __ pop(rax);
+ __ pop(rdx);
+ break;
+ case Token::GT:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = less;
+ __ pop(rdx);
+ __ pop(rax);
+ break;
+ case Token::LTE:
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+ cc = greater_equal;
+ __ pop(rdx);
+ __ pop(rax);
+ break;
+ case Token::GTE:
+ cc = greater_equal;
+ __ pop(rax);
+ __ pop(rdx);
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
+
+ // The comparison stub expects the smi vs. smi case to be handled
+ // before it is called.
+ Label slow_case;
+ __ JumpIfNotBothSmi(rax, rdx, &slow_case);
+ __ SmiCompare(rdx, rax);
+ __ j(cc, true_label_);
+ __ jmp(false_label_);
+
+ __ bind(&slow_case);
+ CompareStub stub(cc, strict);
+ __ CallStub(&stub);
+ __ testq(rax, rax);
+ __ j(cc, true_label_);
+ __ jmp(false_label_);
+ }
+ }
+
+ // Convert current context to test context: Post-test code.
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ break;
+
+ case Expression::kValue:
+ __ bind(&push_true);
+ __ PushRoot(Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&push_false);
+ __ PushRoot(Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ break;
+
+ case Expression::kEffect:
+ __ bind(&done);
+ break;
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ __ bind(&push_true);
+ __ PushRoot(Heap::kTrueValueRootIndex);
+ __ jmp(saved_true);
+ break;
+
+ case Expression::kTestValue:
+ __ bind(&push_false);
+ __ PushRoot(Heap::kFalseValueRootIndex);
+ __ jmp(saved_false);
+ break;
+ }
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ // Convert current context to test context: End post-test code.
+}
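
The GT and LTE cases above swap which popped value lands in rax and rdx, so
operands are still evaluated left-to-right as ECMA-262 requires while the
code reuses the 'less' and 'greater_equal' conditions. In plain terms:

  static bool GreaterThan(double left, double right) {
    // Operands were already evaluated in source order; only the
    // comparison is reversed: a > b is tested as b < a.
    return right < left;
  }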
+
+
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+ __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ Move(expr->context(), rax);
+}
+
+
+#undef __
+
+
} } // namespace v8::internal
diff --git a/src/x64/frames-x64.cc b/src/x64/frames-x64.cc
index fe224ad..6a0527c 100644
--- a/src/x64/frames-x64.cc
+++ b/src/x64/frames-x64.cc
@@ -57,11 +57,7 @@
state->sp = sp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
// Determine frame type.
- if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
- return EXIT_DEBUG;
- } else {
- return EXIT;
- }
+ return EXIT;
}
int JavaScriptFrame::GetProvidedParametersCount() const {
@@ -69,10 +65,10 @@
}
-void ExitFrame::Iterate(ObjectVisitor* a) const {
- // Exit frames on X64 do not contain any pointers. The arguments
- // are traversed as part of the expression stack of the calling
- // frame.
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+ v->VisitPointer(&code_slot());
+ // The arguments are traversed as part of the expression stack of
+ // the calling frame.
}
byte* InternalFrame::GetCallerStackPointer() const {
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index eefaa0a..a92b248 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -63,7 +63,7 @@
class ExitFrameConstants : public AllStatic {
public:
- static const int kDebugMarkOffset = -2 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
static const int kCallerFPOffset = +0 * kPointerSize;
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 2812df1..ccbc615 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -31,6 +31,7 @@
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
+#include "utils.h"
namespace v8 {
namespace internal {
@@ -107,7 +108,7 @@
StringDictionary::kElementsStartIndex * kPointerSize;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ movl(r1, FieldOperand(name, String::kLengthOffset));
+ __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
__ shrl(r1, Immediate(String::kHashShift));
if (i > 0) {
__ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
@@ -239,18 +240,6 @@
}
-#ifdef DEBUG
-// For use in assert below.
-static int TenToThe(int exponent) {
- ASSERT(exponent <= 9);
- ASSERT(exponent >= 1);
- int answer = 10;
- for (int i = 1; i < exponent; i++) answer *= 10;
- return answer;
-}
-#endif
-
-
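
The DEBUG-only TenToThe helper removed here is evidently superseded by a
shared version made visible through the new "utils.h" include above. For
reference, a self-contained equivalent of the removed helper:

  #include <cassert>
  static int TenToThe(int exponent) {
    assert(exponent >= 1 && exponent <= 9);
    int answer = 10;
    for (int i = 1; i < exponent; i++) answer *= 10;
    return answer;
  }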
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
@@ -313,7 +302,7 @@
__ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
__ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
- __ movb(rax, Operand(rcx, rax, times_1, 0));
+ __ movzxbq(rax, Operand(rcx, rax, times_1, 0));
__ Integer32ToSmi(rax, rax);
__ ret(0);
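
The movb-to-movzxbq change matters because an 8-bit mov writes only the low
byte of rax and leaves bits 8..63 unchanged, so the following
Integer32ToSmi could tag stale high bits. A model of the corrected load:

  #include <stdint.h>
  static uint64_t LoadByteZeroExtended(const uint8_t* p) {
    return static_cast<uint64_t>(*p);  // movzxbq: upper 56 bits cleared
  }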
@@ -327,7 +316,7 @@
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
__ j(above_equal, &slow);
// Is the string an array index, with cached numeric value?
- __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ movl(rbx, FieldOperand(rax, String::kHashFieldOffset));
__ testl(rbx, Immediate(String::kIsArrayIndexMask));
// If the string is a symbol, do a quick inline probe of the receiver's
@@ -342,20 +331,16 @@
__ movq(rax, rcx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
- // Array index string: If short enough use cache in length/hash field (rbx).
- // We assert that there are enough bits in an int32_t after the hash shift
- // bits have been subtracted to allow space for the length and the cached
- // array index.
+  // If the hash field contains an array index, pick it out. The assert
+  // checks that the constant for the maximum number of digits of an
+  // array index cached in the hash field and the number of bits reserved
+  // for it do not conflict.
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << (String::kShortLengthShift - String::kHashShift)));
+ (1 << String::kArrayIndexValueBits));
__ bind(&index_string);
- const int kLengthFieldLimit =
- (String::kMaxCachedArrayIndexLength + 1) << String::kShortLengthShift;
- __ cmpl(rbx, Immediate(kLengthFieldLimit));
- __ j(above_equal, &slow);
__ movl(rax, rbx);
- __ and_(rax, Immediate((1 << String::kShortLengthShift) - 1));
- __ shrl(rax, Immediate(String::kLongLengthShift));
+ __ and_(rax, Immediate(String::kArrayIndexHashMask));
+ __ shrl(rax, Immediate(String::kHashShift));
__ jmp(&index_int);
}
@@ -393,7 +378,7 @@
// ExternalArray.
// rax: index (as a smi)
// rcx: JSObject
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::RootIndexForExternalArrayType(array_type));
__ j(not_equal, &slow);
@@ -413,7 +398,7 @@
__ movsxbq(rax, Operand(rcx, rax, times_1, 0));
break;
case kExternalUnsignedByteArray:
- __ movb(rax, Operand(rcx, rax, times_1, 0));
+ __ movzxbq(rax, Operand(rcx, rax, times_1, 0));
break;
case kExternalShortArray:
__ movsxwq(rax, Operand(rcx, rax, times_2, 0));
@@ -790,6 +775,8 @@
// top of FPU stack: value
if (array_type == kExternalFloatArray) {
__ fstp_s(Operand(rcx, rbx, times_4, 0));
+ __ movq(rax, rdx); // Return the original value.
+ __ ret(0);
} else {
// Need to perform float-to-int conversion.
// Test the top of the FP stack for NaN.
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index b2f69bb..7115791 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -67,6 +67,12 @@
}
+void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
+ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ j(below, on_stack_overflow);
+}
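
The new StackLimitCheck reads the stack-limit root and branches when rsp
has dropped below it. The equivalent predicate, as a minimal sketch:

  #include <stdint.h>
  // The stack grows downward, so overflow means rsp is below the limit.
  static inline bool StackOverflowed(uintptr_t rsp, uintptr_t stack_limit) {
    return rsp < stack_limit;  // the unsigned 'below' condition used above
  }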
+
+
static void RecordWriteHelper(MacroAssembler* masm,
Register object,
Register addr,
@@ -282,15 +288,19 @@
RecordComment(msg);
}
#endif
+ // Disable stub call restrictions to always allow calls to abort.
+ set_allow_stub_calls(true);
+
push(rax);
movq(kScratchRegister, p0, RelocInfo::NONE);
push(kScratchRegister);
movq(kScratchRegister,
- reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0)),
+ reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
RelocInfo::NONE);
push(kScratchRegister);
CallRuntime(Runtime::kAbort, 2);
// will not return here
+ int3();
}
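
The static_cast added in Abort narrows a 64-bit pointer difference to the
int that Smi::FromInt expects; a sketch of the assumption being made:

  #include <stdint.h>
  static int NarrowDelta(intptr_t p0, intptr_t p1) {
    intptr_t delta = p1 - p0;
    return static_cast<int>(delta);  // assumed to fit in 32 bits here
  }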
@@ -402,9 +412,9 @@
if (x == 0) {
xor_(dst, dst);
} else if (is_int32(x)) {
- movq(dst, Immediate(x));
+ movq(dst, Immediate(static_cast<int32_t>(x)));
} else if (is_uint32(x)) {
- movl(dst, Immediate(x));
+ movl(dst, Immediate(static_cast<uint32_t>(x)));
} else {
movq(dst, x, RelocInfo::NONE);
}
@@ -416,9 +426,9 @@
xor_(kScratchRegister, kScratchRegister);
movq(dst, kScratchRegister);
} else if (is_int32(x)) {
- movq(dst, Immediate(x));
+ movq(dst, Immediate(static_cast<int32_t>(x)));
} else if (is_uint32(x)) {
- movl(dst, Immediate(x));
+ movl(dst, Immediate(static_cast<uint32_t>(x)));
} else {
movq(kScratchRegister, x, RelocInfo::NONE);
movq(dst, kScratchRegister);
@@ -1078,7 +1088,7 @@
SmiToInteger32(rcx, src2);
// Shift amount specified by lower 5 bits, not six as the shl opcode.
and_(rcx, Immediate(0x1f));
- shl(dst);
+ shl_cl(dst);
}
@@ -1099,7 +1109,7 @@
}
SmiToInteger32(rcx, src2);
orl(rcx, Immediate(kSmiShift));
- shr(dst); // Shift is rcx modulo 0x1f + 32.
+ shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
shl(dst, Immediate(kSmiShift));
testq(dst, dst);
if (src1.is(rcx) || src2.is(rcx)) {
@@ -1135,7 +1145,7 @@
}
SmiToInteger32(rcx, src2);
orl(rcx, Immediate(kSmiShift));
- sar(dst); // Shift 32 + original rcx & 0x1f.
+ sar_cl(dst); // Shift 32 + original rcx & 0x1f.
shl(dst, Immediate(kSmiShift));
if (src1.is(rcx)) {
movq(src1, kScratchRegister);
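
The shr_cl and sar_cl uses above lean on x64 masking 64-bit shift counts to
six bits: or'ing kSmiShift into the count makes a single shift both untag
and shift. A model of the logical case, assuming kSmiShift == 32 (x64 smis
keep their payload in the upper 32 bits):

  #include <stdint.h>
  static uint64_t ShiftRightUntagged(uint64_t tagged_smi, int count) {
    // Hardware reduces the count to ((count & 0x1f) | 32) & 63, that is,
    // (count & 0x1f) + 32: an untagging >> 32 plus the requested shift.
    return tagged_smi >> (((count & 0x1f) | 32) & 63);
  }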
@@ -1787,9 +1797,7 @@
}
-void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) {
- ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
-
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) {
// Setup the frame structure on the stack.
// All constants are relative to the frame pointer of the exit frame.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
@@ -1801,7 +1809,12 @@
// Reserve room for entry stack pointer and push the debug marker.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // saved entry sp, patched before call
- push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+ if (mode == ExitFrame::MODE_DEBUG) {
+ push(Immediate(0));
+ } else {
+ movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+ push(kScratchRegister);
+ }
// Save the frame pointer and the context in top.
ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
@@ -1821,7 +1834,7 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
// location. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// TODO(1243899): This should be symmetric to
// CopyRegistersFromStackToMemory() but it isn't! esp is assumed
// correct here, but computed for the other call. Very error
@@ -1860,17 +1873,17 @@
}
-void MacroAssembler::LeaveExitFrame(StackFrame::Type type, int result_size) {
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
// Registers:
// r15 : argv
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
- if (type == StackFrame::EXIT_DEBUG) {
+ if (mode == ExitFrame::MODE_DEBUG) {
// It's okay to clobber register rbx below because we don't need
// the function pointer after this.
const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
- int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+ int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
lea(rbx, Operand(rbp, kOffset));
CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
}
@@ -2085,6 +2098,11 @@
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch) {
+ if (FLAG_debug_code) {
+ testq(result_end, Immediate(kObjectAlignmentMask));
+ Check(zero, "Unaligned allocation in new space");
+ }
+
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
@@ -2226,6 +2244,25 @@
}
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ // Load the function context (which is the incoming, outer context).
+    movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ for (int i = 1; i < context_chain_length; i++) {
+ movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ }
+ // The context may be an intermediate context, not a function context.
+ movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ } else { // context is the current function context.
+ // The context may be an intermediate context, not a function context.
+ movq(dst, Operand(rsi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ }
+}
+
+
CodePatcher::CodePatcher(byte* address, int size)
: address_(address), size_(size), masm_(address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 4c2f35b..9e7c25c 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -98,6 +98,12 @@
#endif
// ---------------------------------------------------------------------------
+ // Stack limit support
+
+  // Do a simple test for stack overflow. It does not handle an overflow.
+ void StackLimitCheck(Label* on_stack_limit_hit);
+
+ // ---------------------------------------------------------------------------
// Activation frames
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
@@ -106,16 +112,16 @@
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
- // Enter specific kind of exit frame; either EXIT or
- // EXIT_DEBUG. Expects the number of arguments in register rax and
+ // Enter specific kind of exit frame; either in normal or
+ // debug mode. Expects the number of arguments in register rax and
// sets up the number of arguments in register rdi and the pointer
// to the first argument in register rsi.
- void EnterExitFrame(StackFrame::Type type, int result_size = 1);
+ void EnterExitFrame(ExitFrame::Mode mode, int result_size = 1);
// Leave the current exit frame. Expects/provides the return value in
// register rax:rdx (untouched) and the pointer to the first
// argument in register rsi.
- void LeaveExitFrame(StackFrame::Type type, int result_size = 1);
+ void LeaveExitFrame(ExitFrame::Mode mode, int result_size = 1);
// ---------------------------------------------------------------------------
@@ -542,6 +548,9 @@
// occurred.
void IllegalOperation(int num_arguments);
+ // Find the function context up the context chain.
+ void LoadContext(Register dst, int context_chain_length);
+
// ---------------------------------------------------------------------------
// Runtime calls
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 5d17a2d..639f5e9 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -643,10 +643,10 @@
Label stack_limit_hit;
Label stack_ok;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
__ movq(rcx, rsp);
- __ movq(kScratchRegister, stack_guard_limit);
+ __ movq(kScratchRegister, stack_limit);
__ subq(rcx, Operand(kScratchRegister, 0));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit);
@@ -1079,7 +1079,7 @@
// If there is a difference, update the object pointer and start and end
// addresses in the RegExp stack frame to match the new value.
const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
- int byte_length = end_address - start_address;
+ int byte_length = static_cast<int>(end_address - start_address);
frame_entry<const String*>(re_frame, kInputString) = *subject;
frame_entry<const byte*>(re_frame, kInputStart) = new_address;
frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
@@ -1196,9 +1196,9 @@
void RegExpMacroAssemblerX64::CheckPreemption() {
// Check for preemption.
Label no_preempt;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ load_rax(stack_guard_limit);
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit();
+ __ load_rax(stack_limit);
__ cmpq(rsp, rax);
__ j(above, &no_preempt);
@@ -1209,18 +1209,16 @@
void RegExpMacroAssemblerX64::CheckStackLimit() {
- if (FLAG_check_stack) {
- Label no_stack_overflow;
- ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit();
- __ load_rax(stack_limit);
- __ cmpq(backtrack_stackpointer(), rax);
- __ j(above, &no_stack_overflow);
+ Label no_stack_overflow;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit();
+ __ load_rax(stack_limit);
+ __ cmpq(backtrack_stackpointer(), rax);
+ __ j(above, &no_stack_overflow);
- SafeCall(&stack_overflow_label_);
+ SafeCall(&stack_overflow_label_);
- __ bind(&no_stack_overflow);
- }
+ __ bind(&no_stack_overflow);
}
@@ -1287,11 +1285,6 @@
}
}
-
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- __ int3(); // Unused on x64.
-}
-
#undef __
#endif // V8_NATIVE_REGEXP
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
index 998c909..c4f3a85 100644
--- a/src/x64/simulator-x64.h
+++ b/src/x64/simulator-x64.h
@@ -44,6 +44,12 @@
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return c_limit;
}
+
+ static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ return try_catch_address;
+ }
+
+ static inline void UnregisterCTryCatch() { }
};
// Call the generated regexp code directly. The entry function pointer should
@@ -51,4 +57,7 @@
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
entry(p0, p1, p2, p3, p4, p5, p6)
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
+
#endif // V8_X64_SIMULATOR_X64_H_
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 58a3e0f..55b0b87 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -173,7 +173,7 @@
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
- __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+ __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
// Use only the low 32 bits of the map pointer.
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, Immediate(flags));
@@ -183,7 +183,7 @@
ProbeTable(masm, flags, kPrimary, name, scratch);
// Primary miss: Compute hash for secondary probe.
- __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+ __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, Immediate(flags));
__ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
@@ -323,11 +323,7 @@
// Load length directly from the string.
__ bind(&load_length);
- __ and_(scratch, Immediate(kStringSizeMask));
__ movl(rax, FieldOperand(receiver, String::kLengthOffset));
- // rcx is also the receiver.
- __ lea(rcx, Operand(scratch, String::kLongLengthShift));
- __ shr(rax); // rcx is implicit shift register.
__ Integer32ToSmi(rax, rax);
__ ret(0);
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index 781efd1..fe65d34 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -893,16 +893,15 @@
// on the stack.
int start = Min(begin, stack_pointer_ + 1);
- // Emit normal 'push' instructions for elements above stack pointer
- // and use mov instructions if we are below stack pointer.
+  // If positive, we have to adjust the stack pointer.
+ int delta = end - stack_pointer_;
+ if (delta > 0) {
+ stack_pointer_ = end;
+ __ subq(rsp, Immediate(delta * kPointerSize));
+ }
+
for (int i = start; i <= end; i++) {
- if (!elements_[i].is_synced()) {
- if (i <= stack_pointer_) {
- SyncElementBelowStackPointer(i);
- } else {
- SyncElementByPushing(i);
- }
- }
+ if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
}
}
diff --git a/src/zone.h b/src/zone.h
index 4e4f1d7..0d006dd 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -118,7 +118,7 @@
class ZoneObject {
public:
// Allocate a new ZoneObject of 'size' bytes in the Zone.
- void* operator new(size_t size) { return Zone::New(size); }
+ void* operator new(size_t size) { return Zone::New(static_cast<int>(size)); }
// Ideally, the delete operator should be private instead of
// public, but unfortunately the compiler sometimes synthesizes