Version 3.22.0
Changed LiveEdit to mark more closure functions for re-instantiation when the scope layout changes. (issue 2872)
Made bounds check elimination iterative instead of recursive. (Chromium issue 289706)
Turned on i18n support by default.
Set the proper instance-type on HAllocate in BuildFastLiteral. (Chromium issue 284577)
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@16891 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
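
The api.cc diff below moves a number of embedder entry points from static V8/Context functions onto v8::Isolate. As a minimal usage sketch of the new isolate-scoped context queries (illustration only; it assumes an initialized, entered isolate and the v8.h shipped at this revision):

  // Sketch: exercises the Isolate methods introduced in this patch.
  void InspectContexts(v8::Isolate* isolate) {
    if (!isolate->InContext()) return;  // Added below.
    v8::HandleScope scope(isolate);
    v8::Local<v8::Context> current = isolate->GetCurrentContext();
    v8::Local<v8::Context> entered = isolate->GetEnteredContext();  // Added below.
    v8::Local<v8::Context> calling = isolate->GetCallingContext();  // Added below.
    if (!calling.IsEmpty() && calling != entered) {
      // A cross-context call is in progress.
    }
  }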
diff --git a/src/api.cc b/src/api.cc
index 71a8f4a..9756874 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -624,7 +624,8 @@
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
isolate->stack_guard()->SetStackLimit(limit);
}
- if (constraints->is_memory_constrained().has_value) {
+ if (constraints->is_memory_constrained().has_value &&
+ !i::FLAG_force_memory_constrained.has_value) {
isolate->set_is_memory_constrained(
constraints->is_memory_constrained().value);
}
@@ -749,29 +750,22 @@
void Context::Enter() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Context::Enter()")) return;
ENTER_V8(isolate);
-
isolate->handle_scope_implementer()->EnterContext(env);
-
isolate->handle_scope_implementer()->SaveContext(isolate->context());
isolate->set_context(*env);
}
void Context::Exit() {
- // Exit is essentially a static function and doesn't use the
- // receiver, so we have to get the current isolate from the thread
- // local.
- i::Isolate* isolate = i::Isolate::Current();
- if (!isolate->IsInitialized()) return;
-
- if (!ApiCheck(isolate->handle_scope_implementer()->LeaveLastContext(),
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
+ ENTER_V8(isolate);
+ if (!ApiCheck(isolate->handle_scope_implementer()->LeaveContext(context),
"v8::Context::Exit()",
"Cannot exit non-entered context")) {
return;
}
-
// Content of 'last_context' could be NULL.
i::Context* last_context =
isolate->handle_scope_implementer()->RestoreContext();
@@ -2056,7 +2050,7 @@
i::HandleScope scope(isolate_);
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
i::Handle<i::String> name = isolate_->factory()->stack_string();
- if (!obj->HasProperty(*name)) return v8::Local<Value>();
+ if (!i::JSReceiver::HasProperty(obj, name)) return v8::Local<Value>();
i::Handle<i::Object> value = i::GetProperty(isolate_, obj, name);
if (value.is_null()) return v8::Local<Value>();
return v8::Utils::ToLocal(scope.CloseAndEscape(value));
@@ -3625,7 +3619,7 @@
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasProperty()", return false);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return self->HasElement(index);
+ return i::JSReceiver::HasElement(self, index);
}
@@ -3679,8 +3673,8 @@
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasOwnProperty()",
return false);
- return Utils::OpenHandle(this)->HasLocalProperty(
- *Utils::OpenHandle(*key));
+ return i::JSReceiver::HasLocalProperty(
+ Utils::OpenHandle(this), Utils::OpenHandle(*key));
}
@@ -3813,7 +3807,7 @@
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::JSObject> result = i::Copy(self);
+ i::Handle<i::JSObject> result = i::JSObject::Copy(self);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
return Utils::ToLocal(result);
@@ -5423,7 +5417,6 @@
v8::ExtensionConfiguration* extensions,
v8::Handle<ObjectTemplate> global_template,
v8::Handle<Value> global_object) {
- i::Isolate::EnsureDefaultIsolate();
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
EnsureInitializedForIsolate(isolate, "v8::Context::New()");
LOG_API(isolate, "Context::New");
@@ -5494,11 +5487,7 @@
if (!EnsureInitializedForIsolate(isolate, "v8::Context::GetEntered()")) {
return Local<Context>();
}
- i::Handle<i::Object> last =
- isolate->handle_scope_implementer()->LastEnteredContext();
- if (last.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(last);
- return Utils::ToLocal(context);
+ return reinterpret_cast<Isolate*>(isolate)->GetEnteredContext();
}
@@ -5516,45 +5505,30 @@
if (IsDeadCheck(isolate, "v8::Context::GetCalling()")) {
return Local<Context>();
}
- i::Handle<i::Object> calling =
- isolate->GetCallingNativeContext();
- if (calling.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
- return Utils::ToLocal(context);
+ return reinterpret_cast<Isolate*>(isolate)->GetCallingContext();
}
v8::Local<v8::Object> Context::Global() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::Global()")) {
- return Local<v8::Object>();
- }
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
i::Handle<i::Object> global(context->global_proxy(), isolate);
return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
}
void Context::DetachGlobal() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::DetachGlobal()")) return;
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
isolate->bootstrapper()->DetachGlobal(context);
}
void Context::ReattachGlobal(Handle<Object> global_object) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::ReattachGlobal()")) return;
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
i::Handle<i::JSGlobalProxy> global_proxy =
i::Handle<i::JSGlobalProxy>::cast(Utils::OpenHandle(*global_object));
isolate->bootstrapper()->ReattachGlobal(context, global_proxy);
@@ -5562,44 +5536,23 @@
void Context::AllowCodeGenerationFromStrings(bool allow) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::AllowCodeGenerationFromStrings()")) {
- return;
- }
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
context->set_allow_code_gen_from_strings(
allow ? isolate->heap()->true_value() : isolate->heap()->false_value());
}
bool Context::IsCodeGenerationFromStringsAllowed() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::Context::IsCodeGenerationFromStringsAllowed()")) {
- return false;
- }
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
return !context->allow_code_gen_from_strings()->IsFalse();
}
void Context::SetErrorMessageForCodeGenerationFromStrings(
Handle<String> error) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate,
- "v8::Context::SetErrorMessageForCodeGenerationFromStrings()")) {
- return;
- }
- ENTER_V8(isolate);
- i::Object** ctx = reinterpret_cast<i::Object**>(this);
- i::Handle<i::Context> context =
- i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Handle<i::String> error_handle = Utils::OpenHandle(*error);
context->set_error_message_for_code_gen_from_strings(*error_handle);
}
@@ -6212,7 +6165,7 @@
i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
EXCEPTION_PREAMBLE(isolate);
ENTER_V8(isolate);
- i::Handle<i::JSObject> result = i::Copy(paragon_handle);
+ i::Handle<i::JSObject> result = i::JSObject::Copy(paragon_handle);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
return Utils::ToLocal(result);
@@ -6647,9 +6600,15 @@
}
+bool Isolate::InContext() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return isolate->context() != NULL;
+}
+
+
v8::Local<v8::Context> Isolate::GetCurrentContext() {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
- i::Context* context = internal_isolate->context();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Context* context = isolate->context();
if (context == NULL) return Local<Context>();
i::Context* native_context = context->global_object()->native_context();
if (native_context == NULL) return Local<Context>();
@@ -6657,6 +6616,23 @@
}
+v8::Local<v8::Context> Isolate::GetCallingContext() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Handle<i::Object> calling = isolate->GetCallingNativeContext();
+ if (calling.is_null()) return Local<Context>();
+ return Utils::ToLocal(i::Handle<i::Context>::cast(calling));
+}
+
+
+v8::Local<v8::Context> Isolate::GetEnteredContext() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Handle<i::Object> last =
+ isolate->handle_scope_implementer()->LastEnteredContext();
+ if (last.is_null()) return Local<Context>();
+ return Utils::ToLocal(i::Handle<i::Context>::cast(last));
+}
+
+
void Isolate::SetObjectGroupId(const Persistent<Value>& object,
UniqueId id) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
@@ -6685,45 +6661,65 @@
}
-void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCPrologueCallback()")) return;
- isolate->heap()->SetGlobalGCPrologueCallback(callback);
+void Isolate::AddGCPrologueCallback(GCPrologueCallback callback,
+ GCType gc_type) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->AddGCPrologueCallback(callback, gc_type);
}
-void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCEpilogueCallback()")) return;
- isolate->heap()->SetGlobalGCEpilogueCallback(callback);
+void Isolate::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->RemoveGCPrologueCallback(callback);
+}
+
+
+void Isolate::AddGCEpilogueCallback(GCEpilogueCallback callback,
+ GCType gc_type) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
+}
+
+
+void Isolate::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->RemoveGCEpilogueCallback(callback);
}
void V8::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::V8::AddGCPrologueCallback()")) return;
- isolate->heap()->AddGCPrologueCallback(callback, gc_type);
+ isolate->heap()->AddGCPrologueCallback(
+ reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback),
+ gc_type,
+ false);
}
void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::V8::RemoveGCPrologueCallback()")) return;
- isolate->heap()->RemoveGCPrologueCallback(callback);
+ isolate->heap()->RemoveGCPrologueCallback(
+ reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback));
}
void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::V8::AddGCEpilogueCallback()")) return;
- isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
+ isolate->heap()->AddGCEpilogueCallback(
+ reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback),
+ gc_type,
+ false);
}
void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::V8::RemoveGCEpilogueCallback()")) return;
- isolate->heap()->RemoveGCEpilogueCallback(callback);
+ isolate->heap()->RemoveGCEpilogueCallback(
+ reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback));
}
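
The hunk above replaces the global GC hooks with isolate-scoped, GC-type-filtered callbacks; the static V8::Add/RemoveGC*Callback entry points now forward through a cast. A sketch of registering against the new API (the three-argument callback signature is assumed from the v8::Isolate::GCPrologueCallback typedef this patch casts to):

  // Invoked before each matching collection on this isolate.
  void OnGCStart(v8::Isolate* isolate, v8::GCType type,
                 v8::GCCallbackFlags flags) {
    // E.g. pause external allocators or record timing here.
  }

  // Register for full GCs only; remove with the same function pointer.
  isolate->AddGCPrologueCallback(OnGCStart, v8::kGCTypeMarkSweepCompact);
  // ... later ...
  isolate->RemoveGCPrologueCallback(OnGCStart);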
@@ -6747,7 +6743,6 @@
void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
if (callback == NULL) return;
- i::Isolate::EnsureDefaultIsolate();
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::V8::AddLeaveScriptCallback()")) return;
i::V8::AddCallCompletedCallback(callback);
@@ -6755,7 +6750,6 @@
void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
- i::Isolate::EnsureDefaultIsolate();
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::V8::RemoveLeaveScriptCallback()")) return;
i::V8::RemoveCallCompletedCallback(callback);
@@ -7062,6 +7056,16 @@
}
+void Debug::SendCommand(Isolate* isolate,
+ const uint16_t* command,
+ int length,
+ ClientData* client_data) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->debugger()->ProcessCommand(
+ i::Vector<const uint16_t>(command, length), client_data);
+}
+
+
void Debug::SendCommand(const uint16_t* command, int length,
ClientData* client_data,
Isolate* isolate) {
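
For comparison with the legacy overload above, a short sketch of the new isolate-first Debug::SendCommand (the payload is a placeholder, not a well-formed debugger-protocol message; `isolate` is assumed to be a live v8::Isolate*):

  static const uint16_t kCommand[] = { 'p', 'i', 'n', 'g' };
  v8::Debug::SendCommand(isolate, kCommand, 4, NULL /* client_data */);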
diff --git a/src/api.h b/src/api.h
index 51bc494..7dfa36d 100644
--- a/src/api.h
+++ b/src/api.h
@@ -543,7 +543,7 @@
inline bool CallDepthIsZero() { return call_depth_ == 0; }
inline void EnterContext(Handle<Object> context);
- inline bool LeaveLastContext();
+ inline bool LeaveContext(Handle<Object> context);
// Returns the last entered context or an empty handle if no
// contexts have been entered.
@@ -635,8 +635,10 @@
}
-bool HandleScopeImplementer::LeaveLastContext() {
+bool HandleScopeImplementer::LeaveContext(Handle<Object> context) {
if (entered_contexts_.is_empty()) return false;
+ // TODO(dcarney): figure out what's wrong here
+ // if (*entered_contexts_.last() != *context) return false;
entered_contexts_.RemoveLast();
return true;
}
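
Context::Exit() (see the api.cc hunk earlier in this patch) now derives its isolate from the receiver and asks HandleScopeImplementer to leave that specific context, though the identity check itself is still disabled behind the TODO above. A sketch of the balanced pattern the LeaveContext interface is meant to enforce:

  v8::Local<v8::Context> context = v8::Context::New(isolate);
  context->Enter();
  // ... run script in `context` ...
  context->Exit();  // Intended invariant: exit the context entered last.

Embedders normally get this pairing for free from the RAII helper v8::Context::Scope.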
diff --git a/src/apinatives.js b/src/apinatives.js
index 5fb36c0..6431901 100644
--- a/src/apinatives.js
+++ b/src/apinatives.js
@@ -71,7 +71,6 @@
(serialNumber in cache) && (cache[serialNumber] != kUninitialized);
if (!isFunctionCached) {
try {
- cache[serialNumber] = null;
var fun = %CreateApiFunction(data);
if (name) %FunctionSetName(fun, name);
var flags = %GetTemplateField(data, kApiFlagOffset);
diff --git a/src/arguments.h b/src/arguments.h
index c1db98b..f291816 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -237,6 +237,7 @@
typedef FunctionCallbackInfo<Value> T;
typedef CustomArguments<T> Super;
static const int kArgsLength = T::kArgsLength;
+ static const int kHolderIndex = T::kHolderIndex;
FunctionCallbackArguments(internal::Isolate* isolate,
internal::Object* data,
@@ -253,6 +254,7 @@
values[T::kDataIndex] = data;
values[T::kCalleeIndex] = callee;
values[T::kHolderIndex] = holder;
+ values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
// Here the hole is set as default value.
// It cannot escape into js as it's removed in Call below.
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 866b1c9..1399021 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -89,6 +89,7 @@
static unsigned cache_line_size_;
friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index f60e1f8..ec1b227 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -193,14 +193,12 @@
Register argument = r2;
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- r0, // Input.
- argument, // Result.
- r3, // Scratch.
- r4, // Scratch.
- r5, // Scratch.
- &not_cached);
+ __ LookupNumberStringCache(r0, // Input.
+ argument, // Result.
+ r3, // Scratch.
+ r4, // Scratch.
+ r5, // Scratch.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
__ bind(&argument_is_string);
@@ -807,12 +805,13 @@
// The following registers must be saved and restored when calling through to
// the runtime:
// r0 - contains return address (beginning of patch sequence)
- // r1 - function object
+ // r1 - isolate
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
- __ PrepareCallCFunction(1, 0, r1);
+ __ PrepareCallCFunction(1, 0, r2);
+ __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
__ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
__ mov(pc, r0);
}
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index cd1809f..0335607 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -77,7 +77,7 @@
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
@@ -972,99 +972,13 @@
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
- __ sub(mask, mask, Operand(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Isolate* isolate = masm->isolate();
- Label is_smi;
- Label load_result_from_cache;
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ add(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
- __ eor(scratch1, scratch1, Operand(scratch2));
- __ and_(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch1,
- number_string_cache,
- Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
- Register probe = mask;
- __ ldr(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ sub(scratch2, object, Operand(kHeapObjectTag));
- __ vldr(d0, scratch2, HeapNumber::kValueOffset);
- __ sub(probe, probe, Operand(kHeapObjectTag));
- __ vldr(d1, probe, HeapNumber::kValueOffset);
- __ VFPCompareAndSetFlags(d0, d1);
- __ b(ne, not_found); // The cache did not contain this value.
- __ b(&load_result_from_cache);
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ and_(scratch, mask, Operand(object, ASR, 1));
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch,
- number_string_cache,
- Operand(scratch, LSL, kPointerSizeLog2 + 1));
-
- // Check if the entry is the smi we are looking for.
- __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ cmp(object, probe);
- __ b(ne, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ ldr(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- __ IncrementCounter(isolate->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
-}
-
-
void NumberToStringStub::Generate(MacroAssembler* masm) {
Label runtime;
__ ldr(r1, MemOperand(sp, 0));
// Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, &runtime);
+ __ LookupNumberStringCache(r1, r0, r2, r3, r4, &runtime);
__ add(sp, sp, Operand(1 * kPointerSize));
__ Ret();
@@ -2765,9 +2679,10 @@
if (do_gc) {
// Passing r0.
- __ PrepareCallCFunction(1, 0, r1);
+ __ PrepareCallCFunction(2, 0, r1);
+ __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(ExternalReference::perform_gc_function(isolate),
- 1, 0);
+ 2, 0);
}
ExternalReference scope_depth =
@@ -2841,7 +2756,7 @@
// sp: stack pointer
// fp: frame pointer
// Callee-saved register r4 still holds argc.
- __ LeaveExitFrame(save_doubles_, r4);
+ __ LeaveExitFrame(save_doubles_, r4, true);
__ mov(pc, lr);
// check if we should retry or throw exception
@@ -3375,8 +3290,7 @@
receiver = r0;
}
- StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss,
- support_wrapper_);
+ StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
@@ -4071,7 +3985,7 @@
DirectCEntryStub stub;
stub.GenerateCall(masm, r7);
- __ LeaveExitFrame(false, no_reg);
+ __ LeaveExitFrame(false, no_reg, true);
// r0: result
// subject: subject string (callee saved)
@@ -4343,6 +4257,7 @@
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // r0 : number of arguments to the construct function
// r1 : the function to call
// r2 : cache cell for call target
Label initialize, done, miss, megamorphic, not_array_function;
@@ -4364,9 +4279,6 @@
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the cell either some other function or an
// AllocationSite. Do a map check on the object in ecx.
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
__ ldr(r5, FieldMemOperand(r3, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &miss);
@@ -4403,6 +4315,7 @@
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments register must be smi-tagged to call out.
__ SmiTag(r0);
__ push(r0);
__ push(r1);
@@ -5792,13 +5705,7 @@
// Check the number to string cache.
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- slow);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
__ mov(arg, scratch1);
__ str(arg, MemOperand(sp, stack_offset));
__ bind(&done);
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index d05e9a1..a404f01 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -261,19 +261,6 @@
public:
NumberToStringStub() { }
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found);
-
private:
Major MajorKey() { return NumberToString; }
int MinorKey() { return 0; }
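
The contract in the doc comment deleted above now belongs to MacroAssembler::LookupNumberStringCache, which the call sites in builtins-arm.cc and code-stubs-arm.cc use directly. A sketch of a call site under that contract (register assignment is illustrative):

  // Falls through with the cached string in r0 on a hit; jumps to
  // &runtime on a miss, leaving only the input register r1 unchanged.
  __ LookupNumberStringCache(r1,           // Input number.
                             r0,           // Result.
                             r2, r3, r4,   // Scratch registers.
                             &runtime);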
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 1bcf3e3..5b80d6f 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -55,7 +55,7 @@
#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = NULL;
double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFP(
+ return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
fast_exp_arm_machine_code, x, 0);
}
#endif
@@ -870,7 +870,8 @@
}
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
@@ -879,7 +880,7 @@
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
patcher.masm()->add(r0, pc, Operand(-8));
patcher.masm()->ldr(pc, MemOperand(pc, -4));
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 3c57b64..9f8da50 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -81,100 +81,6 @@
}
-static const int32_t kBranchBeforeInterrupt = 0x5a000004;
-
-// The back edge bookkeeping code matches the pattern:
-//
-// <decrement profiling counter>
-// 2a 00 00 01 bpl ok
-// e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
-// e1 2f ff 3c blx ip
-// ok-label
-//
-// We patch the code to the following form:
-//
-// <decrement profiling counter>
-// e1 a0 00 00 mov r0, r0 (NOP)
-// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
-// e1 2f ff 3c blx ip
-// ok-label
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code) {
- static const int kInstrSize = Assembler::kInstrSize;
- // Turn the jump into nops.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->nop();
- // Replace the call address.
- uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address interrupt_address_pointer = pc_after + interrupt_address_offset;
- Memory::uint32_at(interrupt_address_pointer) =
- reinterpret_cast<uint32_t>(replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code) {
- static const int kInstrSize = Assembler::kInstrSize;
- // Restore the original jump.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->b(4 * kInstrSize, pl); // ok-label is 4 instructions later.
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- // Restore the original call address.
- uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address interrupt_address_pointer = pc_after + interrupt_address_offset;
- Memory::uint32_at(interrupt_address_pointer) =
- reinterpret_cast<uint32_t>(interrupt_code->entry());
-
- interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after) {
- static const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
-
- uint32_t interrupt_address_offset =
- Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
- Address interrupt_address_pointer = pc_after + interrupt_address_offset;
-
- if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
- Code* osr_builtin =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
- Memory::uint32_at(interrupt_address_pointer));
- return PATCHED_FOR_OSR;
- } else {
- // Get the interrupt stub code object to match against from cache.
- Code* interrupt_builtin =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
- Memory::uint32_at(interrupt_address_pointer));
- return NOT_PATCHED;
- }
-}
-#endif // DEBUG
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index b6fb70b..195fc8c 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1651,13 +1651,11 @@
__ mov(r0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1) {
- __ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
@@ -4894,6 +4892,101 @@
#undef __
+
+static const int32_t kBranchBeforeInterrupt = 0x5a000004;
+
+// The back edge bookkeeping code matches the pattern:
+//
+// <decrement profiling counter>
+// 2a 00 00 01 bpl ok
+// e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
+// e1 2f ff 3c blx ip
+// ok-label
+//
+// We patch the code to the following form:
+//
+// <decrement profiling counter>
+// e1 a0 00 00 mov r0, r0 (NOP)
+// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
+// e1 2f ff 3c blx ip
+// ok-label
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* replacement_code) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ // Turn the jump into nops.
+ CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
+ patcher.masm()->nop();
+ // Replace the call address.
+ uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
+ 2 * kInstrSize) & 0xfff;
+ Address interrupt_address_pointer = pc_after + interrupt_address_offset;
+ Memory::uint32_at(interrupt_address_pointer) =
+ reinterpret_cast<uint32_t>(replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
+}
+
+
+void BackEdgeTable::RevertAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* interrupt_code) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ // Restore the original jump.
+ CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
+ patcher.masm()->b(4 * kInstrSize, pl); // ok-label is 4 instructions later.
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ // Restore the original call address.
+ uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
+ 2 * kInstrSize) & 0xfff;
+ Address interrupt_address_pointer = pc_after + interrupt_address_offset;
+ Memory::uint32_at(interrupt_address_pointer) =
+ reinterpret_cast<uint32_t>(interrupt_code->entry());
+
+ interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code);
+}
+
+
+#ifdef DEBUG
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
+
+ uint32_t interrupt_address_offset =
+ Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
+ Address interrupt_address_pointer = pc_after + interrupt_address_offset;
+
+ if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(
+ Assembler::instr_at(pc_after - 2 * kInstrSize)));
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
+ Memory::uint32_at(interrupt_address_pointer));
+ return ON_STACK_REPLACEMENT;
+ } else {
+ // Get the interrupt stub code object to match against from cache.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
+ ASSERT(Assembler::IsLdrPcImmediateOffset(
+ Assembler::instr_at(pc_after - 2 * kInstrSize)));
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
+ Memory::uint32_at(interrupt_address_pointer));
+ return INTERRUPT;
+ }
+}
+#endif // DEBUG
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 59a8818..f216a8e 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -710,51 +710,44 @@
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
- }
-
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
- }
- } else {
- right = UseRegisterAtStart(right_value);
- }
-
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
} else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ right = UseRegisterAtStart(right_value);
}
- }
- LInstruction* result =
- DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
+ }
+
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
@@ -763,21 +756,25 @@
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), d1);
+ LOperand* right = UseFixedDouble(instr->right(), d2);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ // We call a C function for double modulo. It can't trigger a GC. We need
+ // to use fixed result register for the call.
+ // TODO(fschneider): Allow any register as input registers.
+ return MarkAsCall(DefineFixedDouble(result, d1), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+ }
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
@@ -1347,41 +1344,34 @@
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), r1);
- LOperand* right = UseFixed(instr->right(), r0);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
+ LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
+ return AssignEnvironment(DefineAsRegister(div));
}
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineAsRegister(div));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
return DoArithmeticT(Token::DIV, instr);
}
@@ -1502,17 +1492,10 @@
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC. We need
- // to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
- UseFixedDouble(left, d1),
- UseFixedDouble(right, d2));
- return MarkAsCall(DefineFixedDouble(mod, d1), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
@@ -1679,7 +1662,6 @@
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1774,8 +1756,8 @@
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return new(zone()) LCmpHoleAndBranch(object);
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
}
@@ -1883,11 +1865,9 @@
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- LOperand* value = UseTempRegister(instr->value());
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineAsRegister(result);
+ LOperand* index = UseRegisterOrConstant(instr->index());
+ LOperand* value = UseRegister(instr->value());
+ return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
}
@@ -2040,12 +2020,6 @@
}
-LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
- return new(zone())
- LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckInstanceType(value);
@@ -2195,6 +2169,11 @@
}
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2249,8 +2228,6 @@
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
-
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2260,15 +2237,19 @@
if (instr->value()->representation().IsDouble()) {
object = UseRegisterAtStart(instr->elements());
- val = UseTempRegister(instr->value());
+ val = UseRegister(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
} else {
ASSERT(instr->value()->representation().IsSmiOrTagged());
- object = UseTempRegister(instr->elements());
- val = needs_write_barrier ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- key = needs_write_barrier ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
+ if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegisterAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
}
return new(zone()) LStoreKeyed(object, key, val);
@@ -2276,17 +2257,13 @@
ASSERT(
(instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
+ (instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
+ (instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->elements()->representation().IsExternal());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* external_pointer = UseRegister(instr->elements());
return new(zone()) LStoreKeyed(external_pointer, key, val);
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 98cacac..3902b4c 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -113,13 +113,13 @@
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
- V(IsNumberAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
+ V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -939,19 +939,6 @@
};
-class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
- public:
- explicit LIsNumberAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
-};
-
-
class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1318,7 +1305,7 @@
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- Handle<Map> map() const { return hydrogen()->map(); }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
@@ -1593,6 +1580,15 @@
};
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
@@ -2119,7 +2115,7 @@
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -2280,8 +2276,10 @@
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
@@ -2778,7 +2776,7 @@
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LPlatformChunk* chunk_;
CompilationInfo* info_;
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 7f65023..2680c34 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1383,7 +1383,8 @@
void LCodeGen::DoDivI(LDivI* instr) {
if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
+ const Register dividend = ToRegister(instr->left());
+ const Register result = ToRegister(instr->result());
int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
int32_t test_value = 0;
int32_t power = 0;
@@ -1394,7 +1395,7 @@
} else {
// Check for (0 / -x) that will produce negative zero.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ tst(dividend, Operand(dividend));
+ __ cmp(dividend, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
// Check for (kMinInt / -1).
@@ -1409,20 +1410,26 @@
if (test_value != 0) {
if (instr->hydrogen()->CheckFlag(
HInstruction::kAllUsesTruncatingToInt32)) {
- __ cmp(dividend, Operand(0));
- __ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
- __ mov(dividend, Operand(dividend, ASR, power));
- if (divisor > 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
- if (divisor < 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, gt);
+ __ sub(result, dividend, Operand::Zero(), SetCC);
+ __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
+ __ mov(result, Operand(result, ASR, power));
+ if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
+ if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
return; // Don't fall through to "__ rsb" below.
} else {
// Deoptimize if remainder is not 0.
__ tst(dividend, Operand(test_value));
DeoptimizeIf(ne, instr->environment());
- __ mov(dividend, Operand(dividend, ASR, power));
+ __ mov(result, Operand(dividend, ASR, power));
+ if (divisor < 0) __ rsb(result, result, Operand(0));
+ }
+ } else {
+ if (divisor < 0) {
+ __ rsb(result, dividend, Operand(0));
+ } else {
+ __ Move(result, dividend);
}
}
- if (divisor < 0) __ rsb(dividend, dividend, Operand(0));
return;
}
@@ -1439,12 +1446,15 @@
// Check for (0 / -x) that will produce negative zero.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
+ Label positive;
+ if (!instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ // Do the test only if it hadn't been done above.
+ __ cmp(right, Operand::Zero());
+ }
+ __ b(pl, &positive);
__ cmp(left, Operand::Zero());
- __ b(ne, &left_not_zero);
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(mi, instr->environment());
- __ bind(&left_not_zero);
+ DeoptimizeIf(eq, instr->environment());
+ __ bind(&positive);
}
// Check for (kMinInt / -1).
@@ -1975,32 +1985,42 @@
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
+ LOperand* index_op = instr->index();
Register value = ToRegister(instr->value());
+ Register scratch = scratch0();
String::Encoding encoding = instr->encoding();
if (FLAG_debug_code) {
- __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+ __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+ __ and_(scratch, scratch,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
+ __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
__ Check(eq, kUnexpectedStringType);
}
- __ add(ip,
- string,
- Operand(SeqString::kHeaderSize - kHeapObjectTag));
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ strb(value, MemOperand(ip, index));
+ if (index_op->IsConstantOperand()) {
+ int constant_index = ToInteger32(LConstantOperand::cast(index_op));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ strb(value,
+ FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
+ } else {
+ __ strh(value,
+ FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
+ }
} else {
- // MemOperand with ip as the base register is not allowed for strh, so
- // we do the address calculation explicitly.
- __ add(ip, ip, Operand(index, LSL, 1));
- __ strh(value, MemOperand(ip));
+ Register index = ToRegister(index_op);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ add(scratch, string, Operand(index));
+ __ strb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+ } else {
+ __ add(scratch, string, Operand(index, LSL, 1));
+ __ strh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+ }
}
}
@@ -2197,25 +2217,6 @@
}
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32() || r.IsDouble()) {
- EmitBranch(instr, al);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsTaggedNumber()) {
- EmitBranch(instr, al);
- }
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- __ ldr(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset));
- __ CompareRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
- EmitBranch(instr, eq);
- }
-}
-
-
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32() || r.IsSmi()) {
@@ -2953,7 +2954,7 @@
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
+ __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -2980,7 +2981,7 @@
Register cell = scratch0();
// Load the cell.
- __ mov(cell, Operand(instr->hydrogen()->cell()));
+ __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -3148,6 +3149,12 @@
}
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
@@ -4311,16 +4318,23 @@
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ Register address = scratch0();
DwVfpRegister value(ToDoubleRegister(instr->value()));
- Operand operand(key_is_constant
- ? Operand(constant_key << element_size_shift)
- : Operand(key, LSL, shift_size));
- __ add(scratch0(), external_pointer, operand);
+ if (key_is_constant) {
+ if (constant_key != 0) {
+ __ add(address, external_pointer,
+ Operand(constant_key << element_size_shift));
+ } else {
+ address = external_pointer;
+ }
+ } else {
+ __ add(address, external_pointer, Operand(key, LSL, shift_size));
+ }
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
- __ vstr(double_scratch0().low(), scratch0(), additional_offset);
+ __ vstr(double_scratch0().low(), address, additional_offset);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vstr(value, scratch0(), additional_offset);
+ __ vstr(value, address, additional_offset);
}
} else {
Register value(ToRegister(instr->value()));
@@ -4362,32 +4376,28 @@
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
DwVfpRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
- Register key = no_reg;
Register scratch = scratch0();
+ DwVfpRegister double_scratch = double_scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
// Calculate the effective address of the slot in the array to store the
// double value.
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
+ __ add(scratch, elements,
+ Operand((constant_key << element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- Operand operand = key_is_constant
- ? Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
- : Operand(key, LSL, shift_size);
- __ add(scratch, elements, operand);
- if (!key_is_constant) {
- __ add(scratch, scratch,
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ add(scratch, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ add(scratch, scratch,
+ Operand(ToRegister(instr->key()), LSL, shift_size));
}
if (instr->NeedsCanonicalization()) {
@@ -4397,9 +4407,12 @@
__ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
__ Assert(ne, kDefaultNaNModeNotSet);
}
- __ VFPCanonicalizeNaN(value);
+ __ VFPCanonicalizeNaN(double_scratch, value);
+ __ vstr(double_scratch, scratch,
+ instr->additional_index() << element_size_shift);
+ } else {
+ __ vstr(value, scratch, instr->additional_index() << element_size_shift);
}
- __ vstr(value, scratch, instr->additional_index() << element_size_shift);
}
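
Note on the hunk above: the store now canonicalizes a NaN into double_scratch and writes from there, so the register holding the original value is no longer clobbered. On ARM VFP with the default-NaN mode bit set (which the Assert above checks), subtracting zero from any NaN yields the canonical quiet NaN. A portable model of the two-operand form, as a sketch (plain C++, not the VFP semantics):

#include <cmath>
#include <cstdio>
#include <limits>

// Models VFPCanonicalizeNaN(dst, src): produce the canonical quiet NaN
// while leaving the source operand untouched. The real code gets this from
// "vsub dst, src, 0.0" under VFP default-NaN mode.
double CanonicalizeNaN(double src) {
  return std::isnan(src) ? std::numeric_limits<double>::quiet_NaN() : src;
}

int main() {
  std::printf("%f\n", CanonicalizeNaN(std::nan("")));  // canonical quiet NaN
  std::printf("%f\n", CanonicalizeNaN(1.5));           // 1.5, unchanged
}
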
@@ -4855,36 +4868,20 @@
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
ASSERT(!result_reg.is(double_scratch0()));
-
- Label load_smi, heap_number, done;
-
+ Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
// Heap number map check.
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(ne, env);
+ if (can_convert_undefined_to_nan) {
+ __ b(ne, &convert);
} else {
- Label heap_number, convert;
- __ b(eq, &heap_number);
-
- // Convert undefined (and hole) to NaN.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
DeoptimizeIf(ne, env);
-
- __ bind(&convert);
- __ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
- __ jmp(&done);
-
- __ bind(&heap_number);
}
- // Heap number to double register conversion.
+ // Load the heap number.
__ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
if (deoptimize_on_minus_zero) {
__ VmovLow(scratch, result_reg);
@@ -4895,11 +4892,20 @@
DeoptimizeIf(eq, env);
}
__ jmp(&done);
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+ // Convert undefined (and hole) to NaN.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(input_reg, Operand(ip));
+ DeoptimizeIf(ne, env);
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
+ __ jmp(&done);
+ }
} else {
__ SmiUntag(scratch, input_reg);
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
-
// Smi to double register conversion
__ bind(&load_smi);
// scratch: untagged value of input_reg
@@ -4987,15 +4993,19 @@
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- // Optimistically untag the input.
- // If the input is a HeapObject, SmiUntag will set the carry flag.
- __ SmiUntag(input_reg, SetCC);
- // Branch to deferred code if the input was tagged.
- // The deferred code will take care of restoring the tag.
- __ b(cs, deferred->entry());
- __ bind(deferred->exit());
+ // Optimistically untag the input.
+ // If the input is a HeapObject, SmiUntag will set the carry flag.
+ __ SmiUntag(input_reg, SetCC);
+ // Branch to deferred code if the input was tagged.
+ // The deferred code will take care of restoring the tag.
+ __ b(cs, deferred->entry());
+ __ bind(deferred->exit());
+ }
}
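
The rewrite above adds a fast path: when the hydrogen value is already known to be a smi, the untag needs no deferred code at all. Otherwise it keeps the optimistic scheme, where SmiUntag with SetCC shifts the tag bit into the carry flag and a heap pointer (low bit 1) branches to the deferred handler. A standalone model of that tagging scheme, assuming the 32-bit one-bit smi tag (a sketch):

#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;

// A smi stores value << 1 with a low tag bit of 0; heap object pointers
// carry a low bit of 1, so the shifted-out bit tells the two apart.
int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }

// Mirrors "__ SmiUntag(reg, SetCC)": returns true when the input really
// was a smi; the discarded low bit plays the role of the carry flag.
bool SmiUntag(int32_t tagged, int32_t* value) {
  bool was_heap_object = (tagged & 1) != 0;
  *value = tagged >> kSmiTagSize;  // arithmetic shift keeps the sign
  return !was_heap_object;
}

int main() {
  int32_t v;
  assert(SmiUntag(SmiTag(-42), &v) && v == -42);  // fast path
  assert(!SmiUntag(0x1001, &v));                  // deferred path
}
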
@@ -5133,7 +5143,7 @@
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object();
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
@@ -5185,7 +5195,6 @@
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- SmallMapList* map_set = instr->hydrogen()->map_set();
__ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
DeferredCheckMaps* deferred = NULL;
@@ -5194,14 +5203,15 @@
__ bind(deferred->check_maps());
}
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
__ CompareMap(map_reg, map, &success);
__ b(eq, &success);
}
- Handle<Map> map = map_set->last();
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
__ CompareMap(map_reg, map, &success);
if (instr->hydrogen()->has_migration_target()) {
__ b(ne, deferred->entry());
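
DoCheckMaps now carries the map set by value as a UniqueSet<Map> and unwraps handles explicitly, but the loop shape is unchanged: every map except the last branches to success on equality, and only the final comparison falls through to the deopt or migration-target path. The generic pattern, as a sketch:

#include <cstddef>
#include <vector>

// Shape of the DoCheckMaps loop: early-out on any match; only the last
// comparison is allowed to fail into the slow handler (deopt/migration).
template <typename T, typename OnMatch, typename OnFail>
void CheckAgainstSet(const T& actual, const std::vector<T>& set,
                     OnMatch on_match, OnFail on_fail) {
  for (std::size_t i = 0; i + 1 < set.size(); i++) {
    if (actual == set[i]) { on_match(); return; }   // __ b(eq, &success)
  }
  if (actual == set.back()) { on_match(); return; } // final CompareMap
  on_fail();                                        // deferred entry / deopt
}

int main() {
  std::vector<int> maps = {1, 2, 3};
  CheckAgainstSet(2, maps, []{ /* success */ }, []{ /* deopt */ });
}
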
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 7df7857..eeabc05 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -733,9 +733,11 @@
bind(&fpscr_done);
}
-void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister value,
+
+void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
+ const DwVfpRegister src,
const Condition cond) {
- vsub(value, value, kDoubleRegZero, cond);
+ vsub(dst, src, kDoubleRegZero, cond);
}
@@ -1020,7 +1022,8 @@
void MacroAssembler::LeaveExitFrame(bool save_doubles,
- Register argument_count) {
+ Register argument_count,
+ bool restore_context) {
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
@@ -1035,10 +1038,14 @@
mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
str(r3, MemOperand(ip));
+
// Restore current context from top and clear it in debug mode.
- mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- ldr(cp, MemOperand(ip));
+ if (restore_context) {
+ mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ ldr(cp, MemOperand(ip));
+ }
#ifdef DEBUG
+ mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
str(r3, MemOperand(ip));
#endif
@@ -2280,12 +2287,14 @@
}
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
- Address function_address,
- ExternalReference thunk_ref,
- Register thunk_last_arg,
- int stack_space,
- int return_value_offset) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ ExternalReference function,
+ Address function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg,
+ int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
@@ -2349,12 +2358,13 @@
}
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label return_value_loaded;
// Load the value from ReturnValue.
- ldr(r0, MemOperand(fp, return_value_offset*kPointerSize));
+ ldr(r0, return_value_operand);
bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
@@ -2377,17 +2387,25 @@
ldr(r5, MemOperand(ip));
cmp(r4, r5);
b(ne, &promote_scheduled_exception);
+ bind(&exception_handled);
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ ldr(cp, *context_restore_operand);
+ }
// LeaveExitFrame expects unwind space to be in a register.
mov(r4, Operand(stack_space));
- LeaveExitFrame(false, r4);
+ LeaveExitFrame(false, r4, !restore_context);
mov(pc, lr);
bind(&promote_scheduled_exception);
- TailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
@@ -3079,6 +3097,88 @@
}
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ // Register usage: 'result' is used as a temporary while probing the cache.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
+ sub(mask, mask, Operand(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ add(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
+ eor(scratch1, scratch1, Operand(scratch2));
+ and_(scratch1, scratch1, Operand(mask));
+
+ // Calculate the address of the entry in the string cache: each entry
+ // consists of two pointer-sized fields.
+ add(scratch1,
+ number_string_cache,
+ Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+ Register probe = mask;
+ ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ sub(scratch2, object, Operand(kHeapObjectTag));
+ vldr(d0, scratch2, HeapNumber::kValueOffset);
+ sub(probe, probe, Operand(kHeapObjectTag));
+ vldr(d1, probe, HeapNumber::kValueOffset);
+ VFPCompareAndSetFlags(d0, d1);
+ b(ne, not_found); // The cache did not contain this value.
+ b(&load_result_from_cache);
+
+ bind(&is_smi);
+ Register scratch = scratch1;
+ and_(scratch, mask, Operand(object, ASR, 1));
+ // Calculate the address of the entry in the string cache: each entry
+ // consists of two pointer-sized fields.
+ add(scratch,
+ number_string_cache,
+ Operand(scratch, LSL, kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ cmp(object, probe);
+ b(ne, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(),
+ 1,
+ scratch1,
+ scratch2);
+}
+
+
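
The cache probed above is a FixedArray of (number, string) pairs: the mask is half the array length minus one, smis hash to their own value, and heap numbers hash to the xor of the double's two 32-bit words (see Heap::GetNumberStringCache). The hash computation in isolation, as a sketch:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Models the number-string-cache hash. 'cache_entries' is the number of
// (number, string) pairs and is assumed to be a power of two.
uint32_t HashDouble(double value, uint32_t cache_entries) {
  uint32_t words[2];
  std::memcpy(words, &value, sizeof(words));  // like the ldm of both words
  return (words[0] ^ words[1]) & (cache_entries - 1);
}

uint32_t HashSmi(int32_t value, uint32_t cache_entries) {
  return static_cast<uint32_t>(value) & (cache_entries - 1);
}

int main() {
  std::printf("%u %u\n", HashDouble(2.5, 128), HashSmi(42, 128));
}
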
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
@@ -3191,20 +3291,19 @@
Register dst,
Register length,
Register scratch) {
- Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+ Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
// Align src before copying in word size chunks.
- bind(&align_loop);
- cmp(length, Operand::Zero());
- b(eq, &done);
+ cmp(length, Operand(kPointerSize));
+ b(le, &byte_loop);
+
bind(&align_loop_1);
tst(src, Operand(kPointerSize - 1));
b(eq, &word_loop);
ldrb(scratch, MemOperand(src, 1, PostIndex));
strb(scratch, MemOperand(dst, 1, PostIndex));
sub(length, length, Operand(1), SetCC);
- b(ne, &byte_loop_1);
-
+ b(&align_loop_1);
// Copy bytes in word size chunks.
bind(&word_loop);
if (emit_debug_code()) {
@@ -3792,7 +3891,7 @@
b(gt, &no_memento_available);
ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
cmp(scratch_reg,
- Operand(Handle<Map>(isolate()->heap()->allocation_memento_map())));
+ Operand(isolate()->factory()->allocation_memento_map()));
bind(&no_memento_available);
}
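
The CopyBytes rewrite above fixes the alignment loop (the old code bailed into the tail byte loop after a single aligning byte instead of iterating) and routes copies of at most kPointerSize bytes straight to the byte loop. Its structure, modeled in portable C++ (a sketch, not the emitted code):

#include <cassert>
#include <cstdint>
#include <cstring>

void CopyBytes(const uint8_t* src, uint8_t* dst, std::size_t length) {
  const std::size_t kPointerSize = sizeof(void*);
  if (length > kPointerSize) {
    // align_loop_1: copy bytes until src is word-aligned.
    while (reinterpret_cast<uintptr_t>(src) & (kPointerSize - 1)) {
      *dst++ = *src++;
      length--;
    }
    // word_loop: copy word-sized chunks.
    while (length >= kPointerSize) {
      std::memcpy(dst, src, kPointerSize);
      src += kPointerSize;
      dst += kPointerSize;
      length -= kPointerSize;
    }
  }
  // byte_loop: mop up the remainder (or the whole short copy).
  while (length-- > 0) *dst++ = *src++;
}

int main() {
  const char src[] = "0123456789abcdef";
  char dst[sizeof(src)] = {0};
  CopyBytes(reinterpret_cast<const uint8_t*>(src),
            reinterpret_cast<uint8_t*>(dst), sizeof(src));
  assert(std::memcmp(src, dst, sizeof(src)) == 0);
}
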
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 9abd5a0..6d08ab9 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -469,8 +469,13 @@
void VFPEnsureFPSCRState(Register scratch);
// If the value is a NaN, canonicalize the value; otherwise, do nothing.
- void VFPCanonicalizeNaN(const DwVfpRegister value,
+ void VFPCanonicalizeNaN(const DwVfpRegister dst,
+ const DwVfpRegister src,
const Condition cond = al);
+ void VFPCanonicalizeNaN(const DwVfpRegister value,
+ const Condition cond = al) {
+ VFPCanonicalizeNaN(value, value, cond);
+ }
// Compare double values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const DwVfpRegister src1,
@@ -541,7 +546,9 @@
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
- void LeaveExitFrame(bool save_doubles, Register argument_count);
+ void LeaveExitFrame(bool save_doubles,
+ Register argument_count,
+ bool restore_context);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -1111,7 +1118,8 @@
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
- int return_value_offset_from_fp);
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
@@ -1286,6 +1294,18 @@
// ---------------------------------------------------------------------------
// String utilities
+ // Generate code to look up a number in the number string cache. If the
+ // number in register 'object' is found in the cache, the generated code
+ // falls through with the result in the result register. The object and
+ // result registers may be the same. If the number is not found, the code
+ // jumps to the label not_found, leaving the contents of 'object' unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
// Checks if both objects are sequential ASCII strings and jumps to label
// if either is not. Assumes that neither object is a smi.
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index def1818..461d032 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -912,6 +912,12 @@
}
+void Simulator::set_register_pair_from_double(int reg, double* value) {
+ ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
+ memcpy(registers_ + reg, value, sizeof(*value));
+}
+
+
void Simulator::set_dw_register(int dreg, const int* dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
registers_[dreg] = dbl[0];
@@ -1026,27 +1032,22 @@
}
-// Runtime FP routines take up to two double arguments and zero
-// or one integer arguments. All are consructed here.
-// from r0-r3 or d0 and d1.
+// Runtime FP routines take:
+// - two double arguments
+// - one double argument and zero or one integer argument.
+// All are constructed here from r0-r3 or d0, d1 and r0.
void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (use_eabi_hardfloat()) {
- *x = vfp_registers_[0];
- *y = vfp_registers_[1];
- *z = registers_[1];
+ *x = get_double_from_d_register(0);
+ *y = get_double_from_d_register(1);
+ *z = get_register(0);
} else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
- OS::MemCopy(buffer, registers_, sizeof(*x));
- OS::MemCopy(x, buffer, sizeof(*x));
+ *x = get_double_from_register_pair(0);
// Registers 2 and 3 -> y.
- OS::MemCopy(buffer, registers_ + 2, sizeof(*y));
- OS::MemCopy(y, buffer, sizeof(*y));
+ *y = get_double_from_register_pair(2);
// Register 2 -> z
- memcpy(buffer, registers_ + 2, sizeof(*z));
- memcpy(z, buffer, sizeof(*z));
+ *z = get_register(2);
}
}
@@ -1718,32 +1719,6 @@
(redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
- if (use_eabi_hardfloat()) {
- // With the hard floating point calling convention, double
- // arguments are passed in VFP registers. Fetch the arguments
- // from there and call the builtin using soft floating point
- // convention.
- switch (redirection->type()) {
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_COMPARE_CALL:
- arg0 = vfp_registers_[0];
- arg1 = vfp_registers_[1];
- arg2 = vfp_registers_[2];
- arg3 = vfp_registers_[3];
- break;
- case ExternalReference::BUILTIN_FP_CALL:
- arg0 = vfp_registers_[0];
- arg1 = vfp_registers_[1];
- break;
- case ExternalReference::BUILTIN_FP_INT_CALL:
- arg0 = vfp_registers_[0];
- arg1 = vfp_registers_[1];
- arg2 = get_register(0);
- break;
- default:
- break;
- }
- }
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_lr = get_register(lr);
@@ -3816,19 +3791,27 @@
}
-double Simulator::CallFP(byte* entry, double d0, double d1) {
+void Simulator::CallFP(byte* entry, double d0, double d1) {
if (use_eabi_hardfloat()) {
set_d_register_from_double(0, d0);
set_d_register_from_double(1, d1);
} else {
- int buffer[2];
- ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
- OS::MemCopy(buffer, &d0, sizeof(d0));
- set_dw_register(0, buffer);
- OS::MemCopy(buffer, &d1, sizeof(d1));
- set_dw_register(2, buffer);
+ set_register_pair_from_double(0, &d0);
+ set_register_pair_from_double(2, &d1);
}
CallInternal(entry);
+}
+
+
+int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
+ CallFP(entry, d0, d1);
+ int32_t result = get_register(r0);
+ return result;
+}
+
+
+double Simulator::CallFPReturnsDouble(byte* entry, double d0, double d1) {
+ CallFP(entry, d0, d1);
if (use_eabi_hardfloat()) {
return get_double_from_d_register(0);
} else {
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 7fca743..e392c5c 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -163,6 +163,7 @@
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
+ void set_register_pair_from_double(int reg, double* value);
void set_dw_register(int dreg, const int* dbl);
// Support for VFP.
@@ -220,7 +221,9 @@
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
- double CallFP(byte* entry, double d0, double d1);
+ void CallFP(byte* entry, double d0, double d1);
+ int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
+ double CallFPReturnsDouble(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -444,6 +447,10 @@
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
+#define CALL_GENERATED_FP_INT(entry, p0, p1) \
+ Simulator::current(Isolate::Current())->CallFPReturnsInt( \
+ FUNCTION_ADDR(entry), p0, p1)
+
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->Call( \
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
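
The simulator changes above replace the char-buffer MemCopy detours with symmetric helpers for moving doubles in and out of core register pairs, and split CallFP into typed wrappers so callers fetch the result from the right place (r0 for ints; d0 or the r0:r1 pair for doubles). A standalone model of the new pair setter (a sketch):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Models Simulator::set_register_pair_from_double: a 64-bit double is
// spread across two consecutive 32-bit core registers (reg must be even)
// for the softfloat calling convention.
void SetRegisterPairFromDouble(int32_t* registers, int reg, const double* v) {
  std::memcpy(registers + reg, v, sizeof(*v));
}

int main() {
  int32_t regs[4] = {0, 0, 0, 0};
  double d0 = 1.5, d1 = -2.5;
  SetRegisterPairFromDouble(regs, 0, &d0);  // r0:r1 <- d0
  SetRegisterPairFromDouble(regs, 2, &d1);  // r2:r3 <- d1
  std::printf("%08x %08x\n", static_cast<unsigned>(regs[0]),
              static_cast<unsigned>(regs[1]));
}
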
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 085af3f..567eb63 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -380,31 +380,27 @@
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string, leaving the instance type in the
// scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
// Load length directly from the string.
__ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
__ Ret();
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, Operand(JS_VALUE_TYPE));
- __ b(ne, miss);
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmp(scratch1, Operand(JS_VALUE_TYPE));
+ __ b(ne, miss);
- // Unwrap the value and check if the wrapped value is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
- }
+ // Unwrap the value and check if the wrapped value is a string.
+ __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+ __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
+ __ Ret();
}
@@ -843,23 +839,28 @@
static void GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ bool restore_context) {
// ----------- S t a t e -------------
- // -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee JS function
- // -- sp[8] : call data
- // -- sp[12] : isolate
- // -- sp[16] : ReturnValue default value
- // -- sp[20] : ReturnValue
- // -- sp[24] : last JS argument
+ // -- sp[0] : context
+ // -- sp[4] : holder (set by CheckPrototypes)
+ // -- sp[8] : callee JS function
+ // -- sp[12] : call data
+ // -- sp[16] : isolate
+ // -- sp[20] : ReturnValue default value
+ // -- sp[24] : ReturnValue
+ // -- sp[28] : last JS argument
// -- ...
- // -- sp[(argc + 5) * 4] : first JS argument
- // -- sp[(argc + 6) * 4] : receiver
+ // -- sp[(argc + 6) * 4] : first JS argument
+ // -- sp[(argc + 7) * 4] : receiver
// -----------------------------------
+ // Save calling context.
+ __ str(cp, MemOperand(sp));
// Get the function and set up the context.
Handle<JSFunction> function = optimization.constant_function();
__ LoadHeapObject(r5, function);
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
+ __ str(r5, MemOperand(sp, 2 * kPointerSize));
// Pass the additional arguments.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
@@ -870,15 +871,18 @@
} else {
__ Move(r6, call_data);
}
+ // Store call data.
+ __ str(r6, MemOperand(sp, 3 * kPointerSize));
+ // Store isolate.
__ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate())));
- // Store JS function, call data, isolate ReturnValue default and ReturnValue.
- __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
+ __ str(r7, MemOperand(sp, 4 * kPointerSize));
+ // Store ReturnValue default and ReturnValue.
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ str(r5, MemOperand(sp, 4 * kPointerSize));
__ str(r5, MemOperand(sp, 5 * kPointerSize));
+ __ str(r5, MemOperand(sp, 6 * kPointerSize));
// Prepare arguments.
- __ add(r2, sp, Operand(5 * kPointerSize));
+ __ add(r2, sp, Operand((kFastApiCallArguments - 1) * kPointerSize));
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -916,12 +920,18 @@
masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, 2 * kPointerSize);
+ MemOperand return_value_operand(
+ fp, (kFastApiCallArguments + 1) * kPointerSize);
__ CallApiFunctionAndReturn(ref,
function_address,
thunk_ref,
r1,
kStackUnwindSpace,
- kFastApiCallArguments + 1);
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
}
@@ -936,10 +946,12 @@
ASSERT(!receiver.is(scratch));
const int stack_space = kFastApiCallArguments + argc + 1;
+ const int kHolderIndex = kFastApiCallArguments +
+ FunctionCallbackArguments::kHolderIndex - 1;
// Assign stack space for the call arguments.
__ sub(sp, sp, Operand(stack_space * kPointerSize));
// Write holder to stack frame.
- __ str(receiver, MemOperand(sp, 0));
+ __ str(receiver, MemOperand(sp, kHolderIndex * kPointerSize));
// Write receiver to stack frame.
int index = stack_space - 1;
__ str(receiver, MemOperand(sp, index * kPointerSize));
@@ -950,7 +962,7 @@
__ str(receiver, MemOperand(sp, index-- * kPointerSize));
}
- GenerateFastApiDirectCall(masm, optimization, argc);
+ GenerateFastApiDirectCall(masm, optimization, argc, true);
}
@@ -1064,7 +1076,8 @@
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
+ GenerateFastApiDirectCall(
+ masm, optimization, arguments_.immediate(), false);
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@@ -1188,6 +1201,8 @@
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ const int kHolderIndex = kFastApiCallArguments +
+ FunctionCallbackArguments::kHolderIndex - 1;
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
__ mov(scratch1, Operand(Handle<Map>(object->map())));
@@ -1203,7 +1218,7 @@
int depth = 0;
if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
+ __ str(reg, MemOperand(sp, kHolderIndex * kPointerSize));
}
// Check the maps in the prototype chain.
@@ -1262,7 +1277,7 @@
}
if (save_at_depth == depth) {
- __ str(reg, MemOperand(sp));
+ __ str(reg, MemOperand(sp, kHolderIndex * kPointerSize));
}
// Go to the next object in the prototype chain.
@@ -1457,7 +1472,7 @@
__ str(scratch2(), MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
- const int kStackUnwindSpace = kFastApiCallArguments + 1;
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
@@ -1475,7 +1490,8 @@
thunk_ref,
r2,
kStackUnwindSpace,
- 6);
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
}
@@ -2539,7 +2555,7 @@
CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
depth, &miss);
- GenerateFastApiDirectCall(masm(), optimization, argc);
+ GenerateFastApiDirectCall(masm(), optimization, argc, false);
__ bind(&miss);
FreeSpaceForFastApiCall(masm());
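
With the calling context saved at sp[0], every slot of the fast API call frame in this file moves up by one, which is why the holder, function, call data, isolate and ReturnValue stores all gained a slot. The new layout from the state comment above, restated as a sketch (illustrative names, not the V8 constants):

// Slot indices in pointer-size units, per the state comment in the diff.
enum FastApiCallSlot {
  kContextSlot            = 0,  // saved calling context (new in this change)
  kHolderSlot             = 1,  // set by CheckPrototypes
  kCalleeSlot             = 2,  // callee JS function
  kCallDataSlot           = 3,
  kIsolateSlot            = 4,
  kReturnValueDefaultSlot = 5,
  kReturnValueSlot        = 6,
  kLastJsArgSlot          = 7   // first JS arg at argc + 6, receiver at argc + 7
};

int main() { return 0; }
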
diff --git a/src/array.js b/src/array.js
index 5f89ebb..4a7aea5 100644
--- a/src/array.js
+++ b/src/array.js
@@ -399,14 +399,13 @@
n--;
var value = this[n];
- EnqueueSpliceRecord(this, n, [value], 0);
-
try {
BeginPerformSplice(this);
delete this[n];
this.length = n;
} finally {
EndPerformSplice(this);
+ EnqueueSpliceRecord(this, n, [value], 0);
}
return value;
@@ -441,8 +440,6 @@
var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();
- EnqueueSpliceRecord(this, n, [], m);
-
try {
BeginPerformSplice(this);
for (var i = 0; i < m; i++) {
@@ -451,6 +448,7 @@
this.length = n + m;
} finally {
EndPerformSplice(this);
+ EnqueueSpliceRecord(this, n, [], m);
}
return this.length;
@@ -581,14 +579,13 @@
function ObservedArrayShift(len) {
var first = this[0];
- EnqueueSpliceRecord(this, 0, [first], 0);
-
try {
BeginPerformSplice(this);
SimpleMove(this, 0, 1, len, 0);
this.length = len - 1;
} finally {
EndPerformSplice(this);
+ EnqueueSpliceRecord(this, 0, [first], 0);
}
return first;
@@ -627,8 +624,6 @@
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
- EnqueueSpliceRecord(this, 0, [], num_arguments);
-
try {
BeginPerformSplice(this);
SimpleMove(this, 0, 0, len, num_arguments);
@@ -638,6 +633,7 @@
this.length = len + num_arguments;
} finally {
EndPerformSplice(this);
+ EnqueueSpliceRecord(this, 0, [], num_arguments);
}
return len + num_arguments;
diff --git a/src/assembler.cc b/src/assembler.cc
index fbff62d..6581aa1 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -207,6 +207,26 @@
// -----------------------------------------------------------------------------
+// Implementation of PlatformFeatureScope
+
+PlatformFeatureScope::PlatformFeatureScope(CpuFeature f)
+ : old_supported_(CpuFeatures::supported_),
+ old_found_by_runtime_probing_only_(
+ CpuFeatures::found_by_runtime_probing_only_) {
+ uint64_t mask = static_cast<uint64_t>(1) << f;
+ CpuFeatures::supported_ |= mask;
+ CpuFeatures::found_by_runtime_probing_only_ &= ~mask;
+}
+
+
+PlatformFeatureScope::~PlatformFeatureScope() {
+ CpuFeatures::supported_ = old_supported_;
+ CpuFeatures::found_by_runtime_probing_only_ =
+ old_found_by_runtime_probing_only_;
+}
+
+
+// -----------------------------------------------------------------------------
// Implementation of Label
int Label::pos() const {
diff --git a/src/assembler.h b/src/assembler.h
index 6b399f2..1220074 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -134,6 +134,19 @@
};
+// Enable an otherwise unsupported feature within a scope, for
+// cross-compiling to a different CPU.
+class PlatformFeatureScope BASE_EMBEDDED {
+ public:
+ explicit PlatformFeatureScope(CpuFeature f);
+ ~PlatformFeatureScope();
+
+ private:
+ uint64_t old_supported_;
+ uint64_t old_found_by_runtime_probing_only_;
+};
+
+
// -----------------------------------------------------------------------------
// Labels represent pc locations; they are typically jump or call targets.
// After declaration, a label can be freely used to denote known or (yet)
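
PlatformFeatureScope is an RAII override of the two CpuFeatures bitmasks: it forces a feature to look supported for the duration of a scope (the cross-compilation case named in the comment above) and restores whatever runtime probing found when the scope ends. The same mechanics with free-standing globals in place of the CpuFeatures statics, as a sketch:

#include <cassert>
#include <cstdint>

uint64_t supported = 0;
uint64_t probed_only = 0;

// Models PlatformFeatureScope: save both masks, force the feature bit on
// in 'supported' and off in 'probed_only', restore both on destruction.
class FeatureScope {
 public:
  explicit FeatureScope(int f)
      : old_supported_(supported), old_probed_only_(probed_only) {
    uint64_t mask = static_cast<uint64_t>(1) << f;
    supported |= mask;
    probed_only &= ~mask;
  }
  ~FeatureScope() {
    supported = old_supported_;
    probed_only = old_probed_only_;
  }
 private:
  uint64_t old_supported_;
  uint64_t old_probed_only_;
};

int main() {
  { FeatureScope scope(3); assert(supported & (1ull << 3)); }
  assert(supported == 0);  // restored when the scope dies
}
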
diff --git a/src/ast.cc b/src/ast.cc
index 823dede..5f085d3 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -460,10 +460,7 @@
receiver_types_.Clear();
if (key()->IsPropertyName()) {
FunctionPrototypeStub proto_stub(Code::LOAD_IC);
- StringLengthStub string_stub(Code::LOAD_IC, false);
- if (oracle->LoadIsStub(this, &string_stub)) {
- is_string_length_ = true;
- } else if (oracle->LoadIsStub(this, &proto_stub)) {
+ if (oracle->LoadIsStub(this, &proto_stub)) {
is_function_prototype_ = true;
} else {
Literal* lit_key = key()->AsLiteral();
diff --git a/src/ast.h b/src/ast.h
index c630906..71a51ab 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1646,7 +1646,6 @@
BailoutId LoadId() const { return load_id_; }
- bool IsStringLength() const { return is_string_length_; }
bool IsStringAccess() const { return is_string_access_; }
bool IsFunctionPrototype() const { return is_function_prototype_; }
@@ -1674,7 +1673,6 @@
load_id_(GetNextId(isolate)),
is_monomorphic_(false),
is_uninitialized_(false),
- is_string_length_(false),
is_string_access_(false),
is_function_prototype_(false) { }
@@ -1687,7 +1685,6 @@
SmallMapList receiver_types_;
bool is_monomorphic_ : 1;
bool is_uninitialized_ : 1;
- bool is_string_length_ : 1;
bool is_string_access_ : 1;
bool is_function_prototype_ : 1;
};
diff --git a/src/builtins.cc b/src/builtins.cc
index 9290852..454cf46 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -195,79 +195,6 @@
}
-static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
- Isolate* isolate,
- JSFunction* constructor) {
- ASSERT(args->length() >= 1);
- Heap* heap = isolate->heap();
- isolate->counters()->array_function_runtime()->Increment();
-
- JSArray* array;
- if (CalledAsConstructor(isolate)) {
- array = JSArray::cast((*args)[0]);
- // Initialize elements and length in case later allocations fail so that the
- // array object is initialized in a valid state.
- MaybeObject* maybe_array = array->Initialize(0);
- if (maybe_array->IsFailure()) return maybe_array;
-
- AllocationMemento* memento = AllocationMemento::FindForJSObject(array);
- if (memento != NULL && memento->IsValid()) {
- AllocationSite* site = memento->GetAllocationSite();
- ElementsKind to_kind = site->GetElementsKind();
- if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
- to_kind)) {
- // We have advice that we should change the elements kind
- if (FLAG_trace_track_allocation_sites) {
- PrintF("AllocationSite: pre-transitioning array %p(%s->%s)\n",
- reinterpret_cast<void*>(array),
- ElementsKindToString(array->GetElementsKind()),
- ElementsKindToString(to_kind));
- }
-
- maybe_array = array->TransitionElementsKind(to_kind);
- if (maybe_array->IsFailure()) return maybe_array;
- }
- }
-
- if (!FLAG_smi_only_arrays) {
- Context* native_context = isolate->context()->native_context();
- if (array->GetElementsKind() == GetInitialFastElementsKind() &&
- !native_context->js_array_maps()->IsUndefined()) {
- FixedArray* map_array =
- FixedArray::cast(native_context->js_array_maps());
- array->set_map(Map::cast(map_array->
- get(TERMINAL_FAST_ELEMENTS_KIND)));
- }
- }
- } else {
- // Allocate the JS Array
- MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
- if (!maybe_obj->To(&array)) return maybe_obj;
- }
-
- Arguments adjusted_arguments(args->length() - 1, args->arguments() - 1);
- ASSERT(adjusted_arguments.length() < 1 ||
- adjusted_arguments[0] == (*args)[1]);
- return ArrayConstructInitializeElements(array, &adjusted_arguments);
-}
-
-
-BUILTIN(InternalArrayCodeGeneric) {
- return ArrayCodeGenericCommon(
- &args,
- isolate,
- isolate->context()->native_context()->internal_array_function());
-}
-
-
-BUILTIN(ArrayCodeGeneric) {
- return ArrayCodeGenericCommon(
- &args,
- isolate,
- isolate->context()->native_context()->array_function());
-}
-
-
static void MoveDoubleElements(FixedDoubleArray* dst,
int dst_index,
FixedDoubleArray* src,
diff --git a/src/builtins.h b/src/builtins.h
index c712f1e..01061b6 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -63,9 +63,6 @@
\
V(EmptyFunction, NO_EXTRA_ARGUMENTS) \
\
- V(InternalArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
- V(ArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
- \
V(ArrayPush, NO_EXTRA_ARGUMENTS) \
V(ArrayPop, NO_EXTRA_ARGUMENTS) \
V(ArrayShift, NO_EXTRA_ARGUMENTS) \
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 23d4269..0d06209 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -357,40 +357,45 @@
HObjectAccess access = HObjectAccess::ForAllocationSiteTransitionInfo();
HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access);
+ HValue* push_value;
if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
HValue* elements = AddLoadElements(boilerplate);
IfBuilder if_fixed_cow(this);
if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
if_fixed_cow.Then();
- environment()->Push(BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- FAST_ELEMENTS,
- 0/*copy-on-write*/));
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ FAST_ELEMENTS,
+ 0/*copy-on-write*/);
+ environment()->Push(push_value);
if_fixed_cow.Else();
IfBuilder if_fixed(this);
if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
if_fixed.Then();
- environment()->Push(BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- FAST_ELEMENTS,
- length));
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ FAST_ELEMENTS,
+ length);
+ environment()->Push(push_value);
if_fixed.Else();
- environment()->Push(BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- FAST_DOUBLE_ELEMENTS,
- length));
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ FAST_DOUBLE_ELEMENTS,
+ length);
+ environment()->Push(push_value);
} else {
ElementsKind elements_kind = casted_stub()->ComputeElementsKind();
- environment()->Push(BuildCloneShallowArray(boilerplate,
- allocation_site,
- alloc_site_mode,
- elements_kind,
- length));
+ push_value = BuildCloneShallowArray(boilerplate,
+ allocation_site,
+ alloc_site_mode,
+ elements_kind,
+ length);
+ environment()->Push(push_value);
}
checker.ElseDeopt("Uninitialized boilerplate literals");
@@ -459,8 +464,7 @@
JS_OBJECT_TYPE);
// Store the map
- Handle<Map> allocation_site_map(isolate()->heap()->allocation_site_map(),
- isolate());
+ Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
AddStoreMapConstant(object, allocation_site_map);
// Store the payload (smi elements kind)
@@ -469,14 +473,22 @@
HObjectAccess::ForAllocationSiteTransitionInfo(),
initial_elements_kind);
+ // Store an empty fixed array for the code dependency.
+ HConstant* empty_fixed_array =
+ Add<HConstant>(isolate()->factory()->empty_fixed_array());
+ HStoreNamedField* store = Add<HStoreNamedField>(
+ object,
+ HObjectAccess::ForAllocationSiteDependentCode(),
+ empty_fixed_array);
+
// Link the object to the allocation site list
HValue* site_list = Add<HConstant>(
ExternalReference::allocation_sites_list_address(isolate()));
HValue* site = Add<HLoadNamedField>(site_list,
HObjectAccess::ForAllocationSiteList());
- HStoreNamedField* store =
- Add<HStoreNamedField>(object, HObjectAccess::ForAllocationSiteWeakNext(),
- site);
+ store = Add<HStoreNamedField>(object,
+ HObjectAccess::ForAllocationSiteWeakNext(),
+ site);
store->SkipWriteBarrier();
Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(),
object);
@@ -918,8 +930,7 @@
HValue* native_context,
HValue* code_object) {
Counters* counters = isolate()->counters();
- AddIncrementCounter(counters->fast_new_closure_install_optimized(),
- context());
+ AddIncrementCounter(counters->fast_new_closure_install_optimized());
// TODO(fschneider): Idea: store proper code pointers in the optimized code
// map and either unmangle them on marking or do nothing as the whole map is
@@ -967,7 +978,7 @@
}
is_optimized.Else();
{
- AddIncrementCounter(counters->fast_new_closure_try_optimized(), context());
+ AddIncrementCounter(counters->fast_new_closure_try_optimized());
// optimized_map points to fixed array of 3-element entries
// (native context, optimized code, literals).
// Map must never be empty, so check the first elements.
@@ -1052,11 +1063,12 @@
Add<HConstant>(factory->empty_fixed_array());
HValue* shared_info = GetParameter(0);
+ AddIncrementCounter(counters->fast_new_closure_total());
+
// Create a new closure from the given function info in new space
HValue* size = Add<HConstant>(JSFunction::kSize);
HInstruction* js_function = Add<HAllocate>(size, HType::JSObject(),
NOT_TENURED, JS_FUNCTION_TYPE);
- AddIncrementCounter(counters->fast_new_closure_total(), context());
int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
casted_stub()->is_generator());
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 946eb76..30ec1c7 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -830,19 +830,12 @@
class StringLengthStub: public ICStub {
public:
- StringLengthStub(Code::Kind kind, bool support_wrapper)
- : ICStub(kind), support_wrapper_(support_wrapper) { }
+ explicit StringLengthStub(Code::Kind kind) : ICStub(kind) { }
virtual void Generate(MacroAssembler* masm);
private:
STATIC_ASSERT(KindBits::kSize == 4);
- class WrapperModeBits: public BitField<bool, 4, 1> {};
- virtual CodeStub::Major MajorKey() { return StringLength; }
- virtual int MinorKey() {
- return KindBits::encode(kind()) | WrapperModeBits::encode(support_wrapper_);
- }
-
- bool support_wrapper_;
+ virtual CodeStub::Major MajorKey() { return StringLength; }
};
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index e0f7aea..3d9dc67 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -435,8 +435,18 @@
logger->is_logging_ = false;
generator_ = new ProfileGenerator(profiles_);
Sampler* sampler = logger->sampler();
+#if V8_CC_MSVC && (_MSC_VER >= 1800)
+ // VS2013 reports "warning C4316: 'v8::internal::ProfilerEventsProcessor'
+ // : object allocated on the heap may not be aligned 64". We need to
+ // figure out if this is a legitimate warning or a compiler bug.
+ #pragma warning(push)
+ #pragma warning(disable:4316)
+#endif
processor_ = new ProfilerEventsProcessor(
generator_, sampler, sampling_interval_);
+#if V8_CC_MSVC && (_MSC_VER >= 1800)
+ #pragma warning(pop)
+#endif
is_profiling_ = true;
// Enumerate stuff we already have in the heap.
ASSERT(isolate_->heap()->HasBeenSetUp());
diff --git a/src/d8-readline.cc b/src/d8-readline.cc
index 298518d..0226f31 100644
--- a/src/d8-readline.cc
+++ b/src/d8-readline.cc
@@ -150,7 +150,7 @@
static Persistent<Array> current_completions;
Isolate* isolate = read_line_editor.isolate_;
Locker lock(isolate);
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<Array> completions;
if (state == 0) {
Local<String> full_text = String::New(rl_line_buffer, rl_point);
@@ -167,8 +167,7 @@
String::Utf8Value str(str_obj);
return strdup(*str);
} else {
- current_completions.Dispose(isolate);
- current_completions.Clear();
+ current_completions.Reset();
return NULL;
}
}
diff --git a/src/d8.cc b/src/d8.cc
index fb75d81..614b16e 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -263,7 +263,8 @@
data_->realm_current_ = 0;
data_->realm_switch_ = 0;
data_->realms_ = new Persistent<Context>[1];
- data_->realms_[0].Reset(data_->isolate_, Context::GetEntered());
+ data_->realms_[0].Reset(data_->isolate_,
+ data_->isolate_->GetEnteredContext());
data_->realm_shared_.Clear();
}
@@ -290,7 +291,7 @@
void Shell::RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
- int index = data->RealmFind(Context::GetEntered());
+ int index = data->RealmFind(isolate->GetEnteredContext());
if (index == -1) return;
args.GetReturnValue().Set(index);
}
@@ -939,8 +940,8 @@
i::Factory* factory = reinterpret_cast<i::Isolate*>(isolate)->factory();
i::JSArguments js_args = i::FLAG_js_arguments;
i::Handle<i::FixedArray> arguments_array =
- factory->NewFixedArray(js_args.argc());
- for (int j = 0; j < js_args.argc(); j++) {
+ factory->NewFixedArray(js_args.argc);
+ for (int j = 0; j < js_args.argc; j++) {
i::Handle<i::String> arg =
factory->NewStringFromUtf8(i::CStrVector(js_args[j]));
arguments_array->set(j, *arg);
diff --git a/src/d8.gyp b/src/d8.gyp
index 15d342d..097abc0 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -31,7 +31,7 @@
'console%': '',
# Enable support for Intel VTune. Supported on ia32/x64 only
'v8_enable_vtunejit%': 0,
- 'v8_enable_i18n_support%': 0,
+ 'v8_enable_i18n_support%': 1,
},
'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'targets': [
@@ -81,13 +81,13 @@
}],
['v8_enable_i18n_support==1', {
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icui18n',
- '<(DEPTH)/third_party/icu/icu.gyp:icuuc',
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
],
}],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
- '<(DEPTH)/third_party/icu/icu.gyp:icudata',
+ '<(icu_gyp_path):icudata',
],
}],
],
diff --git a/src/debug.cc b/src/debug.cc
index 0496b8c..63d33eb 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1793,10 +1793,14 @@
// function to be called and not the code for Builtins::FunctionApply or
// Builtins::FunctionCall. The receiver of call/apply is the target
// function.
- if (!holder.is_null() && holder->IsJSFunction() &&
- !JSFunction::cast(*holder)->IsBuiltin()) {
+ if (!holder.is_null() && holder->IsJSFunction()) {
Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
- Debug::FloodWithOneShot(js_function);
+ if (!js_function->IsBuiltin()) {
+ Debug::FloodWithOneShot(js_function);
+ } else if (js_function->shared()->bound()) {
+ // Handle Function.prototype.bind
+ Debug::FloodBoundFunctionWithOneShot(js_function);
+ }
}
} else {
Debug::FloodWithOneShot(function);
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index c979a53..9a1bb9d 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -1617,9 +1617,10 @@
Handle<Map> map = Handle<Map>::cast(MaterializeNextValue());
switch (map->instance_type()) {
case HEAP_NUMBER_TYPE: {
- Handle<HeapNumber> number =
- Handle<HeapNumber>::cast(MaterializeNextValue());
- materialized_objects_->Add(number);
+ Handle<HeapNumber> object = isolate_->factory()->NewHeapNumber(0.0);
+ materialized_objects_->Add(object);
+ Handle<Object> number = MaterializeNextValue();
+ object->set_value(number->Number());
materialization_value_index_ += kDoubleSize / kPointerSize - 1;
break;
}
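
The reordering above allocates a zero-valued HeapNumber and records it in materialized_objects_ before materializing the value, so the object's slot index is fixed before any nested materialization runs; only then is the value patched in. The same ordering in miniature (a sketch, not the deoptimizer types):

#include <vector>

struct HeapNumber { double value; };

int main() {
  std::vector<HeapNumber*> materialized;
  HeapNumber* object = new HeapNumber{0.0};  // placeholder, like NewHeapNumber(0.0)
  materialized.push_back(object);            // slot reserved before the value
  double number = 3.25;                      // stands in for MaterializeNextValue()
  object->value = number;
  delete object;
}
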
@@ -2337,85 +2338,6 @@
}
-void Deoptimizer::PatchInterruptCode(Isolate* isolate,
- Code* unoptimized) {
- DisallowHeapAllocation no_gc;
- Code* replacement_code =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-
- // Iterate over the back edge table and patch every interrupt
- // call to an unconditional call to the replacement code.
- int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
-
- for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
- !back_edges.Done();
- back_edges.Next()) {
- if (static_cast<int>(back_edges.loop_depth()) == loop_nesting_level) {
- ASSERT_EQ(NOT_PATCHED, GetInterruptPatchState(isolate,
- unoptimized,
- back_edges.pc()));
- PatchInterruptCodeAt(unoptimized,
- back_edges.pc(),
- replacement_code);
- }
- }
-
- unoptimized->set_back_edges_patched_for_osr(true);
- ASSERT(Deoptimizer::VerifyInterruptCode(
- isolate, unoptimized, loop_nesting_level));
-}
-
-
-void Deoptimizer::RevertInterruptCode(Isolate* isolate,
- Code* unoptimized) {
- DisallowHeapAllocation no_gc;
- Code* interrupt_code =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
-
- // Iterate over the back edge table and revert the patched interrupt calls.
- ASSERT(unoptimized->back_edges_patched_for_osr());
- int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
-
- for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
- !back_edges.Done();
- back_edges.Next()) {
- if (static_cast<int>(back_edges.loop_depth()) <= loop_nesting_level) {
- ASSERT_EQ(PATCHED_FOR_OSR, GetInterruptPatchState(isolate,
- unoptimized,
- back_edges.pc()));
- RevertInterruptCodeAt(unoptimized, back_edges.pc(), interrupt_code);
- }
- }
-
- unoptimized->set_back_edges_patched_for_osr(false);
- unoptimized->set_allow_osr_at_loop_nesting_level(0);
- // Assert that none of the back edges are patched anymore.
- ASSERT(Deoptimizer::VerifyInterruptCode(isolate, unoptimized, -1));
-}
-
-
-#ifdef DEBUG
-bool Deoptimizer::VerifyInterruptCode(Isolate* isolate,
- Code* unoptimized,
- int loop_nesting_level) {
- DisallowHeapAllocation no_gc;
- for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
- !back_edges.Done();
- back_edges.Next()) {
- uint32_t loop_depth = back_edges.loop_depth();
- CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
- // Assert that all back edges for shallower loops (and only those)
- // have already been patched.
- CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
- GetInterruptPatchState(isolate,
- unoptimized,
- back_edges.pc()) != NOT_PATCHED);
- }
- return true;
-}
-#endif // DEBUG
-
-
unsigned Deoptimizer::ComputeInputFrameSize() const {
unsigned fixed_size = ComputeFixedSize(function_);
// The fp-to-sp delta already takes the context and the function
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 7ee5908..8c16993 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -131,11 +131,6 @@
DEBUGGER
};
- enum InterruptPatchState {
- NOT_PATCHED,
- PATCHED_FOR_OSR
- };
-
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
struct JumpTableEntry {
@@ -213,39 +208,6 @@
// The size in bytes of the code required at a lazy deopt patch site.
static int patch_size();
- // Patch all interrupts with allowed loop depth in the unoptimized code to
- // unconditionally call replacement_code.
- static void PatchInterruptCode(Isolate* isolate,
- Code* unoptimized_code);
-
- // Patch the interrupt at the instruction before pc_after in
- // the unoptimized code to unconditionally call replacement_code.
- static void PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code);
-
- // Change all patched interrupts patched in the unoptimized code
- // back to normal interrupts.
- static void RevertInterruptCode(Isolate* isolate,
- Code* unoptimized_code);
-
- // Change patched interrupt in the unoptimized code
- // back to a normal interrupt.
- static void RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code);
-
-#ifdef DEBUG
- static InterruptPatchState GetInterruptPatchState(Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after);
-
- // Verify that all back edges of a certain loop depth are patched.
- static bool VerifyInterruptCode(Isolate* isolate,
- Code* unoptimized_code,
- int loop_nesting_level);
-#endif // DEBUG
-
~Deoptimizer();
void MaterializeHeapObjects(JavaScriptFrameIterator* it);
diff --git a/src/factory.cc b/src/factory.cc
index acbaf3c..1425552 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -1083,16 +1083,6 @@
}
-void Factory::EnsureCanContainElements(Handle<JSArray> array,
- Handle<FixedArrayBase> elements,
- uint32_t length,
- EnsureElementsMode mode) {
- CALL_HEAP_FUNCTION_VOID(
- isolate(),
- array->EnsureCanContainElements(*elements, length, mode));
-}
-
-
Handle<JSArrayBuffer> Factory::NewJSArrayBuffer() {
Handle<JSFunction> array_buffer_fun(
isolate()->context()->native_context()->array_buffer_fun());
diff --git a/src/factory.h b/src/factory.h
index 1bdf474..5704066 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -328,11 +328,6 @@
void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
- void EnsureCanContainElements(Handle<JSArray> array,
- Handle<FixedArrayBase> elements,
- uint32_t length,
- EnsureElementsMode mode);
-
Handle<JSArrayBuffer> NewJSArrayBuffer();
Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type);
@@ -462,7 +457,15 @@
&isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
}
ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR_ACCESSOR
+#undef ROOT_ACCESSOR
+
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+ inline Handle<Map> name##_map() { \
+ return Handle<Map>(BitCast<Map**>( \
+ &isolate()->heap()->roots_[Heap::k##Name##MapRootIndex])); \
+ }
+ STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
#define STRING_ACCESSOR(name, str) \
inline Handle<String> name() { \
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 08cd830..2c01251 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -90,44 +90,34 @@
#define DEFINE_implication(whenflag, thenflag)
#endif
+#define COMMA ,
#ifdef FLAG_MODE_DECLARE
// Structure used to hold a collection of arguments to the JavaScript code.
-#define JSARGUMENTS_INIT {{}}
struct JSArguments {
public:
- inline int argc() const {
- return static_cast<int>(storage_[0]);
- }
- inline const char** argv() const {
- return reinterpret_cast<const char**>(storage_[1]);
- }
inline const char*& operator[] (int idx) const {
- return argv()[idx];
- }
- inline JSArguments& operator=(JSArguments args) {
- set_argc(args.argc());
- set_argv(args.argv());
- return *this;
+ return argv[idx];
}
static JSArguments Create(int argc, const char** argv) {
JSArguments args;
- args.set_argc(argc);
- args.set_argv(argv);
+ args.argc = argc;
+ args.argv = argv;
return args;
}
-private:
- void set_argc(int argc) {
- storage_[0] = argc;
+ int argc;
+ const char** argv;
+};
+
+struct MaybeBoolFlag {
+ static MaybeBoolFlag Create(bool has_value, bool value) {
+ MaybeBoolFlag flag;
+ flag.has_value = has_value;
+ flag.value = value;
+ return flag;
}
- void set_argv(const char** argv) {
- storage_[1] = reinterpret_cast<AtomicWord>(argv);
- }
-public:
- // Contains argc and argv. Unfortunately we have to store these two fields
- // into a single one to avoid making the initialization macro (which would be
- // "{ 0, NULL }") contain a coma.
- AtomicWord storage_[2];
+ bool has_value;
+ bool value;
};
#endif
@@ -148,10 +138,13 @@
#endif
#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
+#define DEFINE_maybe_bool(nam, cmt) FLAG(MAYBE_BOOL, MaybeBoolFlag, nam, \
+ { false COMMA false }, cmt)
#define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
#define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
#define DEFINE_string(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
-#define DEFINE_args(nam, def, cmt) FLAG(ARGS, JSArguments, nam, def, cmt)
+#define DEFINE_args(nam, cmt) FLAG(ARGS, JSArguments, nam, \
+ { 0 COMMA NULL }, cmt)
#define DEFINE_ALIAS_bool(alias, nam) FLAG_ALIAS(BOOL, bool, alias, nam)
#define DEFINE_ALIAS_int(alias, nam) FLAG_ALIAS(INT, int, alias, nam)
@@ -240,7 +233,7 @@
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
-DEFINE_bool(use_escape_analysis, false, "use hydrogen escape analysis")
+DEFINE_bool(use_escape_analysis, true, "use hydrogen escape analysis")
DEFINE_bool(use_allocation_folding, true, "use allocation folding")
DEFINE_int(max_inlining_levels, 5, "maximum number of inlining levels")
DEFINE_int(max_inlined_source_size, 600,
@@ -261,6 +254,7 @@
DEFINE_string(trace_hydrogen_file, NULL, "trace hydrogen to given file name")
DEFINE_string(trace_phase, "HLZ", "trace generated IR for specified phases")
DEFINE_bool(trace_inlining, false, "trace inlining decisions")
+DEFINE_bool(trace_load_elimination, false, "trace load elimination")
DEFINE_bool(trace_alloc, false, "trace register allocator")
DEFINE_bool(trace_all_uses, false, "trace all use positions")
DEFINE_bool(trace_range, false, "trace range analysis")
@@ -295,6 +289,7 @@
"perform array index dehoisting")
DEFINE_bool(analyze_environment_liveness, true,
"analyze liveness of environment slots and zap dead values")
+DEFINE_bool(load_elimination, false, "use load elimination")
DEFINE_bool(dead_code_elimination, true, "use dead code elimination")
DEFINE_bool(fold_constants, true, "use constant folding")
DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination")
@@ -544,7 +539,6 @@
"Use idle notification to reduce memory footprint.")
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
-DEFINE_bool(js_accessor_ics, false, "create ics for js accessors")
// macro-assembler-ia32.cc
DEFINE_bool(native_code_counters, false,
@@ -600,6 +594,9 @@
0,
"Fixed seed to use to hash property keys (0 means random)"
"(with snapshots this option cannot override the baked-in seed)")
+DEFINE_maybe_bool(force_memory_constrained,
+ "force (if true) or prevent (if false) the runtime from treating "
+ "the device as being memory constrained.")
// v8.cc
DEFINE_bool(preemption, false,
@@ -610,6 +607,7 @@
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
+DEFINE_maybe_bool(testing_maybe_bool_flag, "testing_maybe_bool_flag")
DEFINE_int(testing_int_flag, 13, "testing_int_flag")
DEFINE_float(testing_float_flag, 2.5, "float-flag")
DEFINE_string(testing_string_flag, "Hello, world!", "string-flag")
@@ -642,7 +640,7 @@
#endif // ENABLE_DEBUGGER_SUPPORT
DEFINE_string(map_counters, "", "Map counters to a file")
-DEFINE_args(js_arguments, JSARGUMENTS_INIT,
+DEFINE_args(js_arguments,
"Pass all remaining arguments to the script. Alias for \"--\".")
#if defined(WEBOS__)
@@ -773,6 +771,7 @@
"Used with --prof, turns on browser-compatible mode for profiling.")
DEFINE_bool(log_regexp, false, "Log regular expression execution.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
+DEFINE_bool(logfile_per_isolate, true, "Separate log files for each isolate.")
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
@@ -834,6 +833,7 @@
#undef FLAG_ALIAS
#undef DEFINE_bool
+#undef DEFINE_maybe_bool
#undef DEFINE_int
#undef DEFINE_string
#undef DEFINE_float
@@ -850,3 +850,5 @@
#undef FLAG_MODE_DEFINE_DEFAULTS
#undef FLAG_MODE_META
#undef FLAG_MODE_DEFINE_IMPLICATIONS
+
+#undef COMMA
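
The COMMA macro exists because preprocessor argument splitting happens before brace initializers mean anything: a default such as { false, false } handed to the FLAG macro would arrive as two arguments. Hiding the comma behind a macro defers it until after splitting, which is also what lets the old JSARGUMENTS_INIT workaround be retired. A minimal reproduction (a sketch):

#include <cstdio>

#define COMMA ,
#define DEFINE_FLAG(type, name, default_value) type name = default_value;

struct MaybeBool { bool has_value; bool value; };

// Without COMMA, "{ false, false }" would be split into two macro
// arguments; with it, the comma only appears after argument parsing.
DEFINE_FLAG(MaybeBool, my_flag, { false COMMA false })

int main() {
  std::printf("%d\n", my_flag.has_value);  // prints 0: flag is unset
}
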
diff --git a/src/flags.cc b/src/flags.cc
index 4e18cc8..0c36aed 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -55,7 +55,8 @@
// to the actual flag, default value, comment, etc. This is designed to be POD
// initialized so as to avoid requiring static constructors.
struct Flag {
- enum FlagType { TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS };
+ enum FlagType { TYPE_BOOL, TYPE_MAYBE_BOOL, TYPE_INT, TYPE_FLOAT,
+ TYPE_STRING, TYPE_ARGS };
FlagType type_; // What type of flag, bool, int, or string.
const char* name_; // Name of the flag, ex "my_flag".
@@ -75,6 +76,11 @@
return reinterpret_cast<bool*>(valptr_);
}
+ MaybeBoolFlag* maybe_bool_variable() const {
+ ASSERT(type_ == TYPE_MAYBE_BOOL);
+ return reinterpret_cast<MaybeBoolFlag*>(valptr_);
+ }
+
int* int_variable() const {
ASSERT(type_ == TYPE_INT);
return reinterpret_cast<int*>(valptr_);
@@ -133,6 +139,8 @@
switch (type_) {
case TYPE_BOOL:
return *bool_variable() == bool_default();
+ case TYPE_MAYBE_BOOL:
+ return maybe_bool_variable()->has_value == false;
case TYPE_INT:
return *int_variable() == int_default();
case TYPE_FLOAT:
@@ -145,7 +153,7 @@
return strcmp(str1, str2) == 0;
}
case TYPE_ARGS:
- return args_variable()->argc() == 0;
+ return args_variable()->argc == 0;
}
UNREACHABLE();
return true;
@@ -157,6 +165,9 @@
case TYPE_BOOL:
*bool_variable() = bool_default();
break;
+ case TYPE_MAYBE_BOOL:
+ *maybe_bool_variable() = MaybeBoolFlag::Create(false, false);
+ break;
case TYPE_INT:
*int_variable() = int_default();
break;
@@ -186,6 +197,7 @@
static const char* Type2String(Flag::FlagType type) {
switch (type) {
case Flag::TYPE_BOOL: return "bool";
+ case Flag::TYPE_MAYBE_BOOL: return "maybe_bool";
case Flag::TYPE_INT: return "int";
case Flag::TYPE_FLOAT: return "float";
case Flag::TYPE_STRING: return "string";
@@ -203,6 +215,11 @@
case Flag::TYPE_BOOL:
buffer.Add("%s", (*flag->bool_variable() ? "true" : "false"));
break;
+ case Flag::TYPE_MAYBE_BOOL:
+ buffer.Add("%s", flag->maybe_bool_variable()->has_value
+ ? (flag->maybe_bool_variable()->value ? "true" : "false")
+ : "unset");
+ break;
case Flag::TYPE_INT:
buffer.Add("%d", *flag->int_variable());
break;
@@ -216,9 +233,9 @@
}
case Flag::TYPE_ARGS: {
JSArguments args = *flag->args_variable();
- if (args.argc() > 0) {
+ if (args.argc > 0) {
buffer.Add("%s", args[0]);
- for (int i = 1; i < args.argc(); i++) {
+ for (int i = 1; i < args.argc; i++) {
buffer.Add(" %s", args[i]);
}
}
@@ -260,7 +277,7 @@
buffer.Add("--%s", args_flag->name());
args->Add(buffer.ToCString().Detach());
JSArguments jsargs = *args_flag->args_variable();
- for (int j = 0; j < jsargs.argc(); j++) {
+ for (int j = 0; j < jsargs.argc; j++) {
args->Add(StrDup(jsargs[j]));
}
}
@@ -380,6 +397,7 @@
// if we still need a flag value, use the next argument if available
if (flag->type() != Flag::TYPE_BOOL &&
+ flag->type() != Flag::TYPE_MAYBE_BOOL &&
flag->type() != Flag::TYPE_ARGS &&
value == NULL) {
if (i < *argc) {
@@ -399,6 +417,9 @@
case Flag::TYPE_BOOL:
*flag->bool_variable() = !is_bool;
break;
+ case Flag::TYPE_MAYBE_BOOL:
+ *flag->maybe_bool_variable() = MaybeBoolFlag::Create(true, !is_bool);
+ break;
case Flag::TYPE_INT:
*flag->int_variable() = strtol(value, &endp, 10); // NOLINT
break;
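Spelled out, the TYPE_MAYBE_BOOL case above gives a maybe-bool flag three observable states (is_bool marks the negated --no-foo spelling, as in the TYPE_BOOL case, which stores !is_bool):

// --testing_maybe_bool_flag      -> MaybeBoolFlag::Create(true, true)
// --no-testing_maybe_bool_flag   -> MaybeBoolFlag::Create(true, false)
// flag absent                    -> default: has_value == false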
@@ -425,8 +446,9 @@
}
// handle errors
- if ((flag->type() == Flag::TYPE_BOOL && value != NULL) ||
- (flag->type() != Flag::TYPE_BOOL && is_bool) ||
+ bool is_bool_type = flag->type() == Flag::TYPE_BOOL ||
+ flag->type() == Flag::TYPE_MAYBE_BOOL;
+ if ((is_bool_type && value != NULL) || (!is_bool_type && is_bool) ||
*endp != '\0') {
PrintF(stderr, "Error: illegal value for flag %s of type %s\n"
"Try --help for options\n",
@@ -549,6 +571,7 @@
}
+// static
void FlagList::EnforceFlagImplications() {
#define FLAG_MODE_DEFINE_IMPLICATIONS
#include "flag-definitions.h"
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 91a5173..f1877fb 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -1615,6 +1615,79 @@
}
+void BackEdgeTable::Patch(Isolate* isolate,
+ Code* unoptimized) {
+ DisallowHeapAllocation no_gc;
+ Code* replacement_code =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+
+  // Iterate over the back edge table and patch every interrupt call at the
+  // allowed loop depth into an unconditional call to the replacement code.
+ int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+
+ BackEdgeTable back_edges(unoptimized, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ if (static_cast<int>(back_edges.loop_depth(i)) == loop_nesting_level) {
+ ASSERT_EQ(INTERRUPT, GetBackEdgeState(isolate,
+ unoptimized,
+ back_edges.pc(i)));
+ PatchAt(unoptimized, back_edges.pc(i), replacement_code);
+ }
+ }
+
+ unoptimized->set_back_edges_patched_for_osr(true);
+ ASSERT(Verify(isolate, unoptimized, loop_nesting_level));
+}
+
+
+void BackEdgeTable::Revert(Isolate* isolate,
+ Code* unoptimized) {
+ DisallowHeapAllocation no_gc;
+ Code* interrupt_code =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
+
+ // Iterate over the back edge table and revert the patched interrupt calls.
+ ASSERT(unoptimized->back_edges_patched_for_osr());
+ int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+
+ BackEdgeTable back_edges(unoptimized, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ if (static_cast<int>(back_edges.loop_depth(i)) <= loop_nesting_level) {
+ ASSERT_EQ(ON_STACK_REPLACEMENT, GetBackEdgeState(isolate,
+ unoptimized,
+ back_edges.pc(i)));
+ RevertAt(unoptimized, back_edges.pc(i), interrupt_code);
+ }
+ }
+
+ unoptimized->set_back_edges_patched_for_osr(false);
+ unoptimized->set_allow_osr_at_loop_nesting_level(0);
+ // Assert that none of the back edges are patched anymore.
+ ASSERT(Verify(isolate, unoptimized, -1));
+}
+
+
+#ifdef DEBUG
+bool BackEdgeTable::Verify(Isolate* isolate,
+ Code* unoptimized,
+ int loop_nesting_level) {
+ DisallowHeapAllocation no_gc;
+ BackEdgeTable back_edges(unoptimized, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ uint32_t loop_depth = back_edges.loop_depth(i);
+ CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
+ // Assert that all back edges for shallower loops (and only those)
+ // have already been patched.
+ CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
+ GetBackEdgeState(isolate,
+ unoptimized,
+ back_edges.pc(i)) != INTERRUPT);
+ }
+ return true;
+}
+#endif // DEBUG
+
+
#undef __
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 5580cb3..adfa1c1 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -139,65 +139,6 @@
#error Unsupported target architecture.
#endif
- class BackEdgeTableIterator {
- public:
- explicit BackEdgeTableIterator(Code* unoptimized,
- DisallowHeapAllocation* required) {
- ASSERT(unoptimized->kind() == Code::FUNCTION);
- instruction_start_ = unoptimized->instruction_start();
- cursor_ = instruction_start_ + unoptimized->back_edge_table_offset();
- ASSERT(cursor_ < instruction_start_ + unoptimized->instruction_size());
- table_length_ = Memory::uint32_at(cursor_);
- cursor_ += kTableLengthSize;
- end_ = cursor_ + table_length_ * kEntrySize;
- }
-
- bool Done() { return cursor_ >= end_; }
-
- void Next() {
- ASSERT(!Done());
- cursor_ += kEntrySize;
- }
-
- BailoutId ast_id() {
- ASSERT(!Done());
- return BailoutId(static_cast<int>(
- Memory::uint32_at(cursor_ + kAstIdOffset)));
- }
-
- uint32_t loop_depth() {
- ASSERT(!Done());
- return Memory::uint32_at(cursor_ + kLoopDepthOffset);
- }
-
- uint32_t pc_offset() {
- ASSERT(!Done());
- return Memory::uint32_at(cursor_ + kPcOffsetOffset);
- }
-
- Address pc() {
- ASSERT(!Done());
- return instruction_start_ + pc_offset();
- }
-
- uint32_t table_length() { return table_length_; }
-
- private:
- static const int kTableLengthSize = kIntSize;
- static const int kAstIdOffset = 0 * kIntSize;
- static const int kPcOffsetOffset = 1 * kIntSize;
- static const int kLoopDepthOffset = 2 * kIntSize;
- static const int kEntrySize = 3 * kIntSize;
-
- Address cursor_;
- Address end_;
- Address instruction_start_;
- uint32_t table_length_;
-
- DISALLOW_COPY_AND_ASSIGN(BackEdgeTableIterator);
- };
-
-
private:
class Breakable;
class Iteration;
@@ -940,6 +881,91 @@
};
+class BackEdgeTable {
+ public:
+ BackEdgeTable(Code* code, DisallowHeapAllocation* required) {
+ ASSERT(code->kind() == Code::FUNCTION);
+ instruction_start_ = code->instruction_start();
+ Address table_address = instruction_start_ + code->back_edge_table_offset();
+ length_ = Memory::uint32_at(table_address);
+ start_ = table_address + kTableLengthSize;
+ }
+
+ uint32_t length() { return length_; }
+
+ BailoutId ast_id(uint32_t index) {
+ return BailoutId(static_cast<int>(
+ Memory::uint32_at(entry_at(index) + kAstIdOffset)));
+ }
+
+ uint32_t loop_depth(uint32_t index) {
+ return Memory::uint32_at(entry_at(index) + kLoopDepthOffset);
+ }
+
+ uint32_t pc_offset(uint32_t index) {
+ return Memory::uint32_at(entry_at(index) + kPcOffsetOffset);
+ }
+
+ Address pc(uint32_t index) {
+ return instruction_start_ + pc_offset(index);
+ }
+
+ enum BackEdgeState {
+ INTERRUPT,
+ ON_STACK_REPLACEMENT
+ };
+
+  // Patch all interrupts at the allowed loop depth in the unoptimized code
+  // to unconditionally call replacement_code.
+ static void Patch(Isolate* isolate,
+ Code* unoptimized_code);
+
+ // Patch the interrupt at the instruction before pc_after in
+ // the unoptimized code to unconditionally call replacement_code.
+ static void PatchAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* replacement_code);
+
+  // Change all patched interrupt calls in the unoptimized code
+  // back to normal interrupt checks.
+ static void Revert(Isolate* isolate,
+ Code* unoptimized_code);
+
+  // Change a patched interrupt call in the unoptimized code
+  // back to a normal interrupt check.
+ static void RevertAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* interrupt_code);
+
+#ifdef DEBUG
+ static BackEdgeState GetBackEdgeState(Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after);
+
+ // Verify that all back edges of a certain loop depth are patched.
+ static bool Verify(Isolate* isolate,
+ Code* unoptimized_code,
+ int loop_nesting_level);
+#endif // DEBUG
+
+ private:
+ Address entry_at(uint32_t index) {
+ ASSERT(index < length_);
+ return start_ + index * kEntrySize;
+ }
+
+ static const int kTableLengthSize = kIntSize;
+ static const int kAstIdOffset = 0 * kIntSize;
+ static const int kPcOffsetOffset = 1 * kIntSize;
+ static const int kLoopDepthOffset = 2 * kIntSize;
+ static const int kEntrySize = 3 * kIntSize;
+
+ Address start_;
+ Address instruction_start_;
+ uint32_t length_;
+};
+
+
} } // namespace v8::internal
#endif // V8_FULL_CODEGEN_H_
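For reference, the offset constants above pin down the byte layout that BackEdgeTable reads (assuming kIntSize == 4):

// At instruction_start() + back_edge_table_offset():
//   +0  uint32  length                       (number of entries)
// followed by length records of kEntrySize == 12 bytes each:
//   +0  uint32  ast_id      of the back edge
//   +4  uint32  pc_offset   (relative to instruction_start())
//   +8  uint32  loop_depth
// entry_at(i) is simply start_ + i * kEntrySize, with start_ pointing
// just past the length word.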
diff --git a/src/handles.cc b/src/handles.cc
index b3704df..033fdab 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -294,21 +294,6 @@
}
-Handle<JSObject> Copy(Handle<JSObject> obj) {
- Isolate* isolate = obj->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- isolate->heap()->CopyJSObject(*obj), JSObject);
-}
-
-
-Handle<JSObject> DeepCopy(Handle<JSObject> obj) {
- Isolate* isolate = obj->GetIsolate();
- CALL_HEAP_FUNCTION(isolate,
- obj->DeepCopy(isolate),
- JSObject);
-}
-
-
// Wrappers for scripts are kept alive and cached in weak global
// handles referred from foreign objects held by the scripts as long as
// they are used. When they are not used anymore, the garbage
diff --git a/src/handles.h b/src/handles.h
index c3e4dca..585f7b4 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -255,10 +255,6 @@
Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
uint32_t index);
-Handle<JSObject> Copy(Handle<JSObject> obj);
-
-Handle<JSObject> DeepCopy(Handle<JSObject> obj);
-
Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
Handle<JSArray> array);
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 4f19603..e11dec8 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -532,14 +532,6 @@
}
-MaybeObject* Heap::AllocateEmptyJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<AllocationSite> allocation_site) {
- return AllocateJSArrayAndStorageWithAllocationSite(elements_kind, 0, 0,
- allocation_site, DONT_INITIALIZE_ARRAY_ELEMENTS);
-}
-
-
bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) {
const char* collector_reason = NULL;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
diff --git a/src/heap-snapshot-generator.cc b/src/heap-snapshot-generator.cc
index bd47eec..5799132 100644
--- a/src/heap-snapshot-generator.cc
+++ b/src/heap-snapshot-generator.cc
@@ -1301,6 +1301,8 @@
AllocationSite* site) {
SetInternalReference(site, entry, "transition_info", site->transition_info(),
AllocationSite::kTransitionInfoOffset);
+ SetInternalReference(site, entry, "dependent_code", site->dependent_code(),
+ AllocationSite::kDependentCodeOffset);
}
@@ -2472,7 +2474,7 @@
int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
HashMap::Entry* cache_entry = strings_.Lookup(
- const_cast<char*>(s), ObjectHash(s), true);
+ const_cast<char*>(s), StringHash(s), true);
if (cache_entry->value == NULL) {
cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
}
@@ -2693,37 +2695,21 @@
void HeapSnapshotJSONSerializer::SerializeStrings() {
- List<HashMap::Entry*> sorted_strings;
- SortHashMap(&strings_, &sorted_strings);
+ ScopedVector<const unsigned char*> sorted_strings(
+ strings_.occupancy() + 1);
+ for (HashMap::Entry* entry = strings_.Start();
+ entry != NULL;
+ entry = strings_.Next(entry)) {
+ int index = static_cast<int>(reinterpret_cast<uintptr_t>(entry->value));
+ sorted_strings[index] = reinterpret_cast<const unsigned char*>(entry->key);
+ }
writer_->AddString("\"<dummy>\"");
- for (int i = 0; i < sorted_strings.length(); ++i) {
+ for (int i = 1; i < sorted_strings.length(); ++i) {
writer_->AddCharacter(',');
- SerializeString(
- reinterpret_cast<const unsigned char*>(sorted_strings[i]->key));
+ SerializeString(sorted_strings[i]);
if (writer_->aborted()) return;
}
}
-template<typename T>
-inline static int SortUsingEntryValue(const T* x, const T* y) {
- uintptr_t x_uint = reinterpret_cast<uintptr_t>((*x)->value);
- uintptr_t y_uint = reinterpret_cast<uintptr_t>((*y)->value);
- if (x_uint > y_uint) {
- return 1;
- } else if (x_uint == y_uint) {
- return 0;
- } else {
- return -1;
- }
-}
-
-
-void HeapSnapshotJSONSerializer::SortHashMap(
- HashMap* map, List<HashMap::Entry*>* sorted_entries) {
- for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p))
- sorted_entries->Add(p);
- sorted_entries->Sort(SortUsingEntryValue);
-}
-
} } // namespace v8::internal
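The sort can go because GetStringId hands out ids densely, starting from 1 (next_string_id_ is initialized to 1 in the header below), so each entry's value doubles as its output position:

// Invariant exploited by the new SerializeStrings:
//   every entry e in strings_ has a unique id e->value in [1, occupancy()]
// Filling sorted_strings[id] = key and walking indices 1..occupancy()
// reproduces the order the O(n log n) sort used to compute, in O(n).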
diff --git a/src/heap-snapshot-generator.h b/src/heap-snapshot-generator.h
index 7b0cf8f..c323f3c 100644
--- a/src/heap-snapshot-generator.h
+++ b/src/heap-snapshot-generator.h
@@ -628,7 +628,7 @@
public:
explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot)
: snapshot_(snapshot),
- strings_(ObjectsMatch),
+ strings_(StringsMatch),
next_node_id_(1),
next_string_id_(1),
writer_(NULL) {
@@ -636,14 +636,16 @@
void Serialize(v8::OutputStream* stream);
private:
- INLINE(static bool ObjectsMatch(void* key1, void* key2)) {
- return key1 == key2;
+ INLINE(static bool StringsMatch(void* key1, void* key2)) {
+ return strcmp(reinterpret_cast<char*>(key1),
+ reinterpret_cast<char*>(key2)) == 0;
}
- INLINE(static uint32_t ObjectHash(const void* key)) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)),
- v8::internal::kZeroHashSeed);
+ INLINE(static uint32_t StringHash(const void* string)) {
+ const char* s = reinterpret_cast<const char*>(string);
+ int len = static_cast<int>(strlen(s));
+ return StringHasher::HashSequentialString(
+ s, len, v8::internal::kZeroHashSeed);
}
int GetStringId(const char* s);
@@ -656,7 +658,6 @@
void SerializeSnapshot();
void SerializeString(const unsigned char* s);
void SerializeStrings();
- void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);
static const int kEdgeFieldsCount;
static const int kNodeFieldsCount;
diff --git a/src/heap.cc b/src/heap.cc
index 24e4039..e81a0e3 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -129,8 +129,6 @@
old_gen_exhausted_(false),
store_buffer_rebuilder_(store_buffer()),
hidden_string_(NULL),
- global_gc_prologue_callback_(NULL),
- global_gc_epilogue_callback_(NULL),
gc_safe_size_of_old_object_(NULL),
total_regexp_code_generated_(0),
tracer_(NULL),
@@ -1055,12 +1053,17 @@
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
- if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
- global_gc_prologue_callback_();
- }
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_type & gc_prologue_callbacks_[i].gc_type) {
- gc_prologue_callbacks_[i].callback(gc_type, flags);
+ if (!gc_prologue_callbacks_[i].pass_isolate_) {
+ v8::GCPrologueCallback callback =
+ reinterpret_cast<v8::GCPrologueCallback>(
+ gc_prologue_callbacks_[i].callback);
+ callback(gc_type, flags);
+ } else {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
+ gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
+ }
}
}
}
@@ -1069,12 +1072,18 @@
void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
- gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
+ if (!gc_epilogue_callbacks_[i].pass_isolate_) {
+ v8::GCPrologueCallback callback =
+ reinterpret_cast<v8::GCPrologueCallback>(
+ gc_epilogue_callbacks_[i].callback);
+ callback(gc_type, kNoGCCallbackFlags);
+ } else {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
+ gc_epilogue_callbacks_[i].callback(
+ isolate, gc_type, kNoGCCallbackFlags);
+ }
}
}
- if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
- global_gc_epilogue_callback_();
- }
}
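When pass_isolate_ is set, the stored callback is dispatched through the isolate-taking signature; a minimal sketch of such a callback and its registration (function name invented, signature per the call above; the public entry point is the corresponding v8::Isolate method):

// Matches v8::Isolate::GCPrologueCallback as invoked above.
void MyGCPrologue(v8::Isolate* isolate, v8::GCType type,
                  v8::GCCallbackFlags flags) {
  // Runs around mark-compact/scavenge; allocation is disallowed here.
}

// Registration through the internal heap (declared in heap.h below);
// pass_isolate defaults to true:
//   heap->AddGCPrologueCallback(MyGCPrologue, v8::kGCTypeAll);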
@@ -2958,17 +2967,16 @@
MaybeObject* Heap::AllocateAllocationSite() {
- Object* result;
+ AllocationSite* site;
MaybeObject* maybe_result = Allocate(allocation_site_map(),
OLD_POINTER_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- AllocationSite* site = AllocationSite::cast(result);
+ if (!maybe_result->To(&site)) return maybe_result;
site->Initialize();
// Link the site
site->set_weak_next(allocation_sites_list());
set_allocation_sites_list(site);
- return result;
+ return site;
}
@@ -4310,6 +4318,7 @@
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(result) + map->instance_size());
alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+ ASSERT(allocation_site->map() == allocation_site_map());
alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
return result;
}
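The memento is laid down immediately after the object's own fields, which is why only its map and site slot need writing; the arithmetic, spelled out:

// result .. result + map->instance_size()   : the object's own fields
// result + map->instance_size() ..          : the AllocationMemento
//     (its map pointer, then its allocation_site field)
// so the raw allocation must have been sized as
//     map->instance_size() + AllocationMemento::kSize.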
@@ -4745,20 +4754,6 @@
}
-MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
- ElementsKind elements_kind,
- int length,
- int capacity,
- Handle<AllocationSite> allocation_site,
- ArrayStorageAllocationMode mode) {
- MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
- allocation_site);
- JSArray* array;
- if (!maybe_array->To(&array)) return maybe_array;
- return AllocateJSArrayStorage(array, length, capacity, mode);
-}
-
-
MaybeObject* Heap::AllocateJSArrayStorage(
JSArray* array,
int length,
@@ -4928,7 +4923,7 @@
}
-MaybeObject* Heap::CopyJSObject(JSObject* source) {
+MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
// Never used to copy functions. If functions need to be copied we
// have to be careful to clear the literals array.
SLOW_ASSERT(!source->IsJSFunction());
@@ -4938,6 +4933,9 @@
int object_size = map->instance_size();
Object* clone;
+ ASSERT(site == NULL || (AllocationSite::CanTrack(map->instance_type()) &&
+ map->instance_type() == JS_ARRAY_TYPE));
+
WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
// If we're forced to always allocate, we use the general allocation
@@ -4958,7 +4956,10 @@
} else {
wb_mode = SKIP_WRITE_BARRIER;
- { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
+ { int adjusted_object_size = site != NULL
+ ? object_size + AllocationMemento::kSize
+ : object_size;
+ MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
if (!maybe_clone->ToObject(&clone)) return maybe_clone;
}
SLOW_ASSERT(InNewSpace(clone));
@@ -4967,115 +4968,14 @@
CopyBlock(HeapObject::cast(clone)->address(),
source->address(),
object_size);
- }
- SLOW_ASSERT(
- JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
- FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
- FixedArray* properties = FixedArray::cast(source->properties());
- // Update elements if necessary.
- if (elements->length() > 0) {
- Object* elem;
- { MaybeObject* maybe_elem;
- if (elements->map() == fixed_cow_array_map()) {
- maybe_elem = FixedArray::cast(elements);
- } else if (source->HasFastDoubleElements()) {
- maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
- } else {
- maybe_elem = CopyFixedArray(FixedArray::cast(elements));
- }
- if (!maybe_elem->ToObject(&elem)) return maybe_elem;
+ if (site != NULL) {
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+ reinterpret_cast<Address>(clone) + object_size);
+ alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+ ASSERT(site->map() == allocation_site_map());
+ alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
}
- JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
- }
- // Update properties if necessary.
- if (properties->length() > 0) {
- Object* prop;
- { MaybeObject* maybe_prop = CopyFixedArray(properties);
- if (!maybe_prop->ToObject(&prop)) return maybe_prop;
- }
- JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
- }
- // Return the new clone.
- return clone;
-}
-
-
-MaybeObject* Heap::CopyJSObjectWithAllocationSite(
- JSObject* source,
- AllocationSite* site) {
- // Never used to copy functions. If functions need to be copied we
- // have to be careful to clear the literals array.
- SLOW_ASSERT(!source->IsJSFunction());
-
- // Make the clone.
- Map* map = source->map();
- int object_size = map->instance_size();
- Object* clone;
-
- ASSERT(AllocationSite::CanTrack(map->instance_type()));
- ASSERT(map->instance_type() == JS_ARRAY_TYPE);
- WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
-
- // If we're forced to always allocate, we use the general allocation
- // functions which may leave us with an object in old space.
- int adjusted_object_size = object_size;
- if (always_allocate()) {
- // We'll only track origin if we are certain to allocate in new space
- const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
- if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
- adjusted_object_size += AllocationMemento::kSize;
- }
-
- { MaybeObject* maybe_clone =
- AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
- }
- Address clone_address = HeapObject::cast(clone)->address();
- CopyBlock(clone_address,
- source->address(),
- object_size);
- // Update write barrier for all fields that lie beyond the header.
- int write_barrier_offset = adjusted_object_size > object_size
- ? JSArray::kSize + AllocationMemento::kSize
- : JSObject::kHeaderSize;
- if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
- RecordWrites(clone_address,
- write_barrier_offset,
- (object_size - write_barrier_offset) / kPointerSize);
- }
-
- // Track allocation site information, if we failed to allocate it inline.
- if (InNewSpace(clone) &&
- adjusted_object_size == object_size) {
- MaybeObject* maybe_alloc_memento =
- AllocateStruct(ALLOCATION_MEMENTO_TYPE);
- AllocationMemento* alloc_memento;
- if (maybe_alloc_memento->To(&alloc_memento)) {
- alloc_memento->set_map_no_write_barrier(allocation_memento_map());
- alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
- }
- }
- } else {
- wb_mode = SKIP_WRITE_BARRIER;
- adjusted_object_size += AllocationMemento::kSize;
-
- { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
- }
- SLOW_ASSERT(InNewSpace(clone));
- // Since we know the clone is allocated in new space, we can copy
- // the contents without worrying about updating the write barrier.
- CopyBlock(HeapObject::cast(clone)->address(),
- source->address(),
- object_size);
- }
-
- if (adjusted_object_size > object_size) {
- AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
- reinterpret_cast<Address>(clone) + object_size);
- alloc_memento->set_map_no_write_barrier(allocation_memento_map());
- alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
}
SLOW_ASSERT(
@@ -5474,24 +5374,6 @@
}
-MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<AllocationSite> allocation_site) {
- Context* native_context = isolate()->context()->native_context();
- JSFunction* array_function = native_context->array_function();
- Map* map = array_function->initial_map();
- Object* maybe_map_array = native_context->js_array_maps();
- if (!maybe_map_array->IsUndefined()) {
- Object* maybe_transitioned_map =
- FixedArray::cast(maybe_map_array)->get(elements_kind);
- if (!maybe_transitioned_map->IsUndefined()) {
- map = Map::cast(maybe_transitioned_map);
- }
- }
- return AllocateJSObjectFromMapWithAllocationSite(map, allocation_site);
-}
-
-
MaybeObject* Heap::AllocateEmptyFixedArray() {
int size = FixedArray::SizeFor(0);
Object* result;
@@ -7068,15 +6950,17 @@
}
-void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
+void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type,
+ bool pass_isolate) {
ASSERT(callback != NULL);
- GCPrologueCallbackPair pair(callback, gc_type);
+ GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
ASSERT(!gc_prologue_callbacks_.Contains(pair));
return gc_prologue_callbacks_.Add(pair);
}
-void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
ASSERT(callback != NULL);
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_prologue_callbacks_[i].callback == callback) {
@@ -7088,15 +6972,17 @@
}
-void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
+void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+ GCType gc_type,
+ bool pass_isolate) {
ASSERT(callback != NULL);
- GCEpilogueCallbackPair pair(callback, gc_type);
+ GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
ASSERT(!gc_epilogue_callbacks_.Contains(pair));
return gc_epilogue_callbacks_.Add(pair);
}
-void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
ASSERT(callback != NULL);
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_epilogue_callbacks_[i].callback == callback) {
diff --git a/src/heap.h b/src/heap.h
index 4dfa076..573c512 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -635,10 +635,6 @@
pretenure);
}
- inline MUST_USE_RESULT MaybeObject* AllocateEmptyJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<AllocationSite> allocation_site);
-
// Allocate a JSArray with a specified length but elements that are left
// uninitialized.
MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage(
@@ -648,13 +644,6 @@
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorageWithAllocationSite(
- ElementsKind elements_kind,
- int length,
- int capacity,
- Handle<AllocationSite> allocation_site,
- ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
-
MUST_USE_RESULT MaybeObject* AllocateJSArrayStorage(
JSArray* array,
int length,
@@ -677,10 +666,9 @@
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
// Returns failure if allocation failed.
- MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source);
-
- MUST_USE_RESULT MaybeObject* CopyJSObjectWithAllocationSite(
- JSObject* source, AllocationSite* site);
+  // Optionally takes an AllocationSite to be recorded in an AllocationMemento
+  // appended to the copy.
+ MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source,
+ AllocationSite* site = NULL);
// Allocates the function prototype.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -1272,22 +1260,15 @@
void GarbageCollectionGreedyCheck();
#endif
- void AddGCPrologueCallback(
- GCPrologueCallback callback, GCType gc_type_filter);
- void RemoveGCPrologueCallback(GCPrologueCallback callback);
+ void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type_filter,
+ bool pass_isolate = true);
+ void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);
- void AddGCEpilogueCallback(
- GCEpilogueCallback callback, GCType gc_type_filter);
- void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
-
- void SetGlobalGCPrologueCallback(GCCallback callback) {
- ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL));
- global_gc_prologue_callback_ = callback;
- }
- void SetGlobalGCEpilogueCallback(GCCallback callback) {
- ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
- global_gc_epilogue_callback_ = callback;
- }
+ void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+ GCType gc_type_filter,
+ bool pass_isolate = true);
+ void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
@@ -2032,32 +2013,37 @@
// GC callback function, called before and after mark-compact GC.
// Allocations in the callback function are disallowed.
struct GCPrologueCallbackPair {
- GCPrologueCallbackPair(GCPrologueCallback callback, GCType gc_type)
- : callback(callback), gc_type(gc_type) {
+ GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type,
+ bool pass_isolate)
+ : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {
}
bool operator==(const GCPrologueCallbackPair& pair) const {
return pair.callback == callback;
}
- GCPrologueCallback callback;
+ v8::Isolate::GCPrologueCallback callback;
GCType gc_type;
+ // TODO(dcarney): remove variable
+ bool pass_isolate_;
};
List<GCPrologueCallbackPair> gc_prologue_callbacks_;
struct GCEpilogueCallbackPair {
- GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type)
- : callback(callback), gc_type(gc_type) {
+ GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type,
+ bool pass_isolate)
+ : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {
}
bool operator==(const GCEpilogueCallbackPair& pair) const {
return pair.callback == callback;
}
- GCEpilogueCallback callback;
+ v8::Isolate::GCPrologueCallback callback;
GCType gc_type;
+ // TODO(dcarney): remove variable
+ bool pass_isolate_;
};
List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
- GCCallback global_gc_prologue_callback_;
- GCCallback global_gc_epilogue_callback_;
-
// Support for computing object sizes during GC.
HeapObjectCallback gc_safe_size_of_old_object_;
static int GcSafeSizeOfOldObject(HeapObject* object);
@@ -2116,10 +2102,6 @@
ElementsKind elements_kind,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeObject* AllocateJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<AllocationSite> allocation_site);
-
// Allocate empty fixed array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
diff --git a/src/hydrogen-alias-analysis.h b/src/hydrogen-alias-analysis.h
index 73e116e..21a5462 100644
--- a/src/hydrogen-alias-analysis.h
+++ b/src/hydrogen-alias-analysis.h
@@ -88,15 +88,6 @@
inline bool NoAlias(HValue* a, HValue* b) {
return Query(a, b) == kNoAlias;
}
-
- // Returns the actual value of an instruction. In the case of a chain
- // of informative definitions, return the root of the chain.
- HValue* ActualValue(HValue* obj) {
- while (obj->IsInformativeDefinition()) { // Walk a chain of idefs.
- obj = obj->RedefinedOperand();
- }
- return obj;
- }
};
diff --git a/src/hydrogen-bce.cc b/src/hydrogen-bce.cc
index 7c81ec1..869db54 100644
--- a/src/hydrogen-bce.cc
+++ b/src/hydrogen-bce.cc
@@ -318,12 +318,54 @@
}
+class HBoundsCheckEliminationState {
+ public:
+ HBasicBlock* block_;
+ BoundsCheckBbData* bb_data_list_;
+ int index_;
+};
+
+
// Eliminates checks in bb and recursively in the dominated blocks.
// Also replace the results of check instructions with the original value, if
// the result is used. This is safe now, since we don't do code motion after
// this point. It enables better register allocation since the value produced
// by check instructions is really a copy of the original value.
void HBoundsCheckEliminationPhase::EliminateRedundantBoundsChecks(
+ HBasicBlock* entry) {
+ // Allocate the stack.
+ HBoundsCheckEliminationState* stack =
+ zone()->NewArray<HBoundsCheckEliminationState>(graph()->blocks()->length());
+
+ // Explicitly push the entry block.
+ stack[0].block_ = entry;
+ stack[0].bb_data_list_ = PreProcessBlock(entry);
+ stack[0].index_ = 0;
+ int stack_depth = 1;
+
+ // Implement depth-first traversal with a stack.
+ while (stack_depth > 0) {
+ int current = stack_depth - 1;
+ HBoundsCheckEliminationState* state = &stack[current];
+ const ZoneList<HBasicBlock*>* children = state->block_->dominated_blocks();
+
+ if (state->index_ < children->length()) {
+ // Recursively visit children blocks.
+ HBasicBlock* child = children->at(state->index_++);
+ int next = stack_depth++;
+ stack[next].block_ = child;
+ stack[next].bb_data_list_ = PreProcessBlock(child);
+ stack[next].index_ = 0;
+ } else {
+ // Finished with all children; post process the block.
+ PostProcessBlock(state->block_, state->bb_data_list_);
+ stack_depth--;
+ }
+ }
+}
+
+
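This is the standard rewrite of a recursive tree walk into an explicit stack, matching the release-note item about making bounds check elimination iterative. A self-contained sketch of the same pattern, with hypothetical Node/Pre/Post standing in for HBasicBlock/PreProcessBlock/PostProcessBlock:

#include <vector>

struct Node { std::vector<Node*> kids; };

void Pre(Node* n);   // stands in for PreProcessBlock
void Post(Node* n);  // stands in for PostProcessBlock

struct Frame { Node* node; size_t next_child; };

void Walk(Node* root) {
  std::vector<Frame> stack;
  Pre(root);
  stack.push_back(Frame{root, 0});
  while (!stack.empty()) {
    Frame& top = stack.back();
    if (top.next_child < top.node->kids.size()) {
      // Descend into the next unvisited child.
      Node* child = top.node->kids[top.next_child++];
      Pre(child);
      stack.push_back(Frame{child, 0});  // 'top' is not used after this
    } else {
      // All children done; post-process and pop.
      Post(top.node);
      stack.pop_back();
    }
  }
}

The V8 version avoids even the vector by zone-allocating one state slot per basic block up front, since the stack can never grow past the number of blocks.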
+BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
HBasicBlock* bb) {
BoundsCheckBbData* bb_data_list = NULL;
@@ -375,19 +417,20 @@
}
}
- for (int i = 0; i < bb->dominated_blocks()->length(); ++i) {
- EliminateRedundantBoundsChecks(bb->dominated_blocks()->at(i));
- }
+ return bb_data_list;
+}
- for (BoundsCheckBbData* data = bb_data_list;
- data != NULL;
- data = data->NextInBasicBlock()) {
+
+void HBoundsCheckEliminationPhase::PostProcessBlock(
+ HBasicBlock* block, BoundsCheckBbData* data) {
+ while (data != NULL) {
data->RemoveZeroOperations();
if (data->FatherInDominatorTree()) {
table_.Insert(data->Key(), data->FatherInDominatorTree(), zone());
} else {
table_.Delete(data->Key());
}
+ data = data->NextInBasicBlock();
}
}
diff --git a/src/hydrogen-bce.h b/src/hydrogen-bce.h
index d91997b..c55dea7 100644
--- a/src/hydrogen-bce.h
+++ b/src/hydrogen-bce.h
@@ -60,6 +60,8 @@
private:
void EliminateRedundantBoundsChecks(HBasicBlock* bb);
+ BoundsCheckBbData* PreProcessBlock(HBasicBlock* bb);
+ void PostProcessBlock(HBasicBlock* bb, BoundsCheckBbData* data);
BoundsCheckTable table_;
diff --git a/src/hydrogen-escape-analysis.cc b/src/hydrogen-escape-analysis.cc
index 997e4f9..3a7e10d 100644
--- a/src/hydrogen-escape-analysis.cc
+++ b/src/hydrogen-escape-analysis.cc
@@ -154,9 +154,8 @@
HValue* value = state->map_value();
// TODO(mstarzinger): This will narrow a map check against a set of maps
// down to the first element in the set. Revisit and fix this.
- Handle<Map> map_object = mapcheck->map_set()->first();
- UniqueValueId map_id = mapcheck->map_unique_ids()->first();
- HCheckValue* check = HCheckValue::New(zone, NULL, value, map_object, map_id);
+ HCheckValue* check = HCheckValue::New(
+ zone, NULL, value, mapcheck->first_map(), false);
check->InsertBefore(mapcheck);
return check;
}
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index cca95b9..a685198 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1052,7 +1052,7 @@
void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" (%p)", *map());
+ stream->Add(" (%p)", *map().handle());
HControlInstruction::PrintDataTo(stream);
}
@@ -1431,11 +1431,9 @@
HStoreNamedField* store = HStoreNamedField::cast(dominator);
if (!store->has_transition() || store->object() != value()) return;
HConstant* transition = HConstant::cast(store->transition());
- for (int i = 0; i < map_set()->length(); i++) {
- if (transition->UniqueValueIdsMatch(map_unique_ids_.at(i))) {
- DeleteAndReplaceWith(NULL);
- return;
- }
+ if (map_set_.Contains(transition->GetUnique())) {
+ DeleteAndReplaceWith(NULL);
+ return;
}
}
}
@@ -1443,9 +1441,9 @@
void HCheckMaps::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" [%p", *map_set()->first());
- for (int i = 1; i < map_set()->length(); ++i) {
- stream->Add(",%p", *map_set()->at(i));
+ stream->Add(" [%p", *map_set_.at(0).handle());
+ for (int i = 1; i < map_set_.size(); ++i) {
+ stream->Add(",%p", *map_set_.at(i).handle());
}
stream->Add("]%s", CanOmitMapChecks() ? "(omitted)" : "");
}
@@ -1454,13 +1452,13 @@
void HCheckValue::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" ");
- object()->ShortPrint(stream);
+ object().handle()->ShortPrint(stream);
}
HValue* HCheckValue::Canonicalize() {
return (value()->IsConstant() &&
- HConstant::cast(value())->UniqueValueIdsMatch(object_unique_id_))
+ HConstant::cast(value())->GetUnique() == object_)
? NULL
: this;
}
@@ -2526,9 +2524,11 @@
bool HConstant::EmitAtUses() {
ASSERT(IsLinked());
- if (block()->graph()->has_osr()) {
- return block()->graph()->IsStandardConstant(this);
+ if (block()->graph()->has_osr() &&
+ block()->graph()->IsStandardConstant(this)) {
+ return true;
}
+ if (UseCount() == 0) return true;
if (IsCell()) return false;
if (representation().IsDouble()) return false;
return true;
@@ -2573,10 +2573,6 @@
Representation::Integer32(),
is_not_in_new_space_,
handle_);
- } else {
- ASSERT(!HasNumberValue());
- Maybe<HConstant*> number = CopyToTruncatedNumber(zone);
- if (number.has_value) return number.value->CopyToTruncatedInt32(zone);
}
return Maybe<HConstant*>(res != NULL, res);
}
@@ -2859,15 +2855,9 @@
}
-void HCompareHoleAndBranch::PrintDataTo(StringStream* stream) {
- object()->PrintNameTo(stream);
- HControlInstruction::PrintDataTo(stream);
-}
-
-
void HCompareHoleAndBranch::InferRepresentation(
HInferRepresentationPhase* h_infer) {
- ChangeRepresentation(object()->representation());
+ ChangeRepresentation(value()->representation());
}
@@ -2937,22 +2927,17 @@
if (map->CanOmitMapChecks() &&
value->IsConstant() &&
HConstant::cast(value)->HasMap(map)) {
- check_map->omit(info);
+ // TODO(titzer): collect dependent map checks into a list.
+ check_map->omit_ = true;
+ if (map->CanTransition()) {
+ map->AddDependentCompilationInfo(
+ DependentCode::kPrototypeCheckGroup, info);
+ }
}
return check_map;
}
-void HCheckMaps::FinalizeUniqueValueId() {
- if (!map_unique_ids_.is_empty()) return;
- Zone* zone = block()->zone();
- map_unique_ids_.Initialize(map_set_.length(), zone);
- for (int i = 0; i < map_set_.length(); i++) {
- map_unique_ids_.Add(UniqueValueId(map_set_.at(i)), zone);
- }
-}
-
-
void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
@@ -3148,19 +3133,19 @@
void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
- ElementsKind from_kind = original_map()->elements_kind();
- ElementsKind to_kind = transitioned_map()->elements_kind();
+ ElementsKind from_kind = original_map().handle()->elements_kind();
+ ElementsKind to_kind = transitioned_map().handle()->elements_kind();
stream->Add(" %p [%s] -> %p [%s]",
- *original_map(),
+ *original_map().handle(),
ElementsAccessor::ForKind(from_kind)->name(),
- *transitioned_map(),
+ *transitioned_map().handle(),
ElementsAccessor::ForKind(to_kind)->name());
if (IsSimpleMapChangeTransition(from_kind, to_kind)) stream->Add(" (simple)");
}
void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
- stream->Add("[%p]", *cell());
+ stream->Add("[%p]", *cell().handle());
if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
if (details_.IsReadOnly()) stream->Add(" (read-only)");
}
@@ -3188,7 +3173,7 @@
void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
- stream->Add("[%p] = ", *cell());
+ stream->Add("[%p] = ", *cell().handle());
value()->PrintNameTo(stream);
if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
if (details_.IsReadOnly()) stream->Add(" (read-only)");
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 7d33141..8cb2f59 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -36,6 +36,7 @@
#include "deoptimizer.h"
#include "small-pointer-list.h"
#include "string-stream.h"
+#include "unique.h"
#include "v8conversions.h"
#include "v8utils.h"
#include "zone.h"
@@ -128,7 +129,6 @@
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
- V(IsNumberAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -143,6 +143,7 @@
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
+ V(LoadRoot) \
V(MapEnumLength) \
V(MathFloorOfDiv) \
V(MathMinMax) \
@@ -350,6 +351,7 @@
IMMOVABLE_UNIQUE_VALUE_ID(false_value)
IMMOVABLE_UNIQUE_VALUE_ID(the_hole_value)
IMMOVABLE_UNIQUE_VALUE_ID(empty_string)
+ IMMOVABLE_UNIQUE_VALUE_ID(empty_fixed_array)
#undef IMMOVABLE_UNIQUE_VALUE_ID
@@ -782,11 +784,15 @@
// phase (so that live ranges will be shorter).
virtual bool IsPurelyInformativeDefinition() { return false; }
- // This method must always return the original HValue SSA definition
- // (regardless of any iDef of this value).
+ // This method must always return the original HValue SSA definition,
+ // regardless of any chain of iDefs of this value.
HValue* ActualValue() {
- int index = RedefinedOperandIndex();
- return index == kNoRedefinedOperand ? this : OperandAt(index);
+ HValue* value = this;
+ int index;
+ while ((index = value->RedefinedOperandIndex()) != kNoRedefinedOperand) {
+ value = value->OperandAt(index);
+ }
+ return value;
}
bool IsInteger32Constant();
@@ -1201,6 +1207,12 @@
return SuccessorCount() > 1 ? SuccessorAt(1) : NULL;
}
+ void Not() {
+ HBasicBlock* swap = SuccessorAt(0);
+ SetSuccessorAt(0, SuccessorAt(1));
+ SetSuccessorAt(1, swap);
+ }
+
DECLARE_ABSTRACT_INSTRUCTION(ControlInstruction)
};
@@ -1303,6 +1315,8 @@
// Inserts an int3/stop break instruction for debugging purposes.
class HDebugBreak V8_FINAL : public HTemplateInstruction<0> {
public:
+ DECLARE_INSTRUCTION_FACTORY_P0(HDebugBreak);
+
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
}
@@ -1345,14 +1359,12 @@
class HBranch V8_FINAL : public HUnaryControlInstruction {
public:
- HBranch(HValue* value,
- ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(),
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target),
- expected_input_types_(expected_input_types) {
- SetFlag(kAllowUndefinedAsNaN);
- }
+ DECLARE_INSTRUCTION_FACTORY_P1(HBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*,
+ ToBooleanStub::Types);
+ DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*,
+ ToBooleanStub::Types,
+ HBasicBlock*, HBasicBlock*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::None();
@@ -1366,24 +1378,28 @@
DECLARE_CONCRETE_INSTRUCTION(Branch)
private:
+ HBranch(HValue* value,
+ ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(),
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target),
+ expected_input_types_(expected_input_types) {
+ SetFlag(kAllowUndefinedAsNaN);
+ }
+
ToBooleanStub::Types expected_input_types_;
};
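This commit repeats one refactoring across many instruction classes: the constructor becomes private and DECLARE_INSTRUCTION_FACTORY_Pn supplies a static New. The macro itself is not in this patch; presumably it expands along these lines (see hydrogen-instructions.h for the real definition):

// Assumption: sketch of the P2 variant, not the actual macro.
#define DECLARE_INSTRUCTION_FACTORY_P2(I, P1, P2)   \
  static I* New(Zone* zone, P1 p1, P2 p2) {         \
    return new(zone) I(p1, p2);                     \
  }

Routing all creation through zone-allocating factories keeps the placement-new in one place and lets call sites stay uniform as constructor signatures evolve.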
class HCompareMap V8_FINAL : public HUnaryControlInstruction {
public:
- HCompareMap(HValue* value,
- Handle<Map> map,
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target),
- map_(map) {
- ASSERT(!map.is_null());
- }
+ DECLARE_INSTRUCTION_FACTORY_P2(HCompareMap, HValue*, Handle<Map>);
+ DECLARE_INSTRUCTION_FACTORY_P4(HCompareMap, HValue*, Handle<Map>,
+ HBasicBlock*, HBasicBlock*);
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> map() const { return map_; }
+ Unique<Map> map() const { return map_; }
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
@@ -1395,7 +1411,16 @@
virtual int RedefinedOperandIndex() { return 0; }
private:
- Handle<Map> map_;
+ HCompareMap(HValue* value,
+ Handle<Map> map,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target),
+ map_(Unique<Map>(map)) {
+ ASSERT(!map.is_null());
+ }
+
+ Unique<Map> map_;
};
@@ -2509,6 +2534,40 @@
};
+class HLoadRoot V8_FINAL : public HTemplateInstruction<0> {
+ public:
+ DECLARE_INSTRUCTION_FACTORY_P1(HLoadRoot, Heap::RootListIndex);
+ DECLARE_INSTRUCTION_FACTORY_P2(HLoadRoot, Heap::RootListIndex, HType);
+
+ virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+ return Representation::None();
+ }
+
+ Heap::RootListIndex index() const { return index_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot)
+
+ protected:
+ virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+ HLoadRoot* b = HLoadRoot::cast(other);
+ return index_ == b->index_;
+ }
+
+ private:
+ HLoadRoot(Heap::RootListIndex index, HType type = HType::Tagged())
+ : HTemplateInstruction<0>(type), index_(index) {
+ SetFlag(kUseGVN);
+ // TODO(bmeurer): We'll need kDependsOnRoots once we add the
+ // corresponding HStoreRoot instruction.
+ SetGVNFlag(kDependsOnCalls);
+ }
+
+ virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+
+ const Heap::RootListIndex index_;
+};
+
+
class HLoadExternalArrayPointer V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HLoadExternalArrayPointer, HValue*);
@@ -2553,7 +2612,6 @@
for (int i = 0; i < maps->length(); i++) {
check_map->Add(maps->at(i), zone);
}
- check_map->map_set_.Sort();
return check_map;
}
@@ -2568,38 +2626,26 @@
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
HValue* value() { return OperandAt(0); }
- SmallMapList* map_set() { return &map_set_; }
- ZoneList<UniqueValueId>* map_unique_ids() { return &map_unique_ids_; }
- bool has_migration_target() {
+ Unique<Map> first_map() const { return map_set_.at(0); }
+ UniqueSet<Map> map_set() const { return map_set_; }
+
+ bool has_migration_target() const {
return has_migration_target_;
}
- virtual void FinalizeUniqueValueId() V8_OVERRIDE;
-
DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
- ASSERT_EQ(map_set_.length(), map_unique_ids_.length());
- HCheckMaps* b = HCheckMaps::cast(other);
- // Relies on the fact that map_set has been sorted before.
- if (map_unique_ids_.length() != b->map_unique_ids_.length()) {
- return false;
- }
- for (int i = 0; i < map_unique_ids_.length(); i++) {
- if (map_unique_ids_.at(i) != b->map_unique_ids_.at(i)) {
- return false;
- }
- }
- return true;
+ return this->map_set_.Equals(&HCheckMaps::cast(other)->map_set_);
}
virtual int RedefinedOperandIndex() { return 0; }
private:
void Add(Handle<Map> map, Zone* zone) {
- map_set_.Add(map, zone);
+ map_set_.Add(Unique<Map>(map), zone);
if (!has_migration_target_ && map->is_migration_target()) {
has_migration_target_ = true;
SetGVNFlag(kChangesNewSpacePromotion);
@@ -2609,10 +2655,9 @@
// Clients should use one of the static New* methods above.
HCheckMaps(HValue* value, Zone *zone, HValue* typecheck)
: HTemplateInstruction<2>(value->type()),
- omit_(false), has_migration_target_(false), map_unique_ids_(0, zone) {
+ omit_(false), has_migration_target_(false) {
SetOperandAt(0, value);
// Use the object value for the dependency if NULL is passed.
- // TODO(titzer): do GVN flags already express this dependency?
SetOperandAt(1, typecheck != NULL ? typecheck : value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
@@ -2621,36 +2666,33 @@
SetGVNFlag(kDependsOnElementsKind);
}
- void omit(CompilationInfo* info) {
- omit_ = true;
- for (int i = 0; i < map_set_.length(); i++) {
- Handle<Map> map = map_set_.at(i);
- if (!map->CanTransition()) continue;
- map->AddDependentCompilationInfo(DependentCode::kPrototypeCheckGroup,
- info);
- }
- }
-
bool omit_;
bool has_migration_target_;
- SmallMapList map_set_;
- ZoneList<UniqueValueId> map_unique_ids_;
+ UniqueSet<Map> map_set_;
};
class HCheckValue V8_FINAL : public HUnaryOperation {
public:
static HCheckValue* New(Zone* zone, HValue* context,
- HValue* value, Handle<JSFunction> target) {
- bool in_new_space = zone->isolate()->heap()->InNewSpace(*target);
+ HValue* value, Handle<JSFunction> func) {
+ bool in_new_space = zone->isolate()->heap()->InNewSpace(*func);
+ // NOTE: We create an uninitialized Unique and initialize it later.
+ // This is because a JSFunction can move due to GC during graph creation.
+ // TODO(titzer): This is a migration crutch. Replace with some kind of
+ // Uniqueness scope later.
+ Unique<JSFunction> target = Unique<JSFunction>::CreateUninitialized(func);
HCheckValue* check = new(zone) HCheckValue(value, target, in_new_space);
return check;
}
static HCheckValue* New(Zone* zone, HValue* context,
- HValue* value, Handle<Map> map, UniqueValueId id) {
- HCheckValue* check = new(zone) HCheckValue(value, map, false);
- check->object_unique_id_ = id;
- return check;
+ HValue* value, Unique<HeapObject> target,
+ bool object_in_new_space) {
+ return new(zone) HCheckValue(value, target, object_in_new_space);
+ }
+
+ virtual void FinalizeUniqueValueId() V8_OVERRIDE {
+ object_ = Unique<HeapObject>(object_.handle());
}
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
@@ -2664,11 +2706,7 @@
virtual void Verify() V8_OVERRIDE;
#endif
- virtual void FinalizeUniqueValueId() V8_OVERRIDE {
- object_unique_id_ = UniqueValueId(object_);
- }
-
- Handle<HeapObject> object() const { return object_; }
+ Unique<HeapObject> object() const { return object_; }
bool object_in_new_space() const { return object_in_new_space_; }
DECLARE_CONCRETE_INSTRUCTION(CheckValue)
@@ -2676,19 +2714,20 @@
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HCheckValue* b = HCheckValue::cast(other);
- return object_unique_id_ == b->object_unique_id_;
+ return object_ == b->object_;
}
private:
- HCheckValue(HValue* value, Handle<HeapObject> object, bool in_new_space)
+ HCheckValue(HValue* value, Unique<HeapObject> object,
+ bool object_in_new_space)
: HUnaryOperation(value, value->type()),
- object_(object), object_in_new_space_(in_new_space) {
+ object_(object),
+ object_in_new_space_(object_in_new_space) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
- Handle<HeapObject> object_;
- UniqueValueId object_unique_id_;
+ Unique<HeapObject> object_;
bool object_in_new_space_;
};
@@ -2784,21 +2823,6 @@
};
-class HIsNumberAndBranch V8_FINAL : public HUnaryControlInstruction {
- public:
- explicit HIsNumberAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) {
- SetFlag(kFlexibleRepresentation);
- }
-
- virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch)
-};
-
-
class HCheckHeapObject V8_FINAL : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HCheckHeapObject, HValue*);
@@ -3350,7 +3374,8 @@
unique_id_ == UniqueValueId::true_value(heap) ||
unique_id_ == UniqueValueId::false_value(heap) ||
unique_id_ == UniqueValueId::the_hole_value(heap) ||
- unique_id_ == UniqueValueId::empty_string(heap);
+ unique_id_ == UniqueValueId::empty_string(heap) ||
+ unique_id_ == UniqueValueId::empty_fixed_array(heap);
}
bool IsCell() const {
@@ -3451,6 +3476,12 @@
unique_id_ == other;
}
+ Unique<Object> GetUnique() const {
+ // TODO(titzer): store a Unique<HeapObject> inside the HConstant.
+ Address raw_address = reinterpret_cast<Address>(unique_id_.Hashcode());
+ return Unique<Object>(raw_address, handle_);
+ }
+
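Unique<T>, pulled in by the new unique.h include above, replaces the Handle<T>-plus-UniqueValueId pairs throughout this file. From its uses here it couples a handle with a captured raw address that serves as identity, so equality and hashing never touch the heap; a sketch consistent with those uses (the real class lives in src/unique.h):

// Sketch only; see src/unique.h for the real thing.
template <class T>
class Unique {
 public:
  // Captures the object's current address as its identity.
  explicit Unique(Handle<T> handle)
      : raw_address_(reinterpret_cast<Address>(*handle)), handle_(handle) {}
  // Raw form, as used by HConstant::GetUnique() above.
  Unique(Address raw_address, Handle<T> handle)
      : raw_address_(raw_address), handle_(handle) {}
  bool operator==(const Unique<T>& other) const {
    return raw_address_ == other.raw_address_;  // no dereference needed
  }
  intptr_t Hashcode() const {
    return reinterpret_cast<intptr_t>(raw_address_);
  }
  Handle<T> handle() const { return handle_; }
 private:
  Address raw_address_;
  Handle<T> handle_;
};

CreateUninitialized exists for objects that may still move during graph construction (see the NOTE in HCheckValue::New above); FinalizeUniqueValueId then rebuilds the Unique from the handle once addresses are stable.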
#ifdef DEBUG
virtual void Verify() V8_OVERRIDE { }
#endif
@@ -3882,13 +3913,13 @@
}
virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
- if (!to.IsTagged()) {
+ if (to.IsTagged()) {
+ SetAllSideEffects();
+ ClearFlag(kUseGVN);
+ } else {
ASSERT(to.IsSmiOrInteger32());
ClearAllSideEffects();
SetFlag(kUseGVN);
- } else {
- SetAllSideEffects();
- ClearFlag(kUseGVN);
}
}
@@ -4008,13 +4039,11 @@
class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
public:
- HCompareNumericAndBranch(HValue* left, HValue* right, Token::Value token)
- : token_(token) {
- SetFlag(kFlexibleRepresentation);
- ASSERT(Token::IsCompareOp(token));
- SetOperandAt(0, left);
- SetOperandAt(1, right);
- }
+ DECLARE_INSTRUCTION_FACTORY_P3(HCompareNumericAndBranch,
+ HValue*, HValue*, Token::Value);
+ DECLARE_INSTRUCTION_FACTORY_P5(HCompareNumericAndBranch,
+ HValue*, HValue*, Token::Value,
+ HBasicBlock*, HBasicBlock*);
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
@@ -4040,25 +4069,30 @@
DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch)
private:
+ HCompareNumericAndBranch(HValue* left,
+ HValue* right,
+ Token::Value token,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : token_(token) {
+ SetFlag(kFlexibleRepresentation);
+ ASSERT(Token::IsCompareOp(token));
+ SetOperandAt(0, left);
+ SetOperandAt(1, right);
+ SetSuccessorAt(0, true_target);
+ SetSuccessorAt(1, false_target);
+ }
+
Representation observed_input_representation_[2];
Token::Value token_;
};
-class HCompareHoleAndBranch V8_FINAL
- : public HTemplateControlInstruction<2, 1> {
+class HCompareHoleAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- // TODO(danno): make this private when the IfBuilder properly constructs
- // control flow instructions.
- explicit HCompareHoleAndBranch(HValue* object) {
- SetFlag(kFlexibleRepresentation);
- SetFlag(kAllowUndefinedAsNaN);
- SetOperandAt(0, object);
- }
-
DECLARE_INSTRUCTION_FACTORY_P1(HCompareHoleAndBranch, HValue*);
-
- HValue* object() { return OperandAt(0); }
+ DECLARE_INSTRUCTION_FACTORY_P3(HCompareHoleAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
virtual void InferRepresentation(
HInferRepresentationPhase* h_infer) V8_OVERRIDE;
@@ -4067,23 +4101,24 @@
return representation();
}
- virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-
DECLARE_CONCRETE_INSTRUCTION(CompareHoleAndBranch)
+
+ private:
+ HCompareHoleAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {
+ SetFlag(kFlexibleRepresentation);
+ SetFlag(kAllowUndefinedAsNaN);
+ }
};
class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
public:
- // TODO(danno): make this private when the IfBuilder properly constructs
- // control flow instructions.
- HCompareObjectEqAndBranch(HValue* left,
- HValue* right) {
- SetOperandAt(0, left);
- SetOperandAt(1, right);
- }
-
DECLARE_INSTRUCTION_FACTORY_P2(HCompareObjectEqAndBranch, HValue*, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P4(HCompareObjectEqAndBranch, HValue*, HValue*,
+ HBasicBlock*, HBasicBlock*);
HValue* left() { return OperandAt(0); }
HValue* right() { return OperandAt(1); }
@@ -4099,38 +4134,65 @@
}
DECLARE_CONCRETE_INSTRUCTION(CompareObjectEqAndBranch)
+
+ private:
+ HCompareObjectEqAndBranch(HValue* left,
+ HValue* right,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL) {
+ SetOperandAt(0, left);
+ SetOperandAt(1, right);
+ SetSuccessorAt(0, true_target);
+ SetSuccessorAt(1, false_target);
+ }
};
class HIsObjectAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsObjectAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsObjectAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsObjectAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
+
+ private:
+ HIsObjectAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
+
class HIsStringAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsStringAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsStringAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsStringAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
+
+ private:
+ HIsStringAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsSmiAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsSmiAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsSmiAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
@@ -4140,19 +4202,32 @@
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ HIsSmiAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
class HIsUndetectableAndBranch V8_FINAL : public HUnaryControlInstruction {
public:
- explicit HIsUndetectableAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
+ DECLARE_INSTRUCTION_FACTORY_P1(HIsUndetectableAndBranch, HValue*);
+ DECLARE_INSTRUCTION_FACTORY_P3(HIsUndetectableAndBranch, HValue*,
+ HBasicBlock*, HBasicBlock*);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
+
+ private:
+ HIsUndetectableAndBranch(HValue* value,
+ HBasicBlock* true_target = NULL,
+ HBasicBlock* false_target = NULL)
+ : HUnaryControlInstruction(value, true_target, false_target) {}
};
@@ -4522,10 +4597,12 @@
HValue* right);
static HInstruction* NewImul(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right) {
- HMul* mul = new(zone) HMul(context, left, right);
+ HValue* context,
+ HValue* left,
+ HValue* right) {
+ HInstruction* instr = HMul::New(zone, context, left, right);
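+ // HMul::New may constant-fold its operands; only a genuine multiply
+ // needs the representation and overflow adjustments below.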
+ if (!instr->IsMul()) return instr;
+ HMul* mul = HMul::cast(instr);
// TODO(mstarzinger): Prevent bailout on minus zero for imul.
mul->AssumeRepresentation(Representation::Integer32());
mul->ClearFlag(HValue::kCanOverflow);
@@ -4884,9 +4961,11 @@
class HRor V8_FINAL : public HBitwiseBinaryOperation {
public:
- HRor(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) {
- ChangeRepresentation(Representation::Integer32());
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right) {
+ return new(zone) HRor(context, left, right);
}
virtual void UpdateRepresentation(Representation new_rep,
@@ -4900,6 +4979,12 @@
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+
+ private:
+ HRor(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) {
+ ChangeRepresentation(Representation::Integer32());
+ }
};
@@ -5037,23 +5122,23 @@
class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
public:
HLoadGlobalCell(Handle<Cell> cell, PropertyDetails details)
- : cell_(cell), details_(details), unique_id_() {
+ : cell_(Unique<Cell>::CreateUninitialized(cell)), details_(details) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnGlobalVars);
}
- Handle<Cell> cell() const { return cell_; }
+ Unique<Cell> cell() const { return cell_; }
bool RequiresHoleCheck() const;
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
virtual intptr_t Hashcode() V8_OVERRIDE {
- return unique_id_.Hashcode();
+ return cell_.Hashcode();
}
virtual void FinalizeUniqueValueId() V8_OVERRIDE {
- unique_id_ = UniqueValueId(cell_);
+ cell_ = Unique<Cell>(cell_.handle());
}
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
@@ -5064,16 +5149,14 @@
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
- HLoadGlobalCell* b = HLoadGlobalCell::cast(other);
- return unique_id_ == b->unique_id_;
+ return cell_ == HLoadGlobalCell::cast(other)->cell_;
}
private:
virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
- Handle<Cell> cell_;
+ Unique<Cell> cell_;
PropertyDetails details_;
- UniqueValueId unique_id_;
};
@@ -5344,7 +5427,7 @@
DECLARE_INSTRUCTION_FACTORY_P3(HStoreGlobalCell, HValue*,
Handle<PropertyCell>, PropertyDetails);
- Handle<PropertyCell> cell() const { return cell_; }
+ Unique<PropertyCell> cell() const { return cell_; }
bool RequiresHoleCheck() {
return !details_.IsDontDelete() || details_.IsReadOnly();
}
@@ -5352,6 +5435,10 @@
return StoringValueNeedsWriteBarrier(value());
}
+ virtual void FinalizeUniqueValueId() V8_OVERRIDE {
+ cell_ = Unique<PropertyCell>(cell_.handle());
+ }
+
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
return Representation::Tagged();
}
@@ -5364,12 +5451,12 @@
Handle<PropertyCell> cell,
PropertyDetails details)
: HUnaryOperation(value),
- cell_(cell),
+ cell_(Unique<PropertyCell>::CreateUninitialized(cell)),
details_(details) {
SetGVNFlag(kChangesGlobalVars);
}
- Handle<PropertyCell> cell_;
+ Unique<PropertyCell> cell_;
PropertyDetails details_;
};
@@ -5601,10 +5688,21 @@
? Representation::Smi() : Representation::Tagged());
}
+ static HObjectAccess ForTypedArrayLength() {
+ return HObjectAccess(
+ kInobject,
+ JSTypedArray::kLengthOffset,
+ Representation::Tagged());
+ }
+
static HObjectAccess ForAllocationSiteTransitionInfo() {
return HObjectAccess(kInobject, AllocationSite::kTransitionInfoOffset);
}
+ static HObjectAccess ForAllocationSiteDependentCode() {
+ return HObjectAccess(kInobject, AllocationSite::kDependentCodeOffset);
+ }
+
static HObjectAccess ForAllocationSiteWeakNext() {
return HObjectAccess(kInobject, AllocationSite::kWeakNextOffset);
}
@@ -6417,25 +6515,20 @@
HValue* object() { return OperandAt(0); }
HValue* context() { return OperandAt(1); }
- Handle<Map> original_map() { return original_map_; }
- Handle<Map> transitioned_map() { return transitioned_map_; }
+ Unique<Map> original_map() { return original_map_; }
+ Unique<Map> transitioned_map() { return transitioned_map_; }
ElementsKind from_kind() { return from_kind_; }
ElementsKind to_kind() { return to_kind_; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- virtual void FinalizeUniqueValueId() V8_OVERRIDE {
- original_map_unique_id_ = UniqueValueId(original_map_);
- transitioned_map_unique_id_ = UniqueValueId(transitioned_map_);
- }
-
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
protected:
virtual bool DataEquals(HValue* other) V8_OVERRIDE {
HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
- return original_map_unique_id_ == instr->original_map_unique_id_ &&
- transitioned_map_unique_id_ == instr->transitioned_map_unique_id_;
+ return original_map_ == instr->original_map_ &&
+ transitioned_map_ == instr->transitioned_map_;
}
private:
@@ -6443,10 +6536,8 @@
HValue* object,
Handle<Map> original_map,
Handle<Map> transitioned_map)
- : original_map_(original_map),
- transitioned_map_(transitioned_map),
- original_map_unique_id_(),
- transitioned_map_unique_id_(),
+ : original_map_(Unique<Map>(original_map)),
+ transitioned_map_(Unique<Map>(transitioned_map)),
from_kind_(original_map->elements_kind()),
to_kind_(transitioned_map->elements_kind()) {
SetOperandAt(0, object);
@@ -6460,10 +6551,8 @@
set_representation(Representation::Tagged());
}
- Handle<Map> original_map_;
- Handle<Map> transitioned_map_;
- UniqueValueId original_map_unique_id_;
- UniqueValueId transitioned_map_unique_id_;
+ Unique<Map> original_map_;
+ Unique<Map> transitioned_map_;
ElementsKind from_kind_;
ElementsKind to_kind_;
};
@@ -6492,14 +6581,21 @@
HStringAdd(HValue* context, HValue* left, HValue* right, StringAddFlags flags)
: HBinaryOperation(context, left, right, HType::String()), flags_(flags) {
set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kChangesNewSpacePromotion);
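+ // Only an unchecked string add is a pure allocation. If the inputs still
+ // need checking, the operation may call out to the runtime, so treat it
+ // as having arbitrary side effects.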
+ if (flags_ == STRING_ADD_CHECK_NONE) {
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnMaps);
+ SetGVNFlag(kChangesNewSpacePromotion);
+ } else {
+ SetAllSideEffects();
+ }
}
- // No side-effects except possible allocation.
- // NOTE: this instruction _does not_ call ToString() on its inputs.
- virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+ // No side-effects except possible allocation:
+ // NOTE: this instruction does not call ToString() on its inputs when
+ // flags_ is STRING_ADD_CHECK_NONE.
+ virtual bool IsDeletable() const V8_OVERRIDE {
+ return flags_ == STRING_ADD_CHECK_NONE;
+ }
const StringAddFlags flags_;
};
@@ -6753,8 +6849,7 @@
ASSERT(value->IsCallRuntime());
#ifdef DEBUG
const Runtime::Function* function = HCallRuntime::cast(value)->function();
- ASSERT(function->function_id == Runtime::kCreateObjectLiteral ||
- function->function_id == Runtime::kCreateObjectLiteralShallow);
+ ASSERT(function->function_id == Runtime::kCreateObjectLiteral);
#endif
}
diff --git a/src/hydrogen-load-elimination.cc b/src/hydrogen-load-elimination.cc
new file mode 100644
index 0000000..6d01ae5
--- /dev/null
+++ b/src/hydrogen-load-elimination.cc
@@ -0,0 +1,327 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen-alias-analysis.h"
+#include "hydrogen-load-elimination.h"
+#include "hydrogen-instructions.h"
+
+namespace v8 {
+namespace internal {
+
+static const int kMaxTrackedFields = 16;
+static const int kMaxTrackedObjects = 5;
+
+// An element in the field approximation list.
+class HFieldApproximation : public ZoneObject {
+ public: // Just a data blob.
+ HValue* object_;
+ HLoadNamedField* last_load_;
+ HValue* last_value_;
+ HFieldApproximation* next_;
+};
+
+
+// The main data structure used during load/store elimination. Each in-object
+// field is tracked separately. For each field, store a list of known field
+// values for known objects.
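+// For example, after a store "o.f = v" the table records v as the last known
+// value of o's field f, so a subsequent load of o.f is replaced by v without
+// touching memory.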
+class HLoadEliminationTable BASE_EMBEDDED {
+ public:
+ HLoadEliminationTable(Zone* zone, HAliasAnalyzer* aliasing)
+ : zone_(zone), fields_(kMaxTrackedFields, zone), aliasing_(aliasing) { }
+
+ // Process a load instruction, updating internal table state. If a previous
+ // load or store for this object and field exists, return the new value with
+ // which the load should be replaced. Otherwise, return {instr}.
+ HValue* load(HLoadNamedField* instr) {
+ int field = FieldOf(instr->access());
+ if (field < 0) return instr;
+
+ HValue* object = instr->object()->ActualValue();
+ HFieldApproximation* approx = FindOrCreate(object, field);
+
+ if (approx->last_value_ == NULL) {
+ // Load is not redundant. Fill out a new entry.
+ approx->last_load_ = instr;
+ approx->last_value_ = instr;
+ return instr;
+ } else {
+ // Eliminate the load. Reuse previously stored value or load instruction.
+ return approx->last_value_;
+ }
+ }
+
+ // Process a store instruction, updating internal table state. If a previous
+ // store to the same object and field makes this store redundant (e.g. because
+ // the stored values are the same), return NULL to indicate that this store
+ // can be eliminated. Otherwise, return {instr}.
+ HValue* store(HStoreNamedField* instr) {
+ int field = FieldOf(instr->access());
+ if (field < 0) return instr;
+
+ HValue* object = instr->object()->ActualValue();
+ HValue* value = instr->value();
+
+ // Kill non-equivalent may-alias entries.
+ KillFieldInternal(object, field, value);
+ if (instr->has_transition()) {
+ // A transition store alters the map of the object.
+ // TODO(titzer): remember the new map (a constant) for the object.
+ KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
+ }
+ HFieldApproximation* approx = FindOrCreate(object, field);
+
+ if (Equal(approx->last_value_, value)) {
+ // The store is redundant because the field already has this value.
+ return NULL;
+ } else {
+ // The store is not redundant. Update the entry.
+ approx->last_load_ = NULL;
+ approx->last_value_ = value;
+ return instr;
+ }
+ }
+
+ // Kill everything in this table.
+ void Kill() {
+ fields_.Rewind(0);
+ }
+
+ // Kill all entries matching the given offset.
+ void KillOffset(int offset) {
+ int field = FieldOf(offset);
+ if (field >= 0 && field < fields_.length()) {
+ fields_[field] = NULL;
+ }
+ }
+
+ // Compute the field index for the given object access; -1 if not tracked.
+ int FieldOf(HObjectAccess access) {
+ // Only track kMaxTrackedFields in-object fields.
+ if (!access.IsInobject()) return -1;
+ return FieldOf(access.offset());
+ }
+
+ // Print this table to stdout.
+ void Print() {
+ for (int i = 0; i < fields_.length(); i++) {
+ PrintF(" field %d: ", i);
+ for (HFieldApproximation* a = fields_[i]; a != NULL; a = a->next_) {
+ PrintF("[o%d =", a->object_->id());
+ if (a->last_load_ != NULL) PrintF(" L%d", a->last_load_->id());
+ if (a->last_value_ != NULL) PrintF(" v%d", a->last_value_->id());
+ PrintF("] ");
+ }
+ PrintF("\n");
+ }
+ }
+
+ private:
+ // Find or create an entry for the given object and field pair.
+ HFieldApproximation* FindOrCreate(HValue* object, int field) {
+ EnsureFields(field + 1);
+
+ // Search for a field approximation for this object.
+ HFieldApproximation* approx = fields_[field];
+ int count = 0;
+ while (approx != NULL) {
+ if (aliasing_->MustAlias(object, approx->object_)) return approx;
+ count++;
+ approx = approx->next_;
+ }
+
+ if (count >= kMaxTrackedObjects) {
+ // Pull the last entry off the end and repurpose it for this object.
+ approx = ReuseLastApproximation(field);
+ } else {
+ // Allocate a new entry.
+ approx = new(zone_) HFieldApproximation();
+ }
+
+ // Insert the entry at the head of the list.
+ approx->object_ = object;
+ approx->last_load_ = NULL;
+ approx->last_value_ = NULL;
+ approx->next_ = fields_[field];
+ fields_[field] = approx;
+
+ return approx;
+ }
+
+ // Kill all entries for a given field that _may_ alias the given object
+ // and do _not_ have the given value.
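+ // For example, storing v2 into p.f removes an entry recording q.f == v1
+ // whenever p and q may refer to the same object.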
+ void KillFieldInternal(HValue* object, int field, HValue* value) {
+ if (field >= fields_.length()) return; // Nothing to do.
+
+ HFieldApproximation* approx = fields_[field];
+ HFieldApproximation* prev = NULL;
+ while (approx != NULL) {
+ if (aliasing_->MayAlias(object, approx->object_)) {
+ if (!Equal(approx->last_value_, value)) {
+ // Kill an aliasing entry that doesn't agree on the value.
+ if (prev != NULL) {
+ prev->next_ = approx->next_;
+ } else {
+ fields_[field] = approx->next_;
+ }
+ approx = approx->next_;
+ continue;
+ }
+ }
+ prev = approx;
+ approx = approx->next_;
+ }
+ }
+
+ bool Equal(HValue* a, HValue* b) {
+ if (a == b) return true;
+ if (a != NULL && b != NULL) return a->Equals(b);
+ return false;
+ }
+
+ // Remove the last approximation for a field so that it can be reused.
+ // We reuse the last entry because it was the first inserted and is thus
+ // farthest away from the current instruction.
+ HFieldApproximation* ReuseLastApproximation(int field) {
+ HFieldApproximation* approx = fields_[field];
+ ASSERT(approx != NULL);
+
+ HFieldApproximation* prev = NULL;
+ while (approx->next_ != NULL) {
+ prev = approx;
+ approx = approx->next_;
+ }
+ if (prev != NULL) prev->next_ = NULL;
+ return approx;
+ }
+
+ // Ensure internal storage for the given number of fields.
+ void EnsureFields(int num_fields) {
+ while (fields_.length() < num_fields) fields_.Add(NULL, zone_);
+ }
+
+ // Compute the field index for the given in-object offset.
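+ // For example, with kPointerSize == 4 an offset of 8 maps to field 2;
+ // offsets at or beyond kMaxTrackedFields * kPointerSize are not tracked.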
+ int FieldOf(int offset) {
+ if (offset >= kMaxTrackedFields * kPointerSize) return -1;
+ ASSERT((offset % kPointerSize) == 0); // Assume aligned accesses.
+ return offset / kPointerSize;
+ }
+
+ Zone* zone_;
+ ZoneList<HFieldApproximation*> fields_;
+ HAliasAnalyzer* aliasing_;
+};
+
+
+void HLoadEliminationPhase::Run() {
+ for (int i = 0; i < graph()->blocks()->length(); i++) {
+ HBasicBlock* block = graph()->blocks()->at(i);
+ EliminateLoads(block);
+ }
+}
+
+
+// For code de-uglification.
+#define TRACE(x) if (FLAG_trace_load_elimination) PrintF x
+
+
+// Eliminate loads and stores local to a block.
+void HLoadEliminationPhase::EliminateLoads(HBasicBlock* block) {
+ HAliasAnalyzer aliasing;
+ HLoadEliminationTable table(zone(), &aliasing);
+
+ TRACE(("-- load-elim B%d -------------------------------------------------\n",
+ block->block_id()));
+
+ for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ bool changed = false;
+ HInstruction* instr = it.Current();
+
+ switch (instr->opcode()) {
+ case HValue::kLoadNamedField: {
+ HLoadNamedField* load = HLoadNamedField::cast(instr);
+ TRACE((" process L%d field %d (o%d)\n",
+ instr->id(),
+ table.FieldOf(load->access()),
+ load->object()->ActualValue()->id()));
+ HValue* result = table.load(load);
+ if (result != instr) {
+ // The load can be replaced with a previous load or a value.
+ TRACE((" replace L%d -> v%d\n", instr->id(), result->id()));
+ instr->DeleteAndReplaceWith(result);
+ }
+ changed = true;
+ break;
+ }
+ case HValue::kStoreNamedField: {
+ HStoreNamedField* store = HStoreNamedField::cast(instr);
+ TRACE((" process S%d field %d (o%d) = v%d\n",
+ instr->id(),
+ table.FieldOf(store->access()),
+ store->object()->ActualValue()->id(),
+ store->value()->id()));
+ HValue* result = table.store(store);
+ if (result == NULL) {
+ // The store is redundant. Remove it.
+ TRACE((" remove S%d\n", instr->id()));
+ instr->DeleteAndReplaceWith(NULL);
+ }
+ changed = true;
+ break;
+ }
+ default: {
+ if (instr->CheckGVNFlag(kChangesInobjectFields)) {
+ TRACE((" kill-all i%d\n", instr->id()));
+ table.Kill();
+ continue;
+ }
+ if (instr->CheckGVNFlag(kChangesMaps)) {
+ TRACE((" kill-maps i%d\n", instr->id()));
+ table.KillOffset(JSObject::kMapOffset);
+ }
+ if (instr->CheckGVNFlag(kChangesElementsKind)) {
+ TRACE((" kill-elements-kind i%d\n", instr->id()));
+ table.KillOffset(JSObject::kMapOffset);
+ table.KillOffset(JSObject::kElementsOffset);
+ }
+ if (instr->CheckGVNFlag(kChangesElementsPointer)) {
+ TRACE((" kill-elements i%d\n", instr->id()));
+ table.KillOffset(JSObject::kElementsOffset);
+ }
+ }
+ // Improvements possible:
+ // - learn from HCheckMaps for field 0
+ // - remove unobservable stores (write-after-write)
+ }
+
+ if (changed && FLAG_trace_load_elimination) {
+ table.Print();
+ }
+ }
+}
+
+
+} } // namespace v8::internal
diff --git a/src/hydrogen-load-elimination.h b/src/hydrogen-load-elimination.h
new file mode 100644
index 0000000..ef6f71f
--- /dev/null
+++ b/src/hydrogen-load-elimination.h
@@ -0,0 +1,50 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_LOAD_ELIMINATION_H_
+#define V8_HYDROGEN_LOAD_ELIMINATION_H_
+
+#include "hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+class HLoadEliminationPhase : public HPhase {
+ public:
+ explicit HLoadEliminationPhase(HGraph* graph)
+ : HPhase("H_Load elimination", graph) { }
+
+ void Run();
+
+ private:
+ void EliminateLoads(HBasicBlock* block);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HYDROGEN_LOAD_ELIMINATION_H_
diff --git a/src/hydrogen-osr.cc b/src/hydrogen-osr.cc
index 6b1df1e..e95967e 100644
--- a/src/hydrogen-osr.cc
+++ b/src/hydrogen-osr.cc
@@ -63,8 +63,8 @@
HBasicBlock* non_osr_entry = graph->CreateBasicBlock();
osr_entry_ = graph->CreateBasicBlock();
HValue* true_value = graph->GetConstantTrue();
- HBranch* test = new(zone) HBranch(true_value, ToBooleanStub::Types(),
- non_osr_entry, osr_entry_);
+ HBranch* test = builder_->New<HBranch>(true_value, ToBooleanStub::Types(),
+ non_osr_entry, osr_entry_);
builder_->current_block()->Finish(test);
HBasicBlock* loop_predecessor = graph->CreateBasicBlock();
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 15ef5ed..50882a8 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -43,6 +43,7 @@
#include "hydrogen-escape-analysis.h"
#include "hydrogen-infer-representation.h"
#include "hydrogen-infer-types.h"
+#include "hydrogen-load-elimination.h"
#include "hydrogen-gvn.h"
#include "hydrogen-mark-deoptimize.h"
#include "hydrogen-minus-zero.h"
@@ -726,6 +727,7 @@
captured_(false),
needs_compare_(false),
first_true_block_(NULL),
+ last_true_block_(NULL),
first_false_block_(NULL),
split_edge_merge_block_(NULL),
merge_block_(NULL) {
@@ -735,7 +737,8 @@
}
-void HGraphBuilder::IfBuilder::AddCompare(HControlInstruction* compare) {
+HControlInstruction* HGraphBuilder::IfBuilder::AddCompare(
+ HControlInstruction* compare) {
if (split_edge_merge_block_ != NULL) {
HEnvironment* env = first_false_block_->last_environment();
HBasicBlock* split_edge =
@@ -754,6 +757,7 @@
}
builder_->current_block()->Finish(compare);
needs_compare_ = false;
+ return compare;
}
@@ -802,6 +806,28 @@
}
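+// Joins any still-open true/false blocks of this IfBuilder to the matching
+// branches of {continuation}, then marks the builder captured and ends it.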
+void HGraphBuilder::IfBuilder::JoinContinuation(HIfContinuation* continuation) {
+ ASSERT(!finished_);
+ ASSERT(!captured_);
+ HBasicBlock* true_block = last_true_block_ == NULL
+ ? first_true_block_
+ : last_true_block_;
+ HBasicBlock* false_block = did_else_ && (first_false_block_ != NULL)
+ ? builder_->current_block()
+ : first_false_block_;
+ if (true_block != NULL && !true_block->IsFinished()) {
+ ASSERT(continuation->IsTrueReachable());
+ true_block->GotoNoSimulate(continuation->true_branch());
+ }
+ if (false_block != NULL && !false_block->IsFinished()) {
+ ASSERT(continuation->IsFalseReachable());
+ false_block->GotoNoSimulate(continuation->false_branch());
+ }
+ captured_ = true;
+ End();
+}
+
+
void HGraphBuilder::IfBuilder::Then() {
ASSERT(!captured_);
ASSERT(!finished_);
@@ -814,9 +840,8 @@
HConstant* constant_false = builder_->graph()->GetConstantFalse();
ToBooleanStub::Types boolean_type = ToBooleanStub::Types();
boolean_type.Add(ToBooleanStub::BOOLEAN);
- HBranch* branch =
- new(zone()) HBranch(constant_false, boolean_type, first_true_block_,
- first_false_block_);
+ HBranch* branch = builder()->New<HBranch>(
+ constant_false, boolean_type, first_true_block_, first_false_block_);
builder_->current_block()->Finish(branch);
}
builder_->set_current_block(first_true_block_);
@@ -948,11 +973,8 @@
builder_->set_current_block(header_block_);
env->Pop();
- HCompareNumericAndBranch* compare =
- new(zone()) HCompareNumericAndBranch(phi_, terminating, token);
- compare->SetSuccessorAt(0, body_block_);
- compare->SetSuccessorAt(1, exit_block_);
- builder_->current_block()->Finish(compare);
+ builder_->current_block()->Finish(builder_->New<HCompareNumericAndBranch>(
+ phi_, terminating, token, body_block_, exit_block_));
builder_->set_current_block(body_block_);
if (direction_ == kPreIncrement || direction_ == kPreDecrement) {
@@ -1032,8 +1054,7 @@
}
-void HGraphBuilder::AddIncrementCounter(StatsCounter* counter,
- HValue* context) {
+void HGraphBuilder::AddIncrementCounter(StatsCounter* counter) {
if (FLAG_native_code_counters && counter->Enabled()) {
HValue* reference = Add<HConstant>(ExternalReference(counter));
HValue* old_value = Add<HLoadNamedField>(reference,
@@ -1824,12 +1845,11 @@
HValue* HGraphBuilder::BuildCreateAllocationMemento(HValue* previous_object,
int previous_object_size,
HValue* alloc_site) {
- // TODO(mvstanton): ASSERT altered to CHECK to diagnose chromium bug 284577
- CHECK(alloc_site != NULL);
+ ASSERT(alloc_site != NULL);
HInnerAllocatedObject* alloc_memento = Add<HInnerAllocatedObject>(
previous_object, previous_object_size);
- Handle<Map> alloc_memento_map(
- isolate()->heap()->allocation_memento_map());
+ Handle<Map> alloc_memento_map =
+ isolate()->factory()->allocation_memento_map();
AddStoreMapConstant(alloc_memento, alloc_memento_map);
HObjectAccess access = HObjectAccess::ForAllocationMementoSite();
Add<HStoreNamedField>(alloc_memento, access, alloc_site);
@@ -2773,8 +2793,8 @@
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
ToBooleanStub::Types expected(condition()->to_boolean_types());
- HBranch* test = new(zone()) HBranch(value, expected, empty_true, empty_false);
- builder->current_block()->Finish(test);
+ builder->current_block()->Finish(builder->New<HBranch>(
+ value, expected, empty_true, empty_false));
empty_true->Goto(if_true(), builder->function_state());
empty_false->Goto(if_false(), builder->function_state());
@@ -2973,6 +2993,8 @@
if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>();
+ if (FLAG_load_elimination) Run<HLoadEliminationPhase>();
+
CollectPhis();
if (has_osr()) osr()->FinishOsrValues();
@@ -3373,12 +3395,10 @@
// Test switch's tag value if all clauses are string literals
if (stmt->switch_type() == SwitchStatement::STRING_SWITCH) {
- string_check = new(zone()) HIsStringAndBranch(tag_value);
first_test_block = graph()->CreateBasicBlock();
not_string_block = graph()->CreateBasicBlock();
-
- string_check->SetSuccessorAt(0, first_test_block);
- string_check->SetSuccessorAt(1, not_string_block);
+ string_check = New<HIsStringAndBranch>(
+ tag_value, first_test_block, not_string_block);
current_block()->Finish(string_check);
set_current_block(first_test_block);
@@ -3408,9 +3428,9 @@
}
HCompareNumericAndBranch* compare_ =
- new(zone()) HCompareNumericAndBranch(tag_value,
- label_value,
- Token::EQ_STRICT);
+ New<HCompareNumericAndBranch>(tag_value,
+ label_value,
+ Token::EQ_STRICT);
compare_->set_observed_input_representation(
Representation::Smi(), Representation::Smi());
compare = compare_;
@@ -3694,7 +3714,7 @@
// Check that we still have more keys.
HCompareNumericAndBranch* compare_index =
- new(zone()) HCompareNumericAndBranch(index, limit, Token::LT);
+ New<HCompareNumericAndBranch>(index, limit, Token::LT);
compare_index->set_observed_input_representation(
Representation::Smi(), Representation::Smi());
@@ -4064,20 +4084,6 @@
}
-static bool LookupGetter(Handle<Map> map,
- Handle<String> name,
- Handle<JSFunction>* getter,
- Handle<JSObject>* holder) {
- Handle<AccessorPair> accessors;
- if (LookupAccessorPair(map, name, &accessors, holder) &&
- accessors->getter()->IsJSFunction()) {
- *getter = Handle<JSFunction>(JSFunction::cast(accessors->getter()));
- return true;
- }
- return false;
-}
-
-
static bool LookupSetter(Handle<Map> map,
Handle<String> name,
Handle<JSFunction>* setter,
@@ -4172,8 +4178,7 @@
IsFastLiteral(Handle<JSObject>::cast(boilerplate),
kMaxFastLiteralDepth,
&max_properties)) {
- Handle<JSObject> boilerplate_object =
- Handle<JSObject>::cast(boilerplate);
+ Handle<JSObject> boilerplate_object = Handle<JSObject>::cast(boilerplate);
literal = BuildFastLiteral(boilerplate_object,
Handle<Object>::null(),
@@ -4193,9 +4198,7 @@
Add<HPushArgument>(Add<HConstant>(constant_properties));
Add<HPushArgument>(Add<HConstant>(flags));
- Runtime::FunctionId function_id =
- (expr->depth() > 1 || expr->may_store_doubles())
- ? Runtime::kCreateObjectLiteral : Runtime::kCreateObjectLiteralShallow;
+ Runtime::FunctionId function_id = Runtime::kCreateObjectLiteral;
literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(function_id),
4);
@@ -4316,17 +4319,26 @@
ElementsKind boilerplate_elements_kind =
Handle<JSObject>::cast(boilerplate_object)->GetElementsKind();
- // TODO(mvstanton): This heuristic is only a temporary solution. In the
- // end, we want to quit creating allocation site info after a certain number
- // of GCs for a call site.
- AllocationSiteMode mode = AllocationSite::GetMode(
- boilerplate_elements_kind);
+ ASSERT(AllocationSite::CanTrack(boilerplate_object->map()->instance_type()));
// Check whether to use fast or slow deep-copying for boilerplate.
int max_properties = kMaxFastLiteralProperties;
if (IsFastLiteral(boilerplate_object,
kMaxFastLiteralDepth,
&max_properties)) {
+ // TODO(mvstanton): This heuristic is only a temporary solution. In the
+ // end, we want to quit creating allocation site info after a certain number
+ // of GCs for a call site.
+ AllocationSiteMode mode = AllocationSite::GetMode(
+ boilerplate_elements_kind);
+
+ // It doesn't make sense to create allocation mementos if we are going to
+ // allocate in old space.
+ if (mode == TRACK_ALLOCATION_SITE &&
+ isolate()->heap()->GetPretenureMode() == TENURED) {
+ mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
literal = BuildFastLiteral(boilerplate_object,
site,
mode);
@@ -4546,140 +4558,191 @@
}
-static bool CanLoadPropertyFromPrototype(Handle<Map> map,
- Handle<Name> name,
- LookupResult* lookup) {
- if (!CanInlinePropertyAccess(*map)) return false;
- map->LookupDescriptor(NULL, *name, lookup);
- if (lookup->IsFound()) return false;
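+// Two lookups are compatible for a single monomorphic-style load if they
+// resolve the property the same way: not found but sharing a prototype, the
+// same accessor, the same constant, or a field with matching offset,
+// in-objectness, and a load-compatible representation.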
+bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatibleForLoad(
+ PropertyAccessInfo* info) {
+ if (!CanInlinePropertyAccess(*map_)) return false;
+
+ if (!LookupDescriptor()) return false;
+
+ if (!lookup_.IsFound()) {
+ return (!info->lookup_.IsFound() || !info->holder_.is_null()) &&
+ map_->prototype() == info->map_->prototype();
+ }
+
+ if (lookup_.IsPropertyCallbacks()) {
+ return accessor_.is_identical_to(info->accessor_);
+ }
+
+ if (lookup_.IsConstant()) {
+ return constant_.is_identical_to(info->constant_);
+ }
+
+ ASSERT(lookup_.IsField());
+ if (!info->lookup_.IsField()) return false;
+
+ Representation r = access_.representation();
+ if (!info->access_.representation().IsCompatibleForLoad(r)) return false;
+ if (info->access_.offset() != access_.offset()) return false;
+ if (info->access_.IsInobject() != access_.IsInobject()) return false;
+ info->GeneralizeRepresentation(r);
return true;
}
-HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic(
- HValue* object,
- SmallMapList* types,
- Handle<String> name) {
- // Use monomorphic load if property lookup results in the same field index
- // for all maps. Requires special map check on the set of all handled maps.
- if (types->length() > kMaxLoadPolymorphism) return NULL;
-
- LookupResult lookup(isolate());
- int count;
- HObjectAccess access = HObjectAccess::ForMap(); // initial value unused.
- for (count = 0; count < types->length(); ++count) {
- Handle<Map> map = types->at(count);
- if (!ComputeLoadStoreField(map, name, &lookup, false)) break;
-
- HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name);
-
- if (count == 0) {
- // First time through the loop; set access and representation.
- access = new_access;
- } else if (!access.representation().IsCompatibleForLoad(
- new_access.representation())) {
- // Representations did not match.
- break;
- } else if (access.offset() != new_access.offset()) {
- // Offsets did not match.
- break;
- } else if (access.IsInobject() != new_access.IsInobject()) {
- // In-objectness did not match.
- break;
- }
- access = access.WithRepresentation(
- access.representation().generalize(new_access.representation()));
- }
-
- if (count == types->length()) {
- // Everything matched; can use monomorphic load.
- BuildCheckHeapObject(object);
- HCheckMaps* checked_object = Add<HCheckMaps>(object, types);
- return BuildLoadNamedField(checked_object, access);
- }
-
- if (count != 0) return NULL;
-
- // Second chance: the property is on the prototype and all maps have the
- // same prototype.
- Handle<Map> map(types->at(0));
- if (!CanLoadPropertyFromPrototype(map, name, &lookup)) return NULL;
-
- Handle<Object> prototype(map->prototype(), isolate());
- for (count = 1; count < types->length(); ++count) {
- Handle<Map> test_map(types->at(count));
- if (!CanLoadPropertyFromPrototype(test_map, name, &lookup)) return NULL;
- if (test_map->prototype() != *prototype) return NULL;
- }
-
- LookupInPrototypes(map, name, &lookup);
- if (!lookup.IsField()) return NULL;
-
- BuildCheckHeapObject(object);
- Add<HCheckMaps>(object, types);
-
- Handle<JSObject> holder(lookup.holder());
- Handle<Map> holder_map(holder->map());
- HValue* checked_holder = BuildCheckPrototypeMaps(
- Handle<JSObject>::cast(prototype), holder);
- return BuildLoadNamedField(checked_holder,
- HObjectAccess::ForField(holder_map, &lookup, name));
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupDescriptor() {
+ map_->LookupDescriptor(NULL, *name_, &lookup_);
+ return LoadResult(map_);
}
-// Returns true if an instance of this map can never find a property with this
-// name in its prototype chain. This means all prototypes up to the top are
-// fast and don't have the name in them. It would be good if we could optimize
-// polymorphic loads where the property is sometimes found in the prototype
-// chain.
-static bool PrototypeChainCanNeverResolve(
- Handle<Map> map, Handle<String> name) {
- Isolate* isolate = map->GetIsolate();
- Object* current = map->prototype();
- while (current != isolate->heap()->null_value()) {
- if (current->IsJSGlobalProxy() ||
- current->IsGlobalObject() ||
- !current->IsJSObject() ||
- !CanInlinePropertyAccess(JSObject::cast(current)->map()) ||
- JSObject::cast(current)->IsAccessCheckNeeded()) {
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
+ if (lookup_.IsField()) {
+ access_ = HObjectAccess::ForField(map, &lookup_, name_);
+ } else if (lookup_.IsPropertyCallbacks()) {
+ Handle<Object> callback(lookup_.GetValueFromMap(*map), isolate());
+ if (!callback->IsAccessorPair()) return false;
+ Object* getter = Handle<AccessorPair>::cast(callback)->getter();
+ if (!getter->IsJSFunction()) return false;
+ accessor_ = handle(JSFunction::cast(getter));
+ } else if (lookup_.IsConstant()) {
+ constant_ = handle(lookup_.GetConstantFromMap(*map), isolate());
+ }
+
+ return true;
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
+ Handle<Map> map = map_;
+ while (map->prototype()->IsJSObject()) {
+ holder_ = handle(JSObject::cast(map->prototype()));
+ map = Handle<Map>(holder_->map());
+ if (!CanInlinePropertyAccess(*map)) {
+ lookup_.NotFound();
return false;
}
-
- LookupResult lookup(isolate);
- Map* map = JSObject::cast(current)->map();
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsFound()) return false;
- if (!lookup.IsCacheable()) return false;
- current = JSObject::cast(current)->GetPrototype();
+ map->LookupDescriptor(*holder_, *name_, &lookup_);
+ if (lookup_.IsFound()) return LoadResult(map);
}
+ lookup_.NotFound();
return true;
}
+bool HOptimizedGraphBuilder::PropertyAccessInfo::CanLoadMonomorphic() {
+ if (!CanInlinePropertyAccess(*map_)) return IsStringLength();
+ if (IsArrayLength()) return true;
+ if (!LookupDescriptor()) return false;
+ if (lookup_.IsFound()) return true;
+ return LookupInPrototypes();
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::CanLoadAsMonomorphic(
+ SmallMapList* types) {
+ ASSERT(map_.is_identical_to(types->first()));
+ if (!CanLoadMonomorphic()) return false;
+ if (types->length() > kMaxLoadPolymorphism) return false;
+
+ if (IsStringLength()) {
+ for (int i = 1; i < types->length(); ++i) {
+ if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
+ }
+ return true;
+ }
+
+ if (IsArrayLength()) {
+ bool is_fast = IsFastElementsKind(map_->elements_kind());
+ for (int i = 1; i < types->length(); ++i) {
+ Handle<Map> test_map = types->at(i);
+ if (test_map->instance_type() != JS_ARRAY_TYPE) return false;
+ if (IsFastElementsKind(test_map->elements_kind()) != is_fast) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ if (IsTypedArrayLength()) {
+ for (int i = 1; i < types->length(); ++i) {
+ if (types->at(i)->instance_type() != JS_TYPED_ARRAY_TYPE) return false;
+ }
+ return true;
+ }
+
+ for (int i = 1; i < types->length(); ++i) {
+ PropertyAccessInfo test_info(isolate(), types->at(i), name_);
+ if (!test_info.IsCompatibleForLoad(this)) return false;
+ }
+
+ return true;
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildLoadMonomorphic(
+ PropertyAccessInfo* info,
+ HValue* object,
+ HInstruction* checked_object,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool can_inline_accessor) {
+ if (info->IsStringLength()) {
+ return New<HLoadNamedField>(
+ checked_object, HObjectAccess::ForStringLength());
+ }
+
+ if (info->IsArrayLength()) {
+ return New<HLoadNamedField>(
+ checked_object, HObjectAccess::ForArrayLength(
+ info->map()->elements_kind()));
+ }
+
+ if (info->IsTypedArrayLength()) {
+ return New<HLoadNamedField>(
+ checked_object, HObjectAccess::ForTypedArrayLength());
+ }
+
+ HValue* checked_holder = checked_object;
+ if (info->has_holder()) {
+ Handle<JSObject> prototype(JSObject::cast(info->map()->prototype()));
+ checked_holder = BuildCheckPrototypeMaps(prototype, info->holder());
+ }
+
+ if (!info->lookup()->IsFound()) return graph()->GetConstantUndefined();
+
+ if (info->lookup()->IsField()) {
+ return BuildLoadNamedField(checked_holder, info->access());
+ }
+
+ if (info->lookup()->IsPropertyCallbacks()) {
+ Push(checked_object);
+ if (FLAG_inline_accessors &&
+ can_inline_accessor &&
+ TryInlineGetter(info->accessor(), ast_id, return_id)) {
+ return NULL;
+ }
+ Add<HPushArgument>(Pop());
+ return new(zone()) HCallConstantFunction(info->accessor(), 1);
+ }
+
+ ASSERT(info->lookup()->IsConstant());
+ return New<HConstant>(info->constant());
+}
+
+
void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
int position,
BailoutId ast_id,
+ BailoutId return_id,
HValue* object,
SmallMapList* types,
Handle<String> name) {
- HInstruction* instr = TryLoadPolymorphicAsMonomorphic(object, types, name);
- if (instr != NULL) {
- instr->set_position(position);
- return ast_context()->ReturnInstruction(instr, ast_id);
- }
-
// Something did not match; must use a polymorphic load.
int count = 0;
HBasicBlock* join = NULL;
for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
- Handle<Map> map = types->at(i);
- LookupResult lookup(isolate());
- if (ComputeLoadStoreField(map, name, &lookup, false) ||
- (lookup.IsCacheable() &&
- CanInlinePropertyAccess(*map) &&
- (lookup.IsConstant() ||
- (!lookup.IsFound() &&
- PrototypeChainCanNeverResolve(map, name))))) {
+ PropertyAccessInfo info(isolate(), types->at(i), name);
+ if (info.CanLoadMonomorphic()) {
if (count == 0) {
BuildCheckHeapObject(object);
join = graph()->CreateBasicBlock();
@@ -4687,37 +4750,25 @@
++count;
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare =
- new(zone()) HCompareMap(object, map, if_true, if_false);
+ HCompareMap* compare = New<HCompareMap>(
+ object, info.map(), if_true, if_false);
current_block()->Finish(compare);
set_current_block(if_true);
- // TODO(verwaest): Merge logic with BuildLoadNamedMonomorphic.
- if (lookup.IsField()) {
- HObjectAccess access = HObjectAccess::ForField(map, &lookup, name);
- HLoadNamedField* load = BuildLoadNamedField(compare, access);
- load->set_position(position);
- AddInstruction(load);
- if (!ast_context()->IsEffect()) Push(load);
- } else if (lookup.IsConstant()) {
- Handle<Object> constant(lookup.GetConstantFromMap(*map), isolate());
- HConstant* hconstant = Add<HConstant>(constant);
- if (!ast_context()->IsEffect()) Push(hconstant);
+ HInstruction* load = BuildLoadMonomorphic(
+ &info, object, compare, ast_id, return_id, FLAG_polymorphic_inlining);
+ if (load == NULL) {
+ if (HasStackOverflow()) return;
} else {
- ASSERT(!lookup.IsFound());
- if (map->prototype()->IsJSObject()) {
- Handle<JSObject> prototype(JSObject::cast(map->prototype()));
- Handle<JSObject> holder = prototype;
- while (holder->map()->prototype()->IsJSObject()) {
- holder = handle(JSObject::cast(holder->map()->prototype()));
- }
- BuildCheckPrototypeMaps(prototype, holder);
+ if (!load->IsLinked()) {
+ load->set_position(position);
+ AddInstruction(load);
}
- if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined());
+ if (!ast_context()->IsEffect()) Push(load);
}
- current_block()->Goto(join);
+ if (current_block() != NULL) current_block()->Goto(join);
set_current_block(if_false);
}
}
@@ -4726,6 +4777,10 @@
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
+ // Because the deopt may be the only path in the polymorphic load, make sure
+ // that the environment stack matches the depth on deopt that it otherwise
+ // would have had after a successful load.
+ if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
FinishExitWithHardDeoptimization("Unknown map in polymorphic load", join);
} else {
HValue* context = environment()->context();
@@ -4761,8 +4816,6 @@
// for all maps. Requires special map check on the set of all handled maps.
if (types->length() > kMaxStorePolymorphism) return false;
- // TODO(verwaest): Merge the checking logic with the code in
- // TryLoadPolymorphicAsMonomorphic.
LookupResult lookup(isolate());
int count;
Representation representation = Representation::None();
@@ -4840,8 +4893,7 @@
++count;
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareMap* compare =
- new(zone()) HCompareMap(object, map, if_true, if_false);
+ HCompareMap* compare = New<HCompareMap>(object, map, if_true, if_false);
current_block()->Finish(compare);
set_current_block(if_true);
@@ -5136,9 +5188,7 @@
CHECK_ALIVE(VisitForValue(prop->obj()));
HValue* object = Top();
HValue* key = NULL;
- if ((!prop->IsStringLength() &&
- !prop->IsFunctionPrototype() &&
- !prop->key()->IsPropertyName()) ||
+ if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
prop->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(prop->key()));
key = Top();
@@ -5363,73 +5413,6 @@
}
-HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
- HValue* object,
- Handle<String> name,
- Handle<Map> map) {
- // Handle a load from a known field.
- ASSERT(!map->is_dictionary_map());
-
- // Handle access to various length properties
- if (name->Equals(isolate()->heap()->length_string())) {
- if (map->instance_type() == JS_ARRAY_TYPE) {
- HCheckMaps* checked_object = AddCheckMap(object, map);
- return New<HLoadNamedField>(
- checked_object, HObjectAccess::ForArrayLength(map->elements_kind()));
- }
- }
-
- LookupResult lookup(isolate());
- map->LookupDescriptor(NULL, *name, &lookup);
- if (lookup.IsField()) {
- HCheckMaps* checked_object = AddCheckMap(object, map);
- ASSERT(map->IsJSObjectMap());
- return BuildLoadNamedField(
- checked_object, HObjectAccess::ForField(map, &lookup, name));
- }
-
- // Handle a load of a constant known function.
- if (lookup.IsConstant()) {
- AddCheckMap(object, map);
- Handle<Object> constant(lookup.GetConstantFromMap(*map), isolate());
- return New<HConstant>(constant);
- }
-
- if (lookup.IsFound()) {
- // Cannot handle the property, do a generic load instead.
- HValue* context = environment()->context();
- return new(zone()) HLoadNamedGeneric(context, object, name);
- }
-
- // Handle a load from a known field somewhere in the prototype chain.
- LookupInPrototypes(map, name, &lookup);
- if (lookup.IsField()) {
- Handle<JSObject> prototype(JSObject::cast(map->prototype()));
- Handle<JSObject> holder(lookup.holder());
- Handle<Map> holder_map(holder->map());
- AddCheckMap(object, map);
- HValue* checked_holder = BuildCheckPrototypeMaps(prototype, holder);
- return BuildLoadNamedField(
- checked_holder, HObjectAccess::ForField(holder_map, &lookup, name));
- }
-
- // Handle a load of a constant function somewhere in the prototype chain.
- if (lookup.IsConstant()) {
- Handle<JSObject> prototype(JSObject::cast(map->prototype()));
- Handle<JSObject> holder(lookup.holder());
- Handle<Map> holder_map(holder->map());
- AddCheckMap(object, map);
- BuildCheckPrototypeMaps(prototype, holder);
- Handle<Object> constant(lookup.GetConstantFromMap(*holder_map), isolate());
- return New<HConstant>(constant);
- }
-
- // No luck, do a generic load.
- HValue* context = environment()->context();
- return new(zone()) HLoadNamedGeneric(context, object, name);
-}
-
-
HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
HValue* key) {
HValue* context = environment()->context();
@@ -5632,7 +5615,7 @@
HBasicBlock* this_map = graph()->CreateBasicBlock();
HBasicBlock* other_map = graph()->CreateBasicBlock();
HCompareMap* mapcompare =
- new(zone()) HCompareMap(object, map, this_map, other_map);
+ New<HCompareMap>(object, map, this_map, other_map);
current_block()->Finish(mapcompare);
set_current_block(this_map);
@@ -5828,17 +5811,19 @@
}
+static bool AreStringTypes(SmallMapList* types) {
+ for (int i = 0; i < types->length(); i++) {
+ if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
+ }
+ return true;
+}
+
+
void HOptimizedGraphBuilder::BuildLoad(Property* expr,
int position,
BailoutId ast_id) {
HInstruction* instr = NULL;
- if (expr->IsStringLength()) {
- HValue* string = Pop();
- BuildCheckHeapObject(string);
- HInstruction* checkstring =
- AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
- instr = BuildLoadStringLength(string, checkstring);
- } else if (expr->IsStringAccess()) {
+ if (expr->IsStringAccess()) {
HValue* index = Pop();
HValue* string = Pop();
HValue* context = environment()->context();
@@ -5854,31 +5839,33 @@
} else if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
- HValue* object = Top();
+ HValue* object = Pop();
SmallMapList* types;
- bool monomorphic = ComputeReceiverTypes(expr, object, &types);
+ ComputeReceiverTypes(expr, object, &types);
+ ASSERT(types != NULL);
- if (monomorphic) {
- Handle<Map> map = types->first();
- Handle<JSFunction> getter;
- Handle<JSObject> holder;
- if (LookupGetter(map, name, &getter, &holder)) {
- AddCheckConstantFunction(holder, Top(), map);
- if (FLAG_inline_accessors &&
- TryInlineGetter(getter, ast_id, expr->LoadId())) {
- return;
- }
- Add<HPushArgument>(Pop());
- instr = new(zone()) HCallConstantFunction(getter, 1);
- } else {
- instr = BuildLoadNamedMonomorphic(Pop(), name, map);
+ if (types->length() > 0) {
+ PropertyAccessInfo info(isolate(), types->first(), name);
+ if (!info.CanLoadAsMonomorphic(types)) {
+ return HandlePolymorphicLoadNamedField(
+ position, ast_id, expr->LoadId(), object, types, name);
}
- } else if (types != NULL && types->length() > 1) {
- return HandlePolymorphicLoadNamedField(
- position, ast_id, Pop(), types, name);
+
+ BuildCheckHeapObject(object);
+ HInstruction* checked_object;
+ if (AreStringTypes(types)) {
+ checked_object =
+ AddInstruction(HCheckInstanceType::NewIsString(object, zone()));
+ } else {
+ checked_object = Add<HCheckMaps>(object, types);
+ }
+ instr = BuildLoadMonomorphic(
+ &info, object, checked_object, ast_id, expr->LoadId());
+ if (instr == NULL) return;
+ if (instr->IsLinked()) return ast_context()->ReturnValue(instr);
} else {
- instr = BuildLoadNamedGeneric(Pop(), name, expr);
+ instr = BuildLoadNamedGeneric(object, name, expr);
}
} else {
@@ -5914,9 +5901,7 @@
if (TryArgumentsAccess(expr)) return;
CHECK_ALIVE(VisitForValue(expr->obj()));
- if ((!expr->IsStringLength() &&
- !expr->IsFunctionPrototype() &&
- !expr->key()->IsPropertyName()) ||
+ if ((!expr->IsFunctionPrototype() && !expr->key()->IsPropertyName()) ||
expr->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(expr->key()));
}
@@ -6015,22 +6000,13 @@
Handle<String> name) {
if (types->length() > kMaxCallPolymorphism) return false;
- Handle<Map> map(types->at(0));
- LookupResult lookup(isolate());
- if (!CanLoadPropertyFromPrototype(map, name, &lookup)) return false;
-
- Handle<Object> prototype(map->prototype(), isolate());
- for (int count = 1; count < types->length(); ++count) {
- Handle<Map> test_map(types->at(count));
- if (!CanLoadPropertyFromPrototype(test_map, name, &lookup)) return false;
- if (test_map->prototype() != *prototype) return false;
- }
-
- if (!expr->ComputeTarget(map, name)) return false;
+ PropertyAccessInfo info(isolate(), types->at(0), name);
+ if (!info.CanLoadAsMonomorphic(types)) return false;
+ if (!expr->ComputeTarget(info.map(), name)) return false;
BuildCheckHeapObject(receiver);
Add<HCheckMaps>(receiver, types);
- AddCheckPrototypeMaps(expr->holder(), map);
+ AddCheckPrototypeMaps(expr->holder(), info.map());
if (FLAG_trace_inlining) {
Handle<JSFunction> caller = current_info()->closure();
SmartArrayPointer<char> caller_name =
@@ -6107,10 +6083,8 @@
HBasicBlock* empty_smi_block = graph()->CreateBasicBlock();
HBasicBlock* not_smi_block = graph()->CreateBasicBlock();
number_block = graph()->CreateBasicBlock();
- HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(receiver);
- smicheck->SetSuccessorAt(0, empty_smi_block);
- smicheck->SetSuccessorAt(1, not_smi_block);
- current_block()->Finish(smicheck);
+ current_block()->Finish(New<HIsSmiAndBranch>(
+ receiver, empty_smi_block, not_smi_block));
empty_smi_block->Goto(number_block);
set_current_block(not_smi_block);
} else {
@@ -6122,20 +6096,17 @@
HUnaryControlInstruction* compare;
if (handle_smi && map.is_identical_to(number_marker_map)) {
- compare = new(zone()) HCompareMap(
- receiver, heap_number_map, if_true, if_false);
+ compare = New<HCompareMap>(receiver, heap_number_map, if_true, if_false);
map = initial_number_map;
expr->set_number_check(
Handle<JSObject>(JSObject::cast(map->prototype())));
} else if (map.is_identical_to(string_marker_map)) {
- compare = new(zone()) HIsStringAndBranch(receiver);
- compare->SetSuccessorAt(0, if_true);
- compare->SetSuccessorAt(1, if_false);
+ compare = New<HIsStringAndBranch>(receiver, if_true, if_false);
map = initial_string_map;
expr->set_string_check(
Handle<JSObject>(JSObject::cast(map->prototype())));
} else {
- compare = new(zone()) HCompareMap(receiver, map, if_true, if_false);
+ compare = New<HCompareMap>(receiver, map, if_true, if_false);
expr->set_map_check();
}
@@ -6182,7 +6153,8 @@
// Because the deopt may be the only path in the polymorphic call, make sure
// that the environment stack matches the depth on deopt that it otherwise
// would have had after a successful call.
- Drop(argument_count - (ast_context()->IsEffect() ? 0 : 1));
+ Drop(argument_count);
+ if (!ast_context()->IsEffect()) Push(graph()->GetConstant0());
FinishExitWithHardDeoptimization("Unknown map in polymorphic call", join);
} else {
HValue* context = environment()->context();
@@ -7567,9 +7539,7 @@
HValue* object = Top();
HValue* key = NULL;
- if ((!prop->IsStringLength() &&
- !prop->IsFunctionPrototype() &&
- !prop->key()->IsPropertyName()) ||
+ if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
prop->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(prop->key()));
key = Top();
@@ -7619,9 +7589,16 @@
}
-// Checks if the given shift amounts have form: (sa) and (32 - sa).
+// Checks if the given shift amounts have one of the following forms:
+// (N1) and (N2) with N1 + N2 = 32; (sa) and (32 - sa).
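+// For example, in (x << 3) | (x >>> 29) the shift amounts 3 and 29 sum to
+// 32, so the expression can be replaced by a single rotate right by 29.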
static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
HValue* const32_minus_sa) {
+ if (sa->IsConstant() && const32_minus_sa->IsConstant()) {
+ const HConstant* c1 = HConstant::cast(sa);
+ const HConstant* c2 = HConstant::cast(const32_minus_sa);
+ return c1->HasInteger32Value() && c2->HasInteger32Value() &&
+ (c1->Integer32Value() + c2->Integer32Value() == 32);
+ }
if (!const32_minus_sa->IsSub()) return false;
HSub* sub = HSub::cast(const32_minus_sa);
if (sa != sub->right()) return false;
@@ -7638,10 +7615,10 @@
// directions that can be replaced by one rotate right instruction or not.
// Returns the operand and the shift amount for the rotate instruction in the
// former case.
-bool HOptimizedGraphBuilder::MatchRotateRight(HValue* left,
- HValue* right,
- HValue** operand,
- HValue** shift_amount) {
+bool HGraphBuilder::MatchRotateRight(HValue* left,
+ HValue* right,
+ HValue** operand,
+ HValue** shift_amount) {
HShl* shl;
HShr* shr;
if (left->IsShl() && right->IsShr()) {
@@ -7677,6 +7654,18 @@
}
+HValue* HGraphBuilder::EnforceNumberType(HValue* number,
+ Handle<Type> expected) {
+ if (expected->Is(Type::Smi())) {
+ return Add<HForceRepresentation>(number, Representation::Smi());
+ }
+ if (expected->Is(Type::Signed32())) {
+ return Add<HForceRepresentation>(number, Representation::Integer32());
+ }
+ return number;
+}
+
+
HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) {
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
@@ -7687,6 +7676,68 @@
}
}
+ // We put temporary values on the stack, which don't correspond to anything
+ // in baseline code. Since nothing is observable, we avoid recording those
+ // pushes with a NoObservableSideEffectsScope.
+ NoObservableSideEffectsScope no_effects(this);
+
+ Handle<Type> expected_type = *expected;
+
+ // Separate the number type from the rest.
+ Handle<Type> expected_obj = handle(Type::Intersect(
+ expected_type, handle(Type::NonNumber(), isolate())), isolate());
+ Handle<Type> expected_number = handle(Type::Intersect(
+ expected_type, handle(Type::Number(), isolate())), isolate());
+
+ // We expect to get a number.
+ // (We need to check first, since Type::None->Is(Type::Any()) == true.)
+ if (expected_obj->Is(Type::None())) {
+ ASSERT(!expected_number->Is(Type::None()));
+ return value;
+ }
+
+ if (expected_obj->Is(Type::Undefined())) {
+ // This is already done by HChange.
+ *expected = handle(Type::Union(
+ expected_number, handle(Type::Double(), isolate())), isolate());
+ return value;
+ }
+
+ if (expected_obj->Is(Type::Null())) {
+ *expected = handle(Type::Union(
+ expected_number, handle(Type::Smi(), isolate())), isolate());
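+ // ToNumber(null) == 0, so a known null input is replaced by the Smi
+ // constant 0 below; any other value passes through unchanged.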
+ IfBuilder if_null(this);
+ if_null.If<HCompareObjectEqAndBranch>(value,
+ graph()->GetConstantNull());
+ if_null.Then();
+ Push(graph()->GetConstant0());
+ if_null.Else();
+ Push(value);
+ if_null.End();
+ return Pop();
+ }
+
+ if (expected_obj->Is(Type::Boolean())) {
+ *expected = handle(Type::Union(
+ expected_number, handle(Type::Smi(), isolate())), isolate());
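+ // ToNumber(true) == 1 and ToNumber(false) == 0, so the two boolean
+ // values are replaced by the corresponding Smi constants below.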
+ IfBuilder if_true(this);
+ if_true.If<HCompareObjectEqAndBranch>(value,
+ graph()->GetConstantTrue());
+ if_true.Then();
+ Push(graph()->GetConstant1());
+ if_true.Else();
+ IfBuilder if_false(this);
+ if_false.If<HCompareObjectEqAndBranch>(value,
+ graph()->GetConstantFalse());
+ if_false.Then();
+ Push(graph()->GetConstant0());
+ if_false.Else();
+ Push(value);
+ if_false.End();
+ if_true.End();
+ return Pop();
+ }
+
return value;
}
@@ -7695,86 +7746,118 @@
BinaryOperation* expr,
HValue* left,
HValue* right) {
- HValue* context = environment()->context();
Handle<Type> left_type = expr->left()->bounds().lower;
Handle<Type> right_type = expr->right()->bounds().lower;
Handle<Type> result_type = expr->bounds().lower;
Maybe<int> fixed_right_arg = expr->fixed_right_arg();
+
+ return HGraphBuilder::BuildBinaryOperation(expr->op(), left, right,
+ left_type, right_type, result_type, fixed_right_arg);
+}
+
+
+HInstruction* HGraphBuilder::BuildBinaryOperation(
+ Token::Value op,
+ HValue* left,
+ HValue* right,
+ Handle<Type> left_type,
+ Handle<Type> right_type,
+ Handle<Type> result_type,
+ Maybe<int> fixed_right_arg) {
+
Representation left_rep = Representation::FromType(left_type);
Representation right_rep = Representation::FromType(right_type);
- Representation result_rep = Representation::FromType(result_type);
- if (expr->op() != Token::ADD ||
- (left->type().IsNonString() && right->type().IsNonString())) {
- // For addition we can only truncate the arguments to number if we can
- // prove that we will not end up in string concatenation mode.
- left = TruncateToNumber(left, &left_type);
- right = TruncateToNumber(right, &right_type);
- }
+ bool maybe_string_add = op == Token::ADD &&
+ (left_type->Maybe(Type::String()) ||
+ right_type->Maybe(Type::String()));
if (left_type->Is(Type::None())) {
Add<HDeoptimize>("Insufficient type feedback for LHS of binary operation",
Deoptimizer::SOFT);
- // TODO(rossberg): we should be able to get rid of non-continuous defaults.
+ // TODO(rossberg): we should be able to get rid of non-continuous
+ // defaults.
left_type = handle(Type::Any(), isolate());
+ } else {
+ if (!maybe_string_add) left = TruncateToNumber(left, &left_type);
+ left_rep = Representation::FromType(left_type);
}
+
if (right_type->Is(Type::None())) {
Add<HDeoptimize>("Insufficient type feedback for RHS of binary operation",
Deoptimizer::SOFT);
right_type = handle(Type::Any(), isolate());
+ } else {
+ if (!maybe_string_add) right = TruncateToNumber(right, &right_type);
+ right_rep = Representation::FromType(right_type);
}
+
+ Representation result_rep = Representation::FromType(result_type);
+
+ bool is_string_add = op == Token::ADD &&
+ (left_type->Is(Type::String()) ||
+ right_type->Is(Type::String()));
+
HInstruction* instr = NULL;
- switch (expr->op()) {
+ switch (op) {
case Token::ADD:
- if (left_type->Is(Type::String()) && right_type->Is(Type::String())) {
- BuildCheckHeapObject(left);
- AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
- BuildCheckHeapObject(right);
- AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
- instr = HStringAdd::New(zone(), context, left, right);
+ if (is_string_add) {
+ StringAddFlags flags = STRING_ADD_CHECK_BOTH;
+ if (left_type->Is(Type::String())) {
+ BuildCheckHeapObject(left);
+ AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
+ flags = STRING_ADD_CHECK_RIGHT;
+ }
+ if (right_type->Is(Type::String())) {
+ BuildCheckHeapObject(right);
+ AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
+ flags = (flags == STRING_ADD_CHECK_BOTH)
+ ? STRING_ADD_CHECK_LEFT : STRING_ADD_CHECK_NONE;
+ }
+ instr = NewUncasted<HStringAdd>(left, right, flags);
} else {
- instr = HAdd::New(zone(), context, left, right);
+ instr = NewUncasted<HAdd>(left, right);
}
break;
case Token::SUB:
- instr = HSub::New(zone(), context, left, right);
+ instr = NewUncasted<HSub>(left, right);
break;
case Token::MUL:
- instr = HMul::New(zone(), context, left, right);
+ instr = NewUncasted<HMul>(left, right);
break;
case Token::MOD:
- instr = HMod::New(zone(), context, left, right, fixed_right_arg);
+ instr = NewUncasted<HMod>(left, right, fixed_right_arg);
break;
case Token::DIV:
- instr = HDiv::New(zone(), context, left, right);
+ instr = NewUncasted<HDiv>(left, right);
break;
case Token::BIT_XOR:
case Token::BIT_AND:
- instr = NewUncasted<HBitwise>(expr->op(), left, right);
+ instr = NewUncasted<HBitwise>(op, left, right);
break;
case Token::BIT_OR: {
HValue* operand, *shift_amount;
if (left_type->Is(Type::Signed32()) &&
right_type->Is(Type::Signed32()) &&
MatchRotateRight(left, right, &operand, &shift_amount)) {
- instr = new(zone()) HRor(context, operand, shift_amount);
+ instr = NewUncasted<HRor>(operand, shift_amount);
} else {
- instr = NewUncasted<HBitwise>(expr->op(), left, right);
+ instr = NewUncasted<HBitwise>(op, left, right);
}
break;
}
case Token::SAR:
- instr = HSar::New(zone(), context, left, right);
+ instr = NewUncasted<HSar>(left, right);
break;
case Token::SHR:
- instr = HShr::New(zone(), context, left, right);
+ instr = NewUncasted<HShr>(left, right);
if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
CanBeZero(right)) {
graph()->RecordUint32Instruction(instr);
}
break;
case Token::SHL:
- instr = HShl::New(zone(), context, left, right);
+ instr = NewUncasted<HShl>(left, right);
break;
default:
UNREACHABLE();
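The ADD case now distinguishes operands statically known to be strings (checked eagerly above, so HStringAdd can skip them) from operands that merely might be strings. The flag selection, restated as a stand-alone sketch (the enum is mirrored locally with illustrative values so the snippet compiles on its own):

enum StringAddFlags {
  STRING_ADD_CHECK_NONE,
  STRING_ADD_CHECK_LEFT,
  STRING_ADD_CHECK_RIGHT,
  STRING_ADD_CHECK_BOTH
};

StringAddFlags SelectStringAddFlags(bool left_is_string, bool right_is_string) {
  StringAddFlags flags = STRING_ADD_CHECK_BOTH;
  // A side already checked via HCheckInstanceType::NewIsString does not
  // need to be re-checked inside the stub.
  if (left_is_string) flags = STRING_ADD_CHECK_RIGHT;
  if (right_is_string) {
    flags = (flags == STRING_ADD_CHECK_BOTH) ? STRING_ADD_CHECK_LEFT
                                             : STRING_ADD_CHECK_NONE;
  }
  return flags;
}
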
@@ -7874,8 +7957,8 @@
HBasicBlock* eval_right = graph()->CreateBasicBlock();
ToBooleanStub::Types expected(expr->left()->to_boolean_types());
HBranch* test = is_logical_and
- ? new(zone()) HBranch(left_value, expected, eval_right, empty_block)
- : new(zone()) HBranch(left_value, expected, empty_block, eval_right);
+ ? New<HBranch>(left_value, expected, eval_right, empty_block)
+ : New<HBranch>(left_value, expected, empty_block, eval_right);
current_block()->Finish(test);
set_current_block(eval_right);
@@ -8098,7 +8181,7 @@
BuildCheckHeapObject(right);
AddInstruction(HCheckInstanceType::NewIsSpecObject(right, zone()));
HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
+ New<HCompareObjectEqAndBranch>(left, right);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
}
@@ -8113,7 +8196,7 @@
BuildCheckHeapObject(right);
AddInstruction(HCheckInstanceType::NewIsInternalizedString(right, zone()));
HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
+ New<HCompareObjectEqAndBranch>(left, right);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
} else {
@@ -8126,7 +8209,7 @@
return ast_context()->ReturnInstruction(result, expr->id());
} else {
HCompareNumericAndBranch* result =
- new(zone()) HCompareNumericAndBranch(left, right, op);
+ New<HCompareNumericAndBranch>(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
@@ -8187,13 +8270,15 @@
int object_offset = object_size;
InstanceType instance_type = boilerplate_object->map()->instance_type();
- bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- AllocationSite::CanTrack(instance_type);
+ bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE;
- // If using allocation sites, then the payload on the site should already
- // be filled in as a valid (boilerplate) array.
+ // If using allocation sites, then
+ // 1) the payload on the site should already be filled in as a valid
+ // (boilerplate) array, and
+ // 2) we shouldn't be pretenuring the allocations.
ASSERT(!create_allocation_site_info ||
- AllocationSite::cast(*allocation_site_object)->IsLiteralSite());
+ (AllocationSite::cast(*allocation_site_object)->IsLiteralSite() &&
+ isolate()->heap()->GetPretenureMode() == NOT_TENURED));
if (create_allocation_site_info) {
object_size += AllocationMemento::kSize;
@@ -8206,7 +8291,6 @@
HInstruction* object = Add<HAllocate>(object_size_constant, type,
isolate()->heap()->GetPretenureMode(), instance_type);
-
BuildEmitObjectHeader(boilerplate_object, object);
if (create_allocation_site_info) {
@@ -8568,7 +8652,7 @@
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsSmiAndBranch* result = new(zone()) HIsSmiAndBranch(value);
+ HIsSmiAndBranch* result = New<HIsSmiAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8629,7 +8713,7 @@
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsObjectAndBranch* result = new(zone()) HIsObjectAndBranch(value);
+ HIsObjectAndBranch* result = New<HIsObjectAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8643,8 +8727,7 @@
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HIsUndetectableAndBranch* result =
- new(zone()) HIsUndetectableAndBranch(value);
+ HIsUndetectableAndBranch* result = New<HIsUndetectableAndBranch>(value);
return ast_context()->ReturnControl(result, call->id());
}
@@ -8766,13 +8849,10 @@
HValue* value = Pop();
HValue* object = Pop();
// Check if object is not a smi.
- HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(object);
HBasicBlock* if_smi = graph()->CreateBasicBlock();
HBasicBlock* if_heap_object = graph()->CreateBasicBlock();
HBasicBlock* join = graph()->CreateBasicBlock();
- smicheck->SetSuccessorAt(0, if_smi);
- smicheck->SetSuccessorAt(1, if_heap_object);
- current_block()->Finish(smicheck);
+ current_block()->Finish(New<HIsSmiAndBranch>(object, if_smi, if_heap_object));
if_smi->Goto(join);
// Check if object is a JSValue.
diff --git a/src/hydrogen.h b/src/hydrogen.h
index c1dafa8..a371fa5 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -941,7 +941,12 @@
class HIfContinuation V8_FINAL {
public:
- HIfContinuation() { continuation_captured_ = false; }
+ HIfContinuation() : continuation_captured_(false) {}
+ HIfContinuation(HBasicBlock* true_branch,
+ HBasicBlock* false_branch,
+ int position = RelocInfo::kNoPosition)
+ : continuation_captured_(true), true_branch_(true_branch),
+ false_branch_(false_branch), position_(position) {}
~HIfContinuation() { ASSERT(!continuation_captured_); }
void Capture(HBasicBlock* true_branch,
@@ -970,6 +975,10 @@
return IsTrueReachable() || IsFalseReachable();
}
+ HBasicBlock* true_branch() const { return true_branch_; }
+ HBasicBlock* false_branch() const { return false_branch_; }
+
+ private:
bool continuation_captured_;
HBasicBlock* true_branch_;
HBasicBlock* false_branch_;
@@ -1260,10 +1269,25 @@
HInstruction* BuildLoadStringLength(HValue* object, HValue* checked_value);
HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map>);
HLoadNamedField* AddLoadElements(HValue* object);
+
+ bool MatchRotateRight(HValue* left,
+ HValue* right,
+ HValue** operand,
+ HValue** shift_amount);
+
+ HInstruction* BuildBinaryOperation(Token::Value op,
+ HValue* left,
+ HValue* right,
+ Handle<Type> left_type,
+ Handle<Type> right_type,
+ Handle<Type> result_type,
+ Maybe<int> fixed_right_arg);
+
HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin);
+ HValue* EnforceNumberType(HValue* number, Handle<Type> expected);
HValue* TruncateToNumber(HValue* value, Handle<Type>* expected);
void PushAndAdd(HInstruction* instr);
@@ -1271,8 +1295,7 @@
void FinishExitWithHardDeoptimization(const char* reason,
HBasicBlock* continuation);
- void AddIncrementCounter(StatsCounter* counter,
- HValue* context);
+ void AddIncrementCounter(StatsCounter* counter);
class IfBuilder V8_FINAL {
public:
@@ -1286,80 +1309,79 @@
}
template<class Condition>
- HInstruction* If(HValue *p) {
- HControlInstruction* compare = new(zone()) Condition(p);
+ Condition* If(HValue *p) {
+ Condition* compare = builder()->New<Condition>(p);
AddCompare(compare);
return compare;
}
template<class Condition, class P2>
- HInstruction* If(HValue* p1, P2 p2) {
- HControlInstruction* compare = new(zone()) Condition(p1, p2);
+ Condition* If(HValue* p1, P2 p2) {
+ Condition* compare = builder()->New<Condition>(p1, p2);
AddCompare(compare);
return compare;
}
template<class Condition, class P2, class P3>
- HInstruction* If(HValue* p1, P2 p2, P3 p3) {
- HControlInstruction* compare = new(zone()) Condition(p1, p2, p3);
+ Condition* If(HValue* p1, P2 p2, P3 p3) {
+ Condition* compare = builder()->New<Condition>(p1, p2, p3);
AddCompare(compare);
return compare;
}
- template<class Condition, class P2>
- HInstruction* IfNot(HValue* p1, P2 p2) {
- HControlInstruction* compare = new(zone()) Condition(p1, p2);
- AddCompare(compare);
- HBasicBlock* block0 = compare->SuccessorAt(0);
- HBasicBlock* block1 = compare->SuccessorAt(1);
- compare->SetSuccessorAt(0, block1);
- compare->SetSuccessorAt(1, block0);
- return compare;
- }
-
- template<class Condition, class P2, class P3>
- HInstruction* IfNot(HValue* p1, P2 p2, P3 p3) {
- HControlInstruction* compare = new(zone()) Condition(p1, p2, p3);
- AddCompare(compare);
- HBasicBlock* block0 = compare->SuccessorAt(0);
- HBasicBlock* block1 = compare->SuccessorAt(1);
- compare->SetSuccessorAt(0, block1);
- compare->SetSuccessorAt(1, block0);
- return compare;
- }
-
template<class Condition>
- HInstruction* OrIf(HValue *p) {
+ Condition* IfNot(HValue* p) {
+ Condition* compare = If<Condition>(p);
+ compare->Not();
+ return compare;
+ }
+
+ template<class Condition, class P2>
+ Condition* IfNot(HValue* p1, P2 p2) {
+ Condition* compare = If<Condition>(p1, p2);
+ compare->Not();
+ return compare;
+ }
+
+ template<class Condition, class P2, class P3>
+ Condition* IfNot(HValue* p1, P2 p2, P3 p3) {
+ Condition* compare = If<Condition>(p1, p2, p3);
+ compare->Not();
+ return compare;
+ }
+
+ template<class Condition>
+ Condition* OrIf(HValue *p) {
Or();
return If<Condition>(p);
}
template<class Condition, class P2>
- HInstruction* OrIf(HValue* p1, P2 p2) {
+ Condition* OrIf(HValue* p1, P2 p2) {
Or();
return If<Condition>(p1, p2);
}
template<class Condition, class P2, class P3>
- HInstruction* OrIf(HValue* p1, P2 p2, P3 p3) {
+ Condition* OrIf(HValue* p1, P2 p2, P3 p3) {
Or();
return If<Condition>(p1, p2, p3);
}
template<class Condition>
- HInstruction* AndIf(HValue *p) {
+ Condition* AndIf(HValue *p) {
And();
return If<Condition>(p);
}
template<class Condition, class P2>
- HInstruction* AndIf(HValue* p1, P2 p2) {
+ Condition* AndIf(HValue* p1, P2 p2) {
And();
return If<Condition>(p1, p2);
}
template<class Condition, class P2, class P3>
- HInstruction* AndIf(HValue* p1, P2 p2, P3 p3) {
+ Condition* AndIf(HValue* p1, P2 p2, P3 p3) {
And();
return If<Condition>(p1, p2, p3);
}
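Since If<>/IfNot<> now return the concrete condition type rather than HInstruction*, a caller can configure the compare it gets back. A hypothetical fragment inside a builder method (HIsSmiAndBranch appears elsewhere in this patch):

IfBuilder if_heap_object(this);
// IfNot<> is now built on If<> plus Not() on the resulting compare,
// replacing the old successor-swapping duplicates.
if_heap_object.IfNot<HIsSmiAndBranch>(value);
if_heap_object.Then();
// ... value is a heap object on this path ...
if_heap_object.End();
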
@@ -1367,8 +1389,50 @@
void Or();
void And();
+ // Captures the current state of this IfBuilder in the specified
+ // continuation and ends this IfBuilder.
void CaptureContinuation(HIfContinuation* continuation);
+ // Joins the specified continuation from this IfBuilder and ends this
+ // IfBuilder. This appends a Goto instruction from the true branch of
+ // this IfBuilder to the true branch of the continuation unless the
+ // true branch of this IfBuilder is already finished, and vice versa
+ // for the false branch.
+ //
+ // The basic idea is as follows: you have several nested IfBuilders
+ // that you want to join based on two possible outcomes (e.g. success
+ // and failure, or whatever). You can do this easily using this method
+ // now, for example:
+ //
+ // HIfContinuation cont(graph()->CreateBasicBlock(),
+ // graph()->CreateBasicBlock());
+ // ...
+ // IfBuilder if_whatever(this);
+ // if_whatever.If<Condition>(arg);
+ // if_whatever.Then();
+ // ...
+ // if_whatever.Else();
+ // ...
+ // if_whatever.JoinContinuation(&cont);
+ // ...
+ // IfBuilder if_something(this);
+ // if_something.If<Condition>(arg1, arg2);
+ // if_something.Then();
+ // ...
+ // if_something.Else();
+ // ...
+ // if_something.JoinContinuation(&cont);
+ // ...
+ // IfBuilder if_finally(this, &cont);
+ // if_finally.Then();
+ // // continues after the then-code of if_whatever or if_something.
+ // ...
+ // if_finally.Else();
+ // // continues after the else-code of if_whatever or if_something.
+ // ...
+ // if_finally.End();
+ void JoinContinuation(HIfContinuation* continuation);
+
void Then();
void Else();
void End();
@@ -1382,9 +1446,9 @@
void Return(HValue* value);
private:
- void AddCompare(HControlInstruction* compare);
+ HControlInstruction* AddCompare(HControlInstruction* compare);
- Zone* zone() { return builder_->zone(); }
+ HGraphBuilder* builder() const { return builder_; }
HGraphBuilder* builder_;
int position_;
@@ -1946,13 +2010,86 @@
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
void HandlePolymorphicLoadNamedField(int position,
+ BailoutId ast_id,
BailoutId return_id,
HValue* object,
SmallMapList* types,
Handle<String> name);
- HInstruction* TryLoadPolymorphicAsMonomorphic(HValue* object,
- SmallMapList* types,
- Handle<String> name);
+
+ class PropertyAccessInfo {
+ public:
+ PropertyAccessInfo(Isolate* isolate, Handle<Map> map, Handle<String> name)
+ : lookup_(isolate),
+ map_(map),
+ name_(name),
+ access_(HObjectAccess::ForMap()) { }
+
+ // Checks whether this PropertyAccessInfo can be handled as a monomorphic
+ // named load. It additionally fills in the fields necessary to generate
+ // the lookup code.
+ bool CanLoadMonomorphic();
+
+ // Checks whether all types behave uniformly when loading name. If all
+ // maps behave the same, a single monomorphic load instruction can be
+ // emitted, guarded by a single map-checks instruction that checks whether
+ // the receiver is an instance of any of the types.
+ // This method skips the first type in types, assuming that this
+ // PropertyAccessInfo is built for types->first().
+ bool CanLoadAsMonomorphic(SmallMapList* types);
+
+ bool IsStringLength() {
+ return map_->instance_type() < FIRST_NONSTRING_TYPE &&
+ name_->Equals(isolate()->heap()->length_string());
+ }
+
+ bool IsArrayLength() {
+ return map_->instance_type() == JS_ARRAY_TYPE &&
+ name_->Equals(isolate()->heap()->length_string());
+ }
+
+ bool IsTypedArrayLength() {
+ return map_->instance_type() == JS_TYPED_ARRAY_TYPE &&
+ name_->Equals(isolate()->heap()->length_string());
+ }
+
+ bool has_holder() { return !holder_.is_null(); }
+
+ LookupResult* lookup() { return &lookup_; }
+ Handle<Map> map() { return map_; }
+ Handle<JSObject> holder() { return holder_; }
+ Handle<JSFunction> accessor() { return accessor_; }
+ Handle<Object> constant() { return constant_; }
+ HObjectAccess access() { return access_; }
+
+ private:
+ Isolate* isolate() { return lookup_.isolate(); }
+
+ bool LoadResult(Handle<Map> map);
+ bool LookupDescriptor();
+ bool LookupInPrototypes();
+ bool IsCompatibleForLoad(PropertyAccessInfo* other);
+
+ void GeneralizeRepresentation(Representation r) {
+ access_ = access_.WithRepresentation(
+ access_.representation().generalize(r));
+ }
+
+ LookupResult lookup_;
+ Handle<Map> map_;
+ Handle<String> name_;
+ Handle<JSObject> holder_;
+ Handle<JSFunction> accessor_;
+ Handle<Object> constant_;
+ HObjectAccess access_;
+ };
+
+ HInstruction* BuildLoadMonomorphic(PropertyAccessInfo* info,
+ HValue* object,
+ HInstruction* checked_object,
+ BailoutId ast_id,
+ BailoutId return_id,
+ bool can_inline_accessor = true);
+
void HandlePolymorphicStoreNamedField(int position,
BailoutId assignment_id,
HValue* object,
@@ -2031,9 +2168,6 @@
Handle<Map> map,
Handle<JSFunction> getter,
Handle<JSObject> holder);
- HInstruction* BuildLoadNamedMonomorphic(HValue* object,
- Handle<String> name,
- Handle<Map> map);
HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
@@ -2112,11 +2246,6 @@
HValue* receiver,
Handle<Map> receiver_map);
- bool MatchRotateRight(HValue* left,
- HValue* right,
- HValue** operand,
- HValue** shift_amount);
-
// The translation state of the currently-being-translated function.
FunctionState* function_state_;
diff --git a/src/i18n.cc b/src/i18n.cc
index 0ae19c8..dbff6e5 100644
--- a/src/i18n.cc
+++ b/src/i18n.cc
@@ -464,7 +464,7 @@
Handle<String> key = isolate->factory()->NewStringFromAscii(
CStrVector("minimumSignificantDigits"));
- if (resolved->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(resolved, key)) {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(
@@ -477,7 +477,7 @@
key = isolate->factory()->NewStringFromAscii(
CStrVector("maximumSignificantDigits"));
- if (resolved->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(resolved, key)) {
JSObject::SetProperty(
resolved,
isolate->factory()->NewStringFromAscii(
@@ -855,7 +855,7 @@
Handle<JSObject> obj) {
Handle<String> key =
isolate->factory()->NewStringFromAscii(CStrVector("dateFormat"));
- if (obj->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(obj, key)) {
return reinterpret_cast<icu::SimpleDateFormat*>(
obj->GetInternalField(0));
}
@@ -920,7 +920,7 @@
Handle<JSObject> obj) {
Handle<String> key =
isolate->factory()->NewStringFromAscii(CStrVector("numberFormat"));
- if (obj->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(obj, key)) {
return reinterpret_cast<icu::DecimalFormat*>(obj->GetInternalField(0));
}
@@ -981,7 +981,7 @@
Handle<JSObject> obj) {
Handle<String> key =
isolate->factory()->NewStringFromAscii(CStrVector("collator"));
- if (obj->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(obj, key)) {
return reinterpret_cast<icu::Collator*>(obj->GetInternalField(0));
}
@@ -1045,7 +1045,7 @@
Handle<JSObject> obj) {
Handle<String> key =
isolate->factory()->NewStringFromAscii(CStrVector("breakIterator"));
- if (obj->HasLocalProperty(*key)) {
+ if (JSReceiver::HasLocalProperty(obj, key)) {
return reinterpret_cast<icu::BreakIterator*>(obj->GetInternalField(0));
}
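The recurring change in this file swaps the raw-receiver obj->HasLocalProperty(*key) for the static, handle-based form. A minimal sketch of the new shape (the wrapper function itself is hypothetical):

static bool HasDateFormatField(Isolate* isolate, Handle<JSObject> obj) {
  Handle<String> key =
      isolate->factory()->NewStringFromAscii(CStrVector("dateFormat"));
  // Receiver and key both travel as handles, so the check is GC-safe.
  return JSReceiver::HasLocalProperty(obj, key);
}
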
diff --git a/src/i18n.js b/src/i18n.js
index 1798bbb..a80fd4d 100644
--- a/src/i18n.js
+++ b/src/i18n.js
@@ -258,8 +258,8 @@
// DateTimeFormat.format needs to be a 0-arg method, but can still
// receive optional dateValue param. If one was provided, pass it
// along.
- if (arguments.length > 0) {
- return implementation(that, arguments[0]);
+ if (%_ArgumentsLength() > 0) {
+ return implementation(that, %_Arguments(0));
} else {
return implementation(that);
}
@@ -978,8 +978,8 @@
* @constructor
*/
%SetProperty(Intl, 'Collator', function() {
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
if (!this || this === Intl) {
// Constructor is called as a function.
@@ -1038,7 +1038,7 @@
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- return supportedLocalesOf('collator', locales, arguments[1]);
+ return supportedLocalesOf('collator', locales, %_Arguments(1));
},
DONT_ENUM
);
@@ -1207,8 +1207,8 @@
* @constructor
*/
%SetProperty(Intl, 'NumberFormat', function() {
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
if (!this || this === Intl) {
// Constructor is called as a function.
@@ -1286,7 +1286,7 @@
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- return supportedLocalesOf('numberformat', locales, arguments[1]);
+ return supportedLocalesOf('numberformat', locales, %_Arguments(1));
},
DONT_ENUM
);
@@ -1606,8 +1606,8 @@
* @constructor
*/
%SetProperty(Intl, 'DateTimeFormat', function() {
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
if (!this || this === Intl) {
// Constructor is called as a function.
@@ -1685,7 +1685,7 @@
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- return supportedLocalesOf('dateformat', locales, arguments[1]);
+ return supportedLocalesOf('dateformat', locales, %_Arguments(1));
},
DONT_ENUM
);
@@ -1812,8 +1812,8 @@
* @constructor
*/
%SetProperty(Intl, 'v8BreakIterator', function() {
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
if (!this || this === Intl) {
// Constructor is called as a function.
@@ -1868,7 +1868,7 @@
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- return supportedLocalesOf('breakiterator', locales, arguments[1]);
+ return supportedLocalesOf('breakiterator', locales, %_Arguments(1));
},
DONT_ENUM
);
@@ -1975,8 +1975,8 @@
throw new $TypeError('Method invoked on undefined or null value.');
}
- var locales = arguments[1];
- var options = arguments[2];
+ var locales = %_Arguments(1);
+ var options = %_Arguments(2);
var collator = cachedOrNewService('collator', locales, options);
return compare(collator, this, that);
},
@@ -2003,8 +2003,8 @@
throw new $TypeError('Method invoked on an object that is not Number.');
}
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
var numberFormat = cachedOrNewService('numberformat', locales, options);
return formatNumber(numberFormat, this);
},
@@ -2049,8 +2049,8 @@
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
return toLocaleDateTime(
this, locales, options, 'any', 'all', 'dateformatall');
},
@@ -2074,8 +2074,8 @@
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
return toLocaleDateTime(
this, locales, options, 'date', 'date', 'dateformatdate');
},
@@ -2099,8 +2099,8 @@
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
- var locales = arguments[0];
- var options = arguments[1];
+ var locales = %_Arguments(0);
+ var options = %_Arguments(1);
return toLocaleDateTime(
this, locales, options, 'time', 'time', 'dateformattime');
},
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 55eff93..736dd3b 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -561,6 +561,7 @@
static uint64_t found_by_runtime_probing_only_;
friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index a159748..5169627 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -539,10 +539,12 @@
__ mov(eax, Operand(esp, 8 * kPointerSize));
{
FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1, ebx);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 0), eax);
__ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
}
__ popad();
__ ret(0);
@@ -1063,13 +1065,11 @@
// Lookup the argument in the number to string cache.
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- eax, // Input.
- ebx, // Result.
- ecx, // Scratch 1.
- edx, // Scratch 2.
- ¬_cached);
+ __ LookupNumberStringCache(eax, // Input.
+ ebx, // Result.
+ ecx, // Scratch 1.
+ edx, // Scratch 2.
+ ¬_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
// ----------- S t a t e -------------
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index a83c1ae..6128633 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -82,7 +82,7 @@
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
@@ -984,7 +984,7 @@
ASSERT_EQ(Token::SHL, op);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, left);
+ __ Cvtsi2sd(xmm0, left);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), left);
@@ -1370,7 +1370,7 @@
// Store the result in the HeapNumber and return.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
+ __ Cvtsi2sd(xmm0, ebx);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1594,7 +1594,7 @@
// Store the result in the HeapNumber and return.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
+ __ Cvtsi2sd(xmm0, ebx);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1782,7 +1782,7 @@
// Store the result in the HeapNumber and return.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
+ __ Cvtsi2sd(xmm0, ebx);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -2329,12 +2329,12 @@
__ jmp(not_numbers); // Argument in eax is not a number.
__ bind(&load_smi_edx);
__ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, edx);
+ __ Cvtsi2sd(xmm0, edx);
__ SmiTag(edx); // Retag smi for heap number overwriting test.
__ jmp(&load_eax);
__ bind(&load_smi_eax);
__ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, eax);
+ __ Cvtsi2sd(xmm1, eax);
__ SmiTag(eax); // Retag smi for heap number overwriting test.
__ jmp(&done, Label::kNear);
__ bind(&load_float_eax);
@@ -2350,11 +2350,11 @@
__ mov(scratch, left);
ASSERT(!scratch.is(right)); // We're about to clobber scratch.
__ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, scratch);
+ __ Cvtsi2sd(xmm0, scratch);
__ mov(scratch, right);
__ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, scratch);
+ __ Cvtsi2sd(xmm1, scratch);
}
@@ -2365,7 +2365,7 @@
Register scratch,
XMMRegister xmm_scratch) {
__ cvttsd2si(int32_result, Operand(operand));
- __ cvtsi2sd(xmm_scratch, int32_result);
+ __ Cvtsi2sd(xmm_scratch, int32_result);
__ pcmpeqd(xmm_scratch, operand);
__ movmskps(scratch, xmm_scratch);
// Two least significant bits should be both set.
@@ -2470,7 +2470,7 @@
// Save 1 in double_result - we need this several times later on.
__ mov(scratch, Immediate(1));
- __ cvtsi2sd(double_result, scratch);
+ __ Cvtsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
@@ -2490,7 +2490,7 @@
__ bind(&base_is_smi);
__ SmiUntag(base);
- __ cvtsi2sd(double_base, base);
+ __ Cvtsi2sd(double_base, base);
__ bind(&unpack_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -2683,7 +2683,7 @@
// and may not have contained the exponent value in the first place when the
// exponent is a smi. We reset it with exponent value before bailing out.
__ j(not_equal, &done);
- __ cvtsi2sd(double_exponent, exponent);
+ __ Cvtsi2sd(double_exponent, exponent);
// Returning or bailing out.
Counters* counters = masm->isolate()->counters();
@@ -2756,8 +2756,7 @@
__ j(not_equal, &miss);
}
- StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
- support_wrapper_);
+ StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
@@ -3495,7 +3494,7 @@
__ call(edx);
// Drop arguments and come back to JS mode.
- __ LeaveApiExitFrame();
+ __ LeaveApiExitFrame(true);
// Check the result.
Label success;
@@ -3768,98 +3767,13 @@
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label smi_hash_calculated;
- Label load_result_from_cache;
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(object, ¬_smi, Label::kNear);
- __ mov(scratch, object);
- __ SmiUntag(scratch);
- __ jmp(&smi_hash_calculated, Label::kNear);
- __ bind(¬_smi);
- __ cmp(FieldOperand(object, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- Register index = scratch;
- Register probe = mask;
- __ mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope fscope(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- } else {
- __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- __ FCmp();
- }
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache, Label::kNear);
-
- __ bind(&smi_hash_calculated);
- // Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, mask);
- // Check if the entry is the smi we are looking for.
- __ cmp(object,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ mov(result,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
-}
-
-
void NumberToStringStub::Generate(MacroAssembler* masm) {
Label runtime;
__ mov(ebx, Operand(esp, kPointerSize));
// Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, &runtime);
+ __ LookupNumberStringCache(ebx, eax, ecx, edx, &runtime);
__ ret(1 * kPointerSize);
__ bind(&runtime);
@@ -4205,6 +4119,7 @@
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // eax : number of arguments to the construct function
// ebx : cache cell for call target
// edi : the function to call
Isolate* isolate = masm->isolate();
@@ -4224,9 +4139,8 @@
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the cell either some other function or an
// AllocationSite. Do a map check on the object in ecx.
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
__ j(not_equal, &miss);
@@ -4265,6 +4179,7 @@
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments register must be smi-tagged to call out.
__ SmiTag(eax);
__ push(eax);
__ push(edi);
@@ -4508,6 +4423,8 @@
// stack alignment is known to be correct. This function takes one argument
// which is passed on the stack, and we know that the stack has been
// prepared to pass at least one argument.
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(masm->isolate())));
__ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
__ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
}
@@ -5517,12 +5434,7 @@
// Check the number to string cache.
__ bind(¬_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- slow);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
__ mov(arg, scratch1);
__ mov(Operand(esp, stack_offset), arg);
__ bind(&done);
@@ -6258,7 +6170,7 @@
__ bind(&right_smi);
__ mov(ecx, eax); // Can't clobber eax because we can still jump away.
__ SmiUntag(ecx);
- __ cvtsi2sd(xmm1, ecx);
+ __ Cvtsi2sd(xmm1, ecx);
__ bind(&left);
__ JumpIfSmi(edx, &left_smi, Label::kNear);
@@ -6270,7 +6182,7 @@
__ bind(&left_smi);
__ mov(ecx, edx); // Can't clobber edx because we can still jump away.
__ SmiUntag(ecx);
- __ cvtsi2sd(xmm0, ecx);
+ __ Cvtsi2sd(xmm0, ecx);
__ bind(&done);
// Compare operands.
@@ -7300,9 +7212,8 @@
__ inc(edx);
__ mov(ecx, FieldOperand(ebx, Cell::kValueOffset));
if (FLAG_debug_code) {
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map));
__ Assert(equal, kExpectedAllocationSiteInCell);
}
@@ -7447,8 +7358,8 @@
__ cmp(ebx, Immediate(undefined_sentinel));
__ j(equal, &no_info);
__ mov(edx, FieldOperand(ebx, Cell::kValueOffset));
- __ cmp(FieldOperand(edx, 0), Immediate(Handle<Map>(
- masm->isolate()->heap()->allocation_site_map())));
+ __ cmp(FieldOperand(edx, 0), Immediate(
+ masm->isolate()->factory()->allocation_site_map()));
__ j(not_equal, &no_info);
__ mov(edx, FieldOperand(edx, AllocationSite::kTransitionInfoOffset));
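On the cvtsi2sd -> Cvtsi2sd renames throughout this file: the patch shows only call sites, so the following is an assumption, but the capitalized form is a MacroAssembler wrapper, and the usual reason to wrap cvtsi2sd is that the instruction writes only the low lanes of its destination register. A plausible sketch of the definition:

// Assumption -- the real definition lives in the ia32 macro assembler, not
// in this hunk. Zeroing dst first breaks the false dependence on whatever
// the register held before.
void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
  xorps(dst, dst);
  cvtsi2sd(dst, src);
}
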
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 5c8eca3..f36cd61 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -221,18 +221,6 @@
public:
NumberToStringStub() { }
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found);
-
private:
Major MajorKey() { return NumberToString; }
int MinorKey() { return 0; }
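The behavioral contract from the deleted comment block carries over unchanged to the MacroAssembler helper: on a hit the code falls through with the result register set, on a miss it jumps to the label with the input register untouched. A caller sketch with illustrative register choices:

Label not_found;
__ LookupNumberStringCache(eax,          // input: the number
                           ebx,          // output: the cached string
                           ecx,          // scratch 1
                           edx,          // scratch 2
                           &not_found);  // taken on a cache miss
// On a hit, execution falls through here with the string in ebx.
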
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 84a4d23..9385423 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -768,7 +768,7 @@
__ SmiUntag(ebx);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope fscope(masm, SSE2);
- __ cvtsi2sd(xmm0, ebx);
+ __ Cvtsi2sd(xmm0, ebx);
__ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
xmm0);
} else {
@@ -1165,7 +1165,8 @@
}
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
@@ -1174,7 +1175,7 @@
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length);
patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
}
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 13a70af..649bf9c 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -177,87 +177,6 @@
}
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x11;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-// The back edge bookkeeping code matches the pattern:
-//
-// sub <profiling_counter>, <delta>
-// jns ok
-// call <interrupt stub>
-// ok:
-//
-// The patched back edge looks like this:
-//
-// sub <profiling_counter>, <delta> ;; Not changed
-// nop
-// nop
-// call <on-stack replacment>
-// ok:
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code) {
- // Turn the jump into nops.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kNopByteOne;
- *(call_target_address - 2) = kNopByteTwo;
- // Replace the call address.
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code) {
- // Restore the original jump.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- // Restore the original call address.
- Assembler::set_target_address_at(call_target_address,
- interrupt_code->entry());
-
- interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- Code* osr_builtin =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- ASSERT_EQ(osr_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- return PATCHED_FOR_OSR;
- } else {
- // Get the interrupt stub code object to match against from cache.
- Code* interrupt_builtin =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
- ASSERT_EQ(interrupt_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return NOT_PATCHED;
- }
-}
-#endif // DEBUG
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 6d39cc1..9a2c3ce 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1586,21 +1586,15 @@
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_properties));
- __ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_properties));
__ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
@@ -4897,6 +4891,88 @@
#undef __
+
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x11;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
+// The back edge bookkeeping code matches the pattern:
+//
+// sub <profiling_counter>, <delta>
+// jns ok
+// call <interrupt stub>
+// ok:
+//
+// The patched back edge looks like this:
+//
+// sub <profiling_counter>, <delta> ;; Not changed
+// nop
+// nop
+// call <on-stack replacement>
+// ok:
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc,
+ Code* replacement_code) {
+ // Turn the jump into nops.
+ Address call_target_address = pc - kIntSize;
+ *(call_target_address - 3) = kNopByteOne;
+ *(call_target_address - 2) = kNopByteTwo;
+ // Replace the call address.
+ Assembler::set_target_address_at(call_target_address,
+ replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
+}
+
+
+void BackEdgeTable::RevertAt(Code* unoptimized_code,
+ Address pc,
+ Code* interrupt_code) {
+ // Restore the original jump.
+ Address call_target_address = pc - kIntSize;
+ *(call_target_address - 3) = kJnsInstruction;
+ *(call_target_address - 2) = kJnsOffset;
+ // Restore the original call address.
+ Assembler::set_target_address_at(call_target_address,
+ interrupt_code->entry());
+
+ interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, interrupt_code);
+}
+
+
+#ifdef DEBUG
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc) {
+ Address call_target_address = pc - kIntSize;
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+ if (*(call_target_address - 3) == kNopByteOne) {
+ ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT_EQ(osr_builtin->entry(),
+ Assembler::target_address_at(call_target_address));
+ return ON_STACK_REPLACEMENT;
+ } else {
+ // Get the interrupt stub code object to match against from cache.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
+ ASSERT_EQ(interrupt_builtin->entry(),
+ Assembler::target_address_at(call_target_address));
+ ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ return INTERRUPT;
+ }
+}
+#endif // DEBUG
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
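For orientation, the byte layout that PatchAt/RevertAt edit, reconstructed from the constants above (kIntSize is 4 on ia32):

// pc - 7:  0x79 0x11    jns ok        (kJnsInstruction, kJnsOffset)
//                       -- becomes 0x66 0x90 (two-byte nop) after PatchAt
// pc - 5:  0xe8         call opcode   (kCallInstruction)
// pc - 4:  <imm32>      call target, i.e. call_target_address = pc - kIntSize
// pc:      ok:
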
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index d50b780..98a049b 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -340,12 +340,41 @@
osr_pc_offset_ = masm()->pc_offset();
+ // Move state of dynamic frame alignment into edx.
+ __ mov(edx, Immediate(kNoAlignmentPadding));
+
+ if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+ Label do_not_pad, align_loop;
+ // Align ebp + 4 to a multiple of 2 * kPointerSize.
+ __ test(ebp, Immediate(kPointerSize));
+ __ j(zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+
+ // Move all parts of the frame over one word. The frame consists of:
+ // unoptimized frame slots, alignment state, context, frame pointer, return
+ // address, receiver, and the arguments.
+ __ mov(ecx, Immediate(scope()->num_parameters() +
+ 5 + graph()->osr()->UnoptimizedFrameSlots()));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ sub(Operand(ebp), Immediate(kPointerSize));
+ __ bind(&do_not_pad);
+ }
+
// Save the first local, which is overwritten by the alignment state.
Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
__ push(alignment_loc);
- // Set the dynamic frame alignment state to "not aligned".
- __ mov(alignment_loc, Immediate(kNoAlignmentPadding));
+ // Set the dynamic frame alignment state.
+ __ mov(alignment_loc, edx);
// Adjust the frame size, subsuming the unoptimized frame into the
// optimized frame.
@@ -1733,9 +1762,9 @@
case 9:
__ lea(left, Operand(left, left, times_8, 0));
break;
- case 16:
- __ shl(left, 4);
- break;
+ case 16:
+ __ shl(left, 4);
+ break;
default:
__ imul(left, left, constant);
break;
@@ -1967,9 +1996,10 @@
__ movd(res, Operand(temp));
__ psllq(res, 32);
if (lower != 0) {
+ XMMRegister xmm_scratch = double_scratch0();
__ Set(temp, Immediate(lower));
- __ movd(xmm0, Operand(temp));
- __ por(res, xmm0);
+ __ movd(xmm_scratch, Operand(temp));
+ __ por(res, xmm_scratch);
}
}
}
@@ -2178,7 +2208,7 @@
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
@@ -2208,8 +2238,6 @@
XMMRegister left = ToDoubleRegister(instr->left());
XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
- // Modulo uses a fixed result register.
- ASSERT(instr->op() == Token::MOD || left.is(result));
switch (instr->op()) {
case Token::ADD:
__ addsd(left, right);
@@ -2236,7 +2264,7 @@
4);
// Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
+ // Store it into the result register.
__ sub(Operand(esp), Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
__ movdbl(result, Operand(esp, 0));
@@ -2340,25 +2368,6 @@
}
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32() || r.IsDouble()) {
- EmitBranch(instr, no_condition);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsTaggedNumber()) {
- EmitBranch(instr, no_condition);
- }
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- EmitBranch(instr, equal);
- }
-}
-
-
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsSmiOrInteger32()) {
@@ -2369,8 +2378,9 @@
ASSERT(!info()->IsStub());
CpuFeatureScope scope(masm(), SSE2);
XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
} else {
ASSERT(r.IsTagged());
@@ -2390,8 +2400,9 @@
} else if (type.IsHeapNumber()) {
ASSERT(!info()->IsStub());
CpuFeatureScope scope(masm(), SSE2);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
@@ -2476,8 +2487,9 @@
__ j(not_equal, ¬_heap_number, Label::kNear);
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
} else {
__ fldz();
__ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
@@ -2556,10 +2568,18 @@
EmitGoto(next_block);
} else {
if (instr->is_double()) {
- CpuFeatureScope scope(masm(), SSE2);
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ } else {
+ X87Fxch(ToX87Register(right));
+ X87Fxch(ToX87Register(left), 1);
+ __ fld(0);
+ __ fld(2);
+ __ FCmp();
+ }
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, instr->FalseLabel(chunk_));
} else {
if (right->IsConstantOperand()) {
@@ -3131,7 +3151,7 @@
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ mov(result, Operand::ForCell(instr->hydrogen()->cell()));
+ __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
@@ -3154,7 +3174,7 @@
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->value());
- Handle<PropertyCell> cell_handle = instr->hydrogen()->cell();
+ Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -3349,6 +3369,12 @@
}
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
@@ -3903,7 +3929,7 @@
CpuFeatureScope scope(masm(), SSE2);
if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
+ XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
@@ -3924,7 +3950,7 @@
void LCodeGen::DoMathFloor(LMathFloor* instr) {
CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3977,7 +4003,7 @@
__ bind(&negative_sign);
// Truncate, then compare and compensate.
__ cvttsd2si(output_reg, Operand(input_reg));
- __ cvtsi2sd(xmm_scratch, output_reg);
+ __ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ sub(output_reg, Immediate(1));
@@ -3992,7 +4018,7 @@
CpuFeatureScope scope(masm(), SSE2);
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_temp = ToDoubleRegister(instr->temp());
ExternalReference one_half = ExternalReference::address_of_one_half();
ExternalReference minus_one_half =
@@ -4027,7 +4053,7 @@
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
- __ cvtsi2sd(xmm_scratch, output_reg);
+ __ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
__ j(equal, &done);
__ sub(output_reg, Immediate(1));
@@ -4059,7 +4085,7 @@
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = ToRegister(instr->temp());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
@@ -4178,8 +4204,7 @@
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
XMMRegister result = ToDoubleRegister(instr->result());
- // We use xmm0 as fixed scratch register here.
- XMMRegister scratch4 = xmm0;
+ XMMRegister scratch4 = double_scratch0();
__ mov(scratch3, Immediate(0x49800000)); // 1.0 x 2^20 as single.
__ movd(scratch4, scratch3);
__ movd(result, random);
@@ -4193,9 +4218,10 @@
CpuFeatureScope scope(masm(), SSE2);
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
Label positive, done, zero;
- __ xorps(xmm0, xmm0);
- __ ucomisd(input_reg, xmm0);
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
__ j(equal, &zero, Label::kNear);
ExternalReference nan =
@@ -4225,10 +4251,11 @@
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister temp0 = double_scratch0();
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
+ MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}
@@ -4609,8 +4636,9 @@
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
- __ movss(operand, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
+ __ movss(operand, xmm_scratch);
} else {
__ fld(0);
__ fstp_s(operand);
@@ -4825,9 +4853,8 @@
__ j(not_equal, &not_applicable, branch_distance);
if (is_simple_map_transition) {
Register new_map_reg = ToRegister(instr->new_map_temp());
- Handle<Map> map = instr->hydrogen()->transitioned_map();
__ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
- Immediate(map));
+ Immediate(to_map));
// Write barrier.
ASSERT_NE(instr->temp(), NULL);
__ RecordWriteForMap(object_reg, to_map, new_map_reg,
@@ -4978,7 +5005,7 @@
ASSERT(output->IsDoubleRegister());
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+ __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
} else if (input->IsRegister()) {
Register input_reg = ToRegister(input);
__ push(input_reg);
@@ -5073,6 +5100,7 @@
Label slow;
Register reg = ToRegister(value);
Register tmp = reg.is(eax) ? ecx : eax;
+ XMMRegister xmm_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
@@ -5087,7 +5115,7 @@
__ xor_(reg, 0x80000000);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ cvtsi2sd(xmm0, Operand(reg));
+ __ Cvtsi2sd(xmm_scratch, Operand(reg));
} else {
__ push(reg);
__ fild_s(Operand(esp, 0));
@@ -5096,7 +5124,7 @@
} else {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ LoadUint32(xmm0, reg,
+ __ LoadUint32(xmm_scratch, reg,
ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
} else {
// There's no fild variant for unsigned values, so zero-extend to a 64-bit
@@ -5132,12 +5160,12 @@
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
if (!reg.is(eax)) __ mov(reg, eax);
- // Done. Put the value in xmm0 into the value of the allocated heap
+ // Done. Put the value in xmm_scratch into the value of the allocated heap
// number.
__ bind(&done);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+ __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
@@ -5308,7 +5336,7 @@
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
- Label load_smi, done;
+ Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
@@ -5317,28 +5345,17 @@
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, env);
+ if (can_convert_undefined_to_nan) {
+ __ j(not_equal, &convert, Label::kNear);
} else {
- Label heap_number, convert;
- __ j(equal, &heap_number, Label::kNear);
-
- // Convert undefined (and hole) to NaN.
- __ cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, env);
-
- __ bind(&convert);
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(result_reg, Operand::StaticVariable(nan));
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
}
+
// Heap number to XMM conversion.
__ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(result_reg, xmm_scratch);
__ j(not_zero, &done, Label::kNear);
@@ -5347,6 +5364,19 @@
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);
+
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+
+ // Convert undefined (and hole) to NaN.
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, env);
+
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ movdbl(result_reg, Operand::StaticVariable(nan));
+ __ jmp(&done, Label::kNear);
+ }
} else {
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
@@ -5356,7 +5386,7 @@
// input register since we avoid dependencies.
__ mov(temp_reg, input_reg);
__ SmiUntag(temp_reg); // Untag smi before converting to float.
- __ cvtsi2sd(result_reg, Operand(temp_reg));
+ __ Cvtsi2sd(result_reg, Operand(temp_reg));
__ bind(&done);
}
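The restructuring keeps the heap-number load on the straight-line path and moves the undefined-to-NaN conversion below the main flow, behind a forward branch. A behavioral model of what the emitted code computes (a sketch; Tagged is an illustrative stand-in, not a V8 type, and the minus-zero deopt is elided):

#include <limits>
struct Tagged {  // illustrative only
  bool is_smi, is_heap_number, is_undefined;
  int smi; double number;
};
double NumberUntagD(const Tagged& v, bool undef_to_nan, bool* deopt) {
  if (v.is_smi) return v.smi;             // load_smi path
  if (v.is_heap_number) return v.number;  // hot path, now straight-line
  if (undef_to_nan && v.is_undefined)     // the out-of-line &convert block
    return std::numeric_limits<double>::quiet_NaN();
  *deopt = true;                          // DeoptimizeIf(not_equal, env)
  return 0;
}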
@@ -5417,12 +5447,16 @@
Register input_reg = ToRegister(input);
ASSERT(input_reg.is(ToRegister(instr->result())));
- DeferredTaggedToI* deferred =
- new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred =
+ new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiUntag(input_reg);
- __ bind(deferred->exit());
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+ }
}
@@ -5487,7 +5521,8 @@
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
} else {
X87Register input_reg = ToX87Register(input);
@@ -5514,7 +5549,8 @@
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
} else {
X87Register input_reg = ToX87Register(input);
@@ -5594,7 +5630,7 @@
void LCodeGen::DoCheckValue(LCheckValue* instr) {
- Handle<HeapObject> object = instr->hydrogen()->object();
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
if (instr->hydrogen()->object_in_new_space()) {
Register reg = ToRegister(instr->value());
Handle<Cell> cell = isolate()->factory()->NewCell(object);
@@ -5649,22 +5685,21 @@
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- SmallMapList* map_set = instr->hydrogen()->map_set();
-
DeferredCheckMaps* deferred = NULL;
if (instr->hydrogen()->has_migration_target()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
__ bind(deferred->check_maps());
}
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
__ CompareMap(reg, map, &success);
__ j(equal, &success);
}
- Handle<Map> map = map_set->last();
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
__ CompareMap(reg, map, &success);
if (instr->hydrogen()->has_migration_target()) {
__ j(not_equal, deferred->entry());
@@ -5679,8 +5714,9 @@
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+ XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+ __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}
@@ -5696,6 +5732,7 @@
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
+ XMMRegister xmm_scratch = double_scratch0();
Label is_smi, done, heap_number;
__ JumpIfSmi(input_reg, &is_smi);
@@ -5714,8 +5751,8 @@
// Heap number
__ bind(&heap_number);
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
+ __ movdbl(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(xmm_scratch, xmm1, input_reg);
__ jmp(&done, Label::kNear);
// smi
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 769917f..a2280f8 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -208,6 +208,8 @@
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk()->graph(); }
+ XMMRegister double_scratch0() const { return xmm0; }
+
int GetNextEmittedBlock() const;
void EmitClassOfTest(Label* if_true,
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index ca1e60d..c73d073 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -762,52 +762,44 @@
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsSmiOrTagged());
- ASSERT(instr->right()->representation().IsSmiOrTagged());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new(zone()) LArithmeticT(op, context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
- }
-
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
- }
- } else {
- right = UseFixed(right_value, ecx);
- }
-
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
} else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ right = UseFixed(right_value, ecx);
}
- }
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ // Shift operations can only deoptimize if we do a logical shift by 0 and
+ // the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
+ }
+
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
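The same dispatch order recurs in the operations rewritten below (DoBitwise, DoDiv, DoMod, DoMul, DoSub, DoAdd): integer/smi first, then double, with everything else falling through to the generic DoArithmeticT stub call. A toy model of the ordering:

enum Rep { kSmiOrInteger32, kDouble, kTagged };
const char* LowerBinaryOp(Rep r) {
  if (r == kSmiOrInteger32) return "specialized Lithium instruction";
  if (r == kDouble)         return "DoArithmeticD";
  return "DoArithmeticT";   // tagged/other: MarkAsCall on a generic stub
}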
@@ -816,21 +808,22 @@
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return MarkAsCall(DefineSameAsFirst(result), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
+ }
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
@@ -1442,29 +1435,19 @@
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
- ASSERT(instr->left()->representation().IsSmiOrTagged());
- ASSERT(instr->right()->representation().IsSmiOrTagged());
-
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* left = UseFixed(instr->left(), edx);
- LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result =
- new(zone()) LArithmeticT(instr->op(), context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
@@ -1481,8 +1464,9 @@
LOperand* divisor = UseRegister(instr->right());
LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, eax));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::DIV, instr);
}
}
@@ -1584,17 +1568,10 @@
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsSmiOrTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC. We need
- // to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
- UseFixedDouble(left, xmm2),
- UseFixedDouble(right, xmm1));
- return MarkAsCall(DefineFixedDouble(mod, xmm1), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
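Double MOD still lowers to a call (SSE2 has no double remainder instruction); what changes is that it now routes through DoArithmeticD with MarkAsCall instead of pinning xmm1/xmm2. The operation itself has C fmod semantics, as in this model:

#include <cmath>
double DoubleMod(double a, double b) { return std::fmod(a, b); }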
@@ -1618,7 +1595,6 @@
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::MUL, instr);
}
}
@@ -1639,7 +1615,6 @@
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::SUB, instr);
}
}
@@ -1671,7 +1646,6 @@
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1774,8 +1748,8 @@
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return new(zone()) LCmpHoleAndBranch(object);
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
}
@@ -2050,12 +2024,6 @@
}
-LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
- return new(zone())
- LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
@@ -2234,6 +2202,11 @@
}
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 3a609c9..379d64b 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -116,7 +116,6 @@
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
- V(IsNumberAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
@@ -130,6 +129,7 @@
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
+ V(LoadRoot) \
V(MapEnumLength) \
V(MathAbs) \
V(MathCos) \
@@ -922,19 +922,6 @@
};
-class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
- public:
- explicit LIsNumberAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
-};
-
-
class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1309,7 +1296,7 @@
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- Handle<Map> map() const { return hydrogen()->map(); }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
@@ -1605,6 +1592,15 @@
};
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
@@ -1634,11 +1630,6 @@
return hydrogen()->is_external();
}
- virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
- return !CpuFeatures::IsSupported(SSE2) &&
- !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
- }
-
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
@@ -2189,7 +2180,7 @@
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -2364,8 +2355,10 @@
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
@@ -2907,7 +2900,7 @@
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index b65d328..5d25695 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -283,7 +283,7 @@
Label::Distance dst) {
ASSERT(!input_reg.is(scratch));
cvttsd2si(result_reg, Operand(input_reg));
- cvtsi2sd(scratch, Operand(result_reg));
+ Cvtsi2sd(scratch, Operand(result_reg));
ucomisd(scratch, input_reg);
j(not_equal, conversion_failed, dst);
j(parity_even, conversion_failed, dst); // NaN.
@@ -392,7 +392,7 @@
movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
- cvtsi2sd(temp, Operand(result_reg));
+ Cvtsi2sd(temp, Operand(result_reg));
ucomisd(xmm0, temp);
RecordComment("Deferred TaggedToI: lost precision");
j(not_equal, lost_precision, Label::kNear);
@@ -457,7 +457,7 @@
cmp(src, Immediate(0));
movdbl(scratch,
Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32));
- cvtsi2sd(dst, src);
+ Cvtsi2sd(dst, src);
j(not_sign, &done, Label::kNear);
addsd(dst, scratch);
bind(&done);
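LoadUint32 converts via the signed Cvtsi2sd and then repairs values with the top bit set by adding 2^32 (kUint32Bias). A standalone model:

#include <cstdint>
double Uint32ToDouble(uint32_t u) {
  double d = static_cast<int32_t>(u);   // signed conversion, wrong for u >= 2^31
  if (static_cast<int32_t>(u) < 0)      // sign bit set: the fall-through of j(not_sign)
    d += 4294967296.0;                  // kUint32Bias = 2^32
  return d;
}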
@@ -676,6 +676,12 @@
#endif
+void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
+ xorps(dst, dst);
+ cvtsi2sd(dst, src);
+}
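The wrapper exists because cvtsi2sd merges into the low 64 bits of its destination, leaving the instruction dependent on the register's previous contents; xorps is a recognized zeroing idiom that breaks that dependence. The emitted pair, with illustrative operands:

// Emitted pair (illustrative operands):
//   xorps    xmm1, xmm1   ; zeroing idiom, no dependence on old xmm1
//   cvtsi2sd xmm1, eax    ; merges into only the low 64 bits of xmm1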
+
+
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
xor_(dst, dst); // Shorter than mov.
@@ -834,7 +840,7 @@
SmiUntag(scratch1);
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope fscope(this, SSE2);
- cvtsi2sd(scratch2, scratch1);
+ Cvtsi2sd(scratch2, scratch1);
movdbl(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
@@ -1109,14 +1115,16 @@
// Push the return address to get ready to return.
push(ecx);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(true);
}
-void MacroAssembler::LeaveExitFrameEpilogue() {
+void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Isolate::kContextAddress, isolate());
- mov(esi, Operand::StaticVariable(context_address));
+ if (restore_context) {
+ mov(esi, Operand::StaticVariable(context_address));
+ }
#ifdef DEBUG
mov(Operand::StaticVariable(context_address), Immediate(0));
#endif
@@ -1128,11 +1136,11 @@
}
-void MacroAssembler::LeaveApiExitFrame() {
+void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
mov(esp, ebp);
pop(ebp);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(restore_context);
}
@@ -2221,11 +2229,13 @@
}
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- Address thunk_address,
- Operand thunk_last_arg,
- int stack_space,
- int return_value_offset) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ Address function_address,
+ Address thunk_address,
+ Operand thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
ExternalReference limit_address =
@@ -2281,9 +2291,10 @@
Label prologue;
// Load the value from ReturnValue
- mov(eax, Operand(ebp, return_value_offset * kPointerSize));
+ mov(eax, return_value_operand);
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
@@ -2303,6 +2314,7 @@
cmp(Operand::StaticVariable(scheduled_exception_address),
Immediate(isolate()->factory()->the_hole_value()));
j(not_equal, &promote_scheduled_exception);
+ bind(&exception_handled);
#if ENABLE_EXTRA_CHECKS
// Check if the function returned a valid JavaScript value.
@@ -2339,11 +2351,19 @@
bind(&ok);
#endif
- LeaveApiExitFrame();
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ mov(esi, *context_restore_operand);
+ }
+ LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ jmp(&exception_handled);
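Two behavioral changes meet here: the scheduled exception is now promoted via CallRuntime inside an INTERNAL frame, after which control returns to &exception_handled (the old TailCallRuntime never returned), and the epilogue can restore the caller's context from a stack slot rather than from the isolate's context_address. The epilogue decision, in outline (a sketch, not the emitted assembly):

// bool restore_context = (context_restore_operand != NULL);
// if (restore_context)
//   esi = *context_restore_operand;     // caller's context spilled by the stub
// LeaveApiExitFrame(!restore_context);  // else reload esi from context_address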
// HandleScope limit has changed. Delete allocated extensions.
ExternalReference delete_extensions =
@@ -3003,6 +3023,88 @@
}
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
+ sub(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label smi_hash_calculated;
+ Label load_result_from_cache;
+ Label not_smi;
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpIfNotSmi(object, &not_smi, Label::kNear);
+ mov(scratch, object);
+ SmiUntag(scratch);
+ jmp(&smi_hash_calculated, Label::kNear);
+ bind(&not_smi);
+ cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ j(not_equal, not_found);
+ STATIC_ASSERT(8 == kDoubleSize);
+ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ // Object is heap number and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ Register index = scratch;
+ Register probe = mask;
+ mov(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope fscope(this, SSE2);
+ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
+ } else {
+ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+ FCmp();
+ }
+ j(parity_even, not_found); // Bail out if NaN is involved.
+ j(not_equal, not_found); // The cache did not contain this value.
+ jmp(&load_result_from_cache, Label::kNear);
+
+ bind(&smi_hash_calculated);
+ // Object is smi and hash is now in scratch. Calculate cache index.
+ and_(scratch, mask);
+ // Check if the entry is the smi we are looking for.
+ cmp(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize));
+ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ mov(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_twice_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+}
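The cache hash mirrors Heap::GetNumberStringCache: smis hash to their untagged value, heap numbers to the xor of the two 32-bit halves of the IEEE bits, masked to the table size. A standalone model:

#include <cstdint>
#include <cstring>
uint32_t NumberStringCacheIndex(double value, uint32_t mask) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  uint32_t hash = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  return hash & mask;  // mask = (cache length / 2) - 1; each entry is a number+string pair
}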
+
+
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type,
Register scratch,
@@ -3423,7 +3525,7 @@
cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
j(greater, &no_memento_available);
cmp(MemOperand(scratch_reg, -AllocationMemento::kSize),
- Immediate(Handle<Map>(isolate()->heap()->allocation_memento_map())));
+ Immediate(isolate()->factory()->allocation_memento_map()));
bind(&no_memento_available);
}
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index e4e4533..e984b2c 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -240,7 +240,7 @@
// Leave the current exit frame. Expects the return value in
// register eax (untouched).
- void LeaveApiExitFrame();
+ void LeaveApiExitFrame(bool restore_context);
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
@@ -366,6 +366,12 @@
void Set(Register dst, const Immediate& x);
void Set(const Operand& dst, const Immediate& x);
+ // cvtsi2sd instruction only writes the low 64 bits of the dst register, which
+ // hinders register renaming and makes dependence chains longer. So we use
+ // xorps to clear the dst register before cvtsi2sd to solve this issue.
+ void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
+ void Cvtsi2sd(XMMRegister dst, const Operand& src);
+
// Support for constant splitting.
bool IsUnsafeImmediate(const Immediate& x);
void SafeSet(Register dst, const Immediate& x);
@@ -807,7 +813,8 @@
Address thunk_address,
Operand thunk_last_arg,
int stack_space,
- int return_value_offset_from_ebp);
+ Operand return_value_operand,
+ Operand* context_restore_operand);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& ext);
@@ -890,6 +897,17 @@
// ---------------------------------------------------------------------------
// String utilities.
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found);
+
// Check whether the instance type represents a flat ASCII string. Jump to the
// label if not. If the instance type can be scratched specify same register
// for both instance type and scratch.
@@ -957,7 +975,7 @@
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
- void LeaveExitFrameEpilogue();
+ void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 354c2fd..d339da9 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -329,32 +329,28 @@
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
// Load length from the string and convert to a smi.
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss);
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmp(scratch1, JS_VALUE_TYPE);
+ __ j(not_equal, miss);
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
+ // Check if the wrapped value is a string and load the length
+ // directly if it is.
+ __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+ __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
+ __ ret(0);
}
@@ -462,48 +458,54 @@
// Generates call to API function.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ bool restore_context) {
// ----------- S t a t e -------------
// -- esp[0] : return address
- // -- esp[4] : object passing the type check
+ // -- esp[4] : context
+ // -- esp[8] : object passing the type check
// (last fast api call extra argument,
// set by CheckPrototypes)
- // -- esp[8] : api function
+ // -- esp[12] : api function
// (first fast api call extra argument)
- // -- esp[12] : api call data
- // -- esp[16] : isolate
- // -- esp[20] : ReturnValue default value
- // -- esp[24] : ReturnValue
- // -- esp[28] : last argument
+ // -- esp[16] : api call data
+ // -- esp[20] : isolate
+ // -- esp[24] : ReturnValue default value
+ // -- esp[28] : ReturnValue
+ // -- esp[32] : last argument
// -- ...
- // -- esp[(argc + 6) * 4] : first argument
- // -- esp[(argc + 7) * 4] : receiver
+ // -- esp[(argc + 7) * 4] : first argument
+ // -- esp[(argc + 8) * 4] : receiver
// -----------------------------------
+
+ // Save calling context.
+ __ mov(Operand(esp, kPointerSize), esi);
+
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ LoadHeapObject(edi, function);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Pass the additional arguments.
- __ mov(Operand(esp, 2 * kPointerSize), edi);
+ __ mov(Operand(esp, 3 * kPointerSize), edi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ mov(ecx, api_call_info);
__ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
- __ mov(Operand(esp, 3 * kPointerSize), ebx);
+ __ mov(Operand(esp, 4 * kPointerSize), ebx);
} else {
- __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
+ __ mov(Operand(esp, 4 * kPointerSize), Immediate(call_data));
}
- __ mov(Operand(esp, 4 * kPointerSize),
- Immediate(reinterpret_cast<int>(masm->isolate())));
__ mov(Operand(esp, 5 * kPointerSize),
- masm->isolate()->factory()->undefined_value());
+ Immediate(reinterpret_cast<int>(masm->isolate())));
__ mov(Operand(esp, 6 * kPointerSize),
masm->isolate()->factory()->undefined_value());
+ __ mov(Operand(esp, 7 * kPointerSize),
+ masm->isolate()->factory()->undefined_value());
// Prepare arguments.
- STATIC_ASSERT(kFastApiCallArguments == 6);
+ STATIC_ASSERT(kFastApiCallArguments == 7);
__ lea(eax, Operand(esp, kFastApiCallArguments * kPointerSize));
@@ -537,11 +539,16 @@
Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ Operand context_restore_operand(ebp, 2 * kPointerSize);
+ Operand return_value_operand(
+ ebp, (kFastApiCallArguments + 1) * kPointerSize);
__ CallApiFunctionAndReturn(function_address,
thunk_address,
ApiParameterOperand(1),
argc + kFastApiCallArguments + 1,
- kFastApiCallArguments + 1);
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
}
@@ -556,6 +563,8 @@
ASSERT(!receiver.is(scratch));
const int stack_space = kFastApiCallArguments + argc + 1;
+ const int kHolderIndex = kFastApiCallArguments +
+ FunctionCallbackArguments::kHolderIndex;
// Copy return value.
__ mov(scratch, Operand(esp, 0));
// Assign stack space for the call arguments.
@@ -563,7 +572,7 @@
// Move the return address on top of the stack.
__ mov(Operand(esp, 0), scratch);
// Write holder to stack frame.
- __ mov(Operand(esp, 1 * kPointerSize), receiver);
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), receiver);
// Write receiver to stack frame.
int index = stack_space;
__ mov(Operand(esp, index-- * kPointerSize), receiver);
@@ -574,7 +583,7 @@
__ mov(Operand(esp, index-- * kPointerSize), values[i]);
}
- GenerateFastApiCall(masm, optimization, argc);
+ GenerateFastApiCall(masm, optimization, argc, true);
}
@@ -688,7 +697,7 @@
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
+ GenerateFastApiCall(masm, optimization, arguments_.immediate(), false);
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
@@ -862,7 +871,7 @@
__ SmiUntag(value_reg);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, value_reg);
+ __ Cvtsi2sd(xmm0, value_reg);
} else {
__ push(value_reg);
__ fild_s(Operand(esp, 0));
@@ -1041,7 +1050,7 @@
__ SmiUntag(value_reg);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ cvtsi2sd(xmm0, value_reg);
+ __ Cvtsi2sd(xmm0, value_reg);
} else {
__ push(value_reg);
__ fild_s(Operand(esp, 0));
@@ -1160,6 +1169,8 @@
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ const int kHolderIndex = kFastApiCallArguments +
+ FunctionCallbackArguments::kHolderIndex;
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
__ mov(scratch1, Handle<Map>(object->map()));
@@ -1176,7 +1187,7 @@
int depth = 0;
if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
}
// Traverse the prototype chain and check the maps in the prototype chain for
@@ -1237,7 +1248,7 @@
}
if (save_at_depth == depth) {
- __ mov(Operand(esp, kPointerSize), reg);
+ __ mov(Operand(esp, kHolderIndex * kPointerSize), reg);
}
// Go to the next object in the prototype chain.
@@ -1460,7 +1471,8 @@
thunk_address,
ApiParameterOperand(2),
kStackSpace,
- 7);
+ Operand(ebp, 7 * kPointerSize),
+ NULL);
}
@@ -2623,7 +2635,7 @@
// esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
// duplicate of return address and will be overwritten.
- GenerateFastApiCall(masm(), optimization, argc);
+ GenerateFastApiCall(masm(), optimization, argc, false);
__ bind(&miss);
__ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
diff --git a/src/ic.cc b/src/ic.cc
index 5518751..84e65ac 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -549,9 +549,11 @@
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name) {
+ bool use_ic = FLAG_use_ic;
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->map()->is_deprecated()) {
+ use_ic = false;
JSObject::MigrateInstance(receiver);
}
}
@@ -590,9 +592,7 @@
}
// Lookup is valid: Update inline cache and stub cache.
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, extra_ic_state, object, name);
- }
+ if (use_ic) UpdateCaches(&lookup, state, extra_ic_state, object, name);
// Get the property.
PropertyAttributes attr;
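The pattern repeated throughout this file: MigrateInstance rewrites the receiver to a new, non-deprecated map, so any IC state recorded against the old map in this request would be immediately stale. use_ic is therefore latched off for the current request only:

bool use_ic = FLAG_use_ic;
if (receiver->map()->is_deprecated()) {
  use_ic = false;                        // the map is about to change under us
  JSObject::MigrateInstance(receiver);   // receiver now carries a fresh map
}
// ... later: if (use_ic) UpdateCaches(...);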
@@ -819,9 +819,11 @@
Handle<String>::cast(key));
}
+ bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->map()->is_deprecated()) {
+ use_ic = false;
JSObject::MigrateInstance(receiver);
}
}
@@ -830,7 +832,6 @@
return TypeError("non_object_property_call", object, key);
}
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
ASSERT(!(use_ic && object->IsJSGlobalProxy()));
if (use_ic && state != MEGAMORPHIC) {
@@ -874,21 +875,20 @@
return TypeError("non_object_property_load", object, name);
}
- if (FLAG_use_ic) {
+ bool use_ic = FLAG_use_ic;
+
+ if (use_ic) {
// Use specialized code for getting the length of strings and
// string wrapper objects. The length property of string wrapper
// objects is read-only and therefore always returns the length of
// the underlying string value. See ECMA-262 15.5.5.1.
- if ((object->IsString() || object->IsStringWrapper()) &&
+ if (object->IsStringWrapper() &&
name->Equals(isolate()->heap()->length_string())) {
Handle<Code> stub;
if (state == UNINITIALIZED) {
stub = pre_monomorphic_stub();
- } else if (state == PREMONOMORPHIC) {
- StringLengthStub string_length_stub(kind(), !object->IsString());
- stub = string_length_stub.GetCode(isolate());
- } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
- StringLengthStub string_length_stub(kind(), true);
+ } else if (state == PREMONOMORPHIC || state == MONOMORPHIC) {
+ StringLengthStub string_length_stub(kind());
stub = string_length_stub.GetCode(isolate());
} else if (state != MEGAMORPHIC) {
ASSERT(state != GENERIC);
@@ -897,14 +897,12 @@
if (!stub.is_null()) {
set_target(*stub);
#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
+ if (FLAG_trace_ic) PrintF("[LoadIC : +#length /stringwrapper]\n");
#endif
}
// Get the string if we have a string wrapper object.
- Handle<Object> string = object->IsJSValue()
- ? Handle<Object>(Handle<JSValue>::cast(object)->value(), isolate())
- : object;
- return Smi::FromInt(String::cast(*string)->length());
+ String* string = String::cast(JSValue::cast(*object)->value());
+ return Smi::FromInt(string->length());
}
// Use specialized code for getting prototype of functions.
@@ -936,13 +934,14 @@
uint32_t index;
if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
// Rewrite to the generic keyed load stub.
- if (FLAG_use_ic) set_target(*generic_stub());
+ if (use_ic) set_target(*generic_stub());
return Runtime::GetElementOrCharAtOrFail(isolate(), object, index);
}
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->map()->is_deprecated()) {
+ use_ic = false;
JSObject::MigrateInstance(receiver);
}
}
@@ -960,7 +959,7 @@
}
// Update inline cache and stub cache.
- if (FLAG_use_ic) UpdateCaches(&lookup, state, object, name);
+ if (use_ic) UpdateCaches(&lookup, state, object, name);
PropertyAttributes attr;
if (lookup.IsInterceptor() || lookup.IsHandler()) {
@@ -1265,6 +1264,8 @@
State state,
Handle<Object> object,
Handle<String> name) {
+ // TODO(verwaest): It would be nice to support loading fields from smis as
+ // well. For now just fail to update the cache.
if (!object->IsHeapObject()) return;
Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
@@ -1278,6 +1279,16 @@
} else if (!lookup->IsCacheable()) {
// Bail out if the result is not cacheable.
code = slow_stub();
+ } else if (object->IsString() &&
+ name->Equals(isolate()->heap()->length_string())) {
+ int length_index = String::kLengthOffset / kPointerSize;
+ if (target()->is_load_stub()) {
+ LoadFieldStub stub(true, length_index, Representation::Tagged());
+ code = stub.GetCode(isolate());
+ } else {
+ KeyedLoadFieldStub stub(true, length_index, Representation::Tagged());
+ code = stub.GetCode(isolate());
+ }
} else if (!object->IsJSObject()) {
// TODO(jkummerow): It would be nice to support non-JSObjects in
// ComputeLoadHandler, then we wouldn't need to go generic here.
@@ -1338,6 +1349,19 @@
return isolate()->stub_cache()->ComputeLoadNormal(name, receiver);
case CALLBACKS: {
Handle<Object> callback(lookup->GetCallbackObject(), isolate());
+ if (name->Equals(isolate()->heap()->length_string())) {
+ if (receiver->IsJSArray()) {
+ PropertyIndex lengthIndex = PropertyIndex::NewHeaderIndex(
+ JSArray::kLengthOffset / kPointerSize);
+ return isolate()->stub_cache()->ComputeLoadField(
+ name, receiver, receiver, lengthIndex, Representation::Tagged());
+ } else if (receiver->IsJSTypedArray()) {
+ PropertyIndex lengthIndex = PropertyIndex::NewHeaderIndex(
+ JSTypedArray::kLengthOffset / kPointerSize);
+ return isolate()->stub_cache()->ComputeLoadField(
+ name, receiver, receiver, lengthIndex, Representation::Tagged());
+ }
+ }
if (callback->IsExecutableAccessorInfo()) {
Handle<ExecutableAccessorInfo> info =
Handle<ExecutableAccessorInfo>::cast(callback);
@@ -1354,19 +1378,12 @@
Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
CallOptimization call_optimization(function);
if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(*receiver) &&
- FLAG_js_accessor_ics) {
+ call_optimization.IsCompatibleReceiver(*receiver)) {
return isolate()->stub_cache()->ComputeLoadCallback(
name, receiver, holder, call_optimization);
}
return isolate()->stub_cache()->ComputeLoadViaGetter(
name, receiver, holder, function);
- } else if (receiver->IsJSArray() &&
- name->Equals(isolate()->heap()->length_string())) {
- PropertyIndex lengthIndex =
- PropertyIndex::NewHeaderIndex(JSArray::kLengthOffset / kPointerSize);
- return isolate()->stub_cache()->ComputeLoadField(
- name, receiver, holder, lengthIndex, Representation::Tagged());
}
// TODO(dcarney): Handle correctly.
if (callback->IsDeclaredAccessorInfo()) break;
@@ -1496,6 +1513,7 @@
} else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->map()->is_deprecated()) {
+ use_ic = false;
JSObject::MigrateInstance(receiver);
}
@@ -1512,9 +1530,11 @@
} else {
TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "force generic");
}
- ASSERT(!stub.is_null());
- set_target(*stub);
- TRACE_IC("KeyedLoadIC", key, state, target());
+ if (use_ic) {
+ ASSERT(!stub.is_null());
+ set_target(*stub);
+ TRACE_IC("KeyedLoadIC", key, state, target());
+ }
}
@@ -1563,8 +1583,7 @@
Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
CallOptimization call_optimization(function);
if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(*receiver) &&
- FLAG_js_accessor_ics) {
+ call_optimization.IsCompatibleReceiver(*receiver)) {
return isolate()->stub_cache()->ComputeKeyedLoadCallback(
name, receiver, holder, call_optimization);
}
@@ -1656,8 +1675,10 @@
JSReceiver::StoreFromKeyed store_mode) {
// Handle proxies.
if (object->IsJSProxy()) {
- return JSReceiver::SetPropertyOrFail(
+ Handle<Object> result = JSReceiver::SetProperty(
Handle<JSReceiver>::cast(object), name, value, NONE, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
// If the object is undefined or null it's illegal to try to set any
@@ -1678,7 +1699,9 @@
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ bool use_ic = FLAG_use_ic;
if (receiver->map()->is_deprecated()) {
+ use_ic = false;
JSObject::MigrateInstance(receiver);
}
@@ -1693,15 +1716,17 @@
// Observed objects are always modified through the runtime.
if (FLAG_harmony_observation && receiver->map()->is_observed()) {
- return JSReceiver::SetPropertyOrFail(
+ Handle<Object> result = JSReceiver::SetProperty(
receiver, name, value, NONE, strict_mode, store_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
// Use specialized code for setting the length of arrays with fast
// properties. Slow properties might indicate redefinition of the length
// property. Note that when redefined using Object.freeze, it's possible
// to have fast properties but a read-only length.
- if (FLAG_use_ic &&
+ if (use_ic &&
receiver->IsJSArray() &&
name->Equals(isolate()->heap()->length_string()) &&
Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() &&
@@ -1711,12 +1736,14 @@
StoreArrayLengthStub(kind(), strict_mode).GetCode(isolate());
set_target(*stub);
TRACE_IC("StoreIC", name, state, *stub);
- return JSReceiver::SetPropertyOrFail(
+ Handle<Object> result = JSReceiver::SetProperty(
receiver, name, value, NONE, strict_mode, store_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
if (receiver->IsJSGlobalProxy()) {
- if (FLAG_use_ic && kind() != Code::KEYED_STORE_IC) {
+ if (use_ic && kind() != Code::KEYED_STORE_IC) {
// Generate a generic stub that goes to the runtime when we see a global
// proxy as receiver.
Handle<Code> stub = (strict_mode == kStrictMode)
@@ -1725,8 +1752,10 @@
set_target(*stub);
TRACE_IC("StoreIC", name, state, *stub);
}
- return JSReceiver::SetPropertyOrFail(
+ Handle<Object> result = JSReceiver::SetProperty(
receiver, name, value, NONE, strict_mode, store_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
LookupResult lookup(isolate());
@@ -1738,7 +1767,7 @@
// Strict mode doesn't allow setting non-existent global property.
return ReferenceError("not_defined", name);
}
- if (FLAG_use_ic) {
+ if (use_ic) {
if (state == UNINITIALIZED) {
Handle<Code> stub = (strict_mode == kStrictMode)
? pre_monomorphic_stub_strict()
@@ -1757,8 +1786,10 @@
}
// Set the property.
- return JSReceiver::SetPropertyOrFail(
+ Handle<Object> result = JSReceiver::SetProperty(
receiver, name, value, NONE, strict_mode, store_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate(), result);
+ return *result;
}
@@ -1830,8 +1861,7 @@
Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
CallOptimization call_optimization(function);
if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(*receiver) &&
- FLAG_js_accessor_ics) {
+ call_optimization.IsCompatibleReceiver(*receiver)) {
return isolate()->stub_cache()->ComputeStoreCallback(
name, receiver, holder, call_optimization, strict_mode);
}
diff --git a/src/isolate.cc b/src/isolate.cc
index 6fa496a..6eb2960 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -345,6 +345,14 @@
Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key;
#endif // DEBUG
Mutex Isolate::process_wide_mutex_;
+// TODO(dcarney): Remove with default isolate.
+enum DefaultIsolateStatus {
+ kDefaultIsolateUninitialized,
+ kDefaultIsolateInitialized,
+ kDefaultIsolateCrashIfInitialized
+};
+static DefaultIsolateStatus default_isolate_status_
+ = kDefaultIsolateUninitialized;
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
Atomic32 Isolate::isolate_counter_ = 0;
@@ -382,8 +390,16 @@
}
+void Isolate::SetCrashIfDefaultIsolateInitialized() {
+ LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+ CHECK(default_isolate_status_ != kDefaultIsolateInitialized);
+ default_isolate_status_ = kDefaultIsolateCrashIfInitialized;
+}
+
+
void Isolate::EnsureDefaultIsolate() {
LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+ CHECK(default_isolate_status_ != kDefaultIsolateCrashIfInitialized);
if (default_isolate_ == NULL) {
isolate_key_ = Thread::CreateThreadLocalKey();
thread_id_key_ = Thread::CreateThreadLocalKey();
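The two CHECKs make the states mutually exclusive: once SetCrashIfDefaultIsolateInitialized has run, any later attempt to create the default isolate aborts, and vice versa. A compact model of the state machine (allocation elided):

#include <cassert>
enum Status { kUninitialized, kInitialized, kCrashIfInitialized };
static Status status = kUninitialized;
void SetCrashIfDefaultIsolateInitialized() {
  assert(status != kInitialized);        // too late: a default isolate already exists
  status = kCrashIfInitialized;
}
void EnsureDefaultIsolate() {
  assert(status != kCrashIfInitialized); // embedder opted out of the default isolate
  status = kInitialized;
}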
@@ -1087,7 +1103,7 @@
Handle<String> key = factory()->stack_overflow_string();
Handle<JSObject> boilerplate =
Handle<JSObject>::cast(GetProperty(this, js_builtins_object(), key));
- Handle<JSObject> exception = Copy(boilerplate);
+ Handle<JSObject> exception = JSObject::Copy(boilerplate);
DoThrow(*exception, NULL);
// Get stack trace limit.
@@ -1776,6 +1792,9 @@
// TODO(bmeurer) Initialized lazily because it depends on flags; can
// be fixed once the default isolate cleanup is done.
random_number_generator_(NULL),
+ // TODO(rmcilroy) Currently setting this based on
+ // FLAG_force_memory_constrained in Isolate::Init; move to here when
+ // isolate cleanup is done
is_memory_constrained_(false),
has_fatal_error_(false),
use_crankshaft_(true),
@@ -2135,6 +2154,8 @@
TRACE_ISOLATE(init);
stress_deopt_count_ = FLAG_deopt_every_n_times;
+ if (FLAG_force_memory_constrained.has_value)
+ is_memory_constrained_ = FLAG_force_memory_constrained.value;
has_fatal_error_ = false;
diff --git a/src/isolate.h b/src/isolate.h
index b826ec5..b7ea209 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -497,6 +497,7 @@
bool IsDefaultIsolate() const { return this == default_isolate_; }
+ static void SetCrashIfDefaultIsolateInitialized();
// Ensures that process-wide resources and the default isolate have been
// allocated. It is only necessary to call this method in rare cases, for
// example if you are using V8 from within the body of a static initializer.
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index 451b146..a187b75 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -221,7 +221,7 @@
change_log.push( {position_patched: position_patch_report} );
for (var i = 0; i < update_positions_list.length; i++) {
- // TODO(LiveEdit): take into account wether it's source_changed or
+ // TODO(LiveEdit): take into account whether it's source_changed or
// unchanged and whether positions changed at all.
PatchPositions(update_positions_list[i], diff_array,
position_patch_report);
@@ -288,7 +288,7 @@
}
}
- // After sorting update outer_inder field using old_index_map. Also
+ // After sorting update outer_index field using old_index_map. Also
// set next_sibling_index field.
var current_index = 0;
@@ -692,10 +692,10 @@
ProcessInternals(code_info_tree);
}
- // For ecah old function (if it is not damaged) tries to find a corresponding
+ // For each old function (if it is not damaged) tries to find a corresponding
// function in new script. Typically it should succeed (non-damaged functions
// by definition may only have changes inside their bodies). However there are
- // reasons for corresponence not to be found; function with unmodified text
+ // reasons for correspondence not to be found; function with unmodified text
// in new script may become enclosed into other function; the innocent change
// inside function body may in fact be something like "} function B() {" that
// splits a function into 2 functions.
@@ -703,7 +703,13 @@
// A recursive function that tries to find a correspondence for all
// child functions and for their inner functions.
- function ProcessChildren(old_node, new_node) {
+ function ProcessNode(old_node, new_node) {
+ var scope_change_description =
+ IsFunctionContextLocalsChanged(old_node.info, new_node.info);
+ if (scope_change_description) {
+ old_node.status = FunctionStatus.CHANGED;
+ }
+
var old_children = old_node.children;
var new_children = new_node.children;
@@ -729,8 +735,15 @@
new_children[new_index];
old_children[old_index].textual_corresponding_node =
new_children[new_index];
- if (old_children[old_index].status != FunctionStatus.UNCHANGED) {
- ProcessChildren(old_children[old_index],
+ if (scope_change_description) {
+ old_children[old_index].status = FunctionStatus.DAMAGED;
+ old_children[old_index].status_explanation =
+ "Enclosing function is now incompatible. " +
+ scope_change_description;
+ old_children[old_index].corresponding_node = void 0;
+ } else if (old_children[old_index].status !=
+ FunctionStatus.UNCHANGED) {
+ ProcessNode(old_children[old_index],
new_children[new_index]);
if (old_children[old_index].status == FunctionStatus.DAMAGED) {
unmatched_new_nodes_list.push(
@@ -772,11 +785,10 @@
}
if (old_node.status == FunctionStatus.CHANGED) {
- var why_wrong_expectations =
- WhyFunctionExpectationsDiffer(old_node.info, new_node.info);
- if (why_wrong_expectations) {
+ if (old_node.info.param_num != new_node.info.param_num) {
old_node.status = FunctionStatus.DAMAGED;
- old_node.status_explanation = why_wrong_expectations;
+ old_node.status_explanation = "Changed parameter number: " +
+ old_node.info.param_num + " and " + new_node.info.param_num;
}
}
old_node.unmatched_new_nodes = unmatched_new_nodes_list;
@@ -784,7 +796,7 @@
textually_unmatched_new_nodes_list;
}
- ProcessChildren(old_code_tree, new_code_tree);
+ ProcessNode(old_code_tree, new_code_tree);
old_code_tree.corresponding_node = new_code_tree;
old_code_tree.textual_corresponding_node = new_code_tree;
@@ -856,7 +868,7 @@
this.raw_array = raw_array;
}
- // Changes positions (including all statments) in function.
+ // Changes positions (including all statements) in function.
function PatchPositions(old_info_node, diff_array, report_array) {
if (old_info_node.live_shared_function_infos) {
old_info_node.live_shared_function_infos.forEach(function (info) {
@@ -878,15 +890,9 @@
return script.name + " (old)";
}
- // Compares a function interface old and new version, whether it
+ // Compares a function scope heap structure, old and new version, whether it
// changed or not. Returns explanation if they differ.
- function WhyFunctionExpectationsDiffer(function_info1, function_info2) {
- // Check that function has the same number of parameters (there may exist
- // an adapter, that won't survive function parameter number change).
- if (function_info1.param_num != function_info2.param_num) {
- return "Changed parameter number: " + function_info1.param_num +
- " and " + function_info2.param_num;
- }
+ function IsFunctionContextLocalsChanged(function_info1, function_info2) {
var scope_info1 = function_info1.scope_info;
var scope_info2 = function_info2.scope_info;
@@ -905,8 +911,8 @@
}
if (scope_info1_text != scope_info2_text) {
- return "Incompatible variable maps: [" + scope_info1_text +
- "] and [" + scope_info2_text + "]";
+ return "Variable map changed: [" + scope_info1_text +
+ "] => [" + scope_info2_text + "]";
}
// No differences. Return undefined.
return;
diff --git a/src/liveedit.cc b/src/liveedit.cc
index feaafd4..3d459d4 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -731,8 +731,8 @@
Handle<JSValue> scope_wrapper = WrapInJSValue(code_scope_info);
this->SetField(kCodeScopeInfoOffset_, scope_wrapper);
}
- void SetOuterScopeInfo(Handle<Object> scope_info_array) {
- this->SetField(kOuterScopeInfoOffset_, scope_info_array);
+ void SetFunctionScopeInfo(Handle<Object> scope_info_array) {
+ this->SetField(kFunctionScopeInfoOffset_, scope_info_array);
}
void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) {
Handle<JSValue> info_holder = WrapInJSValue(info);
@@ -771,7 +771,7 @@
static const int kParamNumOffset_ = 3;
static const int kCodeOffset_ = 4;
static const int kCodeScopeInfoOffset_ = 5;
- static const int kOuterScopeInfoOffset_ = 6;
+ static const int kFunctionScopeInfoOffset_ = 6;
static const int kParentIndexOffset_ = 7;
static const int kSharedFunctionInfoOffset_ = 8;
static const int kLiteralNumOffset_ = 9;
@@ -880,7 +880,7 @@
Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone),
isolate());
- info.SetOuterScopeInfo(scope_info_list);
+ info.SetFunctionScopeInfo(scope_info_list);
}
Handle<JSArray> GetResult() { return result_; }
@@ -897,14 +897,12 @@
// Saves some description of scope. It stores name and indexes of
// variables in the whole scope chain. Null-named slots delimit
// scopes of this chain.
- Scope* outer_scope = scope->outer_scope();
- if (outer_scope == NULL) {
- return isolate()->heap()->undefined_value();
- }
- do {
- ZoneList<Variable*> stack_list(outer_scope->StackLocalCount(), zone);
- ZoneList<Variable*> context_list(outer_scope->ContextLocalCount(), zone);
- outer_scope->CollectStackAndContextLocals(&stack_list, &context_list);
+ Scope* current_scope = scope;
+ while (current_scope != NULL) {
+ ZoneList<Variable*> stack_list(current_scope->StackLocalCount(), zone);
+ ZoneList<Variable*> context_list(
+ current_scope->ContextLocalCount(), zone);
+ current_scope->CollectStackAndContextLocals(&stack_list, &context_list);
context_list.Sort(&Variable::CompareIndex);
for (int i = 0; i < context_list.length(); i++) {
@@ -924,8 +922,8 @@
isolate()));
scope_info_length++;
- outer_scope = outer_scope->outer_scope();
- } while (outer_scope != NULL);
+ current_scope = current_scope->outer_scope();
+ }
return *scope_info_list;
}
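
For orientation, a minimal standalone sketch (plain C++, not the V8 API) of the flattened scope description this serializer now builds: starting from the function's own scope rather than its outer scope, it collects each scope's context locals in index order, with a sentinel slot delimiting scopes, so the JavaScript side (IsFunctionContextLocalsChanged above) can compare the two variable maps as text. The names and the sentinel rendering below are illustrative assumptions.

// Sketch only: mimics the null-delimited, innermost-first scope list.
#include <iostream>
#include <string>
#include <vector>

struct Scope {
  std::vector<std::string> context_locals;  // names, sorted by context index
  const Scope* outer;                       // nullptr at the chain's end
};

// Walk from the function's own scope outward, as the patched loop now does.
std::string SerializeScopeChain(const Scope* scope) {
  std::string text;
  for (const Scope* s = scope; s != nullptr; s = s->outer) {
    for (size_t i = 0; i < s->context_locals.size(); ++i) {
      text += s->context_locals[i] + "@" + std::to_string(i) + ",";
    }
    text += "|";  // stands in for the null-named slot delimiting scopes
  }
  return text;
}

int main() {
  Scope global{{"x"}, nullptr};
  Scope fn_old{{"a", "b"}, &global};
  Scope fn_new{{"a", "c"}, &global};
  // A differing serialization is what flags the function as DAMAGED.
  std::cout << (SerializeScopeChain(&fn_old) != SerializeScopeChain(&fn_new)
                    ? "variable map changed\n" : "unchanged\n");
}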
diff --git a/src/log.cc b/src/log.cc
index 0f0ad40..5c404bb 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1765,15 +1765,14 @@
static void AddIsolateIdIfNeeded(Isolate* isolate, StringStream* stream) {
- if (isolate->IsDefaultIsolate()) return;
+ if (isolate->IsDefaultIsolate() || !FLAG_logfile_per_isolate) return;
stream->Add("isolate-%p-", isolate);
}
static SmartArrayPointer<const char> PrepareLogFileName(
Isolate* isolate, const char* file_name) {
- if (strchr(file_name, '%') != NULL ||
- !isolate->IsDefaultIsolate()) {
+ if (strchr(file_name, '%') != NULL || !isolate->IsDefaultIsolate()) {
// If there's a '%' in the log file name we have to expand
// placeholders.
HeapStringAllocator allocator;
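
The net effect of the two log.cc hunks: a non-default isolate's log file only gets the isolate-pointer prefix when --logfile-per-isolate is on (mksnapshot turns it off further down). A rough sketch of the resulting name expansion, assuming the "isolate-%p-" prefix format shown in AddIsolateIdIfNeeded:

// Sketch only: not the V8 function, just its observable naming behavior.
#include <cstdio>
#include <string>

std::string PrefixedLogFileName(const void* isolate, bool is_default_isolate,
                                bool logfile_per_isolate,
                                const std::string& file_name) {
  if (is_default_isolate || !logfile_per_isolate) return file_name;
  char prefix[32];
  std::snprintf(prefix, sizeof(prefix), "isolate-%p-", isolate);
  return prefix + file_name;
}

int main() {
  int marker;  // any address will do for the demonstration
  std::printf("%s\n",
              PrefixedLogFileName(&marker, false, true, "v8.log").c_str());
  std::printf("%s\n",
              PrefixedLogFileName(&marker, false, false, "v8.log").c_str());
}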
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index cb0896a..2e25a56 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -426,6 +426,7 @@
static unsigned found_by_runtime_probing_only_;
friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 3aabd97..e528dd7 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -201,14 +201,12 @@
Register argument = a2;
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- a0, // Input.
- argument, // Result.
- a3, // Scratch.
- t0, // Scratch.
- t1, // Scratch.
- &not_cached);
+ __ LookupNumberStringCache(a0, // Input.
+ argument, // Result.
+ a3, // Scratch.
+ t0, // Scratch.
+ t1, // Scratch.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
__ bind(&argument_is_string);
@@ -833,14 +831,15 @@
// The following registers must be saved and restored when calling through to
// the runtime:
// a0 - contains return address (beginning of patch sequence)
- // a1 - function object
+ // a1 - isolate
RegList saved_regs =
(a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
- __ PrepareCallCFunction(1, 0, a1);
+ __ PrepareCallCFunction(1, 0, a2);
+ __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ ExternalReference::get_make_code_young_function(masm->isolate()), 2);
__ MultiPop(saved_regs);
__ Jump(a0);
}
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 0589bf0..ea104fb 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -78,7 +78,7 @@
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
@@ -994,97 +994,13 @@
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ sra(mask, mask, kSmiTagSize + 1);
- __ Addu(mask, mask, -1); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Isolate* isolate = masm->isolate();
- Label is_smi;
- Label load_result_from_cache;
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ Addu(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ lw(scratch2, MemOperand(scratch1, kPointerSize));
- __ lw(scratch1, MemOperand(scratch1, 0));
- __ Xor(scratch1, scratch1, Operand(scratch2));
- __ And(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
- __ Addu(scratch1, number_string_cache, scratch1);
-
- Register probe = mask;
- __ lw(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
- __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
- __ Branch(not_found);
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ sra(scratch, object, 1); // Shift away the tag.
- __ And(scratch, mask, Operand(scratch));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ sll(scratch, scratch, kPointerSizeLog2 + 1);
- __ Addu(scratch, number_string_cache, scratch);
-
- // Check if the entry is the smi we are looking for.
- __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ Branch(not_found, ne, object, Operand(probe));
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ lw(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
-
- __ IncrementCounter(isolate->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
-}
-
-
void NumberToStringStub::Generate(MacroAssembler* masm) {
Label runtime;
__ lw(a1, MemOperand(sp, 0));
// Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, &runtime);
+ __ LookupNumberStringCache(a1, v0, a2, a3, t0, &runtime);
__ DropAndRet(1);
__ bind(&runtime);
@@ -2795,8 +2711,9 @@
if (do_gc) {
// Move result passed in v0 into a0 to call PerformGC.
__ mov(a0, v0);
- __ PrepareCallCFunction(1, 0, a1);
- __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
+ __ PrepareCallCFunction(2, 0, a1);
+ __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0);
}
ExternalReference scope_depth =
@@ -2875,7 +2792,7 @@
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(save_doubles_, s0, true);
+ __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
// Check if we should retry or throw exception.
Label retry;
@@ -3408,8 +3325,7 @@
receiver = a0;
}
- StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss,
- support_wrapper_);
+ StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
@@ -4156,7 +4072,7 @@
DirectCEntryStub stub;
stub.GenerateCall(masm, t9);
- __ LeaveExitFrame(false, no_reg);
+ __ LeaveExitFrame(false, no_reg, true);
// v0: result
// subject: subject string (callee saved)
@@ -4424,6 +4340,7 @@
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // a0 : number of arguments to the construct function
// a1 : the function to call
// a2 : cache cell for call target
Label initialize, done, miss, megamorphic, not_array_function;
@@ -4444,9 +4361,6 @@
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the cell either some other function or an
// AllocationSite. Do a map check on the object in a3.
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
__ lw(t1, FieldMemOperand(a3, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&miss, ne, t1, Operand(at));
@@ -4485,6 +4399,7 @@
1 << 5 | // a1
1 << 6; // a2
+ // Arguments register must be smi-tagged to call out.
__ SmiTag(a0);
__ MultiPush(kSavedRegs);
@@ -5863,13 +5778,7 @@
// Check the number to string cache.
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- slow);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
__ mov(arg, scratch1);
__ sw(arg, MemOperand(sp, stack_offset));
__ bind(&done);
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index 8c9d22a..627244c 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -272,19 +272,6 @@
public:
NumberToStringStub() { }
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found);
-
private:
Major MajorKey() { return NumberToString; }
int MinorKey() { return 0; }
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 5c847fc..a12faee 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -635,7 +635,8 @@
}
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
@@ -644,7 +645,7 @@
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
// Mark this code sequence for FindPlatformCodeAgeSequence()
patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 16f75b8..4426d90 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -78,88 +78,6 @@
}
-// This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
-// The back edge bookkeeping code matches the pattern:
-//
-// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
-// beq at, zero_reg, ok
-// lui t9, <interrupt stub address> upper
-// ori t9, <interrupt stub address> lower
-// jalr t9
-// nop
-// ok-label ----- pc_after points here
-//
-// We patch the code to the following form:
-//
-// addiu at, zero_reg, 1
-// beq at, zero_reg, ok ;; Not changed
-// lui t9, <on-stack replacement address> upper
-// ori t9, <on-stack replacement address> lower
-// jalr t9 ;; Not changed
-// nop ;; Not changed
-// ok-label ----- pc_after points here
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code) {
- static const int kInstrSize = Assembler::kInstrSize;
- // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
- CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- patcher.masm()->addiu(at, zero_reg, 1);
- // Replace the stack check address in the load-immediate (lui/ori pair)
- // with the entry address of the replacement code.
- Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code) {
- static const int kInstrSize = Assembler::kInstrSize;
- // Restore the sltu instruction so beq can be taken again.
- CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- patcher.masm()->slt(at, a3, zero_reg);
- // Restore the original call address.
- Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
- interrupt_code->entry());
-
- interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 4 * kInstrSize, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after) {
- static const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
- if (Assembler::IsAddImmediate(
- Assembler::instr_at(pc_after - 6 * kInstrSize))) {
- Code* osr_builtin =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- ASSERT(reinterpret_cast<uint32_t>(
- Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(osr_builtin->entry()));
- return PATCHED_FOR_OSR;
- } else {
- // Get the interrupt stub code object to match against from cache.
- Code* interrupt_builtin =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
- ASSERT(reinterpret_cast<uint32_t>(
- Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(interrupt_builtin->entry()));
- return NOT_PATCHED;
- }
-}
-#endif // DEBUG
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index df3f417..853ee08 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1653,13 +1653,11 @@
__ li(a0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1) {
- __ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
@@ -4926,6 +4924,89 @@
#undef __
+
+// This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
+// The back edge bookkeeping code matches the pattern:
+//
+// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
+// beq at, zero_reg, ok
+// lui t9, <interrupt stub address> upper
+// ori t9, <interrupt stub address> lower
+// jalr t9
+// nop
+// ok-label ----- pc_after points here
+//
+// We patch the code to the following form:
+//
+// addiu at, zero_reg, 1
+// beq at, zero_reg, ok ;; Not changed
+// lui t9, <on-stack replacement address> upper
+// ori t9, <on-stack replacement address> lower
+// jalr t9 ;; Not changed
+// nop ;; Not changed
+// ok-label ----- pc_after points here
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* replacement_code) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
+ CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
+ patcher.masm()->addiu(at, zero_reg, 1);
+ // Replace the stack check address in the load-immediate (lui/ori pair)
+ // with the entry address of the replacement code.
+ Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+ replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
+}
+
+
+void BackEdgeTable::RevertAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* interrupt_code) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ // Restore the sltu instruction so beq can be taken again.
+ CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
+ patcher.masm()->slt(at, a3, zero_reg);
+ // Restore the original call address.
+ Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+ interrupt_code->entry());
+
+ interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_after - 4 * kInstrSize, interrupt_code);
+}
+
+
+#ifdef DEBUG
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
+ if (Assembler::IsAddImmediate(
+ Assembler::instr_at(pc_after - 6 * kInstrSize))) {
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT(reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+ reinterpret_cast<uint32_t>(osr_builtin->entry()));
+ return ON_STACK_REPLACEMENT;
+ } else {
+ // Get the interrupt stub code object to match against from cache.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
+ ASSERT(reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+ reinterpret_cast<uint32_t>(interrupt_builtin->entry()));
+ return INTERRUPT;
+ }
+}
+#endif // DEBUG
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
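
To make the fixed offsets in PatchAt/RevertAt concrete: MIPS instructions are 4 bytes, and pc_after points at the ok-label just past the 6-instruction back-edge sequence, so the sltu/addiu slot sits 6 instructions (24 bytes) back and the lui/ori pair 4 instructions (16 bytes) back. A small sanity-check sketch of that arithmetic:

// Sketch only: restates the offset math the patching code above relies on.
#include <cassert>
#include <cstdint>

int main() {
  const int kInstrSize = 4;                            // 4-byte MIPS instructions
  uintptr_t pc_after = 0x1000;                         // hypothetical ok-label
  uintptr_t branch_guard = pc_after - 6 * kInstrSize;  // sltu <-> addiu slot
  uintptr_t call_target = pc_after - 4 * kInstrSize;   // lui/ori target pair
  assert(pc_after - branch_guard == 24);
  assert(pc_after - call_target == 16);
  return 0;
}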
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index b37c7e0..69a3c89 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -1789,33 +1789,43 @@
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
+ LOperand* index_op = instr->index();
Register value = ToRegister(instr->value());
Register scratch = scratch0();
String::Encoding encoding = instr->encoding();
if (FLAG_debug_code) {
- __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+ __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
+ __ And(scratch, scratch,
+ Operand(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
+ __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
}
- __ Addu(scratch,
- string,
- Operand(SeqString::kHeaderSize - kHeapObjectTag));
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ Addu(at, scratch, index);
- __ sb(value, MemOperand(at));
+ if (index_op->IsConstantOperand()) {
+ int constant_index = ToInteger32(LConstantOperand::cast(index_op));
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ sb(value,
+ FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
+ } else {
+ __ sh(value,
+ FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
+ }
} else {
- __ sll(at, index, 1);
- __ Addu(at, scratch, at);
- __ sh(value, MemOperand(at));
+ Register index = ToRegister(index_op);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ Addu(scratch, string, Operand(index));
+ __ sb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+ } else {
+ __ sll(scratch, index, 1);
+ __ Addu(scratch, string, scratch);
+ __ sh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+ }
}
}
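
As a concreteness check on the constant-index path above: the character slot lives at SeqString::kHeaderSize plus the index (one-byte encoding) or twice the index (two-byte), with FieldMemOperand folding in the heap-object tag adjustment. A minimal sketch of that offset arithmetic; the kHeaderSize value here is an assumption, the real constant comes from V8's object layout:

// Sketch only: offset math for the constant-index SeqString store.
#include <cassert>

int main() {
  const int kHeaderSize = 16;    // assumed stand-in for SeqString::kHeaderSize
  const int kHeapObjectTag = 1;  // V8 tags heap pointers with 1
  int index = 3;
  int one_byte = kHeaderSize + index - kHeapObjectTag;      // sb target
  int two_byte = kHeaderSize + index * 2 - kHeapObjectTag;  // sh target
  assert(two_byte - one_byte == index);
  return 0;
}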
@@ -2057,25 +2067,6 @@
}
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32() || r.IsDouble()) {
- EmitBranch(instr, al, zero_reg, Operand(zero_reg));
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsTaggedNumber()) {
- EmitBranch(instr, al, zero_reg, Operand(zero_reg));
- }
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- __ lw(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- EmitBranch(instr, eq, scratch0(), Operand(at));
- }
-}
-
-
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32() || r.IsSmi()) {
@@ -2814,7 +2805,7 @@
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
+ __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ lw(result, FieldMemOperand(at, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -2840,7 +2831,7 @@
Register cell = scratch0();
// Load the cell.
- __ li(cell, Operand(instr->hydrogen()->cell()));
+ __ li(cell, Operand(instr->hydrogen()->cell().handle()));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -3011,6 +3002,12 @@
}
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register to_reg = ToRegister(instr->result());
@@ -4241,20 +4238,25 @@
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ Register address = scratch0();
FPURegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key <<
- element_size_shift);
+ if (constant_key != 0) {
+ __ Addu(address, external_pointer,
+ Operand(constant_key << element_size_shift));
+ } else {
+ address = external_pointer;
+ }
} else {
- __ sll(scratch0(), key, shift_size);
- __ Addu(scratch0(), scratch0(), external_pointer);
+ __ sll(address, key, shift_size);
+ __ Addu(address, external_pointer, address);
}
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvt_s_d(double_scratch0(), value);
- __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
+ __ swc1(double_scratch0(), MemOperand(address, additional_offset));
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ sdc1(value, MemOperand(scratch0(), additional_offset));
+ __ sdc1(value, MemOperand(address, additional_offset));
}
} else {
Register value(ToRegister(instr->value()));
@@ -4296,33 +4298,29 @@
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
DoubleRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
- Register key = no_reg;
Register scratch = scratch0();
+ DoubleRegister double_scratch = double_scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- Label not_nan;
+ Label not_nan, done;
// Calculate the effective address of the slot in the array to store the
// double value.
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
+ __ Addu(scratch, elements,
+ Operand((constant_key << element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- if (key_is_constant) {
- __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- } else {
- __ sll(scratch, key, shift_size);
- __ Addu(scratch, elements, Operand(scratch));
- __ Addu(scratch, scratch,
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ __ Addu(scratch, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ sll(at, ToRegister(instr->key()), shift_size);
+ __ Addu(scratch, scratch, at);
}
if (instr->NeedsCanonicalization()) {
@@ -4333,12 +4331,17 @@
// Only load canonical NaN if the comparison above set the overflow.
__ bind(&is_nan);
- __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ Move(double_scratch,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
+ element_size_shift));
+ __ Branch(&done);
}
__ bind(&not_nan);
__ sdc1(value, MemOperand(scratch, instr->additional_index() <<
element_size_shift));
+ __ bind(&done);
}
@@ -4798,34 +4801,19 @@
LEnvironment* env,
NumberUntagDMode mode) {
Register scratch = scratch0();
-
- Label load_smi, heap_number, done;
-
+ Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
// Heap number map check.
__ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(ne, env, scratch, Operand(at));
+ if (can_convert_undefined_to_nan) {
+ __ Branch(&convert, ne, scratch, Operand(at));
} else {
- Label heap_number, convert;
- __ Branch(&heap_number, eq, scratch, Operand(at));
-
- // Convert undefined (and hole) to NaN.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, env, input_reg, Operand(at));
-
- __ bind(&convert);
- __ LoadRoot(at, Heap::kNanValueRootIndex);
- __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
- __ Branch(&done);
-
- __ bind(&heap_number);
+ DeoptimizeIf(ne, env, scratch, Operand(at));
}
- // Heap number to double register conversion.
+ // Load heap number.
__ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
__ mfc1(at, result_reg.low());
@@ -4834,11 +4822,19 @@
DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+ // Convert undefined (and hole) to NaN.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(ne, env, input_reg, Operand(at));
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ Branch(&done);
+ }
} else {
__ SmiUntag(scratch, input_reg);
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
-
// Smi to double register conversion
__ bind(&load_smi);
// scratch: untagged value of input_reg
@@ -4934,14 +4930,18 @@
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiUntag(input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- // Let the deferred code handle the HeapObject case.
- __ JumpIfNotSmi(input_reg, deferred->entry());
+ // Let the deferred code handle the HeapObject case.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
- // Smi to int32 conversion.
- __ SmiUntag(input_reg);
- __ bind(deferred->exit());
+ // Smi to int32 conversion.
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+ }
}
@@ -5091,7 +5091,7 @@
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object();
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
AllowDeferredHandleDereference smi_check;
if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
@@ -5142,7 +5142,6 @@
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- SmallMapList* map_set = instr->hydrogen()->map_set();
__ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
DeferredCheckMaps* deferred = NULL;
@@ -5151,12 +5150,13 @@
__ bind(deferred->check_maps());
}
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
__ CompareMapAndBranch(map_reg, map, &success, eq, &success);
}
- Handle<Map> map = map_set->last();
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
// Do the CompareMap() directly within the Branch() and DeoptimizeIf().
if (instr->hydrogen()->has_migration_target()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 4dc8022..7333398 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -715,51 +715,44 @@
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, v0), instr);
- }
-
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- bool does_deopt = false;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- // Left shifts can deoptimize if we shift by > 0 and the result cannot be
- // truncated to smi.
- if (instr->representation().IsSmi() && constant_value > 0) {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
- }
- } else {
- right = UseRegisterAtStart(right_value);
- }
-
- // Shift operations can deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ bool does_deopt = false;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
+ // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+ // truncated to smi.
+ if (instr->representation().IsSmi() && constant_value > 0) {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+ }
} else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ right = UseRegisterAtStart(right_value);
}
- }
- LInstruction* result =
- DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
+ }
+
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
@@ -768,21 +761,25 @@
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineAsRegister(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseFixedDouble(instr->left(), f2);
+ LOperand* right = UseFixedDouble(instr->right(), f4);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ // We call a C function for double modulo. It can't trigger a GC. We need
+ // to use fixed result register for the call.
+ // TODO(fschneider): Allow any register as input registers.
+ return MarkAsCall(DefineFixedDouble(result, f2), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineAsRegister(result);
+ }
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
@@ -1349,33 +1346,27 @@
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), a1);
- LOperand* right = UseFixed(instr->right(), a0);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, v0), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LDivI* div = new(zone()) LDivI(dividend, divisor);
return AssignEnvironment(DefineAsRegister(div));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
return DoArithmeticT(Token::DIV, instr);
}
@@ -1466,17 +1457,10 @@
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC. We need
- // to use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
- UseFixedDouble(left, f2),
- UseFixedDouble(right, f4));
- return MarkAsCall(DefineFixedDouble(mod, f2), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
@@ -1579,7 +1563,6 @@
}
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1674,8 +1657,8 @@
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return new(zone()) LCmpHoleAndBranch(object);
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
}
@@ -1783,11 +1766,9 @@
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- LOperand* value = UseTempRegister(instr->value());
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineAsRegister(result);
+ LOperand* index = UseRegisterOrConstant(instr->index());
+ LOperand* value = UseRegister(instr->value());
+ return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
}
@@ -1940,12 +1921,6 @@
}
-LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
- return new(zone())
- LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckInstanceType(value);
@@ -2096,6 +2071,11 @@
}
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
@@ -2150,8 +2130,6 @@
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
-
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2162,14 +2140,18 @@
if (instr->value()->representation().IsDouble()) {
object = UseRegisterAtStart(instr->elements());
key = UseRegisterOrConstantAtStart(instr->key());
- val = UseTempRegister(instr->value());
+ val = UseRegister(instr->value());
} else {
ASSERT(instr->value()->representation().IsSmiOrTagged());
- object = UseTempRegister(instr->elements());
- val = needs_write_barrier ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- key = needs_write_barrier ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
+ if (needs_write_barrier) {
+ object = UseTempRegister(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = UseTempRegister(instr->key());
+ } else {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseRegisterAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ }
}
return new(zone()) LStoreKeyed(object, key, val);
@@ -2177,17 +2159,13 @@
ASSERT(
(instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
+ (instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
+ (instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->elements()->representation().IsExternal());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* external_pointer = UseRegister(instr->elements());
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 29a8eac..7c22ae8 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -113,13 +113,13 @@
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
- V(IsNumberAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
+ V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -936,19 +936,6 @@
};
-class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
- public:
- explicit LIsNumberAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
-};
-
-
class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1300,7 +1287,7 @@
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- Handle<Map> map() const { return hydrogen()->map(); }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
@@ -1573,6 +1560,15 @@
};
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
@@ -2099,7 +2095,7 @@
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -2254,8 +2250,10 @@
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
@@ -2753,7 +2751,7 @@
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LPlatformChunk* chunk_;
CompilationInfo* info_;
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index a85b0d8..159c924 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -3847,12 +3847,14 @@
}
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
- Address function_address,
- ExternalReference thunk_ref,
- Register thunk_last_arg,
- int stack_space,
- int return_value_offset_from_fp) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ ExternalReference function,
+ Address function_address,
+ ExternalReference thunk_ref,
+ Register thunk_last_arg,
+ int stack_space,
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand) {
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
@@ -3915,12 +3917,13 @@
}
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label return_value_loaded;
// Load value from ReturnValue.
- lw(v0, MemOperand(fp, return_value_offset_from_fp*kPointerSize));
+ lw(v0, return_value_operand);
bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
@@ -3941,14 +3944,23 @@
li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
lw(t1, MemOperand(at));
Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
+ bind(&exception_handled);
+
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ lw(cp, *context_restore_operand);
+ }
li(s0, Operand(stack_space));
- LeaveExitFrame(false, s0, true);
+ LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
bind(&promote_scheduled_exception);
- TailCallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
- 0,
- 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallExternalReference(
+ ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
@@ -4684,6 +4696,7 @@
void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count,
+ bool restore_context,
bool do_return) {
// Optionally restore all double registers.
if (save_doubles) {
@@ -4700,9 +4713,12 @@
sw(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
- li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- lw(cp, MemOperand(t8));
+ if (restore_context) {
+ li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+ lw(cp, MemOperand(t8));
+ }
#ifdef DEBUG
+ li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
sw(a3, MemOperand(t8));
#endif
@@ -4929,6 +4945,86 @@
}
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found) {
+ // Use of registers. Register result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch3;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide length by two (length is a smi).
+ sra(mask, mask, kSmiTagSize + 1);
+ Addu(mask, mask, -1); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ Addu(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ lw(scratch2, MemOperand(scratch1, kPointerSize));
+ lw(scratch1, MemOperand(scratch1, 0));
+ Xor(scratch1, scratch1, Operand(scratch2));
+ And(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
+ Addu(scratch1, number_string_cache, scratch1);
+
+ Register probe = mask;
+ lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
+ Branch(not_found);
+
+ bind(&is_smi);
+ Register scratch = scratch1;
+ sra(scratch, object, 1); // Shift away the tag.
+ And(scratch, mask, Operand(scratch));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ sll(scratch, scratch, kPointerSizeLog2 + 1);
+ Addu(scratch, number_string_cache, scratch);
+
+ // Check if the entry is the smi we are looking for.
+ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+ Branch(not_found, ne, object, Operand(probe));
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+
+ IncrementCounter(isolate()->counters()->number_to_string_native(),
+ 1,
+ scratch1,
+ scratch2);
+}
+
+
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
@@ -5507,7 +5603,7 @@
Branch(&no_memento_available, gt, scratch_reg, Operand(at));
lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
Branch(allocation_memento_present, cond, scratch_reg,
- Operand(Handle<Map>(isolate()->heap()->allocation_memento_map())));
+ Operand(isolate()->factory()->allocation_memento_map()));
bind(&no_memento_available);
}
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 75ded88..9cc7748 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -51,6 +51,12 @@
// MIPS generated code calls C code, it must be via t9 register.
+// Flags used for LeaveExitFrame function.
+enum LeaveExitFrameMode {
+ EMIT_RETURN = true,
+ NO_EMIT_RETURN = false
+};
+
// Flags used for AllocateHeapNumber
enum TaggingMode {
// Tag the result.
@@ -848,7 +854,8 @@
// Leave the current exit frame.
void LeaveExitFrame(bool save_doubles,
Register arg_count,
- bool do_return = false);
+ bool restore_context,
+ bool do_return = NO_EMIT_RETURN);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -1271,7 +1278,8 @@
ExternalReference thunk_ref,
Register thunk_last_arg,
int stack_space,
- int return_value_offset_from_fp);
+ MemOperand return_value_operand,
+ MemOperand* context_restore_operand);
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin,
@@ -1419,6 +1427,18 @@
// -------------------------------------------------------------------------
// String utilities.
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_found);
+
// Checks if both instance types are sequential ASCII strings and jumps to
// label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialAscii(
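
The probe logic the new LookupNumberStringCache comment describes follows Heap::GetNumberStringCache: a smi hashes to its own value and a double to the xor of its upper and lower 32-bit words, masked to half the backing-array length (each cache entry is a number/string pair). A standalone sketch of that hash, not the V8 routine itself:

// Sketch only: the number-string-cache index computation described above.
#include <cstdint>
#include <cstdio>
#include <cstring>

uint32_t CacheIndex(bool is_smi, int32_t smi_value, double number,
                    uint32_t backing_array_length) {
  // Entries hold two slots (number, string), hence length / 2 entries.
  uint32_t mask = backing_array_length / 2 - 1;
  if (is_smi) return static_cast<uint32_t>(smi_value) & mask;
  uint64_t bits;
  std::memcpy(&bits, &number, sizeof bits);
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  return (lo ^ hi) & mask;
}

int main() {
  std::printf("smi 7 -> slot %u\n", CacheIndex(true, 7, 0.0, 128));
  std::printf("1.5   -> slot %u\n", CacheIndex(false, 0, 1.5, 128));
}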
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 58452ca..e0cf1b6 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -374,30 +374,26 @@
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
// Load length directly from the string.
__ Ret(USE_DELAY_SLOT);
__ lw(v0, FieldMemOperand(receiver, String::kLengthOffset));
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE));
- // Unwrap the value and check if the wrapped value is a string.
- __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
- }
+ // Unwrap the value and check if the wrapped value is a string.
+ __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+ __ Ret(USE_DELAY_SLOT);
+ __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset));
}
@@ -833,23 +829,28 @@
static void GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ bool restore_context) {
// ----------- S t a t e -------------
- // -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee JS function
- // -- sp[8] : call data
- // -- sp[12] : isolate
- // -- sp[16] : ReturnValue default value
- // -- sp[20] : ReturnValue
- // -- sp[24] : last JS argument
+ // -- sp[0] : context
+ // -- sp[4] : holder (set by CheckPrototypes)
+ // -- sp[8] : callee JS function
+ // -- sp[12] : call data
+ // -- sp[16] : isolate
+ // -- sp[20] : ReturnValue default value
+ // -- sp[24] : ReturnValue
+ // -- sp[28] : last JS argument
// -- ...
- // -- sp[(argc + 5) * 4] : first JS argument
- // -- sp[(argc + 6) * 4] : receiver
+ // -- sp[(argc + 6) * 4] : first JS argument
+ // -- sp[(argc + 7) * 4] : receiver
// -----------------------------------
+ // Save calling context.
+ __ sw(cp, MemOperand(sp));
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ LoadHeapObject(t1, function);
__ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
+ __ sw(t1, MemOperand(sp, 2 * kPointerSize));
// Pass the additional arguments.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
@@ -860,18 +861,18 @@
} else {
__ li(t2, call_data);
}
-
+ // Store call data.
+ __ sw(t2, MemOperand(sp, 3 * kPointerSize));
+ // Store isolate.
__ li(t3, Operand(ExternalReference::isolate_address(masm->isolate())));
- // Store JS function, call data, isolate ReturnValue default and ReturnValue.
- __ sw(t1, MemOperand(sp, 1 * kPointerSize));
- __ sw(t2, MemOperand(sp, 2 * kPointerSize));
- __ sw(t3, MemOperand(sp, 3 * kPointerSize));
+ __ sw(t3, MemOperand(sp, 4 * kPointerSize));
+ // Store ReturnValue default and ReturnValue.
__ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
- __ sw(t1, MemOperand(sp, 4 * kPointerSize));
__ sw(t1, MemOperand(sp, 5 * kPointerSize));
+ __ sw(t1, MemOperand(sp, 6 * kPointerSize));
// Prepare arguments.
- __ Addu(a2, sp, Operand(5 * kPointerSize));
+ __ Addu(a2, sp, Operand((kFastApiCallArguments - 1) * kPointerSize));
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -910,12 +911,18 @@
masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
+ MemOperand context_restore_operand(
+ fp, 2 * kPointerSize);
+ MemOperand return_value_operand(
+ fp, (kFastApiCallArguments + 1) * kPointerSize);
__ CallApiFunctionAndReturn(ref,
function_address,
thunk_ref,
a1,
kStackUnwindSpace,
- kFastApiCallArguments + 1);
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
}
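
The hunk above prepends a context slot to the fast-API frame and threads a new restore_context flag down to CallApiFunctionAndReturn, shifting every later slot by one word. A minimal standalone sketch of the revised layout, assuming the seven named slots in the state comment (the real kFastApiCallArguments and FunctionCallbackArguments constants are defined elsewhere in the tree):

    // Sketch only: models the revised MIPS fast-API frame from the state
    // comment above (4-byte pointers). Slot order follows the comment; the
    // enum is an illustrative assumption, not V8's actual constants.
    #include <cstdio>

    const int kPointerSize = 4;
    enum Slot {
      kContext,             // new in this patch: caller's cp saved at sp[0]
      kHolder,              // was at sp[0] before the patch
      kCallee,
      kCallData,
      kIsolate,
      kReturnValueDefault,
      kReturnValue,
      kSlotCount            // plays the role of kFastApiCallArguments here
    };

    int main() {
      for (int s = kContext; s < kSlotCount; ++s)
        std::printf("sp[%2d] : slot %d\n", s * kPointerSize, s);
      const int argc = 2;  // example: two JS arguments
      std::printf("first JS arg sp[%d], receiver sp[%d]\n",
                  (argc + kSlotCount - 1) * kPointerSize,
                  (argc + kSlotCount) * kPointerSize);
      return 0;
    }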
@@ -930,10 +937,12 @@
ASSERT(!receiver.is(scratch));
const int stack_space = kFastApiCallArguments + argc + 1;
+ const int kHolderIndex = kFastApiCallArguments +
+ FunctionCallbackArguments::kHolderIndex - 1;
// Assign stack space for the call arguments.
__ Subu(sp, sp, Operand(stack_space * kPointerSize));
// Write holder to stack frame.
- __ sw(receiver, MemOperand(sp, 0));
+ __ sw(receiver, MemOperand(sp, kHolderIndex * kPointerSize));
// Write receiver to stack frame.
int index = stack_space - 1;
__ sw(receiver, MemOperand(sp, index * kPointerSize));
@@ -944,7 +953,7 @@
__ sw(receiver, MemOperand(sp, index-- * kPointerSize));
}
- GenerateFastApiDirectCall(masm, optimization, argc);
+ GenerateFastApiDirectCall(masm, optimization, argc, true);
}
@@ -1058,7 +1067,8 @@
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
+ GenerateFastApiDirectCall(
+ masm, optimization, arguments_.immediate(), false);
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@@ -1185,6 +1195,8 @@
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ const int kHolderIndex = kFastApiCallArguments +
+ FunctionCallbackArguments::kHolderIndex - 1;
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
__ li(scratch1, Operand(Handle<Map>(object->map())));
@@ -1200,7 +1212,7 @@
int depth = 0;
if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp));
+ __ sw(reg, MemOperand(sp, kHolderIndex * kPointerSize));
}
// Check the maps in the prototype chain.
@@ -1258,7 +1270,7 @@
}
if (save_at_depth == depth) {
- __ sw(reg, MemOperand(sp));
+ __ sw(reg, MemOperand(sp, kHolderIndex * kPointerSize));
}
// Go to the next object in the prototype chain.
@@ -1458,7 +1470,7 @@
// (second argument - a1) = AccessorInfo&
__ Addu(a1, sp, kPointerSize);
- const int kStackUnwindSpace = kFastApiCallArguments + 1;
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
@@ -1475,7 +1487,8 @@
thunk_ref,
a2,
kStackUnwindSpace,
- 6);
+ MemOperand(fp, 6 * kPointerSize),
+ NULL);
}
@@ -2558,7 +2571,7 @@
CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, name,
depth, &miss);
- GenerateFastApiDirectCall(masm(), optimization, argc);
+ GenerateFastApiDirectCall(masm(), optimization, argc, false);
__ bind(&miss);
FreeSpaceForFastApiCall(masm());
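
Note the flag's two settings above: true in GenerateFastApiCall, false at the two direct call-compiler sites. The new CallApiFunctionAndReturn contract, reduced to a runnable plain-C++ sketch (the types are stand-ins for the assembler's):

    // Hedged sketch: NULL for the restore operand means "leave the context
    // alone", mirroring restore_context ? &context_restore_operand : NULL.
    #include <cstdio>

    struct MemOperand { int offset_from_fp; };  // stand-in type

    void CallApiFunctionAndReturn(const MemOperand& return_value_operand,
                                  const MemOperand* context_restore_operand) {
      std::printf("return value read from fp+%d\n",
                  return_value_operand.offset_from_fp);
      if (context_restore_operand != NULL)
        std::printf("cp restored from fp+%d\n",
                    context_restore_operand->offset_from_fp);
    }

    int main() {
      MemOperand ret = {32}, ctx = {8};
      CallApiFunctionAndReturn(ret, &ctx);  // restore_context == true
      CallApiFunctionAndReturn(ret, NULL);  // restore_context == false
      return 0;
    }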
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index 9cf9e2e..75babb5 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -310,6 +310,7 @@
int main(int argc, char** argv) {
V8::InitializeICU();
+ i::Isolate::SetCrashIfDefaultIsolateInitialized();
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
@@ -330,7 +331,10 @@
exit(1);
}
#endif
- Isolate* isolate = Isolate::GetCurrent();
+ i::FLAG_logfile_per_isolate = false;
+
+ Isolate* isolate = v8::Isolate::New();
+ isolate->Enter();
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Serializer::Enable(internal_isolate);
Persistent<Context> context;
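
mksnapshot thus stops relying on the lazily created default isolate (which i::Isolate::SetCrashIfDefaultIsolateInitialized now turns into a hard failure) and owns its isolate explicitly. The full embedder shape of that pattern looks roughly like this (a fragment, assuming the v8.h API of this revision; not compilable outside a V8 checkout):

    v8::Isolate* isolate = v8::Isolate::New();
    isolate->Enter();
    {
      v8::HandleScope handle_scope(isolate);
      // ... create a context, run bootstrap script, serialize ...
    }
    isolate->Exit();
    isolate->Dispose();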
diff --git a/src/object-observe.js b/src/object-observe.js
index 1035792..b09c42d 100644
--- a/src/object-observe.js
+++ b/src/object-observe.js
@@ -284,11 +284,6 @@
arg.length < 0)
return false;
- var length = arg.length;
- for (var i = 0; i < length; i++) {
- if (!IS_STRING(arg[i]))
- return false;
- }
return true;
}
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 5d9e161..ad13d7f 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -330,10 +330,11 @@
}
}
- // TODO(hpayer): deal gracefully with partially constructed JSObjects, when
- // allocation folding is turned off.
- if (reinterpret_cast<Map*>(elements()) !=
- GetHeap()->one_pointer_filler_map()) {
+ // If a GC was caused while constructing this object, the elements
+ // pointer may point to a one pointer filler map.
+ if ((FLAG_use_gvn && FLAG_use_allocation_folding) ||
+ (reinterpret_cast<Map*>(elements()) !=
+ GetHeap()->one_pointer_filler_map())) {
CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
(elements() == GetHeap()->empty_fixed_array())),
(elements()->map() == GetHeap()->fixed_array_map() ||
@@ -683,10 +684,11 @@
void JSArray::JSArrayVerify() {
JSObjectVerify();
CHECK(length()->IsNumber() || length()->IsUndefined());
- // TODO(hpayer): deal gracefully with partially constructed JSObjects, when
- // allocation folding is turned off.
- if (reinterpret_cast<Map*>(elements()) !=
- GetHeap()->one_pointer_filler_map()) {
+ // If a GC was caused while constructing this array, the elements
+ // pointer may point to a one pointer filler map.
+ if ((FLAG_use_gvn && FLAG_use_allocation_folding) ||
+ (reinterpret_cast<Map*>(elements()) !=
+ GetHeap()->one_pointer_filler_map())) {
CHECK(elements()->IsUndefined() ||
elements()->IsFixedArray() ||
elements()->IsFixedDoubleArray());
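
Both verifier hunks share one guard: the elements checks now run whenever GVN and allocation folding are both enabled, or whenever the elements pointer is not the one-pointer filler left behind mid-construction. Pulled out as a hedged standalone predicate (stand-in types; the flag names mirror FLAG_use_gvn and FLAG_use_allocation_folding):

    // Sketch of the guard added in both JSObjectVerify and JSArrayVerify.
    bool ShouldVerifyElements(bool use_gvn, bool use_allocation_folding,
                              const void* elements_map,
                              const void* one_pointer_filler_map) {
      return (use_gvn && use_allocation_folding) ||
             elements_map != one_pointer_filler_map;
    }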
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 89abe50..dcce931 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1323,6 +1323,13 @@
}
+void AllocationSite::Initialize() {
+ SetElementsKind(GetInitialFastElementsKind());
+ set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
+}
+
+
// Heuristic: We only need to create allocation site info if the boilerplate
// elements kind is the initial elements kind.
AllocationSiteMode AllocationSite::GetMode(
@@ -1535,52 +1542,6 @@
}
-MaybeObject* JSObject::AllocateStorageForMap(Map* map) {
- ASSERT(this->map()->inobject_properties() == map->inobject_properties());
- ElementsKind obj_kind = this->map()->elements_kind();
- ElementsKind map_kind = map->elements_kind();
- if (map_kind != obj_kind) {
- ElementsKind to_kind = map_kind;
- if (IsMoreGeneralElementsKindTransition(map_kind, obj_kind) ||
- IsDictionaryElementsKind(obj_kind)) {
- to_kind = obj_kind;
- }
- MaybeObject* maybe_obj =
- IsDictionaryElementsKind(to_kind) ? NormalizeElements()
- : TransitionElementsKind(to_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
- MaybeObject* maybe_map = map->AsElementsKind(to_kind);
- if (!maybe_map->To(&map)) return maybe_map;
- }
- int total_size =
- map->NumberOfOwnDescriptors() + map->unused_property_fields();
- int out_of_object = total_size - map->inobject_properties();
- if (out_of_object != properties()->length()) {
- FixedArray* new_properties;
- MaybeObject* maybe_properties = properties()->CopySize(out_of_object);
- if (!maybe_properties->To(&new_properties)) return maybe_properties;
- set_properties(new_properties);
- }
- set_map(map);
- return this;
-}
-
-
-MaybeObject* JSObject::MigrateInstance() {
- // Converting any field to the most specific type will cause the
- // GeneralizeFieldRepresentation algorithm to create the most general existing
- // transition that matches the object. This achieves what is needed.
- Map* original_map = map();
- MaybeObject* maybe_result = GeneralizeFieldRepresentation(
- 0, Representation::None(), ALLOW_AS_CONSTANT);
- JSObject* result;
- if (FLAG_trace_migration && maybe_result->To(&result)) {
- PrintInstanceMigration(stdout, original_map, result->map());
- }
- return maybe_result;
-}
-
-
MaybeObject* JSObject::TryMigrateInstance() {
Map* new_map = map()->CurrentMapForDeprecated();
if (new_map == NULL) return Smi::FromInt(0);
@@ -4495,6 +4456,8 @@
ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
ACCESSORS(AllocationSite, transition_info, Object, kTransitionInfoOffset)
+ACCESSORS(AllocationSite, dependent_code, DependentCode,
+ kDependentCodeOffset)
ACCESSORS(AllocationSite, weak_next, Object, kWeakNextOffset)
ACCESSORS(AllocationMemento, allocation_site, Object, kAllocationSiteOffset)
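
The dependent_code field added here is declared through V8's usual ACCESSORS macro. As a hedged simplification (the real macro in objects-inl.h is authoritative; this is an illustrative expansion, not its literal output), the added line produces a typed getter/setter pair over a fixed field offset:

    DependentCode* AllocationSite::dependent_code() {
      return DependentCode::cast(READ_FIELD(this, kDependentCodeOffset));
    }

    void AllocationSite::set_dependent_code(DependentCode* value,
                                            WriteBarrierMode mode) {
      WRITE_FIELD(this, kDependentCodeOffset, value);
      CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kDependentCodeOffset,
                                value, mode);
    }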
@@ -5729,19 +5692,23 @@
}
-bool JSReceiver::HasProperty(Name* name) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasPropertyWithHandler(name);
+bool JSReceiver::HasProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return GetPropertyAttribute(name) != ABSENT;
+ return object->GetPropertyAttribute(*name) != ABSENT;
}
-bool JSReceiver::HasLocalProperty(Name* name) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasPropertyWithHandler(name);
+bool JSReceiver::HasLocalProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return GetLocalPropertyAttribute(name) != ABSENT;
+ return object->GetLocalPropertyAttribute(*name) != ABSENT;
}
@@ -5783,21 +5750,23 @@
}
-bool JSReceiver::HasElement(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasElementWithHandler(index);
+bool JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasElementWithHandler(proxy, index);
}
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, true) != ABSENT;
+ return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver(
+ *object, index, true) != ABSENT;
}
-bool JSReceiver::HasLocalElement(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasElementWithHandler(index);
+bool JSReceiver::HasLocalElement(Handle<JSReceiver> object, uint32_t index) {
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ return JSProxy::HasElementWithHandler(proxy, index);
}
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, false) != ABSENT;
+ return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver(
+ *object, index, false) != ABSENT;
}
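
These four Has* conversions are the template for most of this patch: instance methods on a raw `this` become static functions over handles, so an allocating proxy trap inside the call can move objects without leaving the caller holding a stale pointer. A call site migrates like this (fragment; receiver and name are handles owned by an enclosing HandleScope):

    // Before: bool found = receiver->HasProperty(*name);   (raw, GC-unsafe)
    // After:
    bool found = JSReceiver::HasProperty(receiver, name);
    bool local = JSReceiver::HasLocalProperty(receiver, name);
    bool elem  = JSReceiver::HasElement(receiver, index);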
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 0b8fdfd..26a7098 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -1100,9 +1100,10 @@
HeapObject::PrintHeader(out, "AllocationSite");
PrintF(out, " - weak_next: ");
weak_next()->ShortPrint(out);
- PrintF(out, "\n");
+ PrintF(out, "\n - dependent code: ");
+ dependent_code()->ShortPrint(out);
- PrintF(out, " - transition_info: ");
+ PrintF(out, "\n - transition_info: ");
if (transition_info()->IsCell()) {
Cell* cell = Cell::cast(transition_info());
Object* cell_contents = cell->value();
diff --git a/src/objects.cc b/src/objects.cc
index 35646b8..67f26f0 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -455,18 +455,15 @@
StrictModeFlag strict_mode) {
Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
- CALL_HEAP_FUNCTION(isolate,
- proxy->SetPropertyWithHandler(
- *receiver, *name, *value, NONE, strict_mode),
- Object);
+ return SetPropertyWithHandler(
+ proxy, receiver, name, value, NONE, strict_mode);
}
-bool JSProxy::HasElementWithHandler(uint32_t index) {
- String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
- if (!maybe->To<String>(&name)) return maybe;
- return HasPropertyWithHandler(name);
+bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) {
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ return HasPropertyWithHandler(proxy, name);
}
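
The same hunk shows the error-handling payoff: a raw heap allocation returned a MaybeObject that had to be unpacked and propagated by hand, while a factory call hands back a usable handle (the factory retries after GC and aborts on genuine OOM), so the failure plumbing disappears:

    // Before:
    //   String* name;
    //   MaybeObject* maybe = GetHeap()->Uint32ToString(index);
    //   if (!maybe->To<String>(&name)) return maybe;
    // After (fragment, as in the hunk above):
    Handle<String> name = isolate->factory()->Uint32ToString(index);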
@@ -643,67 +640,56 @@
}
-Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object,
- LookupResult* result,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetNormalizedProperty(result, *value),
- Object);
-}
-
-
-MaybeObject* JSObject::SetNormalizedProperty(LookupResult* result,
- Object* value) {
- ASSERT(!HasFastProperties());
- if (IsGlobalObject()) {
- PropertyCell* cell = PropertyCell::cast(
- property_dictionary()->ValueAt(result->GetDictionaryEntry()));
- MaybeObject* maybe_type = cell->SetValueInferType(value);
- if (maybe_type->IsFailure()) return maybe_type;
+void JSObject::SetNormalizedProperty(Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Object> value) {
+ ASSERT(!object->HasFastProperties());
+ NameDictionary* property_dictionary = object->property_dictionary();
+ if (object->IsGlobalObject()) {
+ Handle<PropertyCell> cell(PropertyCell::cast(
+ property_dictionary->ValueAt(result->GetDictionaryEntry())));
+ PropertyCell::SetValueInferType(cell, value);
} else {
- property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value);
+ property_dictionary->ValueAtPut(result->GetDictionaryEntry(), *value);
}
- return value;
}
-Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object,
- Handle<Name> key,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetNormalizedProperty(*key, *value, details),
- Object);
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyDetails details) {
+ CALL_HEAP_FUNCTION(dict->GetIsolate(),
+ dict->Add(*name, *value, details),
+ NameDictionary);
}
-MaybeObject* JSObject::SetNormalizedProperty(Name* name,
- Object* value,
- PropertyDetails details) {
- ASSERT(!HasFastProperties());
- int entry = property_dictionary()->FindEntry(name);
+void JSObject::SetNormalizedProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyDetails details) {
+ ASSERT(!object->HasFastProperties());
+ Handle<NameDictionary> property_dictionary(object->property_dictionary());
+ int entry = property_dictionary->FindEntry(*name);
if (entry == NameDictionary::kNotFound) {
- Object* store_value = value;
- if (IsGlobalObject()) {
- Heap* heap = name->GetHeap();
- MaybeObject* maybe_store_value = heap->AllocatePropertyCell(value);
- if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
+ Handle<Object> store_value = value;
+ if (object->IsGlobalObject()) {
+ store_value = object->GetIsolate()->factory()->NewPropertyCell(value);
}
- Object* dict;
- { MaybeObject* maybe_dict =
- property_dictionary()->Add(name, store_value, details);
- if (!maybe_dict->ToObject(&dict)) return maybe_dict;
- }
- set_properties(NameDictionary::cast(dict));
- return value;
+ property_dictionary =
+ NameDictionaryAdd(property_dictionary, name, store_value, details);
+ object->set_properties(*property_dictionary);
+ return;
}
- PropertyDetails original_details = property_dictionary()->DetailsAt(entry);
+ PropertyDetails original_details = property_dictionary->DetailsAt(entry);
int enumeration_index;
// Preserve the enumeration index unless the property was deleted.
if (original_details.IsDeleted()) {
- enumeration_index = property_dictionary()->NextEnumerationIndex();
- property_dictionary()->SetNextEnumerationIndex(enumeration_index + 1);
+ enumeration_index = property_dictionary->NextEnumerationIndex();
+ property_dictionary->SetNextEnumerationIndex(enumeration_index + 1);
} else {
enumeration_index = original_details.dictionary_index();
ASSERT(enumeration_index > 0);
@@ -712,17 +698,15 @@
details = PropertyDetails(
details.attributes(), details.type(), enumeration_index);
- if (IsGlobalObject()) {
- PropertyCell* cell =
- PropertyCell::cast(property_dictionary()->ValueAt(entry));
- MaybeObject* maybe_type = cell->SetValueInferType(value);
- if (maybe_type->IsFailure()) return maybe_type;
+ if (object->IsGlobalObject()) {
+ Handle<PropertyCell> cell(
+ PropertyCell::cast(property_dictionary->ValueAt(entry)));
+ PropertyCell::SetValueInferType(cell, value);
// Please note we have to update the property details.
- property_dictionary()->DetailsAtPut(entry, details);
+ property_dictionary->DetailsAtPut(entry, details);
} else {
- property_dictionary()->SetEntry(entry, name, value, details);
+ property_dictionary->SetEntry(entry, *name, *value, details);
}
- return value;
}
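
NameDictionaryAdd above is the transitional idiom used throughout the patch: keep the MaybeObject-returning core, but wrap it in CALL_HEAP_FUNCTION, which retries the operation after a GC and converts the result to a handle. Its behavior, modeled as a runnable hedged sketch (the real macro cooperates with the V8 heap; all names below are stand-ins):

    #include <cstdio>
    #include <cstdlib>

    struct Result { bool failed; int value; };

    // Stand-in for the raw MaybeObject-returning operation.
    Result TryAllocate(int attempt) {
      return Result{attempt == 0, 42};   // first try "fails", retry works
    }

    int CallHeapFunctionModel() {
      for (int attempt = 0; attempt < 2; ++attempt) {
        Result r = TryAllocate(attempt);
        if (!r.failed) return r.value;   // success: hand back a "handle"
        std::puts("collect garbage, retry");
      }
      std::abort();                      // still failing: fatal OOM
    }

    int main() { std::printf("%d\n", CallHeapFunctionModel()); return 0; }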
@@ -733,12 +717,6 @@
}
-static void CellSetValueInferType(Handle<PropertyCell> cell,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION_VOID(cell->GetIsolate(), cell->SetValueInferType(*value));
-}
-
-
Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
Handle<Name> name,
DeleteMode mode) {
@@ -761,7 +739,8 @@
object->set_map(*new_map);
}
Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
- CellSetValueInferType(cell, isolate->factory()->the_hole_value());
+ Handle<Object> value = isolate->factory()->the_hole_value();
+ PropertyCell::SetValueInferType(cell, value);
dictionary->DetailsAtPut(entry, details.AsDeleted());
} else {
Handle<Object> deleted(dictionary->DeleteProperty(entry, mode), isolate);
@@ -1871,211 +1850,240 @@
}
-MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
- Name* name,
- Object* value,
- int field_index,
- Representation representation) {
- // This method is used to transition to a field. If we are transitioning to a
- // double field, allocate new storage.
- Object* storage;
- MaybeObject* maybe_storage =
- value->AllocateNewStorageFor(GetHeap(), representation);
- if (!maybe_storage->To(&storage)) return maybe_storage;
-
- if (map()->unused_property_fields() == 0) {
- int new_unused = new_map->unused_property_fields();
- FixedArray* values;
- MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + new_unused + 1);
- if (!maybe_values->To(&values)) return maybe_values;
-
- set_properties(values);
- }
-
- set_map(new_map);
-
- FastPropertyAtPut(field_index, storage);
- return value;
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<Object> NewStorageFor(Isolate* isolate,
+ Handle<Object> object,
+ Representation representation) {
+ Heap* heap = isolate->heap();
+ CALL_HEAP_FUNCTION(isolate,
+ object->AllocateNewStorageFor(heap, representation),
+ Object);
}
-MaybeObject* JSObject::AddFastProperty(Name* name,
- Object* value,
- PropertyAttributes attributes,
- StoreFromKeyed store_mode,
- ValueType value_type,
- TransitionFlag flag) {
- ASSERT(!IsJSGlobalProxy());
+void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
+ Handle<Map> new_map,
+ Handle<Name> name,
+ Handle<Object> value,
+ int field_index,
+ Representation representation) {
+ Isolate* isolate = object->GetIsolate();
+
+ // This method is used to transition to a field. If we are transitioning to a
+ // double field, allocate new storage.
+ Handle<Object> storage = NewStorageFor(isolate, value, representation);
+
+ if (object->map()->unused_property_fields() == 0) {
+ int new_unused = new_map->unused_property_fields();
+ Handle<FixedArray> properties(object->properties());
+ Handle<FixedArray> values = isolate->factory()->CopySizeFixedArray(
+ properties, properties->length() + new_unused + 1);
+ object->set_properties(*values);
+ }
+
+ object->set_map(*new_map);
+ object->FastPropertyAtPut(field_index, *storage);
+}
+
+
+static MaybeObject* CopyAddFieldDescriptor(Map* map,
+ Name* name,
+ int index,
+ PropertyAttributes attributes,
+ Representation representation,
+ TransitionFlag flag) {
+ Map* new_map;
+ FieldDescriptor new_field_desc(name, index, attributes, representation);
+ MaybeObject* maybe_map = map->CopyAddDescriptor(&new_field_desc, flag);
+ if (!maybe_map->To(&new_map)) return maybe_map;
+ int unused_property_fields = map->unused_property_fields() - 1;
+ if (unused_property_fields < 0) {
+ unused_property_fields += JSObject::kFieldsAdded;
+ }
+ new_map->set_unused_property_fields(unused_property_fields);
+ return new_map;
+}
+
+
+static Handle<Map> CopyAddFieldDescriptor(Handle<Map> map,
+ Handle<Name> name,
+ int index,
+ PropertyAttributes attributes,
+ Representation representation,
+ TransitionFlag flag) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ CopyAddFieldDescriptor(
+ *map, *name, index, attributes, representation, flag),
+ Map);
+}
+
+
+void JSObject::AddFastProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StoreFromKeyed store_mode,
+ ValueType value_type,
+ TransitionFlag flag) {
+ ASSERT(!object->IsJSGlobalProxy());
ASSERT(DescriptorArray::kNotFound ==
- map()->instance_descriptors()->Search(
- name, map()->NumberOfOwnDescriptors()));
+ object->map()->instance_descriptors()->Search(
+ *name, object->map()->NumberOfOwnDescriptors()));
// Normalize the object if the name is an actual name (not the
// hidden strings) and is not a real identifier.
// Normalize the object if it will have too many fast properties.
- Isolate* isolate = GetHeap()->isolate();
- if (!name->IsCacheable(isolate) || TooManyFastProperties(store_mode)) {
- MaybeObject* maybe_failure =
- NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (maybe_failure->IsFailure()) return maybe_failure;
- return AddSlowProperty(name, value, attributes);
+ Isolate* isolate = object->GetIsolate();
+ if (!name->IsCacheable(isolate) ||
+ object->TooManyFastProperties(store_mode)) {
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
+ AddSlowProperty(object, name, value, attributes);
+ return;
}
// Compute the new index for new field.
- int index = map()->NextFreePropertyIndex();
+ int index = object->map()->NextFreePropertyIndex();
// Allocate new instance descriptors with (name, index) added
- if (IsJSContextExtensionObject()) value_type = FORCE_TAGGED;
+ if (object->IsJSContextExtensionObject()) value_type = FORCE_TAGGED;
Representation representation = value->OptimalRepresentation(value_type);
+ Handle<Map> new_map = CopyAddFieldDescriptor(
+ handle(object->map()), name, index, attributes, representation, flag);
- FieldDescriptor new_field(name, index, attributes, representation);
-
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-
- int unused_property_fields = map()->unused_property_fields() - 1;
- if (unused_property_fields < 0) {
- unused_property_fields += kFieldsAdded;
- }
- new_map->set_unused_property_fields(unused_property_fields);
-
- return AddFastPropertyUsingMap(new_map, name, value, index, representation);
+ AddFastPropertyUsingMap(object, new_map, name, value, index, representation);
}
-MaybeObject* JSObject::AddConstantProperty(
- Name* name,
- Object* constant,
- PropertyAttributes attributes,
- TransitionFlag initial_flag) {
- // Allocate new instance descriptors with (name, constant) added
- ConstantDescriptor d(name, constant, attributes);
+static MaybeObject* CopyAddConstantDescriptor(Map* map,
+ Name* name,
+ Object* value,
+ PropertyAttributes attributes,
+ TransitionFlag flag) {
+ ConstantDescriptor new_constant_desc(name, value, attributes);
+ return map->CopyAddDescriptor(&new_constant_desc, flag);
+}
+
+static Handle<Map> CopyAddConstantDescriptor(Handle<Map> map,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ TransitionFlag flag) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ CopyAddConstantDescriptor(
+ *map, *name, *value, attributes, flag),
+ Map);
+}
+
+
+void JSObject::AddConstantProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> constant,
+ PropertyAttributes attributes,
+ TransitionFlag initial_flag) {
TransitionFlag flag =
// Do not add transitions to global objects.
- (IsGlobalObject() ||
+ (object->IsGlobalObject() ||
// Don't add transitions to special properties with non-trivial
// attributes.
attributes != NONE)
? OMIT_TRANSITION
: initial_flag;
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&d, flag);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ // Allocate new instance descriptors with (name, constant) added.
+ Handle<Map> new_map = CopyAddConstantDescriptor(
+ handle(object->map()), name, constant, attributes, flag);
- set_map(new_map);
- return constant;
+ object->set_map(*new_map);
}
-// Add property in slow mode
-MaybeObject* JSObject::AddSlowProperty(Name* name,
- Object* value,
- PropertyAttributes attributes) {
- ASSERT(!HasFastProperties());
- NameDictionary* dict = property_dictionary();
- Object* store_value = value;
- if (IsGlobalObject()) {
+void JSObject::AddSlowProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ ASSERT(!object->HasFastProperties());
+ Isolate* isolate = object->GetIsolate();
+ Handle<NameDictionary> dict(object->property_dictionary());
+ if (object->IsGlobalObject()) {
// In case name is an orphaned property, reuse the cell.
- int entry = dict->FindEntry(name);
+ int entry = dict->FindEntry(*name);
if (entry != NameDictionary::kNotFound) {
- store_value = dict->ValueAt(entry);
- MaybeObject* maybe_type =
- PropertyCell::cast(store_value)->SetValueInferType(value);
- if (maybe_type->IsFailure()) return maybe_type;
+ Handle<PropertyCell> cell(PropertyCell::cast(dict->ValueAt(entry)));
+ PropertyCell::SetValueInferType(cell, value);
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = dict->NextEnumerationIndex();
PropertyDetails details = PropertyDetails(attributes, NORMAL, index);
dict->SetNextEnumerationIndex(index + 1);
- dict->SetEntry(entry, name, store_value, details);
- return value;
+ dict->SetEntry(entry, *name, *cell, details);
+ return;
}
- Heap* heap = GetHeap();
- { MaybeObject* maybe_store_value =
- heap->AllocatePropertyCell(value);
- if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
- }
- MaybeObject* maybe_type =
- PropertyCell::cast(store_value)->SetValueInferType(value);
- if (maybe_type->IsFailure()) return maybe_type;
+ Handle<PropertyCell> cell = isolate->factory()->NewPropertyCell(value);
+ PropertyCell::SetValueInferType(cell, value);
+ value = cell;
}
PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
- Object* result;
- { MaybeObject* maybe_result = dict->Add(name, store_value, details);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- if (dict != result) set_properties(NameDictionary::cast(result));
- return value;
+ Handle<NameDictionary> result = NameDictionaryAdd(dict, name, value, details);
+ if (*dict != *result) object->set_properties(*result);
}
-MaybeObject* JSObject::AddProperty(Name* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode,
- ExtensibilityCheck extensibility_check,
- ValueType value_type,
- StoreMode mode,
- TransitionFlag transition_flag) {
- ASSERT(!IsJSGlobalProxy());
- Map* map_of_this = map();
- Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
- MaybeObject* result;
+Handle<Object> JSObject::AddProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ JSReceiver::StoreFromKeyed store_mode,
+ ExtensibilityCheck extensibility_check,
+ ValueType value_type,
+ StoreMode mode,
+ TransitionFlag transition_flag) {
+ ASSERT(!object->IsJSGlobalProxy());
+ Isolate* isolate = object->GetIsolate();
if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK &&
- !map_of_this->is_extensible()) {
+ !object->map()->is_extensible()) {
if (strict_mode == kNonStrictMode) {
return value;
} else {
- Handle<Object> args[1] = {Handle<Name>(name)};
- return isolate->Throw(
- *isolate->factory()->NewTypeError("object_not_extensible",
- HandleVector(args, 1)));
+ Handle<Object> args[1] = { name };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "object_not_extensible", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
- if (HasFastProperties()) {
+ if (object->HasFastProperties()) {
// Ensure the descriptor array does not get too big.
- if (map_of_this->NumberOfOwnDescriptors() <
+ if (object->map()->NumberOfOwnDescriptors() <
DescriptorArray::kMaxNumberOfDescriptors) {
// TODO(verwaest): Support other constants.
// if (mode == ALLOW_AS_CONSTANT &&
// !value->IsTheHole() &&
// !value->IsConsString()) {
if (value->IsJSFunction()) {
- result = AddConstantProperty(name, value, attributes, transition_flag);
+ AddConstantProperty(object, name, value, attributes, transition_flag);
} else {
- result = AddFastProperty(
- name, value, attributes, store_mode, value_type, transition_flag);
+ AddFastProperty(object, name, value, attributes, store_mode,
+ value_type, transition_flag);
}
} else {
// Normalize the object to prevent very large instance descriptors.
// This eliminates unwanted N^2 allocation and lookup behavior.
- Object* obj;
- MaybeObject* maybe = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe->To(&obj)) return maybe;
- result = AddSlowProperty(name, value, attributes);
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
+ AddSlowProperty(object, name, value, attributes);
}
} else {
- result = AddSlowProperty(name, value, attributes);
+ AddSlowProperty(object, name, value, attributes);
}
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (FLAG_harmony_observation && map()->is_observed()) {
- EnqueueChangeRecord(handle(this, isolate),
- "new",
- handle(name, isolate),
- handle(heap->the_hole_value(), isolate));
+ if (FLAG_harmony_observation && object->map()->is_observed()) {
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
+ EnqueueChangeRecord(object, "new", name, old_value);
}
- return *hresult;
+ return value;
}
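
AddProperty also fixes the exception convention for the handlified world: a thrown TypeError is signalled by isolate->Throw() followed by returning an empty handle, rather than by a Failure travelling through MaybeObject. Callers test for that null handle, e.g. (fragment, hedged):

    Handle<Object> result = JSObject::AddProperty(
        object, name, value, attributes, strict_mode);
    if (result.is_null()) {
      // Exception already pending on the isolate; unwind. Many call sites
      // in this patch spell this check RETURN_IF_EMPTY_HANDLE.
      return Handle<Object>();
    }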
@@ -2115,37 +2123,39 @@
}
-MaybeObject* JSObject::SetPropertyPostInterceptor(
- Name* name,
- Object* value,
+Handle<Object> JSObject::SetPropertyPostInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreMode mode) {
+ StrictModeFlag strict_mode) {
// Check local property, ignore interceptor.
- LookupResult result(GetIsolate());
- LocalLookupRealNamedProperty(name, &result);
- if (!result.IsFound()) map()->LookupTransition(this, name, &result);
+ LookupResult result(object->GetIsolate());
+ object->LocalLookupRealNamedProperty(*name, &result);
+ if (!result.IsFound()) {
+ object->map()->LookupTransition(*object, *name, &result);
+ }
if (result.IsFound()) {
// An existing property or a map transition was found. Use set property to
// handle all these cases.
- return SetProperty(&result, name, value, attributes, strict_mode);
+ return SetPropertyForResult(object, &result, name, value, attributes,
+ strict_mode, MAY_BE_STORE_FROM_KEYED);
}
bool done = false;
- MaybeObject* result_object =
- SetPropertyViaPrototypes(name, value, attributes, strict_mode, &done);
+ Handle<Object> result_object = SetPropertyViaPrototypes(
+ object, name, value, attributes, strict_mode, &done);
if (done) return result_object;
// Add a new real property.
- return AddProperty(name, value, attributes, strict_mode,
- MAY_BE_STORE_FROM_KEYED, PERFORM_EXTENSIBILITY_CHECK,
- OPTIMAL_REPRESENTATION, mode);
+ return AddProperty(object, name, value, attributes, strict_mode);
}
-MaybeObject* JSObject::ReplaceSlowProperty(Name* name,
- Object* value,
- PropertyAttributes attributes) {
- NameDictionary* dictionary = property_dictionary();
- int old_index = dictionary->FindEntry(name);
+static void ReplaceSlowProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ NameDictionary* dictionary = object->property_dictionary();
+ int old_index = dictionary->FindEntry(*name);
int new_enumeration_index = 0; // 0 means "Use the next available index."
if (old_index != -1) {
// All calls to ReplaceSlowProperty have had all transitions removed.
@@ -2153,7 +2163,7 @@
}
PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
- return SetNormalizedProperty(name, value, new_details);
+ JSObject::SetNormalizedProperty(object, name, value, new_details);
}
@@ -2260,6 +2270,11 @@
}
+void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
+ CALL_HEAP_FUNCTION_VOID(object->GetIsolate(), object->MigrateToMap(*new_map));
+}
+
+
// To migrate an instance to a map:
// - First check whether the instance needs to be rewritten. If not, simply
// change the map.
@@ -2361,17 +2376,14 @@
}
-MaybeObject* JSObject::GeneralizeFieldRepresentation(
- int modify_index,
- Representation new_representation,
- StoreMode store_mode) {
- Map* new_map;
- MaybeObject* maybe_new_map = map()->GeneralizeRepresentation(
- modify_index, new_representation, store_mode);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- if (map() == new_map) return this;
-
- return MigrateToMap(new_map);
+void JSObject::GeneralizeFieldRepresentation(Handle<JSObject> object,
+ int modify_index,
+ Representation new_representation,
+ StoreMode store_mode) {
+ Handle<Map> new_map = Map::GeneralizeRepresentation(
+ handle(object->map()), modify_index, new_representation, store_mode);
+ if (object->map() == *new_map) return;
+ return MigrateToMap(object, new_map);
}
@@ -2385,14 +2397,12 @@
}
-MaybeObject* Map::CopyGeneralizeAllRepresentations(
- int modify_index,
- StoreMode store_mode,
- PropertyAttributes attributes,
- const char* reason) {
- Map* new_map;
- MaybeObject* maybe_map = this->Copy();
- if (!maybe_map->To(&new_map)) return maybe_map;
+Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
+ int modify_index,
+ StoreMode store_mode,
+ PropertyAttributes attributes,
+ const char* reason) {
+ Handle<Map> new_map = Copy(map);
DescriptorArray* descriptors = new_map->instance_descriptors();
descriptors->InitializeRepresentations(Representation::Tagged());
@@ -2414,7 +2424,7 @@
}
if (FLAG_trace_generalization) {
- PrintGeneralization(stdout, reason, modify_index,
+ map->PrintGeneralization(stdout, reason, modify_index,
new_map->NumberOfOwnDescriptors(),
new_map->NumberOfOwnDescriptors(),
details.type() == CONSTANT && store_mode == FORCE_FIELD,
@@ -2562,11 +2572,11 @@
// - If |updated| == |split_map|, |updated| is in the expected state. Return it.
// - Otherwise, invalidate the outdated transition target from |updated|, and
// replace its transition tree with a new branch for the updated descriptors.
-MaybeObject* Map::GeneralizeRepresentation(int modify_index,
- Representation new_representation,
- StoreMode store_mode) {
- Map* old_map = this;
- DescriptorArray* old_descriptors = old_map->instance_descriptors();
+Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
+ int modify_index,
+ Representation new_representation,
+ StoreMode store_mode) {
+ Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
Representation old_representation = old_details.representation();
@@ -2582,37 +2592,37 @@
}
int descriptors = old_map->NumberOfOwnDescriptors();
- Map* root_map = old_map->FindRootMap();
+ Handle<Map> root_map(old_map->FindRootMap());
// Check the state of the root map.
- if (!old_map->EquivalentToForTransition(root_map)) {
- return CopyGeneralizeAllRepresentations(
- modify_index, store_mode, old_details.attributes(), "not equivalent");
+ if (!old_map->EquivalentToForTransition(*root_map)) {
+ return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+ old_details.attributes(), "not equivalent");
}
int verbatim = root_map->NumberOfOwnDescriptors();
if (store_mode != ALLOW_AS_CONSTANT && modify_index < verbatim) {
- return CopyGeneralizeAllRepresentations(
- modify_index, store_mode,
+ return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
old_details.attributes(), "root modification");
}
- Map* updated = root_map->FindUpdatedMap(
- verbatim, descriptors, old_descriptors);
- if (updated == NULL) {
- return CopyGeneralizeAllRepresentations(
- modify_index, store_mode, old_details.attributes(), "incompatible");
+ Map* raw_updated = root_map->FindUpdatedMap(
+ verbatim, descriptors, *old_descriptors);
+ if (raw_updated == NULL) {
+ return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+ old_details.attributes(), "incompatible");
}
- DescriptorArray* updated_descriptors = updated->instance_descriptors();
+ Handle<Map> updated(raw_updated);
+ Handle<DescriptorArray> updated_descriptors(updated->instance_descriptors());
int valid = updated->NumberOfOwnDescriptors();
// Directly change the map if the target map is more general. Ensure that the
// target type of the modify_index is a FIELD, unless we are migrating.
if (updated_descriptors->IsMoreGeneralThan(
- verbatim, valid, descriptors, old_descriptors) &&
+ verbatim, valid, descriptors, *old_descriptors) &&
(store_mode == ALLOW_AS_CONSTANT ||
updated_descriptors->GetDetails(modify_index).type() == FIELD)) {
Representation updated_representation =
@@ -2620,10 +2630,9 @@
if (new_representation.fits_into(updated_representation)) return updated;
}
- DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors = updated_descriptors->Merge(
- verbatim, valid, descriptors, modify_index, store_mode, old_descriptors);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+ Handle<DescriptorArray> new_descriptors = DescriptorArray::Merge(
+ updated_descriptors, verbatim, valid, descriptors, modify_index,
+ store_mode, old_descriptors);
ASSERT(store_mode == ALLOW_AS_CONSTANT ||
new_descriptors->GetDetails(modify_index).type() == FIELD);
@@ -2635,8 +2644,8 @@
new_descriptors->SetRepresentation(modify_index, updated_representation);
}
- Map* split_map = root_map->FindLastMatchMap(
- verbatim, descriptors, new_descriptors);
+ Handle<Map> split_map(root_map->FindLastMatchMap(
+ verbatim, descriptors, *new_descriptors));
int split_descriptors = split_map->NumberOfOwnDescriptors();
// This is shadowed by |updated_descriptors| being more general than
@@ -2645,28 +2654,20 @@
int descriptor = split_descriptors;
split_map->DeprecateTarget(
- old_descriptors->GetKey(descriptor), new_descriptors);
+ old_descriptors->GetKey(descriptor), *new_descriptors);
if (FLAG_trace_generalization) {
- PrintGeneralization(
+ old_map->PrintGeneralization(
stdout, "", modify_index, descriptor, descriptors,
old_descriptors->GetDetails(modify_index).type() == CONSTANT &&
store_mode == FORCE_FIELD,
old_representation, updated_representation);
}
- Map* new_map = split_map;
// Add missing transitions.
+ Handle<Map> new_map = split_map;
for (; descriptor < descriptors; descriptor++) {
- MaybeObject* maybe_map = new_map->CopyInstallDescriptors(
- descriptor, new_descriptors);
- if (!maybe_map->To(&new_map)) {
- // Create a handle for the last created map to ensure it stays alive
- // during GC. Its descriptor array is too large, but it will be
- // overwritten during retry anyway.
- Handle<Map>(new_map);
- return maybe_map;
- }
+ new_map = Map::CopyInstallDescriptors(new_map, descriptor, new_descriptors);
new_map->set_migration_target(true);
}
@@ -2703,94 +2704,66 @@
}
-MaybeObject* JSObject::SetPropertyWithInterceptor(
- Name* name,
- Object* value,
+Handle<Object> JSObject::SetPropertyWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return value;
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSObject> this_handle(this);
- Handle<String> name_handle(String::cast(name));
- Handle<Object> value_handle(value, isolate);
- Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
+ Isolate* isolate = object->GetIsolate();
+ Handle<String> name_string = Handle<String>::cast(name);
+ Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
if (!interceptor->setter()->IsUndefined()) {
- LOG(isolate, ApiNamedPropertyAccess("interceptor-named-set", this, name));
- PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-set", *object, *name));
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *object, *object);
v8::NamedPropertySetterCallback setter =
v8::ToCData<v8::NamedPropertySetterCallback>(interceptor->setter());
- Handle<Object> value_unhole(value->IsTheHole() ?
- isolate->heap()->undefined_value() :
- value,
- isolate);
+ Handle<Object> value_unhole = value->IsTheHole()
+ ? Handle<Object>(isolate->factory()->undefined_value()) : value;
v8::Handle<v8::Value> result = args.Call(setter,
- v8::Utils::ToLocal(name_handle),
+ v8::Utils::ToLocal(name_string),
v8::Utils::ToLocal(value_unhole));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) return *value_handle;
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (!result.IsEmpty()) return value;
}
- MaybeObject* raw_result =
- this_handle->SetPropertyPostInterceptor(*name_handle,
- *value_handle,
- attributes,
- strict_mode);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
+ Handle<Object> result =
+ SetPropertyPostInterceptor(object, name, value, attributes, strict_mode);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return result;
}
Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
- Handle<Name> key,
+ Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetProperty(*key, *value, attributes, strict_mode),
- Object);
-}
-
-
-MaybeObject* JSReceiver::SetPropertyOrFail(
- Handle<JSReceiver> object,
- Handle<Name> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode) {
- CALL_HEAP_FUNCTION_PASS_EXCEPTION(
- object->GetIsolate(),
- object->SetProperty(*key, *value, attributes, strict_mode, store_mode));
-}
-
-
-MaybeObject* JSReceiver::SetProperty(Name* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode) {
- LookupResult result(GetIsolate());
- LocalLookup(name, &result, true);
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode) {
+ LookupResult result(object->GetIsolate());
+ object->LocalLookup(*name, &result, true);
if (!result.IsFound()) {
- map()->LookupTransition(JSObject::cast(this), name, &result);
+ object->map()->LookupTransition(JSObject::cast(*object), *name, &result);
}
- return SetProperty(&result, name, value, attributes, strict_mode, store_mode);
+ return SetProperty(object, &result, name, value, attributes, strict_mode,
+ store_mode);
}
-MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
- Name* name,
- Object* value,
- JSObject* holder,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
+Handle<Object> JSObject::SetPropertyWithCallback(Handle<JSObject> object,
+ Handle<Object> structure,
+ Handle<Name> name,
+ Handle<Object> value,
+ Handle<JSObject> holder,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = object->GetIsolate();
// We should never get here to initialize a const with the hole
// value since a const declaration would conflict with the setter.
ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value, isolate);
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually foreign
@@ -2798,26 +2771,27 @@
if (structure->IsForeign()) {
AccessorDescriptor* callback =
reinterpret_cast<AccessorDescriptor*>(
- Foreign::cast(structure)->foreign_address());
- MaybeObject* obj = (callback->setter)(
- isolate, this, value, callback->data);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (obj->IsFailure()) return obj;
- return *value_handle;
+ Handle<Foreign>::cast(structure)->foreign_address());
+ CALL_AND_RETRY_OR_DIE(isolate,
+ (callback->setter)(
+ isolate, *object, *value, callback->data),
+ break,
+ return Handle<Object>());
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
if (structure->IsExecutableAccessorInfo()) {
// api style callbacks
- ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure);
- if (!data->IsCompatibleReceiver(this)) {
- Handle<Object> name_handle(name, isolate);
- Handle<Object> receiver_handle(this, isolate);
- Handle<Object> args[2] = { name_handle, receiver_handle };
+ ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(*structure);
+ if (!data->IsCompatibleReceiver(*object)) {
+ Handle<Object> args[2] = { name, object };
Handle<Object> error =
isolate->factory()->NewTypeError("incompatible_method_receiver",
HandleVector(args,
ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
// TODO(rossberg): Support symbols in the API.
if (name->IsSymbol()) return value;
@@ -2825,32 +2799,33 @@
v8::AccessorSetterCallback call_fun =
v8::ToCData<v8::AccessorSetterCallback>(call_obj);
if (call_fun == NULL) return value;
- Handle<String> key(String::cast(name));
- LOG(isolate, ApiNamedPropertyAccess("store", this, name));
+ Handle<String> key = Handle<String>::cast(name);
+ LOG(isolate, ApiNamedPropertyAccess("store", *object, *name));
PropertyCallbackArguments args(
- isolate, data->data(), this, JSObject::cast(holder));
+ isolate, data->data(), *object, JSObject::cast(*holder));
args.Call(call_fun,
v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value_handle;
+ v8::Utils::ToLocal(value));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
if (structure->IsAccessorPair()) {
- Object* setter = AccessorPair::cast(structure)->setter();
+ Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
if (setter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(JSReceiver::cast(setter), value);
+ return SetPropertyWithDefinedSetter(
+ object, Handle<JSReceiver>::cast(setter), value);
} else {
if (strict_mode == kNonStrictMode) {
return value;
}
- Handle<Name> key(name);
- Handle<Object> holder_handle(holder, isolate);
- Handle<Object> args[2] = { key, holder_handle };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
+ Handle<Object> args[2] = { name, holder };
+ Handle<Object> error =
+ isolate->factory()->NewTypeError("no_setter_in_callback",
+ HandleVector(args, 2));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
@@ -2860,32 +2835,33 @@
}
UNREACHABLE();
- return NULL;
+ return Handle<Object>();
}
-MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter,
- Object* value) {
- Isolate* isolate = GetIsolate();
- Handle<Object> value_handle(value, isolate);
- Handle<JSReceiver> fun(setter, isolate);
- Handle<JSReceiver> self(this, isolate);
+Handle<Object> JSReceiver::SetPropertyWithDefinedSetter(
+ Handle<JSReceiver> object,
+ Handle<JSReceiver> setter,
+ Handle<Object> value) {
+ Isolate* isolate = object->GetIsolate();
+
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = isolate->debug();
// Handle stepping into a setter if step into is active.
// TODO(rossberg): should this apply to getters that are function proxies?
- if (debug->StepInActive() && fun->IsJSFunction()) {
+ if (debug->StepInActive() && setter->IsJSFunction()) {
debug->HandleStepIn(
- Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
+ Handle<JSFunction>::cast(setter), Handle<Object>::null(), 0, false);
}
#endif
+
bool has_pending_exception;
- Handle<Object> argv[] = { value_handle };
+ Handle<Object> argv[] = { value };
Execution::Call(
- isolate, fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
+ isolate, setter, object, ARRAY_SIZE(argv), argv, &has_pending_exception);
// Check for pending exception and return the result.
- if (has_pending_exception) return Failure::Exception();
- return *value_handle;
+ if (has_pending_exception) return Handle<Object>();
+ return value;
}
@@ -2899,14 +2875,16 @@
pt != heap->null_value();
pt = pt->GetPrototype(GetIsolate())) {
if (pt->IsJSProxy()) {
- String* name;
- MaybeObject* maybe = heap->Uint32ToString(index);
- if (!maybe->To<String>(&name)) {
- *found = true; // Force abort
- return maybe;
- }
- return JSProxy::cast(pt)->SetPropertyViaPrototypesWithHandler(
- this, name, value, NONE, strict_mode, found);
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<JSProxy> proxy(JSProxy::cast(pt));
+ Handle<JSObject> self(this, isolate);
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ Handle<Object> value_handle(value, isolate);
+ Handle<Object> result = JSProxy::SetPropertyViaPrototypesWithHandler(
+ proxy, self, name, value_handle, NONE, strict_mode, found);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
if (!JSObject::cast(pt)->HasDictionaryElements()) {
continue;
@@ -2918,11 +2896,16 @@
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
*found = true;
- return SetElementWithCallback(dictionary->ValueAt(entry),
- index,
- value,
- JSObject::cast(pt),
- strict_mode);
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<JSObject> self(this, isolate);
+ Handle<Object> structure(dictionary->ValueAt(entry), isolate);
+ Handle<Object> value_handle(value, isolate);
+ Handle<JSObject> holder(JSObject::cast(pt));
+ Handle<Object> result = SetElementWithCallback(
+ self, structure, index, value_handle, holder, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
}
}
@@ -2930,21 +2913,21 @@
return heap->the_hole_value();
}
-MaybeObject* JSObject::SetPropertyViaPrototypes(
- Name* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- bool* done) {
- Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
+
+Handle<Object> JSObject::SetPropertyViaPrototypes(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ bool* done) {
+ Isolate* isolate = object->GetIsolate();
*done = false;
// We could not find a local property so let's check whether there is an
// accessor that wants to handle the property, or whether the property is
// read-only on the prototype chain.
LookupResult result(isolate);
- LookupRealNamedPropertyInPrototypes(name, &result);
+ object->LookupRealNamedPropertyInPrototypes(*name, &result);
if (result.IsFound()) {
switch (result.type()) {
case NORMAL:
@@ -2955,19 +2938,21 @@
case INTERCEPTOR: {
PropertyAttributes attr =
result.holder()->GetPropertyAttributeWithInterceptor(
- this, name, true);
+ *object, *name, true);
*done = !!(attr & READ_ONLY);
break;
}
case CALLBACKS: {
if (!FLAG_es5_readonly && result.IsReadOnly()) break;
*done = true;
- return SetPropertyWithCallback(result.GetCallbackObject(),
- name, value, result.holder(), strict_mode);
+ Handle<Object> callback_object(result.GetCallbackObject(), isolate);
+ return SetPropertyWithCallback(object, callback_object, name, value,
+ handle(result.holder()), strict_mode);
}
case HANDLER: {
- return result.proxy()->SetPropertyViaPrototypesWithHandler(
- this, name, value, attributes, strict_mode, done);
+ Handle<JSProxy> proxy(result.proxy());
+ return JSProxy::SetPropertyViaPrototypesWithHandler(
+ proxy, object, name, value, attributes, strict_mode, done);
}
case TRANSITION:
case NONEXISTENT:
@@ -2980,12 +2965,13 @@
if (!FLAG_es5_readonly) *done = false;
if (*done) {
if (strict_mode == kNonStrictMode) return value;
- Handle<Object> args[] = { Handle<Object>(name, isolate),
- Handle<Object>(this, isolate)};
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
+ Handle<Object> args[] = { name, object };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
- return heap->the_hole_value();
+ return isolate->factory()->the_hole_value();
}
@@ -3340,14 +3326,15 @@
// We only need to deal with CALLBACKS and INTERCEPTORS
-MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
+Handle<Object> JSObject::SetPropertyWithFailedAccessCheck(
+ Handle<JSObject> object,
LookupResult* result,
- Name* name,
- Object* value,
+ Handle<Name> name,
+ Handle<Object> value,
bool check_prototype,
StrictModeFlag strict_mode) {
if (check_prototype && !result->IsProperty()) {
- LookupRealNamedPropertyInPrototypes(name, result);
+ object->LookupRealNamedPropertyInPrototypes(*name, result);
}
if (result->IsProperty()) {
@@ -3356,21 +3343,23 @@
case CALLBACKS: {
Object* obj = result->GetCallbackObject();
if (obj->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(obj);
+ Handle<AccessorInfo> info(AccessorInfo::cast(obj));
if (info->all_can_write()) {
- return SetPropertyWithCallback(result->GetCallbackObject(),
+ return SetPropertyWithCallback(object,
+ info,
name,
value,
- result->holder(),
+ handle(result->holder()),
strict_mode);
}
} else if (obj->IsAccessorPair()) {
- AccessorPair* pair = AccessorPair::cast(obj);
+ Handle<AccessorPair> pair(AccessorPair::cast(obj));
if (pair->all_can_read()) {
- return SetPropertyWithCallback(result->GetCallbackObject(),
+ return SetPropertyWithCallback(object,
+ pair,
name,
value,
- result->holder(),
+ handle(result->holder()),
strict_mode);
}
}
@@ -3379,10 +3368,11 @@
case INTERCEPTOR: {
// Try to look up real named properties. Note that the only properties
// that can be set this way are callbacks marked as ALL_CAN_WRITE on the
// prototype chain.
- LookupResult r(GetIsolate());
- LookupRealNamedProperty(name, &r);
+ LookupResult r(object->GetIsolate());
+ object->LookupRealNamedProperty(*name, &r);
if (r.IsProperty()) {
- return SetPropertyWithFailedAccessCheck(&r,
+ return SetPropertyWithFailedAccessCheck(object,
+ &r,
name,
value,
check_prototype,
@@ -3397,42 +3387,38 @@
}
}
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> value_handle(value, isolate);
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value_handle;
+ Isolate* isolate = object->GetIsolate();
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
-MaybeObject* JSReceiver::SetProperty(LookupResult* result,
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- JSReceiver::StoreFromKeyed store_mode) {
+Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
+ LookupResult* result,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode) {
if (result->IsHandler()) {
- return result->proxy()->SetPropertyWithHandler(
- this, key, value, attributes, strict_mode);
+ return JSProxy::SetPropertyWithHandler(handle(result->proxy()),
+ object, key, value, attributes, strict_mode);
} else {
- return JSObject::cast(this)->SetPropertyForResult(
+ return JSObject::SetPropertyForResult(Handle<JSObject>::cast(object),
result, key, value, attributes, strict_mode, store_mode);
}
}
-bool JSProxy::HasPropertyWithHandler(Name* name_raw) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> receiver(this, isolate);
- Handle<Object> name(name_raw, isolate);
+bool JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name) {
+ Isolate* isolate = proxy->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (name->IsSymbol()) return false;
Handle<Object> args[] = { name };
- Handle<Object> result = CallTrap(
+ Handle<Object> result = proxy->CallTrap(
"has", isolate->derived_has_trap(), ARRAY_SIZE(args), args);
if (isolate->has_pending_exception()) return false;
@@ -3440,58 +3426,51 @@
}
-MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler(
- JSReceiver* receiver_raw,
- Name* name_raw,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSReceiver> receiver(receiver_raw);
- Handle<Object> name(name_raw, isolate);
- Handle<Object> value(value_raw, isolate);
+Handle<Object> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = proxy->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return *value;
+ if (name->IsSymbol()) return value;
Handle<Object> args[] = { receiver, name, value };
- CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
+ proxy->CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
+ if (isolate->has_pending_exception()) return Handle<Object>();
- return *value;
+ return value;
}
-MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
- JSReceiver* receiver_raw,
- Name* name_raw,
- Object* value_raw,
+Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool* done) {
- Isolate* isolate = GetIsolate();
- Handle<JSProxy> proxy(this);
- Handle<JSReceiver> receiver(receiver_raw);
- Handle<Name> name(name_raw);
- Handle<Object> value(value_raw, isolate);
- Handle<Object> handler(this->handler(), isolate); // Trap might morph proxy.
+ Isolate* isolate = proxy->GetIsolate();
+ Handle<Object> handler(proxy->handler(), isolate); // Trap might morph proxy.
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (name->IsSymbol()) {
*done = false;
- return isolate->heap()->the_hole_value();
+ return isolate->factory()->the_hole_value();
}
*done = true; // except where redefined...
Handle<Object> args[] = { name };
Handle<Object> result = proxy->CallTrap(
"getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
- if (isolate->has_pending_exception()) return Failure::Exception();
+ if (isolate->has_pending_exception()) return Handle<Object>();
if (result->IsUndefined()) {
*done = false;
- return isolate->heap()->the_hole_value();
+ return isolate->factory()->the_hole_value();
}
// Emulate [[GetProperty]] semantics for proxies.
@@ -3500,7 +3479,7 @@
Handle<Object> desc = Execution::Call(
isolate, isolate->to_complete_property_descriptor(), result,
ARRAY_SIZE(argv), argv, &has_pending_exception);
- if (has_pending_exception) return Failure::Exception();
+ if (has_pending_exception) return Handle<Object>();
// [[GetProperty]] requires checking that all properties are configurable.
Handle<String> configurable_name =
@@ -3517,7 +3496,8 @@
Handle<Object> args[] = { handler, trap, name };
Handle<Object> error = isolate->factory()->NewTypeError(
"proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
ASSERT(configurable->IsTrue());
@@ -3538,12 +3518,13 @@
ASSERT(!isolate->has_pending_exception());
ASSERT(writable->IsTrue() || writable->IsFalse());
*done = writable->IsFalse();
- if (!*done) return GetHeap()->the_hole_value();
- if (strict_mode == kNonStrictMode) return *value;
+ if (!*done) return isolate->factory()->the_hole_value();
+ if (strict_mode == kNonStrictMode) return value;
Handle<Object> args[] = { name, receiver };
Handle<Object> error = isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
// We have an AccessorDescriptor.
@@ -3553,15 +3534,16 @@
ASSERT(!isolate->has_pending_exception());
if (!setter->IsUndefined()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
- return receiver->SetPropertyWithDefinedSetter(
- JSReceiver::cast(*setter), *value);
+ return SetPropertyWithDefinedSetter(
+ receiver, Handle<JSReceiver>::cast(setter), value);
}
- if (strict_mode == kNonStrictMode) return *value;
+ if (strict_mode == kNonStrictMode) return value;
Handle<Object> args2[] = { name, proxy };
Handle<Object> error = isolate->factory()->NewTypeError(
"no_setter_in_callback", HandleVector(args2, ARRAY_SIZE(args2)));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
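
The same convention changes how exceptions propagate: instead of returning isolate->Throw(*error) directly, the handlified code records the pending exception and then returns an empty handle. A self-contained sketch of that shape, using simplified stand-in types (this Isolate, Handle, and message string are illustrative, not V8's real API):

#include <cstdio>
#include <string>

struct Isolate {
  bool has_pending_exception = false;
  std::string pending_message;
  void Throw(const std::string& message) {   // records, does not unwind
    has_pending_exception = true;
    pending_message = message;
  }
};

template <typename T> struct Handle {
  T* ptr = nullptr;
  bool is_null() const { return ptr == nullptr; }
};

struct Object { int value; };

Handle<Object> SetReadOnlyStrict(Isolate* isolate) {
  // Old: return isolate->Throw(*error);   // returned a failure sentinel
  // New: record the exception, then signal it via an empty handle.
  isolate->Throw("strict_read_only_property");
  return Handle<Object>();
}

int main() {
  Isolate isolate;
  Handle<Object> result = SetReadOnlyStrict(&isolate);
  if (result.is_null() && isolate.has_pending_exception) {
    std::printf("pending: %s\n", isolate.pending_message.c_str());
  }
  return 0;
}
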
@@ -3726,44 +3708,68 @@
}
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<Map> MapAsElementsKind(Handle<Map> map, ElementsKind kind) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(), map->AsElementsKind(kind), Map);
+}
+
+
void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->AllocateStorageForMap(*map));
+ ASSERT(object->map()->inobject_properties() == map->inobject_properties());
+ ElementsKind obj_kind = object->map()->elements_kind();
+ ElementsKind map_kind = map->elements_kind();
+ if (map_kind != obj_kind) {
+ ElementsKind to_kind = map_kind;
+ if (IsMoreGeneralElementsKindTransition(map_kind, obj_kind) ||
+ IsDictionaryElementsKind(obj_kind)) {
+ to_kind = obj_kind;
+ }
+ if (IsDictionaryElementsKind(to_kind)) {
+ NormalizeElements(object);
+ } else {
+ TransitionElementsKind(object, to_kind);
+ }
+ map = MapAsElementsKind(map, to_kind);
+ }
+ int total_size =
+ map->NumberOfOwnDescriptors() + map->unused_property_fields();
+ int out_of_object = total_size - map->inobject_properties();
+ if (out_of_object != object->properties()->length()) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<FixedArray> new_properties = isolate->factory()->CopySizeFixedArray(
+ handle(object->properties()), out_of_object);
+ object->set_properties(*new_properties);
+ }
+ object->set_map(*map);
}
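
The new AllocateStorageForMap body first reconciles the object's elements kind with the target map's kind before installing the map: keep the more general of the two kinds, or stay in dictionary mode if the object already degraded. A toy model of that choice — the enum, its ordering, and the helper names below are assumptions for illustration only:

#include <cstdio>

enum ElementsKind { SMI = 0, DOUBLE = 1, OBJECT = 2, DICTIONARY = 3 };

// In this toy model a larger value stands for a more general kind.
bool IsMoreGeneral(ElementsKind from, ElementsKind to) { return to > from; }
bool IsDictionary(ElementsKind k) { return k == DICTIONARY; }

ElementsKind ChooseTargetKind(ElementsKind obj_kind, ElementsKind map_kind) {
  if (map_kind == obj_kind) return map_kind;
  // Prefer the object's kind if it is more general than the map's kind,
  // or if the object has already fallen back to dictionary elements.
  if (IsMoreGeneral(map_kind, obj_kind) || IsDictionary(obj_kind)) {
    return obj_kind;
  }
  return map_kind;
}

int main() {
  std::printf("%d\n", ChooseTargetKind(OBJECT, SMI));      // 2: keep OBJECT
  std::printf("%d\n", ChooseTargetKind(DICTIONARY, SMI));  // 3: stay slow
  std::printf("%d\n", ChooseTargetKind(SMI, DOUBLE));      // 1: take map's
  return 0;
}
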
void JSObject::MigrateInstance(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->MigrateInstance());
+ // Converting any field to the most specific type will cause the
+ // GeneralizeFieldRepresentation algorithm to create the most general existing
+ // transition that matches the object. This achieves what is needed.
+ Handle<Map> original_map(object->map());
+ GeneralizeFieldRepresentation(
+ object, 0, Representation::None(), ALLOW_AS_CONSTANT);
+ if (FLAG_trace_migration) {
+ object->PrintInstanceMigration(stdout, *original_map, object->map());
+ }
}
Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->MigrateInstance(),
- Object);
+ MigrateInstance(object);
+ return object;
}
-Handle<Map> Map::GeneralizeRepresentation(Handle<Map> map,
- int modify_index,
- Representation representation,
- StoreMode store_mode) {
- CALL_HEAP_FUNCTION(
- map->GetIsolate(),
- map->GeneralizeRepresentation(modify_index, representation, store_mode),
- Map);
-}
-
-
-static MaybeObject* SetPropertyUsingTransition(LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes) {
- Map* transition_map = lookup->GetTransitionTarget();
+Handle<Object> JSObject::SetPropertyUsingTransition(
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ Handle<Map> transition_map(lookup->GetTransitionTarget());
int descriptor = transition_map->LastAdded();
DescriptorArray* descriptors = transition_map->instance_descriptors();
@@ -3773,8 +3779,8 @@
// AddProperty will either normalize the object, or create a new fast copy
// of the map. If we get a fast copy of the map, all field representations
// will be tagged since the transition is omitted.
- return lookup->holder()->AddProperty(
- *name, *value, attributes, kNonStrictMode,
+ return JSObject::AddProperty(
+ object, name, value, attributes, kNonStrictMode,
JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED,
JSReceiver::OMIT_EXTENSIBILITY_CHECK,
JSObject::FORCE_TAGGED, FORCE_FIELD, OMIT_TRANSITION);
@@ -3785,45 +3791,41 @@
// (value->IsUninitialized) as constant.
if (details.type() == CONSTANT &&
descriptors->GetValue(descriptor) == *value) {
- lookup->holder()->set_map(transition_map);
- return *value;
+ object->set_map(*transition_map);
+ return value;
}
Representation representation = details.representation();
if (!value->FitsRepresentation(representation) ||
details.type() == CONSTANT) {
- MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
+ transition_map = Map::GeneralizeRepresentation(transition_map,
descriptor, value->OptimalRepresentation(), FORCE_FIELD);
- if (!maybe_map->To(&transition_map)) return maybe_map;
Object* back = transition_map->GetBackPointer();
if (back->IsMap()) {
- MaybeObject* maybe_failure =
- lookup->holder()->MigrateToMap(Map::cast(back));
- if (maybe_failure->IsFailure()) return maybe_failure;
+ MigrateToMap(object, handle(Map::cast(back)));
}
descriptors = transition_map->instance_descriptors();
representation = descriptors->GetDetails(descriptor).representation();
}
int field_index = descriptors->GetFieldIndex(descriptor);
- return lookup->holder()->AddFastPropertyUsingMap(
- transition_map, *name, *value, field_index, representation);
+ AddFastPropertyUsingMap(
+ object, transition_map, name, value, field_index, representation);
+ return value;
}
-static MaybeObject* SetPropertyToField(LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value) {
+static void SetPropertyToField(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value) {
Representation representation = lookup->representation();
if (!value->FitsRepresentation(representation) ||
lookup->type() == CONSTANT) {
- MaybeObject* maybe_failure =
- lookup->holder()->GeneralizeFieldRepresentation(
- lookup->GetDescriptorIndex(),
- value->OptimalRepresentation(),
- FORCE_FIELD);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ JSObject::GeneralizeFieldRepresentation(handle(lookup->holder()),
+ lookup->GetDescriptorIndex(),
+ value->OptimalRepresentation(),
+ FORCE_FIELD);
DescriptorArray* desc = lookup->holder()->map()->instance_descriptors();
int descriptor = lookup->GetDescriptorIndex();
representation = desc->GetDetails(descriptor).representation();
@@ -3833,199 +3835,180 @@
HeapNumber* storage = HeapNumber::cast(lookup->holder()->RawFastPropertyAt(
lookup->GetFieldIndex().field_index()));
storage->set_value(value->Number());
- return *value;
+ return;
}
lookup->holder()->FastPropertyAtPut(
lookup->GetFieldIndex().field_index(), *value);
- return *value;
}
-static MaybeObject* ConvertAndSetLocalProperty(LookupResult* lookup,
- Name* name,
- Object* value,
- PropertyAttributes attributes) {
- JSObject* object = lookup->holder();
+static void ConvertAndSetLocalProperty(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ Handle<JSObject> object(lookup->holder());
if (object->TooManyFastProperties()) {
- MaybeObject* maybe_failure = object->NormalizeProperties(
- CLEAR_INOBJECT_PROPERTIES, 0);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ JSObject::NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
}
if (!object->HasFastProperties()) {
- return object->ReplaceSlowProperty(name, value, attributes);
+ ReplaceSlowProperty(object, name, value, attributes);
+ return;
}
int descriptor_index = lookup->GetDescriptorIndex();
if (lookup->GetAttributes() == attributes) {
- MaybeObject* maybe_failure = object->GeneralizeFieldRepresentation(
- descriptor_index, Representation::Tagged(), FORCE_FIELD);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ JSObject::GeneralizeFieldRepresentation(
+ object, descriptor_index, Representation::Tagged(), FORCE_FIELD);
} else {
- Map* map;
- MaybeObject* maybe_map = object->map()->CopyGeneralizeAllRepresentations(
+ Handle<Map> old_map(object->map());
+ Handle<Map> new_map = Map::CopyGeneralizeAllRepresentations(old_map,
descriptor_index, FORCE_FIELD, attributes, "attributes mismatch");
- if (!maybe_map->To(&map)) return maybe_map;
- MaybeObject* maybe_failure = object->MigrateToMap(map);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ JSObject::MigrateToMap(object, new_map);
}
DescriptorArray* descriptors = object->map()->instance_descriptors();
int index = descriptors->GetDetails(descriptor_index).field_index();
- object->FastPropertyAtPut(index, value);
- return value;
+ object->FastPropertyAtPut(index, *value);
}
-static MaybeObject* SetPropertyToFieldWithAttributes(
- LookupResult* lookup,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes) {
+static void SetPropertyToFieldWithAttributes(LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
if (lookup->GetAttributes() == attributes) {
- if (value->IsUninitialized()) return *value;
- return SetPropertyToField(lookup, name, value);
+ if (value->IsUninitialized()) return;
+ SetPropertyToField(lookup, name, value);
} else {
- return ConvertAndSetLocalProperty(lookup, *name, *value, attributes);
+ ConvertAndSetLocalProperty(lookup, name, value, attributes);
}
}
-MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
- Name* name_raw,
- Object* value_raw,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode) {
- Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
+Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode) {
+ Isolate* isolate = object->GetIsolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChangeWithHandleScope ncc;
+ AssertNoContextChange ncc;
// Optimization for 2-byte strings often used as keys in a decompression
// dictionary. We internalize these short keys to avoid constantly
// reallocating them.
- if (name_raw->IsString() && !name_raw->IsInternalizedString() &&
- String::cast(name_raw)->length() <= 2) {
- Object* internalized_version;
- { MaybeObject* maybe_string_version =
- heap->InternalizeString(String::cast(name_raw));
- if (maybe_string_version->ToObject(&internalized_version)) {
- name_raw = String::cast(internalized_version);
- }
- }
+ if (name->IsString() && !name->IsInternalizedString() &&
+ Handle<String>::cast(name)->length() <= 2) {
+ name = isolate->factory()->InternalizeString(Handle<String>::cast(name));
}
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(
- lookup, name_raw, value_raw, true, strict_mode);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(object, lookup, name, value,
+ true, strict_mode);
}
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return value_raw;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetPropertyForResult(
- lookup, name_raw, value_raw, attributes, strict_mode, store_mode);
+ return SetPropertyForResult(Handle<JSObject>::cast(proto),
+ lookup, name, value, attributes, strict_mode, store_mode);
}
- ASSERT(!lookup->IsFound() || lookup->holder() == this ||
+ ASSERT(!lookup->IsFound() || lookup->holder() == *object ||
lookup->holder()->map()->is_hidden_prototype());
- // From this point on everything needs to be handlified, because
- // SetPropertyViaPrototypes might call back into JavaScript.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<Name> name(name_raw);
- Handle<Object> value(value_raw, isolate);
-
- if (!lookup->IsProperty() && !self->IsJSContextExtensionObject()) {
+ if (!lookup->IsProperty() && !object->IsJSContextExtensionObject()) {
bool done = false;
- MaybeObject* result_object = self->SetPropertyViaPrototypes(
- *name, *value, attributes, strict_mode, &done);
+ Handle<Object> result_object = SetPropertyViaPrototypes(
+ object, name, value, attributes, strict_mode, &done);
if (done) return result_object;
}
if (!lookup->IsFound()) {
// Neither properties nor transitions found.
- return self->AddProperty(
- *name, *value, attributes, strict_mode, store_mode);
+ return AddProperty(
+ object, name, value, attributes, strict_mode, store_mode);
}
if (lookup->IsProperty() && lookup->IsReadOnly()) {
if (strict_mode == kStrictMode) {
- Handle<Object> args[] = { name, self };
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
+ Handle<Object> args[] = { name, object };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
+ isolate->Throw(*error);
+ return Handle<Object>();
} else {
- return *value;
+ return value;
}
}
- Handle<Object> old_value(heap->the_hole_value(), isolate);
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
if (FLAG_harmony_observation &&
- map()->is_observed() && lookup->IsDataProperty()) {
- old_value = Object::GetProperty(self, name);
+ object->map()->is_observed() && lookup->IsDataProperty()) {
+ old_value = Object::GetProperty(object, name);
}
// This is a real property that is not read-only, or it is a
// transition or null descriptor and there are no setters in the prototypes.
- MaybeObject* result = *value;
+ Handle<Object> result = value;
switch (lookup->type()) {
case NORMAL:
- result = lookup->holder()->SetNormalizedProperty(lookup, *value);
+ SetNormalizedProperty(handle(lookup->holder()), lookup, value);
break;
case FIELD:
- result = SetPropertyToField(lookup, name, value);
+ SetPropertyToField(lookup, name, value);
break;
case CONSTANT:
// Only replace the constant if necessary.
- if (*value == lookup->GetConstant()) return *value;
- result = SetPropertyToField(lookup, name, value);
+ if (*value == lookup->GetConstant()) return value;
+ SetPropertyToField(lookup, name, value);
break;
case CALLBACKS: {
- Object* callback_object = lookup->GetCallbackObject();
- return self->SetPropertyWithCallback(
- callback_object, *name, *value, lookup->holder(), strict_mode);
+ Handle<Object> callback_object(lookup->GetCallbackObject(), isolate);
+ return SetPropertyWithCallback(object, callback_object, name, value,
+ handle(lookup->holder()), strict_mode);
}
case INTERCEPTOR:
- result = lookup->holder()->SetPropertyWithInterceptor(
- *name, *value, attributes, strict_mode);
+ result = SetPropertyWithInterceptor(handle(lookup->holder()), name, value,
+ attributes, strict_mode);
break;
- case TRANSITION: {
- result = SetPropertyUsingTransition(lookup, name, value, attributes);
+ case TRANSITION:
+ result = SetPropertyUsingTransition(handle(lookup->holder()), lookup,
+ name, value, attributes);
break;
- }
case HANDLER:
case NONEXISTENT:
UNREACHABLE();
}
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
- if (FLAG_harmony_observation && self->map()->is_observed()) {
+ if (FLAG_harmony_observation && object->map()->is_observed()) {
if (lookup->IsTransition()) {
- EnqueueChangeRecord(self, "new", name, old_value);
+ EnqueueChangeRecord(object, "new", name, old_value);
} else {
LookupResult new_lookup(isolate);
- self->LocalLookup(*name, &new_lookup, true);
+ object->LocalLookup(*name, &new_lookup, true);
if (new_lookup.IsDataProperty()) {
- Handle<Object> new_value = Object::GetProperty(self, name);
+ Handle<Object> new_value = Object::GetProperty(object, name);
if (!new_value->SameValue(*old_value)) {
- EnqueueChangeRecord(self, "updated", name, old_value);
+ EnqueueChangeRecord(object, "updated", name, old_value);
}
}
}
}
- return *hresult;
+ return result;
}
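
SetPropertyForResult also carries the Object.observe bookkeeping: capture the old value before the write, then enqueue a "new" or "updated" change record afterwards, suppressing records when the value is unchanged under SameValue. A loose standalone sketch of that pattern — the types and the double-based SameValue here are simplified stand-ins, not V8's implementation:

#include <cmath>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct ChangeRecord { std::string type, name; double old_value; };

static bool SameValue(double a, double b) {
  if (std::isnan(a) && std::isnan(b)) return true;  // NaN equals NaN here
  return a == b && std::signbit(a) == std::signbit(b);
}

struct ObservedObject {
  std::map<std::string, double> properties;
  std::vector<ChangeRecord> pending;

  void SetProperty(const std::string& name, double value) {
    auto it = properties.find(name);
    bool is_new = (it == properties.end());
    double old_value = is_new ? 0 : it->second;
    properties[name] = value;                       // the actual write
    if (is_new) {
      pending.push_back({"new", name, 0});
    } else if (!SameValue(old_value, value)) {
      pending.push_back({"updated", name, old_value});
    }                                               // no record on a no-op
  }
};

int main() {
  ObservedObject o;
  o.SetProperty("x", 1);   // "new"
  o.SetProperty("x", 1);   // no record
  o.SetProperty("x", 2);   // "updated"
  for (const auto& r : o.pending)
    std::printf("%s %s\n", r.type.c_str(), r.name.c_str());
  return 0;
}
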
@@ -4063,142 +4046,114 @@
// doesn't handle function prototypes correctly.
Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
Handle<JSObject> object,
- Handle<Name> key,
+ Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
ValueType value_type,
StoreMode mode,
ExtensibilityCheck extensibility_check) {
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetLocalPropertyIgnoreAttributes(
- *key, *value, attributes, value_type, mode, extensibility_check),
- Object);
-}
+ Isolate* isolate = object->GetIsolate();
-
-MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
- Name* name_raw,
- Object* value_raw,
- PropertyAttributes attributes,
- ValueType value_type,
- StoreMode mode,
- ExtensibilityCheck extensibility_check) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChangeWithHandleScope ncc;
- Isolate* isolate = GetIsolate();
+ AssertNoContextChange ncc;
+
LookupResult lookup(isolate);
- LocalLookup(name_raw, &lookup, true);
- if (!lookup.IsFound()) map()->LookupTransition(this, name_raw, &lookup);
+ object->LocalLookup(*name, &lookup, true);
+ if (!lookup.IsFound()) {
+ object->map()->LookupTransition(*object, *name, &lookup);
+ }
+
// Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(&lookup,
- name_raw,
- value_raw,
- false,
- kNonStrictMode);
+ if (object->IsAccessCheckNeeded()) {
+ if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(object, &lookup, name, value,
+ false, kNonStrictMode);
}
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return value_raw;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->SetLocalPropertyIgnoreAttributes(
- name_raw,
- value_raw,
- attributes,
- value_type,
- mode,
- extensibility_check);
+ return SetLocalPropertyIgnoreAttributes(Handle<JSObject>::cast(proto),
+ name, value, attributes, value_type, mode, extensibility_check);
}
if (lookup.IsFound() &&
(lookup.type() == INTERCEPTOR || lookup.type() == CALLBACKS)) {
- LocalLookupRealNamedProperty(name_raw, &lookup);
+ object->LocalLookupRealNamedProperty(*name, &lookup);
}
// Check for accessor in prototype chain removed here in clone.
if (!lookup.IsFound()) {
// Neither properties nor transitions found.
- return AddProperty(
- name_raw, value_raw, attributes, kNonStrictMode,
+ return AddProperty(object, name, value, attributes, kNonStrictMode,
MAY_BE_STORE_FROM_KEYED, extensibility_check, value_type, mode);
}
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<Name> name(name_raw);
- Handle<Object> value(value_raw, isolate);
-
- Handle<Object> old_value(isolate->heap()->the_hole_value(), isolate);
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
PropertyAttributes old_attributes = ABSENT;
- bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
+ bool is_observed = FLAG_harmony_observation && object->map()->is_observed();
if (is_observed && lookup.IsProperty()) {
if (lookup.IsDataProperty()) old_value =
- Object::GetProperty(self, name);
+ Object::GetProperty(object, name);
old_attributes = lookup.GetAttributes();
}
// Check of IsReadOnly removed from here in clone.
- MaybeObject* result = *value;
switch (lookup.type()) {
case NORMAL:
- result = self->ReplaceSlowProperty(*name, *value, attributes);
+ ReplaceSlowProperty(object, name, value, attributes);
break;
case FIELD:
- result = SetPropertyToFieldWithAttributes(
- &lookup, name, value, attributes);
+ SetPropertyToFieldWithAttributes(&lookup, name, value, attributes);
break;
case CONSTANT:
// Only replace the constant if necessary.
if (lookup.GetAttributes() != attributes ||
*value != lookup.GetConstant()) {
- result = SetPropertyToFieldWithAttributes(
- &lookup, name, value, attributes);
+ SetPropertyToFieldWithAttributes(&lookup, name, value, attributes);
}
break;
case CALLBACKS:
- result = ConvertAndSetLocalProperty(&lookup, *name, *value, attributes);
+ ConvertAndSetLocalProperty(&lookup, name, value, attributes);
break;
- case TRANSITION:
- result = SetPropertyUsingTransition(&lookup, name, value, attributes);
+ case TRANSITION: {
+ Handle<Object> result = SetPropertyUsingTransition(
+ handle(lookup.holder()), &lookup, name, value, attributes);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>());
break;
+ }
case NONEXISTENT:
case HANDLER:
case INTERCEPTOR:
UNREACHABLE();
}
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
if (is_observed) {
if (lookup.IsTransition()) {
- EnqueueChangeRecord(self, "new", name, old_value);
+ EnqueueChangeRecord(object, "new", name, old_value);
} else if (old_value->IsTheHole()) {
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ EnqueueChangeRecord(object, "reconfigured", name, old_value);
} else {
LookupResult new_lookup(isolate);
- self->LocalLookup(*name, &new_lookup, true);
+ object->LocalLookup(*name, &new_lookup, true);
bool value_changed = false;
if (new_lookup.IsDataProperty()) {
- Handle<Object> new_value = Object::GetProperty(self, name);
+ Handle<Object> new_value = Object::GetProperty(object, name);
value_changed = !old_value->SameValue(*new_value);
}
if (new_lookup.GetAttributes() != old_attributes) {
if (!value_changed) old_value = isolate->factory()->the_hole_value();
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ EnqueueChangeRecord(object, "reconfigured", name, old_value);
} else if (value_changed) {
- EnqueueChangeRecord(self, "updated", name, old_value);
+ EnqueueChangeRecord(object, "updated", name, old_value);
}
}
}
- return *hresult;
+ return value;
}
@@ -4422,12 +4377,12 @@
}
-MaybeObject* NormalizedMapCache::Get(JSObject* obj,
- PropertyNormalizationMode mode) {
- Isolate* isolate = obj->GetIsolate();
+Handle<Map> NormalizedMapCache::Get(Handle<NormalizedMapCache> cache,
+ Handle<JSObject> obj,
+ PropertyNormalizationMode mode) {
Map* fast = obj->map();
int index = fast->Hash() % kEntries;
- Object* result = get(index);
+ Object* result = cache->get(index);
if (result->IsMap() &&
Map::cast(result)->EquivalentToForNormalization(fast, mode)) {
#ifdef VERIFY_HEAP
@@ -4456,18 +4411,17 @@
}
}
#endif
- return result;
+ return handle(Map::cast(result));
}
- { MaybeObject* maybe_result =
- fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- ASSERT(Map::cast(result)->is_dictionary_map());
- set(index, result);
+ Isolate* isolate = cache->GetIsolate();
+ Handle<Map> map = Map::CopyNormalized(handle(fast), mode,
+ SHARED_NORMALIZED_MAP);
+ ASSERT(map->is_dictionary_map());
+ cache->set(index, *map);
isolate->counters()->normalized_maps()->Increment();
- return result;
+ return map;
}
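
NormalizedMapCache::Get behaves like a small direct-mapped cache: hash the fast map into one of kEntries slots, reuse the slot on an equivalence hit, otherwise build the normalized map and overwrite the slot. A simplified sketch — the equivalence test and the ownership model below are stand-ins, not V8's:

#include <cstdio>

struct Map { int hash; bool dictionary; };

class NormalizedMapCache {
 public:
  static const int kEntries = 64;
  Map* Get(const Map& fast) {
    int index = fast.hash % kEntries;
    Map* cached = entries_[index];
    if (cached != nullptr && cached->hash == fast.hash) {
      return cached;                     // hit: equivalent normalized map
    }
    // Miss: build the normalized (dictionary-mode) copy and cache it.
    Map* normalized = new Map{fast.hash, /*dictionary=*/true};
    delete entries_[index];              // direct-mapped: evict old entry
    entries_[index] = normalized;
    return normalized;
  }
 private:
  Map* entries_[kEntries] = {};
};

int main() {
  NormalizedMapCache cache;
  Map fast = {17, false};
  Map* a = cache.Get(fast);
  Map* b = cache.Get(fast);              // second lookup hits the cache
  std::printf("same entry: %s\n", a == b ? "yes" : "no");  // yes
  return 0;
}
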
@@ -4500,65 +4454,55 @@
void JSObject::NormalizeProperties(Handle<JSObject> object,
PropertyNormalizationMode mode,
int expected_additional_properties) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->NormalizeProperties(
- mode, expected_additional_properties));
-}
-
-
-MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
- int expected_additional_properties) {
- if (!HasFastProperties()) return this;
+ if (!object->HasFastProperties()) return;
// The global object is always normalized.
- ASSERT(!IsGlobalObject());
+ ASSERT(!object->IsGlobalObject());
// JSGlobalProxy must never be normalized
- ASSERT(!IsJSGlobalProxy());
+ ASSERT(!object->IsJSGlobalProxy());
- Map* map_of_this = map();
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Map> map(object->map());
// Allocate new content.
- int real_size = map_of_this->NumberOfOwnDescriptors();
+ int real_size = map->NumberOfOwnDescriptors();
int property_count = real_size;
if (expected_additional_properties > 0) {
property_count += expected_additional_properties;
} else {
property_count += 2; // Make space for two more properties.
}
- NameDictionary* dictionary;
- MaybeObject* maybe_dictionary =
- NameDictionary::Allocate(GetHeap(), property_count);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ Handle<NameDictionary> dictionary =
+ isolate->factory()->NewNameDictionary(property_count);
- DescriptorArray* descs = map_of_this->instance_descriptors();
+ Handle<DescriptorArray> descs(map->instance_descriptors());
for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
case CONSTANT: {
+ Handle<Name> key(descs->GetKey(i));
+ Handle<Object> value(descs->GetConstant(i), isolate);
PropertyDetails d = PropertyDetails(
details.attributes(), NORMAL, i + 1);
- Object* value = descs->GetConstant(i);
- MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ dictionary = NameDictionaryAdd(dictionary, key, value, d);
break;
}
case FIELD: {
+ Handle<Name> key(descs->GetKey(i));
+ Handle<Object> value(
+ object->RawFastPropertyAt(descs->GetFieldIndex(i)), isolate);
PropertyDetails d =
PropertyDetails(details.attributes(), NORMAL, i + 1);
- Object* value = RawFastPropertyAt(descs->GetFieldIndex(i));
- MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ dictionary = NameDictionaryAdd(dictionary, key, value, d);
break;
}
case CALLBACKS: {
- Object* value = descs->GetCallbacksObject(i);
+ Handle<Name> key(descs->GetKey(i));
+ Handle<Object> value(descs->GetCallbacksObject(i), isolate);
PropertyDetails d = PropertyDetails(
details.attributes(), CALLBACKS, i + 1);
- MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+ dictionary = NameDictionaryAdd(dictionary, key, value, d);
break;
}
case INTERCEPTOR:
@@ -4572,62 +4516,52 @@
}
}
- Heap* current_heap = GetHeap();
-
// Copy the next enumeration index from instance descriptor.
dictionary->SetNextEnumerationIndex(real_size + 1);
- Map* new_map;
- MaybeObject* maybe_map =
- current_heap->isolate()->context()->native_context()->
- normalized_map_cache()->Get(this, mode);
- if (!maybe_map->To(&new_map)) return maybe_map;
+ Handle<NormalizedMapCache> cache(
+ isolate->context()->native_context()->normalized_map_cache());
+ Handle<Map> new_map = NormalizedMapCache::Get(cache, object, mode);
ASSERT(new_map->is_dictionary_map());
- // We have now successfully allocated all the necessary objects.
- // Changes can now be made with the guarantee that all of them take effect.
+ // From here on we cannot fail and we shouldn't GC anymore.
+ DisallowHeapAllocation no_allocation;
// Resize the object in the heap if necessary.
int new_instance_size = new_map->instance_size();
- int instance_size_delta = map_of_this->instance_size() - new_instance_size;
+ int instance_size_delta = map->instance_size() - new_instance_size;
ASSERT(instance_size_delta >= 0);
- current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
- instance_size_delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
+ isolate->heap()->CreateFillerObjectAt(object->address() + new_instance_size,
+ instance_size_delta);
+ if (Marking::IsBlack(Marking::MarkBitFrom(*object))) {
+ MemoryChunk::IncrementLiveBytesFromMutator(object->address(),
-instance_size_delta);
}
- set_map(new_map);
- map_of_this->NotifyLeafMapLayoutChange();
+ object->set_map(*new_map);
+ map->NotifyLeafMapLayoutChange();
- set_properties(dictionary);
+ object->set_properties(*dictionary);
- current_heap->isolate()->counters()->props_to_dictionary()->Increment();
+ isolate->counters()->props_to_dictionary()->Increment();
#ifdef DEBUG
if (FLAG_trace_normalization) {
PrintF("Object properties have been normalized:\n");
- Print();
+ object->Print();
}
#endif
- return this;
}
void JSObject::TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields) {
+ if (object->HasFastProperties()) return;
+ ASSERT(!object->IsGlobalObject());
CALL_HEAP_FUNCTION_VOID(
object->GetIsolate(),
- object->TransformToFastProperties(unused_property_fields));
-}
-
-
-MaybeObject* JSObject::TransformToFastProperties(int unused_property_fields) {
- if (HasFastProperties()) return this;
- ASSERT(!IsGlobalObject());
- return property_dictionary()->
- TransformPropertiesToFastFor(this, unused_property_fields);
+ object->property_dictionary()->TransformPropertiesToFastFor(
+ *object, unused_property_fields));
}
@@ -4667,6 +4601,18 @@
}
+static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
+ Handle<FixedArrayBase> array,
+ int length,
+ Handle<SeededNumberDictionary> dict) {
+ Isolate* isolate = array->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ CopyFastElementsToDictionary(
+ isolate, *array, length, *dict),
+ SeededNumberDictionary);
+}
+
+
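
The CopyFastElementsToDictionary helper above follows the temporary-wrapper idiom flagged by the TODO(mstarzinger) comments: a handle-based wrapper drives a raw, allocation-may-fail core through CALL_HEAP_FUNCTION, which retries the call after collecting garbage. A toy rendition of that retry shape (the macro body and the fake allocator below are stand-ins; the real macro also re-boxes the result in a handle):

#include <cstdio>

struct Object { int value; };

struct MaybeObject {
  Object* object;                  // nullptr encodes a retry-able failure
  bool IsFailure() const { return object == nullptr; }
};

static int g_budget = 0;           // fake allocator: fails until "GC" runs

MaybeObject RawAllocate(int value) {
  if (g_budget <= 0) return MaybeObject{nullptr};  // allocation failure
  --g_budget;
  return MaybeObject{new Object{value}};
}

#define CALL_HEAP_FUNCTION(call, out)              \
  do {                                             \
    MaybeObject maybe = (call);                    \
    if (maybe.IsFailure()) {                       \
      g_budget = 8; /* stand-in for a GC round */  \
      maybe = (call);                              \
    }                                              \
    (out) = maybe.object;                          \
  } while (0)

Object* HandlifiedAllocate(int value) {  // the temporary wrapper
  Object* result = nullptr;
  CALL_HEAP_FUNCTION(RawAllocate(value), result);
  return result;
}

int main() {
  Object* o = HandlifiedAllocate(7);       // first attempt fails, retry wins
  std::printf("%d\n", o ? o->value : -1);  // prints 7
  return 0;
}
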
Handle<SeededNumberDictionary> JSObject::NormalizeElements(
Handle<JSObject> object) {
CALL_HEAP_FUNCTION(object->GetIsolate(),
@@ -5152,7 +5098,7 @@
Handle<Object> old_value;
bool should_enqueue_change_record = false;
if (FLAG_harmony_observation && object->map()->is_observed()) {
- should_enqueue_change_record = object->HasLocalElement(index);
+ should_enqueue_change_record = HasLocalElement(object, index);
if (should_enqueue_change_record) {
old_value = object->GetLocalElementAccessorPair(index) != NULL
? Handle<Object>::cast(factory->the_hole_value())
@@ -5168,7 +5114,7 @@
result = AccessorDelete(object, index, mode);
}
- if (should_enqueue_change_record && !object->HasLocalElement(index)) {
+ if (should_enqueue_change_record && !HasLocalElement(object, index)) {
Handle<String> name = factory->Uint32ToString(index);
EnqueueChangeRecord(object, "deleted", name, old_value);
}
@@ -5243,7 +5189,7 @@
result = DeleteNormalizedProperty(object, name, mode);
}
- if (is_observed && !object->HasLocalProperty(*name)) {
+ if (is_observed && !HasLocalProperty(object, name)) {
EnqueueChangeRecord(object, "deleted", name, old_value);
}
@@ -5405,59 +5351,50 @@
Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object);
-}
-
-
-MaybeObject* JSObject::PreventExtensions() {
- Isolate* isolate = GetIsolate();
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this,
+ Isolate* isolate = object->GetIsolate();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object,
isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->false_value();
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->false_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return object;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->PreventExtensions();
+ return PreventExtensions(Handle<JSObject>::cast(proto));
}
// It's not possible to prevent extensions on objects with external
// array elements.
- if (HasExternalArrayElements()) {
- HandleScope scope(isolate);
- Handle<Object> object(this, isolate);
+ if (object->HasExternalArrayElements()) {
Handle<Object> error =
isolate->factory()->NewTypeError(
"cant_prevent_ext_external_array_elements",
HandleVector(&object, 1));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
// If there are fast elements we normalize.
- SeededNumberDictionary* dictionary = NULL;
- { MaybeObject* maybe = NormalizeElements();
- if (!maybe->To<SeededNumberDictionary>(&dictionary)) return maybe;
- }
- ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+ Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
+ ASSERT(object->HasDictionaryElements() ||
+ object->HasDictionaryArgumentsElements());
+
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
// Do a map transition; other objects with this map may still
// be extensible.
// TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
- Map* new_map;
- MaybeObject* maybe = map()->Copy();
- if (!maybe->To(&new_map)) return maybe;
+ Handle<Map> new_map = Map::Copy(handle(object->map()));
new_map->set_is_extensible(false);
- set_map(new_map);
- ASSERT(!map()->is_extensible());
- return new_map;
+ object->set_map(*new_map);
+ ASSERT(!object->map()->is_extensible());
+ return object;
}
@@ -5482,122 +5419,114 @@
}
-MUST_USE_RESULT MaybeObject* JSObject::Freeze(Isolate* isolate) {
+Handle<Object> JSObject::Freeze(Handle<JSObject> object) {
// Freezing non-strict arguments should be handled elsewhere.
- ASSERT(!HasNonStrictArgumentsElements());
+ ASSERT(!object->HasNonStrictArgumentsElements());
- Heap* heap = isolate->heap();
+ if (object->map()->is_frozen()) return object;
- if (map()->is_frozen()) return this;
-
- if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this,
- heap->undefined_value(),
+ Isolate* isolate = object->GetIsolate();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(*object,
+ isolate->heap()->undefined_value(),
v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return heap->false_value();
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->false_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return this;
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return object;
ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->Freeze(isolate);
+ return Freeze(Handle<JSObject>::cast(proto));
}
// It's not possible to freeze objects with external array elements
- if (HasExternalArrayElements()) {
- HandleScope scope(isolate);
- Handle<Object> object(this, isolate);
+ if (object->HasExternalArrayElements()) {
Handle<Object> error =
isolate->factory()->NewTypeError(
"cant_prevent_ext_external_array_elements",
HandleVector(&object, 1));
- return isolate->Throw(*error);
+ isolate->Throw(*error);
+ return Handle<Object>();
}
- SeededNumberDictionary* new_element_dictionary = NULL;
- if (!elements()->IsDictionary()) {
- int length = IsJSArray()
- ? Smi::cast(JSArray::cast(this)->length())->value()
- : elements()->length();
+ Handle<SeededNumberDictionary> new_element_dictionary;
+ if (!object->elements()->IsDictionary()) {
+ int length = object->IsJSArray()
+ ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
+ : object->elements()->length();
if (length > 0) {
int capacity = 0;
int used = 0;
- GetElementsCapacityAndUsage(&capacity, &used);
- MaybeObject* maybe_dict = SeededNumberDictionary::Allocate(heap, used);
- if (!maybe_dict->To(&new_element_dictionary)) return maybe_dict;
+ object->GetElementsCapacityAndUsage(&capacity, &used);
+ new_element_dictionary =
+ isolate->factory()->NewSeededNumberDictionary(used);
// Move elements to a dictionary; avoid NormalizeElements so as not to
// trigger unnecessary transitions.
- maybe_dict = CopyFastElementsToDictionary(isolate, elements(), length,
- new_element_dictionary);
- if (!maybe_dict->To(&new_element_dictionary)) return maybe_dict;
+ new_element_dictionary = CopyFastElementsToDictionary(
+ handle(object->elements()), length, new_element_dictionary);
} else {
// No existing elements; use a pre-allocated empty backing store.
- new_element_dictionary = heap->empty_slow_element_dictionary();
+ new_element_dictionary =
+ isolate->factory()->empty_slow_element_dictionary();
}
}
LookupResult result(isolate);
- map()->LookupTransition(this, heap->frozen_symbol(), &result);
+ Handle<Map> old_map(object->map());
+ old_map->LookupTransition(*object, isolate->heap()->frozen_symbol(), &result);
if (result.IsTransition()) {
Map* transition_map = result.GetTransitionTarget();
ASSERT(transition_map->has_dictionary_elements());
ASSERT(transition_map->is_frozen());
ASSERT(!transition_map->is_extensible());
- set_map(transition_map);
- } else if (HasFastProperties() && map()->CanHaveMoreTransitions()) {
+ object->set_map(transition_map);
+ } else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) {
// Create a new descriptor array with fully-frozen properties
- int num_descriptors = map()->NumberOfOwnDescriptors();
- DescriptorArray* new_descriptors;
- MaybeObject* maybe_descriptors =
- map()->instance_descriptors()->CopyUpToAddAttributes(num_descriptors,
- FROZEN);
- if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
-
- Map* new_map;
- MaybeObject* maybe_new_map = map()->CopyReplaceDescriptors(
- new_descriptors, INSERT_TRANSITION, heap->frozen_symbol());
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ int num_descriptors = old_map->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> new_descriptors =
+ DescriptorArray::CopyUpToAddAttributes(
+ handle(old_map->instance_descriptors()), num_descriptors, FROZEN);
+ Handle<Map> new_map = Map::CopyReplaceDescriptors(
+ old_map, new_descriptors, INSERT_TRANSITION,
+ isolate->factory()->frozen_symbol());
new_map->freeze();
new_map->set_is_extensible(false);
new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- set_map(new_map);
+ object->set_map(*new_map);
} else {
// Slow path: need to normalize properties for safety
- MaybeObject* maybe = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (maybe->IsFailure()) return maybe;
+ NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
// Create a new map, since other objects with this map may be extensible.
// TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
- Map* new_map;
- MaybeObject* maybe_copy = map()->Copy();
- if (!maybe_copy->To(&new_map)) return maybe_copy;
+ Handle<Map> new_map = Map::Copy(handle(object->map()));
new_map->freeze();
new_map->set_is_extensible(false);
new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- set_map(new_map);
+ object->set_map(*new_map);
// Freeze dictionary-mode properties
- FreezeDictionary(property_dictionary());
+ FreezeDictionary(object->property_dictionary());
}
- ASSERT(map()->has_dictionary_elements());
- if (new_element_dictionary != NULL) {
- set_elements(new_element_dictionary);
+ ASSERT(object->map()->has_dictionary_elements());
+ if (!new_element_dictionary.is_null()) {
+ object->set_elements(*new_element_dictionary);
}
- if (elements() != heap->empty_slow_element_dictionary()) {
- SeededNumberDictionary* dictionary = element_dictionary();
+ if (object->elements() != isolate->heap()->empty_slow_element_dictionary()) {
+ SeededNumberDictionary* dictionary = object->element_dictionary();
// Make sure we never go back to the fast case
dictionary->set_requires_slow_elements();
// Freeze all elements in the dictionary
FreezeDictionary(dictionary);
}
- return this;
+ return object;
}
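
Freeze picks between three paths for the map: reuse a cached "frozen" transition if one exists on the old map, else copy the descriptor array with FROZEN attributes, else normalize the properties and copy the map. A compressed sketch of just the reuse-vs-build decision, with a deliberately simplified Map (paths two and three are collapsed into one branch here):

#include <cstdio>
#include <map>
#include <string>

struct Map {
  bool frozen = false;
  bool extensible = true;
  std::map<std::string, Map*> transitions;  // symbol -> target map
};

Map* Freeze(Map* old_map) {
  // Path 1: an equivalent frozen map was already created; reuse it.
  auto it = old_map->transitions.find("frozen");
  if (it != old_map->transitions.end()) return it->second;
  // Paths 2/3, collapsed: build a frozen copy and remember the transition.
  Map* new_map = new Map(*old_map);
  new_map->frozen = true;
  new_map->extensible = false;
  old_map->transitions["frozen"] = new_map;
  return new_map;
}

int main() {
  Map base;
  Map* a = Freeze(&base);
  Map* b = Freeze(&base);                  // second freeze reuses the map
  std::printf("reused: %s\n", a == b ? "yes" : "no");  // yes
  return 0;
}
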
@@ -5635,71 +5564,69 @@
}
-MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) {
+Handle<JSObject> JSObject::Copy(Handle<JSObject> object) {
+ Isolate* isolate = object->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ isolate->heap()->CopyJSObject(*object), JSObject);
+}
+
+
+Handle<JSObject> JSObject::DeepCopy(Handle<JSObject> object) {
+ Isolate* isolate = object->GetIsolate();
StackLimitCheck check(isolate);
- if (check.HasOverflowed()) return isolate->StackOverflow();
-
- if (map()->is_deprecated()) {
- MaybeObject* maybe_failure = MigrateInstance();
- if (maybe_failure->IsFailure()) return maybe_failure;
+ if (check.HasOverflowed()) {
+ isolate->StackOverflow();
+ return Handle<JSObject>::null();
}
- Heap* heap = isolate->heap();
- Object* result;
- { MaybeObject* maybe_result = heap->CopyJSObject(this);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+ if (object->map()->is_deprecated()) {
+ MigrateInstance(object);
}
- JSObject* copy = JSObject::cast(result);
+
+ Handle<JSObject> copy = Copy(object);
+
+ HandleScope scope(isolate);
// Deep copy local properties.
if (copy->HasFastProperties()) {
- DescriptorArray* descriptors = copy->map()->instance_descriptors();
+ Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors());
int limit = copy->map()->NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
int index = descriptors->GetFieldIndex(i);
- Object* value = RawFastPropertyAt(index);
+ Handle<Object> value(object->RawFastPropertyAt(index), isolate);
if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- MaybeObject* maybe_copy = js_object->DeepCopy(isolate);
- if (!maybe_copy->To(&value)) return maybe_copy;
+ value = DeepCopy(Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<JSObject>());
} else {
Representation representation = details.representation();
- MaybeObject* maybe_storage =
- value->AllocateNewStorageFor(heap, representation);
- if (!maybe_storage->To(&value)) return maybe_storage;
+ value = NewStorageFor(isolate, value, representation);
}
- copy->FastPropertyAtPut(index, value);
+ copy->FastPropertyAtPut(index, *value);
}
} else {
- { MaybeObject* maybe_result =
- heap->AllocateFixedArray(copy->NumberOfLocalProperties());
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- FixedArray* names = FixedArray::cast(result);
- copy->GetLocalPropertyNames(names, 0);
+ Handle<FixedArray> names =
+ isolate->factory()->NewFixedArray(copy->NumberOfLocalProperties());
+ copy->GetLocalPropertyNames(*names, 0);
for (int i = 0; i < names->length(); i++) {
ASSERT(names->get(i)->IsString());
- String* key_string = String::cast(names->get(i));
+ Handle<String> key_string(String::cast(names->get(i)));
PropertyAttributes attributes =
- copy->GetLocalPropertyAttribute(key_string);
+ copy->GetLocalPropertyAttribute(*key_string);
// Only deep copy fields from the object literal expression.
// In particular, don't try to copy the length attribute of
// an array.
if (attributes != NONE) continue;
- Object* value =
- copy->GetProperty(key_string, &attributes)->ToObjectUnchecked();
+ Handle<Object> value(
+ copy->GetProperty(*key_string, &attributes)->ToObjectUnchecked(),
+ isolate);
if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- { MaybeObject* maybe_result =
- // Creating object copy for literals. No strict mode needed.
- copy->SetProperty(key_string, result, NONE, kNonStrictMode);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ Handle<Object> result = DeepCopy(Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ // Creating object copy for literals. No strict mode needed.
+ CHECK_NOT_EMPTY_HANDLE(isolate, SetProperty(
+ copy, key_string, result, NONE, kNonStrictMode));
}
}
}
@@ -5712,8 +5639,8 @@
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS: {
- FixedArray* elements = FixedArray::cast(copy->elements());
- if (elements->map() == heap->fixed_cow_array_map()) {
+ Handle<FixedArray> elements(FixedArray::cast(copy->elements()));
+ if (elements->map() == isolate->heap()->fixed_cow_array_map()) {
isolate->counters()->cow_arrays_created_runtime()->Increment();
#ifdef DEBUG
for (int i = 0; i < elements->length(); i++) {
@@ -5722,34 +5649,31 @@
#endif
} else {
for (int i = 0; i < elements->length(); i++) {
- Object* value = elements->get(i);
+ Handle<Object> value(elements->get(i), isolate);
ASSERT(value->IsSmi() ||
value->IsTheHole() ||
(IsFastObjectElementsKind(copy->GetElementsKind())));
if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- elements->set(i, result);
+ Handle<Object> result = DeepCopy(Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ elements->set(i, *result);
}
}
}
break;
}
case DICTIONARY_ELEMENTS: {
- SeededNumberDictionary* element_dictionary = copy->element_dictionary();
+ Handle<SeededNumberDictionary> element_dictionary(
+ copy->element_dictionary());
int capacity = element_dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = element_dictionary->KeyAt(i);
if (element_dictionary->IsKey(k)) {
- Object* value = element_dictionary->ValueAt(i);
+ Handle<Object> value(element_dictionary->ValueAt(i), isolate);
if (value->IsJSObject()) {
- JSObject* js_object = JSObject::cast(value);
- { MaybeObject* maybe_result = js_object->DeepCopy(isolate);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- element_dictionary->ValueAtPut(i, result);
+ Handle<Object> result = DeepCopy(Handle<JSObject>::cast(value));
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>());
+ element_dictionary->ValueAtPut(i, *result);
}
}
}
@@ -6190,7 +6114,7 @@
bool preexists = false;
if (is_observed) {
if (is_element) {
- preexists = object->HasLocalElement(index);
+ preexists = HasLocalElement(object, index);
if (preexists && object->GetLocalElementAccessorPair(index) == NULL) {
old_value = Object::GetElement(isolate, object, index);
}
@@ -6660,6 +6584,16 @@
}
+Handle<Map> Map::CopyReplaceDescriptors(Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ TransitionFlag flag,
+ Handle<Name> name) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ map->CopyReplaceDescriptors(*descriptors, flag, *name),
+ Map);
+}
+
+
MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors,
TransitionFlag flag,
Name* name,
@@ -6686,6 +6620,15 @@
}
+Handle<Map> Map::CopyInstallDescriptors(Handle<Map> map,
+ int new_descriptor,
+ Handle<DescriptorArray> descriptors) {
+ CALL_HEAP_FUNCTION(map->GetIsolate(),
+ map->CopyInstallDescriptors(new_descriptor, *descriptors),
+ Map);
+}
+
+
// Since this method is used to rewrite an existing transition tree, it can
// always insert transitions without checking.
MaybeObject* Map::CopyInstallDescriptors(int new_descriptor,
@@ -6904,6 +6847,16 @@
}
+Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
+ Handle<DescriptorArray> desc,
+ int enumeration_index,
+ PropertyAttributes attributes) {
+ CALL_HEAP_FUNCTION(desc->GetIsolate(),
+ desc->CopyUpToAddAttributes(enumeration_index, attributes),
+ DescriptorArray);
+}
+
+
MaybeObject* DescriptorArray::CopyUpToAddAttributes(
int enumeration_index, PropertyAttributes attributes) {
if (enumeration_index == 0) return GetHeap()->empty_descriptor_array();
@@ -7798,6 +7751,20 @@
}
+Handle<DescriptorArray> DescriptorArray::Merge(Handle<DescriptorArray> desc,
+ int verbatim,
+ int valid,
+ int new_size,
+ int modify_index,
+ StoreMode store_mode,
+ Handle<DescriptorArray> other) {
+ CALL_HEAP_FUNCTION(desc->GetIsolate(),
+ desc->Merge(verbatim, valid, new_size, modify_index,
+ store_mode, *other),
+ DescriptorArray);
+}
+
+
// Generalize the |other| descriptor array by merging it into the (at least
// partly) updated |this| descriptor array.
// The method merges two descriptor arrays in three parts. Both descriptor arrays
@@ -10409,10 +10376,9 @@
BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
DisallowHeapAllocation no_gc;
ASSERT(kind() == FUNCTION);
- for (FullCodeGenerator::BackEdgeTableIterator it(this, &no_gc);
- !it.Done();
- it.Next()) {
- if (it.pc_offset() == pc_offset) return it.ast_id();
+ BackEdgeTable back_edges(this, &no_gc);
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ if (back_edges.pc_offset(i) == pc_offset) return back_edges.ast_id(i);
}
return BailoutId::None();
}
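
This hunk replaces the BackEdgeTable iterator (Done()/Next()) with a plain indexed loop over length() and per-index accessors. A standalone sketch of the indexed interface — the table layout and the ast_id encoding below are assumptions for illustration, not V8's actual encoding:

#include <cstdint>
#include <cstdio>
#include <vector>

class BackEdgeTable {
 public:
  explicit BackEdgeTable(std::vector<uint32_t> pc_offsets)
      : pc_offsets_(std::move(pc_offsets)) {}
  uint32_t length() const { return static_cast<uint32_t>(pc_offsets_.size()); }
  uint32_t pc_offset(uint32_t i) const { return pc_offsets_[i]; }
  int ast_id(uint32_t i) const { return static_cast<int>(i) + 100; }
 private:
  std::vector<uint32_t> pc_offsets_;
};

int TranslatePcOffsetToAstId(const BackEdgeTable& back_edges,
                             uint32_t pc_offset) {
  for (uint32_t i = 0; i < back_edges.length(); i++) {
    if (back_edges.pc_offset(i) == pc_offset) return back_edges.ast_id(i);
  }
  return -1;  // stand-in for BailoutId::None()
}

int main() {
  BackEdgeTable table({4, 16, 64});
  std::printf("%d\n", TranslatePcOffsetToAstId(table, 16));  // 101
  return 0;
}
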
@@ -10425,8 +10391,8 @@
}
-void Code::MakeCodeAgeSequenceYoung(byte* sequence) {
- PatchPlatformCodeAge(sequence, kNoAge, NO_MARKING_PARITY);
+void Code::MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate) {
+ PatchPlatformCodeAge(isolate, sequence, kNoAge, NO_MARKING_PARITY);
}
@@ -10437,7 +10403,9 @@
MarkingParity code_parity;
GetCodeAgeAndParity(sequence, &age, &code_parity);
if (age != kLastCodeAge && code_parity != current_parity) {
- PatchPlatformCodeAge(sequence, static_cast<Age>(age + 1),
+ PatchPlatformCodeAge(GetIsolate(),
+ sequence,
+ static_cast<Age>(age + 1),
current_parity);
}
}
@@ -10500,8 +10468,7 @@
}
-Code* Code::GetCodeAgeStub(Age age, MarkingParity parity) {
- Isolate* isolate = Isolate::Current();
+Code* Code::GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity) {
Builtins* builtins = isolate->builtins();
switch (age) {
#define HANDLE_CODE_AGE(AGE) \
@@ -10772,7 +10739,7 @@
case CONSTANT: return "CONSTANT";
case CALLBACKS: return "CALLBACKS";
case INTERCEPTOR: return "INTERCEPTOR";
- case MAP_TRANSITION: return "MAP_TRANSITION";
+ case TRANSITION: return "TRANSITION";
case NONEXISTENT: return "NONEXISTENT";
}
UNREACHABLE(); // keep the compiler happy
@@ -10879,15 +10846,15 @@
// (due to alignment) the end of the instruction stream.
if (static_cast<int>(offset) < instruction_size()) {
DisallowHeapAllocation no_gc;
- FullCodeGenerator::BackEdgeTableIterator back_edges(this, &no_gc);
+ BackEdgeTable back_edges(this, &no_gc);
- PrintF(out, "Back edges (size = %u)\n", back_edges.table_length());
+ PrintF(out, "Back edges (size = %u)\n", back_edges.length());
PrintF(out, "ast_id pc_offset loop_depth\n");
- for ( ; !back_edges.Done(); back_edges.Next()) {
- PrintF(out, "%6d %9u %10u\n", back_edges.ast_id().ToInt(),
- back_edges.pc_offset(),
- back_edges.loop_depth());
+ for (uint32_t i = 0; i < back_edges.length(); i++) {
+ PrintF(out, "%6d %9u %10u\n", back_edges.ast_id(i).ToInt(),
+ back_edges.pc_offset(i),
+ back_edges.loop_depth(i));
}
PrintF(out, "\n");
@@ -10958,6 +10925,10 @@
}
ValidateElements();
set_map_and_elements(new_map, new_elements);
+
+ // Transition through the allocation site as well if present.
+ maybe_obj = UpdateAllocationSite(new_elements_kind);
+ if (maybe_obj->IsFailure()) return maybe_obj;
} else {
FixedArray* parameter_map = FixedArray::cast(old_elements);
parameter_map->set(1, new_elements);
@@ -11709,18 +11680,17 @@
}
-MaybeObject* JSObject::SetElementWithCallback(Object* structure,
- uint32_t index,
- Object* value,
- JSObject* holder,
- StrictModeFlag strict_mode) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
+Handle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object,
+ Handle<Object> structure,
+ uint32_t index,
+ Handle<Object> value,
+ Handle<JSObject> holder,
+ StrictModeFlag strict_mode) {
+ Isolate* isolate = object->GetIsolate();
// We should never get here to initialize a const with the hole
// value since a const declaration would conflict with the setter.
ASSERT(!value->IsTheHole());
- Handle<Object> value_handle(value, isolate);
// To accommodate both the old and the new api we switch on the
// data structure used to store the callbacks. Eventually foreign
@@ -11729,41 +11699,40 @@
if (structure->IsExecutableAccessorInfo()) {
// api style callbacks
- Handle<JSObject> self(this);
- Handle<JSObject> holder_handle(JSObject::cast(holder));
- Handle<ExecutableAccessorInfo> data(
- ExecutableAccessorInfo::cast(structure));
+ Handle<ExecutableAccessorInfo> data =
+ Handle<ExecutableAccessorInfo>::cast(structure);
Object* call_obj = data->setter();
v8::AccessorSetterCallback call_fun =
v8::ToCData<v8::AccessorSetterCallback>(call_obj);
if (call_fun == NULL) return value;
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<String> key(isolate->factory()->NumberToString(number));
- LOG(isolate, ApiNamedPropertyAccess("store", *self, *key));
+ LOG(isolate, ApiNamedPropertyAccess("store", *object, *key));
PropertyCallbackArguments
- args(isolate, data->data(), *self, *holder_handle);
+ args(isolate, data->data(), *object, *holder);
args.Call(call_fun,
v8::Utils::ToLocal(key),
- v8::Utils::ToLocal(value_handle));
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return *value_handle;
+ v8::Utils::ToLocal(value));
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return value;
}
if (structure->IsAccessorPair()) {
- Handle<Object> setter(AccessorPair::cast(structure)->setter(), isolate);
+ Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
if (setter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value);
+ return SetPropertyWithDefinedSetter(
+ object, Handle<JSReceiver>::cast(setter), value);
} else {
if (strict_mode == kNonStrictMode) {
return value;
}
- Handle<Object> holder_handle(holder, isolate);
Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
- Handle<Object> args[2] = { key, holder_handle };
- return isolate->Throw(
- *isolate->factory()->NewTypeError("no_setter_in_callback",
- HandleVector(args, 2)));
+ Handle<Object> args[2] = { key, holder };
+ Handle<Object> error = isolate->factory()->NewTypeError(
+ "no_setter_in_callback", HandleVector(args, 2));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
}
@@ -11771,7 +11740,7 @@
if (structure->IsDeclaredAccessorInfo()) return value;
UNREACHABLE();
- return NULL;
+ return Handle<Object>();
}
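With handlification, SetElementWithCallback signals failure by returning an empty Handle<Object>() after throwing or scheduling an exception, rather than a Failure object; RETURN_HANDLE_IF_SCHEDULED_EXCEPTION and the empty-handle returns above all follow that convention. A hedged sketch of how a raw-API caller bridges back to MaybeObject* (illustrative only; the bridge function is hypothetical and glosses over the private access the real caller has):

MaybeObject* SetElementWithCallbackBridge(
    Isolate* isolate, Handle<JSObject> object, Handle<Object> structure,
    uint32_t index, Handle<Object> value, StrictModeFlag strict_mode) {
  Handle<Object> result = JSObject::SetElementWithCallback(
      object, structure, index, value, object, strict_mode);
  RETURN_IF_EMPTY_HANDLE(isolate, result);  // Empty handle == pending exception.
  return *result;  // Unwrap back to the raw-pointer world.
}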
@@ -11968,10 +11937,13 @@
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
- Object* element = dictionary->ValueAt(entry);
+ Handle<Object> element(dictionary->ValueAt(entry), isolate);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS && set_mode == SET_PROPERTY) {
- return SetElementWithCallback(element, index, *value, this, strict_mode);
+ Handle<Object> result = SetElementWithCallback(self, element, index,
+ value, self, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
} else {
dictionary->UpdateMaxNumberKey(index);
// If a value has not been initialized we allow writing to it even if it
@@ -11996,13 +11968,13 @@
}
// Elements of the arguments object in slow mode might be slow aliases.
if (is_arguments && element->IsAliasedArgumentsEntry()) {
- AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(element);
+ AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(*element);
Context* context = Context::cast(elements->get(0));
int context_index = entry->aliased_context_slot();
ASSERT(!context->get(context_index)->IsTheHole());
context->set(context_index, *value);
// For elements that are still writable we keep slow aliasing.
- if (!details.IsReadOnly()) value = handle(element, isolate);
+ if (!details.IsReadOnly()) value = element;
}
dictionary->ValueAtPut(entry, *value);
}
@@ -12465,11 +12437,10 @@
}
-Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->TransitionElementsKind(to_kind),
- Object);
+void JSObject::TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
+ object->TransitionElementsKind(to_kind));
}
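TransitionElementsKind now uses the void-wrapper pattern applied throughout this patch: CALL_HEAP_FUNCTION_VOID runs the raw MaybeObject* version and retries after GC on allocation failure, so callers no longer receive or check a result. A sketch of a call site under the new signature (the helper is hypothetical):

void EnsureFastElements(Handle<JSObject> object) {
  // No result to check: allocation retries happen inside the wrapper, and
  // an unrecoverable failure aborts rather than propagating a Failure.
  JSObject::TransitionElementsKind(object, FAST_ELEMENTS);
}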
@@ -12492,7 +12463,7 @@
if (IsHoleyElementsKind(kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
- if (AllocationSite::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
// If the array is huge, it's not likely to be defined in a local
// function, so we shouldn't make new instances of it very often.
uint32_t length = 0;
@@ -12514,7 +12485,7 @@
if (IsHoleyElementsKind(kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
- if (AllocationSite::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ if (IsMoreGeneralElementsKindTransition(kind, to_kind)) {
if (FLAG_trace_track_allocation_sites) {
PrintF("AllocationSite: JSArray %p site updated %s->%s\n",
reinterpret_cast<void*>(this),
@@ -14508,17 +14479,6 @@
}
-// TODO(mstarzinger): Temporary wrapper until handlified.
-static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict,
- Handle<Name> name,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION(dict->GetIsolate(),
- dict->Add(*name, *value, details),
- NameDictionary);
-}
-
-
Handle<PropertyCell> GlobalObject::EnsurePropertyCell(
Handle<GlobalObject> global,
Handle<Name> name) {
@@ -16097,6 +16057,14 @@
}
+void PropertyCell::SetValueInferType(Handle<PropertyCell> cell,
+ Handle<Object> value,
+ WriteBarrierMode mode) {
+ CALL_HEAP_FUNCTION_VOID(cell->GetIsolate(),
+ cell->SetValueInferType(*value, mode));
+}
+
+
MaybeObject* PropertyCell::SetValueInferType(Object* value,
WriteBarrierMode ignored) {
set_value(value, ignored);
diff --git a/src/objects.h b/src/objects.h
index d3593b6..c894915 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1950,42 +1950,27 @@
// Casting.
static inline JSReceiver* cast(Object* obj);
+ // Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5.
static Handle<Object> SetProperty(Handle<JSReceiver> object,
Handle<Name> key,
Handle<Object> value,
PropertyAttributes attributes,
- StrictModeFlag strict_mode);
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode =
+ MAY_BE_STORE_FROM_KEYED);
static Handle<Object> SetElement(Handle<JSReceiver> object,
uint32_t index,
Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
- MUST_USE_RESULT static MaybeObject* SetPropertyOrFail(
- Handle<JSReceiver> object,
- Handle<Name> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED);
+ // Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
+ static inline bool HasProperty(Handle<JSReceiver> object, Handle<Name> name);
+ static inline bool HasLocalProperty(Handle<JSReceiver>, Handle<Name> name);
+ static inline bool HasElement(Handle<JSReceiver> object, uint32_t index);
+ static inline bool HasLocalElement(Handle<JSReceiver> object, uint32_t index);
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* SetProperty(
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED);
- MUST_USE_RESULT MaybeObject* SetProperty(
- LookupResult* result,
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED);
- MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter,
- Object* value);
-
+ // Implementation of [[Delete]], ECMA-262 5th edition, section 8.12.7.
static Handle<Object> DeleteProperty(Handle<JSReceiver> object,
Handle<Name> name,
DeleteMode mode = NORMAL_DELETION);
@@ -2011,12 +1996,6 @@
inline PropertyAttributes GetElementAttribute(uint32_t index);
inline PropertyAttributes GetLocalElementAttribute(uint32_t index);
- // Can cause a GC.
- inline bool HasProperty(Name* name);
- inline bool HasLocalProperty(Name* name);
- inline bool HasElement(uint32_t index);
- inline bool HasLocalElement(uint32_t index);
-
// Return the object's prototype (might be Heap::null_value()).
inline Object* GetPrototype();
@@ -2036,12 +2015,24 @@
protected:
Smi* GenerateIdentityHash();
+ static Handle<Object> SetPropertyWithDefinedSetter(Handle<JSReceiver> object,
+ Handle<JSReceiver> setter,
+ Handle<Object> value);
+
private:
PropertyAttributes GetPropertyAttributeForResult(JSReceiver* receiver,
LookupResult* result,
Name* name,
bool continue_search);
+ static Handle<Object> SetProperty(Handle<JSReceiver> receiver,
+ LookupResult* result,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_from_keyed);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
};
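Every raw-pointer query deleted above has a static, handle-taking replacement, so call sites migrate mechanically. A sketch of the before/after at a call site (obj and name stand for whatever handles the caller already holds):

// Before: raw receiver; unsafe across a lookup that can trigger GC.
//   bool found = obj->HasProperty(*name);
// After: handles in, GC-safe.
bool found = JSReceiver::HasProperty(obj, name);
bool own   = JSReceiver::HasLocalProperty(obj, name);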
@@ -2135,36 +2126,29 @@
Object* structure,
Name* name);
- // Can cause GC.
- MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result,
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithFailedAccessCheck(
- LookupResult* result,
- Name* name,
- Object* value,
- bool check_prototype,
+ static Handle<Object> SetPropertyWithCallback(
+ Handle<JSObject> object,
+ Handle<Object> structure,
+ Handle<Name> name,
+ Handle<Object> value,
+ Handle<JSObject> holder,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithCallback(
- Object* structure,
- Name* name,
- Object* value,
- JSObject* holder,
- StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
- Name* name,
- Object* value,
+
+ static Handle<Object> SetPropertyWithInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
- MUST_USE_RESULT MaybeObject* SetPropertyPostInterceptor(
- Name* name,
- Object* value,
+
+ static Handle<Object> SetPropertyForResult(
+ Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
- StoreMode mode = ALLOW_AS_CONSTANT);
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
static Handle<Object> SetLocalPropertyIgnoreAttributes(
Handle<JSObject> object,
@@ -2188,10 +2172,8 @@
// Extend the receiver with a single fast property that appears first in the
// passed map. This also extends the property backing store if necessary.
static void AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map);
- inline MUST_USE_RESULT MaybeObject* AllocateStorageForMap(Map* map);
static void MigrateInstance(Handle<JSObject> instance);
- inline MUST_USE_RESULT MaybeObject* MigrateInstance();
static Handle<Object> TryMigrateInstance(Handle<JSObject> instance);
inline MUST_USE_RESULT MaybeObject* TryMigrateInstance();
@@ -2209,27 +2191,18 @@
// Handles the special representation of JS global objects.
Object* GetNormalizedProperty(LookupResult* result);
- // Sets the property value in a normalized object given (key, value).
- // Handles the special representation of JS global objects.
- static Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
- LookupResult* result,
- Handle<Object> value);
-
// Sets the property value in a normalized object given a lookup result.
// Handles the special representation of JS global objects.
- MUST_USE_RESULT MaybeObject* SetNormalizedProperty(LookupResult* result,
- Object* value);
+ static void SetNormalizedProperty(Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Object> value);
// Sets the property value in a normalized object given (key, value, details).
// Handles the special representation of JS global objects.
- static Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
- Handle<Name> key,
- Handle<Object> value,
- PropertyDetails details);
-
- MUST_USE_RESULT MaybeObject* SetNormalizedProperty(Name* name,
- Object* value,
- PropertyDetails details);
+ static void SetNormalizedProperty(Handle<JSObject> object,
+ Handle<Name> key,
+ Handle<Object> value,
+ PropertyDetails details);
static void OptimizeAsPrototype(Handle<JSObject> object);
@@ -2456,8 +2429,6 @@
void LocalLookupRealNamedProperty(Name* name, LookupResult* result);
void LookupRealNamedProperty(Name* name, LookupResult* result);
void LookupRealNamedPropertyInPrototypes(Name* name, LookupResult* result);
- MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
- uint32_t index, Object* value, bool* found, StrictModeFlag strict_mode);
void LookupCallbackProperty(Name* name, LookupResult* result);
// Returns the number of properties on this object filtering out properties
@@ -2483,32 +2454,6 @@
// Returns the number of enumerable elements.
int GetEnumElementKeys(FixedArray* storage);
- // Add a property to a fast-case object using a map transition to
- // new_map.
- MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(
- Map* new_map,
- Name* name,
- Object* value,
- int field_index,
- Representation representation);
-
- // Add a constant function property to a fast-case object.
- // This leaves a CONSTANT_TRANSITION in the old map, and
- // if it is called on a second object with this map, a
- // normal property is added instead, with a map transition.
- // This avoids the creation of many maps with the same constant
- // function, all orphaned.
- MUST_USE_RESULT MaybeObject* AddConstantProperty(
- Name* name,
- Object* constant,
- PropertyAttributes attributes,
- TransitionFlag flag);
-
- MUST_USE_RESULT MaybeObject* ReplaceSlowProperty(
- Name* name,
- Object* value,
- PropertyAttributes attributes);
-
// Returns a new map with all transitions dropped from the object's current
// map and the ElementsKind set.
static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
@@ -2519,43 +2464,18 @@
MUST_USE_RESULT MaybeObject* GetElementsTransitionMapSlow(
ElementsKind elements_kind);
- static Handle<Object> TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind);
+ static void TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind);
MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
MUST_USE_RESULT MaybeObject* UpdateAllocationSite(ElementsKind to_kind);
+ static void MigrateToMap(Handle<JSObject> object, Handle<Map> new_map);
MUST_USE_RESULT MaybeObject* MigrateToMap(Map* new_map);
- MUST_USE_RESULT MaybeObject* GeneralizeFieldRepresentation(
- int modify_index,
- Representation new_representation,
- StoreMode store_mode);
-
- // Add a property to a fast-case object.
- MUST_USE_RESULT MaybeObject* AddFastProperty(
- Name* name,
- Object* value,
- PropertyAttributes attributes,
- StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
- ValueType value_type = OPTIMAL_REPRESENTATION,
- TransitionFlag flag = INSERT_TRANSITION);
-
- // Add a property to a slow-case object.
- MUST_USE_RESULT MaybeObject* AddSlowProperty(Name* name,
- Object* value,
- PropertyAttributes attributes);
-
- // Add a property to an object. May cause GC.
- MUST_USE_RESULT MaybeObject* AddProperty(
- Name* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode,
- StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
- ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
- ValueType value_type = OPTIMAL_REPRESENTATION,
- StoreMode mode = ALLOW_AS_CONSTANT,
- TransitionFlag flag = INSERT_TRANSITION);
+ static void GeneralizeFieldRepresentation(Handle<JSObject> object,
+ int modify_index,
+ Representation new_representation,
+ StoreMode store_mode);
// Convert the object to use the canonical dictionary
// representation. If the object is expected to have additional properties
@@ -2565,10 +2485,6 @@
PropertyNormalizationMode mode,
int expected_additional_properties);
- MUST_USE_RESULT MaybeObject* NormalizeProperties(
- PropertyNormalizationMode mode,
- int expected_additional_properties);
-
// Convert and update the elements backing store to be a
// SeededNumberDictionary dictionary. Returns the backing after conversion.
static Handle<SeededNumberDictionary> NormalizeElements(
@@ -2577,13 +2493,9 @@
MUST_USE_RESULT MaybeObject* NormalizeElements();
// Transform slow named properties to fast variants.
- // Returns failure if allocation failed.
static void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields);
- MUST_USE_RESULT MaybeObject* TransformToFastProperties(
- int unused_property_fields);
-
// Access fast-case object properties at index.
MUST_USE_RESULT inline MaybeObject* FastPropertyAt(
Representation representation,
@@ -2616,22 +2528,21 @@
// Check whether this object references another object
bool ReferencesObject(Object* obj);
- // Casting.
- static inline JSObject* cast(Object* obj);
-
// Disallow further properties to be added to the object.
static Handle<Object> PreventExtensions(Handle<JSObject> object);
- MUST_USE_RESULT MaybeObject* PreventExtensions();
// ES5 Object.freeze
- MUST_USE_RESULT MaybeObject* Freeze(Isolate* isolate);
-
+ static Handle<Object> Freeze(Handle<JSObject> object);
// Called the first time an object is observed with ES7 Object.observe.
MUST_USE_RESULT MaybeObject* SetObserved(Isolate* isolate);
- // Copy object
- MUST_USE_RESULT MaybeObject* DeepCopy(Isolate* isolate);
+ // Copy object.
+ static Handle<JSObject> Copy(Handle<JSObject> object);
+ static Handle<JSObject> DeepCopy(Handle<JSObject> object);
+
+ // Casting.
+ static inline JSObject* cast(Object* obj);
// Dispatched behavior.
void JSObjectShortPrint(StringStream* accumulator);
@@ -2734,15 +2645,6 @@
friend class DictionaryElementsAccessor;
friend class JSReceiver;
- // TODO(mstarzinger): Soon to be handlified.
- MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
- Name* key,
- Object* value,
- PropertyAttributes attributes,
- ValueType value_type = OPTIMAL_REPRESENTATION,
- StoreMode mode = ALLOW_AS_CONSTANT,
- ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK);
-
MUST_USE_RESULT MaybeObject* GetElementWithCallback(Object* receiver,
Object* structure,
uint32_t index,
@@ -2755,11 +2657,12 @@
JSReceiver* receiver,
uint32_t index,
bool continue_search);
- MUST_USE_RESULT MaybeObject* SetElementWithCallback(
- Object* structure,
+ static Handle<Object> SetElementWithCallback(
+ Handle<JSObject> object,
+ Handle<Object> structure,
uint32_t index,
- Object* value,
- JSObject* holder,
+ Handle<Object> value,
+ Handle<JSObject> holder,
StrictModeFlag strict_mode);
MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(
uint32_t index,
@@ -2775,17 +2678,91 @@
StrictModeFlag strict_mode,
bool check_prototype,
SetPropertyMode set_mode);
+ MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
+ uint32_t index,
+ Object* value,
+ bool* found,
+ StrictModeFlag strict_mode);
// Searches the prototype chain for property 'name'. If it is found and
// has a setter, invoke it and set '*done' to true. If it is found and is
// read-only, reject and set '*done' to true. Otherwise, set '*done' to
- // false. Can cause GC and can return a failure result with '*done==true'.
- MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypes(
- Name* name,
- Object* value,
+ // false. Can throw and return an empty handle with '*done==true'.
+ static Handle<Object> SetPropertyViaPrototypes(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool* done);
+ static Handle<Object> SetPropertyPostInterceptor(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
+ static Handle<Object> SetPropertyUsingTransition(
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+ static Handle<Object> SetPropertyWithFailedAccessCheck(
+ Handle<JSObject> object,
+ LookupResult* result,
+ Handle<Name> name,
+ Handle<Object> value,
+ bool check_prototype,
+ StrictModeFlag strict_mode);
+
+ // Add a property to an object.
+ static Handle<Object> AddProperty(
+ Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode,
+ StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
+ ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
+ ValueType value_type = OPTIMAL_REPRESENTATION,
+ StoreMode mode = ALLOW_AS_CONSTANT,
+ TransitionFlag flag = INSERT_TRANSITION);
+
+ // Add a constant function property to a fast-case object.
+ // This leaves a CONSTANT_TRANSITION in the old map, and
+ // if it is called on a second object with this map, a
+ // normal property is added instead, with a map transition.
+ // This avoids the creation of many maps with the same constant
+ // function, all orphaned.
+ static void AddConstantProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> constant,
+ PropertyAttributes attributes,
+ TransitionFlag flag);
+
+ // Add a property to a fast-case object.
+ static void AddFastProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StoreFromKeyed store_mode,
+ ValueType value_type,
+ TransitionFlag flag);
+
+ // Add a property to a fast-case object using a map transition to
+ // new_map.
+ static void AddFastPropertyUsingMap(Handle<JSObject> object,
+ Handle<Map> new_map,
+ Handle<Name> name,
+ Handle<Object> value,
+ int field_index,
+ Representation representation);
+
+ // Add a property to a slow-case object.
+ static void AddSlowProperty(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes);
static Handle<Object> DeleteProperty(Handle<JSObject> object,
Handle<Name> name,
@@ -3175,6 +3152,13 @@
DescriptorArray* src,
int src_index,
const WhitenessWitness&);
+ static Handle<DescriptorArray> Merge(Handle<DescriptorArray> desc,
+ int verbatim,
+ int valid,
+ int new_size,
+ int modify_index,
+ StoreMode store_mode,
+ Handle<DescriptorArray> other);
MUST_USE_RESULT MaybeObject* Merge(int verbatim,
int valid,
int new_size,
@@ -3191,6 +3175,10 @@
return CopyUpToAddAttributes(enumeration_index, NONE);
}
+ static Handle<DescriptorArray> CopyUpToAddAttributes(
+ Handle<DescriptorArray> desc,
+ int enumeration_index,
+ PropertyAttributes attributes);
MUST_USE_RESULT MaybeObject* CopyUpToAddAttributes(
int enumeration_index,
PropertyAttributes attributes);
@@ -4233,8 +4221,9 @@
public:
static const int kEntries = 64;
- MUST_USE_RESULT MaybeObject* Get(JSObject* object,
- PropertyNormalizationMode mode);
+ static Handle<Map> Get(Handle<NormalizedMapCache> cache,
+ Handle<JSObject> object,
+ PropertyNormalizationMode mode);
void Clear();
@@ -4811,15 +4800,10 @@
CONSTANT,
CALLBACKS,
INTERCEPTOR,
- MAP_TRANSITION,
+ TRANSITION,
NONEXISTENT
};
- enum StubHolder {
- OWN_STUB,
- PROTOTYPE_STUB
- };
-
typedef int ExtraICState;
static const ExtraICState kNoExtraICState = 0;
@@ -5024,8 +5008,6 @@
class ExtraICStateKeyedAccessStoreMode:
public BitField<KeyedAccessStoreMode, 1, 4> {}; // NOLINT
- class ExtraICStateStubHolder: public BitField<StubHolder, 0, 1> {};
-
static inline StrictModeFlag GetStrictMode(ExtraICState extra_ic_state) {
return ExtraICStateStrictMode::decode(extra_ic_state);
}
@@ -5042,10 +5024,6 @@
ExtraICStateStrictMode::encode(strict_mode);
}
- static inline ExtraICState ComputeExtraICState(StubHolder stub_holder) {
- return ExtraICStateStubHolder::encode(stub_holder);
- }
-
// Flags operations.
static inline Flags ComputeFlags(
Kind kind,
@@ -5154,7 +5132,7 @@
// being entered through the prologue. Used to determine when it is
// relatively safe to flush this code object and replace it with the lazy
// compilation stub.
- static void MakeCodeAgeSequenceYoung(byte* sequence);
+ static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate);
void MakeOlder(MarkingParity);
static bool IsYoungSequence(byte* sequence);
bool IsOld();
@@ -5300,10 +5278,11 @@
MarkingParity* parity);
static void GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity);
- static Code* GetCodeAgeStub(Age age, MarkingParity parity);
+ static Code* GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity);
// Code aging -- platform-specific
- static void PatchPlatformCodeAge(byte* sequence, Age age,
+ static void PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence, Age age,
MarkingParity parity);
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
@@ -5616,11 +5595,8 @@
int modify_index,
Representation new_representation,
StoreMode store_mode);
- MUST_USE_RESULT MaybeObject* GeneralizeRepresentation(
- int modify_index,
- Representation representation,
- StoreMode store_mode);
- MUST_USE_RESULT MaybeObject* CopyGeneralizeAllRepresentations(
+ static Handle<Map> CopyGeneralizeAllRepresentations(
+ Handle<Map> map,
int modify_index,
StoreMode store_mode,
PropertyAttributes attributes,
@@ -5795,11 +5771,19 @@
MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
static Handle<Map> CopyDropDescriptors(Handle<Map> map);
MUST_USE_RESULT MaybeObject* CopyDropDescriptors();
+ static Handle<Map> CopyReplaceDescriptors(Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ TransitionFlag flag,
+ Handle<Name> name);
MUST_USE_RESULT MaybeObject* CopyReplaceDescriptors(
DescriptorArray* descriptors,
TransitionFlag flag,
Name* name = NULL,
SimpleTransitionFlag simple_flag = FULL_TRANSITION);
+ static Handle<Map> CopyInstallDescriptors(
+ Handle<Map> map,
+ int new_descriptor,
+ Handle<DescriptorArray> descriptors);
MUST_USE_RESULT MaybeObject* CopyInstallDescriptors(
int new_descriptor,
DescriptorArray* descriptors);
@@ -7841,11 +7825,10 @@
static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
DECL_ACCESSORS(transition_info, Object)
+ DECL_ACCESSORS(dependent_code, DependentCode)
DECL_ACCESSORS(weak_next, Object)
- void Initialize() {
- SetElementsKind(GetInitialFastElementsKind());
- }
+ inline void Initialize();
ElementsKind GetElementsKind() {
ASSERT(!IsLiteralSite());
@@ -7873,11 +7856,12 @@
static inline bool CanTrack(InstanceType type);
static const int kTransitionInfoOffset = HeapObject::kHeaderSize;
- static const int kWeakNextOffset = kTransitionInfoOffset + kPointerSize;
+ static const int kDependentCodeOffset = kTransitionInfoOffset + kPointerSize;
+ static const int kWeakNextOffset = kDependentCodeOffset + kPointerSize;
static const int kSize = kWeakNextOffset + kPointerSize;
typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
- kTransitionInfoOffset + kPointerSize,
+ kDependentCodeOffset + kPointerSize,
kSize> BodyDescriptor;
private:
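Inserting dependent_code between transition_info and weak_next shifts each later offset by one pointer, and the body descriptor's end bound moves with it so the strong-pointer visitor covers transition_info and dependent_code while still skipping the weak_next link. Worked out for a 64-bit target (a sketch; kHeaderSize shown as 8 for illustration):

// kTransitionInfoOffset = 8
// kDependentCodeOffset  = 8 + 8  = 16
// kWeakNextOffset       = 16 + 8 = 24
// kSize                 = 24 + 8 = 32
// BodyDescriptor iterates pointers in [8, kDependentCodeOffset + 8) = [8, 24):
// transition_info and dependent_code are visited, weak_next is not.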
@@ -9018,6 +9002,9 @@
// of the cell's current type and the value's type. If the change causes
// a change of the type of the cell's contents, code dependent on the cell
// will be deoptimized.
+ static void SetValueInferType(Handle<PropertyCell> cell,
+ Handle<Object> value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
MUST_USE_RESULT MaybeObject* SetValueInferType(
Object* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -9070,9 +9057,6 @@
// Casting.
static inline JSProxy* cast(Object* obj);
- bool HasPropertyWithHandler(Name* name);
- bool HasElementWithHandler(uint32_t index);
-
MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(
Object* receiver,
Name* name);
@@ -9080,21 +9064,15 @@
Object* receiver,
uint32_t index);
- MUST_USE_RESULT MaybeObject* SetPropertyWithHandler(
- JSReceiver* receiver,
- Name* name,
- Object* value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
-
// If the handler defines an accessor property with a setter, invoke it.
// If it defines an accessor property without a setter, or a data property
// that is read-only, throw. In all these cases set '*done' to true,
// otherwise set it to false.
- MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypesWithHandler(
- JSReceiver* receiver,
- Name* name,
- Object* value,
+ static Handle<Object> SetPropertyViaPrototypesWithHandler(
+ Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool* done);
@@ -9142,12 +9120,21 @@
private:
friend class JSReceiver;
+ static Handle<Object> SetPropertyWithHandler(Handle<JSProxy> proxy,
+ Handle<JSReceiver> receiver,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
static Handle<Object> SetElementWithHandler(Handle<JSProxy> proxy,
Handle<JSReceiver> receiver,
uint32_t index,
Handle<Object> value,
StrictModeFlag strict_mode);
+ static bool HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name);
+ static bool HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index);
+
static Handle<Object> DeletePropertyWithHandler(Handle<JSProxy> proxy,
Handle<Name> name,
DeleteMode mode);
diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc
index 085143d..029c115 100644
--- a/src/optimizing-compiler-thread.cc
+++ b/src/optimizing-compiler-thread.cc
@@ -74,7 +74,6 @@
{ AllowHandleDereference allow_handle_dereference;
FlushInputQueue(true);
}
- Release_Store(&queue_length_, static_cast<AtomicWord>(0));
Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
stop_semaphore_.Signal();
// Return to start of consumer loop.
@@ -114,6 +113,7 @@
osr_candidates_.RemoveElement(optimizing_compiler);
ready_for_osr_.Add(optimizing_compiler);
} else {
+ LockGuard<Mutex> access_queue(&queue_mutex_);
output_queue_.Enqueue(optimizing_compiler);
isolate_->stack_guard()->RequestInstallCode();
}
@@ -134,13 +134,20 @@
}
delete info;
}
+ Release_Store(&queue_length_, static_cast<AtomicWord>(0));
+
+ LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+ osr_candidates_.Clear();
}
void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
OptimizingCompiler* optimizing_compiler;
// The optimizing compiler is allocated in the CompilationInfo's zone.
- while (output_queue_.Dequeue(&optimizing_compiler)) {
+ while (true) {
+ { LockGuard<Mutex> access_queue(&queue_mutex_);
+ if (!output_queue_.Dequeue(&optimizing_compiler)) break;
+ }
CompilationInfo* info = optimizing_compiler->info();
if (restore_function_code) {
Handle<JSFunction> function = info->closure();
@@ -149,7 +156,6 @@
delete info;
}
- osr_candidates_.Clear();
RemoveStaleOSRCandidates(0);
}
@@ -196,9 +202,12 @@
void OptimizingCompilerThread::InstallOptimizedFunctions() {
ASSERT(!IsOptimizerThread());
HandleScope handle_scope(isolate_);
+
OptimizingCompiler* compiler;
while (true) {
- if (!output_queue_.Dequeue(&compiler)) return;
+ { LockGuard<Mutex> access_queue(&queue_mutex_);
+ if (!output_queue_.Dequeue(&compiler)) break;
+ }
Compiler::InstallOptimizedCode(compiler);
}
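Both drains of output_queue_ now hold queue_mutex_ only for the Dequeue call itself, releasing it before the dequeued task is processed; the enqueue in the hunk above takes the same lock. The critical-section shape in isolation (a sketch of the pattern, with Process standing in for the per-task work):

while (true) {
  OptimizingCompiler* task;
  { // The lock covers only the queue operation ...
    LockGuard<Mutex> access_queue(&queue_mutex_);
    if (!output_queue_.Dequeue(&task)) break;
  } // ... and is released here, before the potentially slow work.
  Process(task);  // E.g. Compiler::InstallOptimizedCode or flush handling.
}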
diff --git a/src/optimizing-compiler-thread.h b/src/optimizing-compiler-thread.h
index d1ed6a2..4231765 100644
--- a/src/optimizing-compiler-thread.h
+++ b/src/optimizing-compiler-thread.h
@@ -115,7 +115,7 @@
UnboundQueue<OptimizingCompiler*> input_queue_;
// Queue of recompilation tasks ready to be installed (excluding OSR).
UnboundQueue<OptimizingCompiler*> output_queue_;
- // List of all OSR related recompilation tasks (both incoming and ready ones).
+ // List of recompilation tasks for OSR in the input queue.
List<OptimizingCompiler*> osr_candidates_;
// List of recompilation tasks ready for OSR.
List<OptimizingCompiler*> ready_for_osr_;
@@ -125,6 +125,8 @@
TimeDelta time_spent_compiling_;
TimeDelta time_spent_total_;
+ // TODO(yangguo): remove this once the memory leak has been figured out.
+ Mutex queue_mutex_;
Mutex osr_list_mutex_;
int osr_hits_;
int osr_attempts_;
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 4d3b1e3..5903438 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -205,12 +205,6 @@
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // Not supported on Cygwin.
- return 0;
-}
-
-
// The VirtualMemory implementation is taken from platform-win32.cc.
// The mmap-based virtual memory implementation as it is used on most posix
// platforms does not work well because Cygwin does not support MAP_FIXED.
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index d818278..518ad31 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -199,10 +199,6 @@
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
-}
-
// Constants used for mmap.
static const int kMmapFd = -1;
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index b8b9602..74d473f 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -313,16 +313,6 @@
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // backtrace is a glibc extension.
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
- return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
-#else
- return 0;
-#endif
-}
-
-
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 67cc96f..a58bc1a 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -220,14 +220,6 @@
}
-int OS::StackWalk(Vector<StackFrame> frames) {
- // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
- if (backtrace == NULL) return 0;
-
- return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames);
-}
-
-
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 30a484f..4f5420e 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -231,34 +231,6 @@
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- // backtrace is a glibc extension.
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
- "%s",
- symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
-}
-
// Constants used for mmap.
static const int kMmapFd = -1;
diff --git a/src/platform-posix.h b/src/platform-posix.h
index 6b73387..e0fbc0c 100644
--- a/src/platform-posix.h
+++ b/src/platform-posix.h
@@ -39,7 +39,6 @@
namespace internal {
// Used by platform implementation files during OS::DumpBacktrace()
-// and OS::StackWalk().
template<int (*backtrace)(void**, int),
char** (*backtrace_symbols)(void* const*, int)>
struct POSIXBacktraceHelper {
@@ -73,32 +72,6 @@
fflush(stderr);
free(symbols);
}
-
- static int StackWalk(Vector<OS::StackFrame> frames) {
- int frames_size = frames.length();
- ScopedVector<void*> addresses(frames_size);
-
- int frames_count = backtrace(addresses.start(), frames_size);
-
- char** symbols = backtrace_symbols(addresses.start(), frames_count);
- if (symbols == NULL) {
- return OS::kStackWalkError;
- }
-
- for (int i = 0; i < frames_count; i++) {
- frames[i].address = addresses[i];
- // Format a text representation of the frame based on the information
- // available.
- OS::SNPrintF(MutableCStrVector(frames[i].text, OS::kStackWalkMaxTextLen),
- "%s", symbols[i]);
- // Make sure line termination is in place.
- frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';
- }
-
- free(symbols);
-
- return frames_count;
- }
};
} } // namespace v8::internal
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index f082af1..df81c3a 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -211,20 +211,6 @@
}
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- ucontext_t ctx;
- struct StackWalker walker = { frames, 0 };
-
- if (getcontext(&ctx) < 0) return kStackWalkError;
-
- if (!walkcontext(&ctx, StackWalkCallback, &walker)) {
- return kStackWalkError;
- }
-
- return walker.index;
-}
-
-
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index ea4f7ea..073b21a 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1208,133 +1208,9 @@
}
-// Walk the stack using the facilities in dbghelp.dll and tlhelp32.dll
-
-// Switch off warning 4748 (/GS can not protect parameters and local variables
-// from local buffer overrun because optimizations are disabled in function) as
-// it is triggered by the use of inline assembler.
-#pragma warning(push)
-#pragma warning(disable : 4748)
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
- BOOL ok;
-
- // Load the required functions from DLL's.
- if (!LoadDbgHelpAndTlHelp32()) return kStackWalkError;
-
- // Get the process and thread handles.
- HANDLE process_handle = GetCurrentProcess();
- HANDLE thread_handle = GetCurrentThread();
-
- // Read the symbols.
- if (!LoadSymbols(Isolate::Current(), process_handle)) return kStackWalkError;
-
- // Capture current context.
- CONTEXT context;
- RtlCaptureContext(&context);
-
- // Initialize the stack walking
- STACKFRAME64 stack_frame;
- memset(&stack_frame, 0, sizeof(stack_frame));
-#ifdef _WIN64
- stack_frame.AddrPC.Offset = context.Rip;
- stack_frame.AddrFrame.Offset = context.Rbp;
- stack_frame.AddrStack.Offset = context.Rsp;
-#else
- stack_frame.AddrPC.Offset = context.Eip;
- stack_frame.AddrFrame.Offset = context.Ebp;
- stack_frame.AddrStack.Offset = context.Esp;
-#endif
- stack_frame.AddrPC.Mode = AddrModeFlat;
- stack_frame.AddrFrame.Mode = AddrModeFlat;
- stack_frame.AddrStack.Mode = AddrModeFlat;
- int frames_count = 0;
-
- // Collect stack frames.
- int frames_size = frames.length();
- while (frames_count < frames_size) {
- ok = _StackWalk64(
- IMAGE_FILE_MACHINE_I386, // MachineType
- process_handle, // hProcess
- thread_handle, // hThread
- &stack_frame, // StackFrame
- &context, // ContextRecord
- NULL, // ReadMemoryRoutine
- _SymFunctionTableAccess64, // FunctionTableAccessRoutine
- _SymGetModuleBase64, // GetModuleBaseRoutine
- NULL); // TranslateAddress
- if (!ok) break;
-
- // Store the address.
- ASSERT((stack_frame.AddrPC.Offset >> 32) == 0); // 32-bit address.
- frames[frames_count].address =
- reinterpret_cast<void*>(stack_frame.AddrPC.Offset);
-
- // Try to locate a symbol for this frame.
- DWORD64 symbol_displacement;
- SmartArrayPointer<IMAGEHLP_SYMBOL64> symbol(
- NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen));
- if (symbol.is_empty()) return kStackWalkError; // Out of memory.
- memset(*symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen);
- (*symbol)->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
- (*symbol)->MaxNameLength = kStackWalkMaxNameLen;
- ok = _SymGetSymFromAddr64(process_handle, // hProcess
- stack_frame.AddrPC.Offset, // Address
- &symbol_displacement, // Displacement
- *symbol); // Symbol
- if (ok) {
- // Try to locate more source information for the symbol.
- IMAGEHLP_LINE64 Line;
- memset(&Line, 0, sizeof(Line));
- Line.SizeOfStruct = sizeof(Line);
- DWORD line_displacement;
- ok = _SymGetLineFromAddr64(
- process_handle, // hProcess
- stack_frame.AddrPC.Offset, // dwAddr
- &line_displacement, // pdwDisplacement
- &Line); // Line
- // Format a text representation of the frame based on the information
- // available.
- if (ok) {
- SNPrintF(MutableCStrVector(frames[frames_count].text,
- kStackWalkMaxTextLen),
- "%s %s:%d:%d",
- (*symbol)->Name, Line.FileName, Line.LineNumber,
- line_displacement);
- } else {
- SNPrintF(MutableCStrVector(frames[frames_count].text,
- kStackWalkMaxTextLen),
- "%s",
- (*symbol)->Name);
- }
- // Make sure line termination is in place.
- frames[frames_count].text[kStackWalkMaxTextLen - 1] = '\0';
- } else {
- // No text representation of this frame
- frames[frames_count].text[0] = '\0';
-
- // Continue if we are just missing a module (for non C/C++ frames a
- // module will never be found).
- int err = GetLastError();
- if (err != ERROR_MOD_NOT_FOUND) {
- break;
- }
- }
-
- frames_count++;
- }
-
- // Return the number of frames filled in.
- return frames_count;
-}
-
-
-// Restore warnings to previous settings.
-#pragma warning(pop)
-
#else // __MINGW32__
void OS::LogSharedLibraryAddresses(Isolate* isolate) { }
void OS::SignalCodeMovingGC() { }
-int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; }
#endif // __MINGW32__
diff --git a/src/platform.h b/src/platform.h
index ee8fb92..e0e62fa 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -67,6 +67,8 @@
int strncasecmp(const char* s1, const char* s2, int n);
+// Visual C++ 2013 and higher implement this function.
+#if (_MSC_VER < 1800)
inline int lrint(double flt) {
int intgr;
#if V8_TARGET_ARCH_IA32
@@ -84,6 +86,8 @@
return intgr;
}
+#endif // _MSC_VER < 1800
+
#endif // V8_CC_MSVC
namespace v8 {
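Visual C++ 2013 reports _MSC_VER as 1800 and is the first MSVC whose CRT supplies lrint, so the hand-rolled fallback above now compiles only for older toolchains. Either path behaves the same at a call site (a sketch; the default round-to-nearest-even FP mode is assumed):

// Halfway cases round to even under the default FP environment.
int a = lrint(2.5);   // 2
int b = lrint(3.5);   // 4
int c = lrint(-2.5);  // -2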
@@ -264,8 +268,6 @@
char text[kStackWalkMaxTextLen];
};
- static int StackWalk(Vector<StackFrame> frames);
-
class MemoryMappedFile {
public:
static MemoryMappedFile* open(const char* name);
diff --git a/src/preparser.cc b/src/preparser.cc
index 36a94a3..2486632 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -42,10 +42,10 @@
#include "unicode.h"
#include "utils.h"
-#ifdef _MSC_VER
+#if V8_CC_MSVC && (_MSC_VER < 1800)
namespace std {
-// Usually defined in math.h, but not in MSVC.
+// Usually defined in math.h, but not in MSVC until VS2013+.
// Abstracted to work around the missing declaration.
int isfinite(double value);
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 95dcc4f..32a85cc 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -185,7 +185,7 @@
PrintF("]\n");
}
- Deoptimizer::PatchInterruptCode(isolate_, shared->code());
+ BackEdgeTable::Patch(isolate_, shared->code());
}
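The interrupt-patching entry points move from Deoptimizer to BackEdgeTable but keep their pairing: Patch arms the back-edge checks of unoptimized code when the profiler attempts on-stack replacement, and the matching Revert in runtime.cc below disarms them once compilation finishes. Sketched together (assumed semantics, following the two call sites in this patch):

BackEdgeTable::Patch(isolate, shared->code());   // Arm back edges for OSR.
Handle<Code> result =
    JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION);
BackEdgeTable::Revert(isolate, *unoptimized);    // Disarm, success or not.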
diff --git a/src/runtime.cc b/src/runtime.cc
index c09fb1d..dbc8f11 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -348,10 +348,8 @@
ElementsKind from_kind =
Handle<JSObject>::cast(object)->map()->elements_kind();
if (Map::IsValidElementsTransition(from_kind, to_kind)) {
- Handle<Object> result = JSObject::TransitionElementsKind(
- Handle<JSObject>::cast(object), to_kind);
- if (result.is_null()) return isolate->ThrowIllegalOperation();
- return *result;
+ JSObject::TransitionElementsKind(Handle<JSObject>::cast(object), to_kind);
+ return *object;
}
return isolate->ThrowIllegalOperation();
}
@@ -499,33 +497,10 @@
// Update the functions literal and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
- return JSObject::cast(*boilerplate)->DeepCopy(isolate);
-}
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
- CONVERT_SMI_ARG_CHECKED(flags, 3);
- bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
- bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
-
- // Check if boilerplate exists. If not, create it first.
- Handle<Object> boilerplate(literals->get(literals_index), isolate);
- if (*boilerplate == isolate->heap()->undefined_value()) {
- boilerplate = CreateObjectLiteralBoilerplate(isolate,
- literals,
- constant_properties,
- should_have_fast_elements,
- has_function_literal);
- RETURN_IF_EMPTY_HANDLE(isolate, boilerplate);
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *boilerplate);
- }
- return isolate->heap()->CopyJSObject(JSObject::cast(*boilerplate));
+ Handle<Object> copy = JSObject::DeepCopy(Handle<JSObject>::cast(boilerplate));
+ RETURN_IF_EMPTY_HANDLE(isolate, copy);
+ return *copy;
}
@@ -541,7 +516,10 @@
ASSERT(*elements != isolate->heap()->empty_fixed_array());
Handle<Object> boilerplate =
Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
- if (boilerplate.is_null()) return site;
+ if (boilerplate.is_null()) {
+ ASSERT(site.is_null());
+ return site;
+ }
site = isolate->factory()->NewAllocationSite();
site->set_transition_info(*boilerplate);
literals->set(literals_index, *site);
@@ -564,8 +542,10 @@
literals_index, elements);
RETURN_IF_EMPTY_HANDLE(isolate, site);
- JSObject* boilerplate = JSObject::cast(site->transition_info());
- return boilerplate->DeepCopy(isolate);
+ Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()));
+ Handle<JSObject> copy = JSObject::DeepCopy(boilerplate);
+ RETURN_IF_EMPTY_HANDLE(isolate, copy);
+ return *copy;
}
@@ -589,8 +569,7 @@
AllocationSiteMode mode = AllocationSite::GetMode(
boilerplate->GetElementsKind());
if (mode == TRACK_ALLOCATION_SITE) {
- return isolate->heap()->CopyJSObjectWithAllocationSite(
- boilerplate, *site);
+ return isolate->heap()->CopyJSObject(boilerplate, *site);
}
return isolate->heap()->CopyJSObject(boilerplate);
@@ -1844,10 +1823,12 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- return obj->PreventExtensions();
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ Handle<Object> result = JSObject::PreventExtensions(obj);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
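Runtime_PreventExtensions shows the conversion recipe used throughout runtime.cc in this patch: SealHandleScope becomes HandleScope (the handlified callee allocates handles), CONVERT_ARG_CHECKED becomes CONVERT_ARG_HANDLE_CHECKED, and an empty result handle is translated back into a Failure. The generic shape, with a hypothetical function name:

RUNTIME_FUNCTION(MaybeObject*, Runtime_ExampleOp) {  // Hypothetical.
  HandleScope scope(isolate);                  // Handles may now be created.
  ASSERT(args.length() == 1);
  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
  Handle<Object> result = JSObject::SomeHandlifiedOp(obj);  // Placeholder.
  RETURN_IF_EMPTY_HANDLE(isolate, result);     // Empty handle -> exception.
  return *result;
}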
@@ -1871,8 +1852,7 @@
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
- Handle<Object> result =
- RegExpImpl::Compile(re, pattern, flags);
+ Handle<Object> result = RegExpImpl::Compile(re, pattern, flags);
RETURN_IF_EMPTY_HANDLE(isolate, result);
return *result;
}
@@ -2164,7 +2144,7 @@
// Declare the property by setting it to the initial value if provided,
// or undefined, and use the correct mode (e.g. READ_ONLY attribute for
// constant declarations).
- ASSERT(!object->HasLocalProperty(*name));
+ ASSERT(!JSReceiver::HasLocalProperty(object, name));
Handle<Object> value(isolate->heap()->undefined_value(), isolate);
if (*initial_value != NULL) value = initial_value;
// Declaring a const context slot is a conflicting declaration if
@@ -2196,7 +2176,7 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
// args[0] == name
// args[1] == language_mode
// args[2] == value (optional)
@@ -2207,7 +2187,6 @@
bool assign = args.length() == 3;
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- GlobalObject* global = isolate->context()->global_object();
RUNTIME_ASSERT(args[1]->IsSmi());
CONVERT_LANGUAGE_MODE_ARG(language_mode, 1);
StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
@@ -2224,28 +2203,33 @@
// to assign to the property.
// Note that objects can have hidden prototypes, so we need to traverse
// the whole chain of hidden prototypes to do a 'local' lookup.
- Object* object = global;
LookupResult lookup(isolate);
- JSObject::cast(object)->LocalLookup(*name, &lookup, true);
+ isolate->context()->global_object()->LocalLookup(*name, &lookup, true);
if (lookup.IsInterceptor()) {
- HandleScope handle_scope(isolate);
PropertyAttributes intercepted =
lookup.holder()->GetPropertyAttribute(*name);
if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
// Found an interceptor that's not read only.
if (assign) {
- return lookup.holder()->SetProperty(
- &lookup, *name, args[2], attributes, strict_mode_flag);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Handle<Object> result = JSObject::SetPropertyForResult(
+ handle(lookup.holder()), &lookup, name, value, attributes,
+ strict_mode_flag);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
} else {
return isolate->heap()->undefined_value();
}
}
}
- // Reload global in case the loop above performed a GC.
- global = isolate->context()->global_object();
if (assign) {
- return global->SetProperty(*name, args[2], attributes, strict_mode_flag);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ Handle<GlobalObject> global(isolate->context()->global_object());
+ Handle<Object> result = JSReceiver::SetProperty(
+ global, name, value, attributes, strict_mode_flag);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
return isolate->heap()->undefined_value();
}
@@ -3090,10 +3074,12 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectFreeze) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, object, 0);
- return object->Freeze(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ Handle<Object> result = JSObject::Freeze(object);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -4778,7 +4764,7 @@
// Check if the given key is an array index.
uint32_t index;
if (key->ToArrayIndex(&index)) {
- return isolate->heap()->ToBoolean(object->HasElement(index));
+ return isolate->heap()->ToBoolean(JSReceiver::HasElement(object, index));
}
// Convert the key to a name - possibly by calling back into JavaScript.
@@ -4793,7 +4779,7 @@
name = Handle<Name>::cast(converted);
}
- return isolate->heap()->ToBoolean(object->HasProperty(*name));
+ return isolate->heap()->ToBoolean(JSReceiver::HasProperty(object, name));
}
MaybeObject* Runtime::GetObjectPropertyOrFail(
@@ -5028,11 +5014,15 @@
// TODO(mstarzinger): So far this only works if property attributes don't
// change, this should be fixed once we cleanup the underlying code.
if (callback->IsForeign() && result.GetAttributes() == attr) {
- return js_object->SetPropertyWithCallback(callback,
- *name,
- *obj_value,
- result.holder(),
- kStrictMode);
+ Handle<Object> result_object =
+ JSObject::SetPropertyWithCallback(js_object,
+ handle(callback, isolate),
+ name,
+ obj_value,
+ handle(result.holder()),
+ kStrictMode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result_object);
+ return *result_object;
}
}
@@ -5128,11 +5118,14 @@
if (object->IsJSProxy()) {
bool has_pending_exception = false;
- Handle<Object> name = key->IsSymbol()
+ Handle<Object> name_object = key->IsSymbol()
? key : Execution::ToString(isolate, key, &has_pending_exception);
if (has_pending_exception) return Failure::Exception();
- return JSProxy::cast(*object)->SetProperty(
- Name::cast(*name), *value, attr, strict_mode);
+ Handle<Name> name = Handle<Name>::cast(name_object);
+ Handle<Object> result = JSReceiver::SetProperty(
+ Handle<JSProxy>::cast(object), name, value, attr, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
// If the object isn't a JavaScript object, we ignore the store.
@@ -5172,7 +5165,6 @@
}
if (key->IsName()) {
- MaybeObject* result;
Handle<Name> name = Handle<Name>::cast(key);
if (name->AsArrayIndex(&index)) {
if (js_object->HasExternalArrayElements()) {
@@ -5184,13 +5176,15 @@
value = number;
}
}
- result = js_object->SetElement(
+ MaybeObject* result = js_object->SetElement(
index, *value, attr, strict_mode, true, set_mode);
+ if (result->IsFailure()) return result;
} else {
if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
- result = js_object->SetProperty(*name, *value, attr, strict_mode);
+ Handle<Object> result =
+ JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
}
- if (result->IsFailure()) return result;
return *value;
}
@@ -5205,7 +5199,10 @@
return js_object->SetElement(
index, *value, attr, strict_mode, true, set_mode);
} else {
- return js_object->SetProperty(*name, *value, attr, strict_mode);
+ Handle<Object> result =
+ JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
}
@@ -5504,7 +5501,9 @@
static MaybeObject* HasLocalPropertyImplementation(Isolate* isolate,
Handle<JSObject> object,
Handle<Name> key) {
- if (object->HasLocalProperty(*key)) return isolate->heap()->true_value();
+ if (JSReceiver::HasLocalProperty(object, key)) {
+ return isolate->heap()->true_value();
+ }
// Handle hidden prototypes. If there's a hidden prototype above this thing
// then we have to check it for properties, because they are supposed to
// look like they are on this object.
@@ -5564,12 +5563,12 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_CHECKED(Name, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
- bool result = receiver->HasProperty(key);
+ bool result = JSReceiver::HasProperty(receiver, key);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (isolate->has_pending_exception()) return Failure::Exception();
return isolate->heap()->ToBoolean(result);
@@ -5577,12 +5576,12 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
CONVERT_SMI_ARG_CHECKED(index, 1);
- bool result = receiver->HasElement(index);
+ bool result = JSReceiver::HasElement(receiver, index);
RETURN_IF_SCHEDULED_EXCEPTION(isolate);
if (isolate->has_pending_exception()) return Failure::Exception();
return isolate->heap()->ToBoolean(result);
@@ -5923,12 +5922,13 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
ASSERT(args.length() == 1);
- Object* object = args[0];
- return (object->IsJSObject() && !object->IsGlobalObject())
- ? JSObject::cast(object)->TransformToFastProperties(0)
- : object;
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ if (object->IsJSObject() && !object->IsGlobalObject()) {
+ JSObject::TransformToFastProperties(Handle<JSObject>::cast(object), 0);
+ }
+ return *object;
}
@@ -8501,8 +8501,7 @@
if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) {
// Start patching from the currently patched loop nesting level.
int current_level = unoptimized->allow_osr_at_loop_nesting_level();
- ASSERT(Deoptimizer::VerifyInterruptCode(
- isolate, unoptimized, current_level));
+ ASSERT(BackEdgeTable::Verify(isolate, unoptimized, current_level));
for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) {
unoptimized->set_allow_osr_at_loop_nesting_level(i);
isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
@@ -8655,8 +8654,8 @@
result = JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION);
}
- // Revert the patched interrupt now, regardless of whether OSR succeeds.
- Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
+ // Revert the patched back edge table, regardless of whether OSR succeeds.
+ BackEdgeTable::Revert(isolate, *unoptimized);
// Check whether we ended up with usable optimized code.
if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
@@ -9193,7 +9192,7 @@
// property from it.
if (!holder.is_null()) {
Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
- ASSERT(object->IsJSProxy() || object->HasProperty(*name));
+ ASSERT(object->IsJSProxy() || JSReceiver::HasProperty(object, name));
// GetProperty below can cause GC.
Handle<Object> receiver_handle(
object->IsGlobalObject()
@@ -10174,7 +10173,7 @@
Handle<Object> element_value(elements->get(j), isolate);
if (!element_value->IsTheHole()) {
visitor->visit(j, element_value);
- } else if (receiver->HasElement(j)) {
+ } else if (JSReceiver::HasElement(receiver, j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
element_value = Object::GetElement(isolate, receiver, j);
@@ -10199,7 +10198,7 @@
Handle<Object> element_value =
isolate->factory()->NewNumber(double_value);
visitor->visit(j, element_value);
- } else if (receiver->HasElement(j)) {
+ } else if (JSReceiver::HasElement(receiver, j)) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
Handle<Object> element_value =
@@ -11515,7 +11514,7 @@
!function_context->IsNativeContext()) {
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- if (ext->HasProperty(*variable_name)) {
+ if (JSReceiver::HasProperty(ext, variable_name)) {
// We don't expect this to do anything except replacing
// property value.
SetProperty(isolate,
@@ -11603,7 +11602,7 @@
// be variables introduced by eval.
if (context->has_extension()) {
Handle<JSObject> ext(JSObject::cast(context->extension()));
- if (ext->HasProperty(*variable_name)) {
+ if (JSReceiver::HasProperty(ext, variable_name)) {
// We don't expect this to do anything except replacing property value.
SetProperty(isolate,
ext,
@@ -12646,7 +12645,8 @@
// Do not materialize the arguments object for eval or top-level code.
// Skip if "arguments" is already taken.
if (!function->shared()->is_function() ||
- target->HasLocalProperty(isolate->heap()->arguments_string())) {
+ JSReceiver::HasLocalProperty(target,
+ isolate->factory()->arguments_string())) {
return target;
}
@@ -14786,8 +14786,7 @@
}
-void Runtime::PerformGC(Object* result) {
- Isolate* isolate = Isolate::Current();
+void Runtime::PerformGC(Object* result, Isolate* isolate) {
Failure* failure = Failure::cast(result);
if (failure->IsRetryAfterGC()) {
if (isolate->heap()->new_space()->AddFreshPage()) {
diff --git a/src/runtime.h b/src/runtime.h
index 60c6677..64fade4 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -299,7 +299,6 @@
/* Literals */ \
F(MaterializeRegExpLiteral, 4, 1)\
F(CreateObjectLiteral, 4, 1) \
- F(CreateObjectLiteralShallow, 4, 1) \
F(CreateArrayLiteral, 3, 1) \
F(CreateArrayLiteralShallow, 3, 1) \
\
@@ -838,7 +837,7 @@
JSArrayBuffer* phantom_array_buffer);
// Helper functions used by stubs.
- static void PerformGC(Object* result);
+ static void PerformGC(Object* result, Isolate* isolate);
// Used in runtime.cc and hydrogen's VisitArrayLiteral.
static Handle<Object> CreateArrayLiteralBoilerplate(
diff --git a/src/serialize.cc b/src/serialize.cc
index d05dd26..b3a7878 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -1284,7 +1284,6 @@
root_index_wave_front_(0) {
// The serializer is meant to be used only to generate initial heap images
// from a context in which there is only one isolate.
- ASSERT(isolate_->IsDefaultIsolate());
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 7b23d0c..0318a39 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -136,12 +136,11 @@
Handle<JSObject> stub_holder,
Code::Kind kind,
Code::StubType type) {
- Code::ExtraICState extra_ic_state = Code::ComputeExtraICState(
- receiver.is_identical_to(stub_holder) ? Code::OWN_STUB
- : Code::PROTOTYPE_STUB);
+ InlineCacheHolderFlag holder_flag = receiver.is_identical_to(stub_holder)
+ ? OWN_MAP : PROTOTYPE_MAP;
ASSERT(type != Code::NORMAL);
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STUB, extra_ic_state, type, kind);
+ Code::STUB, Code::kNoExtraICState, type, kind, holder_flag);
Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
@@ -520,7 +519,7 @@
Handle<Map> transition,
StrictModeFlag strict_mode) {
Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::STORE_IC, Code::MAP_TRANSITION, strict_mode);
+ name, receiver, Code::STORE_IC, Code::TRANSITION, strict_mode);
if (!stub.is_null()) return stub;
StoreStubCompiler compiler(isolate_, strict_mode);
@@ -702,7 +701,7 @@
Handle<Map> transition,
StrictModeFlag strict_mode) {
Handle<Code> stub = FindStoreHandler(
- name, receiver, Code::KEYED_STORE_IC, Code::MAP_TRANSITION, strict_mode);
+ name, receiver, Code::KEYED_STORE_IC, Code::TRANSITION, strict_mode);
if (!stub.is_null()) return stub;
KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
@@ -1395,17 +1394,19 @@
RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
+ HandleScope scope(isolate);
ASSERT(args.length() == 4);
- JSObject* recv = JSObject::cast(args[0]);
- Name* name = Name::cast(args[1]);
- Object* value = args[2];
+ Handle<JSObject> recv(JSObject::cast(args[0]));
+ Handle<Name> name(Name::cast(args[1]));
+ Handle<Object> value(args[2], isolate);
ASSERT(args.smi_at(3) == kStrictMode || args.smi_at(3) == kNonStrictMode);
StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
ASSERT(recv->HasNamedInterceptor());
PropertyAttributes attr = NONE;
- MaybeObject* result = recv->SetPropertyWithInterceptor(
- name, value, attr, strict_mode);
- return result;
+ Handle<Object> result = JSObject::SetPropertyWithInterceptor(
+ recv, name, value, attr, strict_mode);
+ RETURN_IF_EMPTY_HANDLE(isolate, result);
+ return *result;
}
@@ -1849,7 +1850,7 @@
TailCallBuiltin(masm(), SlowBuiltin(kind()));
// Return the generated code.
- return GetCode(kind(), Code::MAP_TRANSITION, name);
+ return GetCode(kind(), Code::TRANSITION, name);
}
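
The StoreInterceptorProperty rewrite above is a standard V8 handlification: raw Object*/Name* pointers, which a moving GC would invalidate, are wrapped in Handles under a HandleScope before anything that may allocate runs. A minimal sketch of the idiom, where SomeAllocatingOperation stands in for any call that can trigger GC:

RUNTIME_FUNCTION(MaybeObject*, DoSomething) {
  HandleScope scope(isolate);               // roots every handle below
  Handle<JSObject> receiver(JSObject::cast(args[0]));
  Handle<Name> name(Name::cast(args[1]));
  // A GC here relocates the objects and updates the handles; raw
  // pointers would have been left dangling.
  Handle<Object> result =
      JSObject::SomeAllocatingOperation(receiver, name);
  RETURN_IF_EMPTY_HANDLE(isolate, result);  // propagate pending exceptions
  return *result;                           // dereference only at the end
}
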
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 63cb42b..16028d8 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -572,8 +572,7 @@
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss_label,
- bool support_wrappers);
+ Label* miss_label);
static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
diff --git a/src/types.h b/src/types.h
index 2810ffc..6325945 100644
--- a/src/types.h
+++ b/src/types.h
@@ -128,6 +128,7 @@
V(Receiver, kObject | kProxy) \
V(Allocated, kDouble | kName | kReceiver) \
V(Any, kOddball | kNumber | kAllocated | kInternal) \
+ V(NonNumber, kAny - kNumber) \
V(Detectable, kAllocated - kUndetectable)
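
The new NonNumber entry leans on the bitset encoding of these types: every leaf type is a single bit, unions are bitwise OR, and kAny - kNumber is well-defined because kNumber's bits are a subset of kAny's. A simplified sketch with far fewer bits than the real TYPE_LIST:

enum TypeBits {
  kNumber    = 1 << 0,
  kName      = 1 << 1,
  kReceiver  = 1 << 2,
  kAny       = kNumber | kName | kReceiver,
  // Arithmetic subtraction equals bit clearing when the subtrahend's bits
  // are a subset of the minuend's:
  kNonNumber = kAny - kNumber  // same value as kAny & ~kNumber
};
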
#define TYPE_LIST(V) \
diff --git a/src/unique.h b/src/unique.h
index 7ae704a..6816654 100644
--- a/src/unique.h
+++ b/src/unique.h
@@ -29,6 +29,7 @@
#define V8_HYDROGEN_UNIQUE_H_
#include "handles.h"
+#include "objects.h"
#include "utils.h"
#include "zone.h"
@@ -53,19 +54,30 @@
template <typename T>
class Unique V8_FINAL {
public:
- // TODO(titzer): make private and introduce some builder/owner class.
+ // TODO(titzer): make private and introduce a factory.
explicit Unique(Handle<T> handle) {
if (handle.is_null()) {
raw_address_ = NULL;
} else {
+ // This is a best-effort check to prevent comparing Unique<T>'s created
+ // in different GC eras; we require heap allocation to be disallowed at
+ // creation time.
+ // NOTE: we currently consider maps to be non-movable, so no special
+ // assurance is required for creating a Unique<Map>.
+ // TODO(titzer): other immortal immovable objects are also fine.
+ ASSERT(!AllowHeapAllocation::IsAllowed() || handle->IsMap());
raw_address_ = reinterpret_cast<Address>(*handle);
- ASSERT_NE(raw_address_, NULL);
+ ASSERT_NE(raw_address_, NULL); // Non-null should imply non-zero address.
}
handle_ = handle;
}
+ // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
+ Unique(Address raw_address, Handle<T> handle)
+ : raw_address_(raw_address), handle_(handle) { }
+
// Constructor for handling automatic up casting.
- // Ex. Unique<JSFunction> can be passed when Unique<Object> is expected.
+ // E.g. Unique<JSFunction> can be passed when Unique<Object> is expected.
template <class S> Unique(Unique<S> uniq) {
#ifdef DEBUG
T* a = NULL;
@@ -74,34 +86,46 @@
USE(a);
#endif
raw_address_ = uniq.raw_address_;
- handle_ = uniq.handle_; // Creates a new handle sharing the same location.
+ handle_ = uniq.handle_;
}
template <typename U>
- bool operator==(const Unique<U>& other) const {
+ inline bool operator==(const Unique<U>& other) const {
+ ASSERT(IsInitialized() && other.IsInitialized());
return raw_address_ == other.raw_address_;
}
template <typename U>
- bool operator!=(const Unique<U>& other) const {
+ inline bool operator!=(const Unique<U>& other) const {
+ ASSERT(IsInitialized() && other.IsInitialized());
return raw_address_ != other.raw_address_;
}
- intptr_t Hashcode() const {
+ inline intptr_t Hashcode() const {
+ ASSERT(IsInitialized());
return reinterpret_cast<intptr_t>(raw_address_);
}
- bool IsNull() {
+ inline bool IsNull() const {
+ ASSERT(IsInitialized());
return raw_address_ == NULL;
}
- // Don't do this unless you have access to the heap!
- // No, seriously! You can compare and hash and set-ify uniques that were
- // all created at the same time; please don't dereference.
- Handle<T> handle() {
+ // Extract the handle from this Unique in order to dereference it.
+ // WARNING: Only do this if you have access to the heap.
+ inline Handle<T> handle() const {
return handle_;
}
+ inline bool IsInitialized() const {
+ return raw_address_ != NULL || handle_.is_null();
+ }
+
+ // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
+ static Unique<T> CreateUninitialized(Handle<T> handle) {
+ return Unique<T>(static_cast<Address>(NULL), handle);
+ }
+
friend class UniqueSet<T>; // Uses internal details for speed.
template <class U>
friend class Unique; // For comparing raw_address values.
@@ -120,6 +144,7 @@
// Add a new element to this unique set. Mutates this set. O(|this|).
void Add(Unique<T> uniq, Zone* zone) {
+ ASSERT(uniq.IsInitialized());
// Keep the set sorted by the {raw_address} of the unique elements.
for (int i = 0; i < size_; i++) {
if (array_[i] == uniq) return;
@@ -138,7 +163,7 @@
}
// Compare this set against another set. O(|this|).
- bool Equals(UniqueSet<T>* that) {
+ bool Equals(UniqueSet<T>* that) const {
if (that->size_ != this->size_) return false;
for (int i = 0; i < this->size_; i++) {
if (this->array_[i] != that->array_[i]) return false;
@@ -146,8 +171,17 @@
return true;
}
+ template <typename U>
+ bool Contains(Unique<U> elem) const {
+ // TODO(titzer): use binary search for larger sets.
+ for (int i = 0; i < size_; i++) {
+ if (this->array_[i] == elem) return true;
+ }
+ return false;
+ }
+
// Check if this set is a subset of the given set. O(|this| + |that|).
- bool IsSubset(UniqueSet<T>* that) {
+ bool IsSubset(UniqueSet<T>* that) const {
if (that->size_ < this->size_) return false;
int j = 0;
for (int i = 0; i < this->size_; i++) {
@@ -163,7 +197,7 @@
// Returns a new set representing the intersection of this set and the other.
// O(|this| + |that|).
- UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) {
+ UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) const {
if (that->size_ == 0 || this->size_ == 0) return new(zone) UniqueSet<T>();
UniqueSet<T>* out = new(zone) UniqueSet<T>();
@@ -190,7 +224,7 @@
// Returns a new set representing the union of this set and the other.
// O(|this| + |that|).
- UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) {
+ UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) const {
if (that->size_ == 0) return this->Copy(zone);
if (this->size_ == 0) return that->Copy(zone);
@@ -222,7 +256,7 @@
}
// Makes an exact copy of this set. O(|this|).
- UniqueSet<T>* Copy(Zone* zone) {
+ UniqueSet<T>* Copy(Zone* zone) const {
UniqueSet<T>* copy = new(zone) UniqueSet<T>();
copy->size_ = this->size_;
copy->capacity_ = this->size_;
@@ -231,10 +265,15 @@
return copy;
}
- inline int size() {
+ inline int size() const {
return size_;
}
+ inline Unique<T> at(int index) const {
+ ASSERT(index >= 0 && index < size_);
+ return array_[index];
+ }
+
private:
// These sets should be small, since operations are implemented with simple
// linear algorithms. Enforce a maximum size.
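
Unique<T> captures an object's address once, so equality, hashing, and set membership are pointer operations that never dereference; the new ASSERT enforces that the address is taken while the GC cannot move objects. A usage sketch under that assumption (map_handle, other_map_handle, and zone are assumed to be in scope):

{
  DisallowHeapAllocation no_gc;     // objects cannot move in this scope
  Unique<Map> a(map_handle);        // captures the current address
  Unique<Map> b(other_map_handle);
  if (a == b) {
    // Same object: compared by raw address only.
  }
  UniqueSet<Map>* set = new(zone) UniqueSet<Map>();
  set->Add(a, zone);                // kept sorted by raw address
  bool present = set->Contains(b);  // linear scan, O(|set|)
}
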
diff --git a/src/v8threads.cc b/src/v8threads.cc
index 33b620d..94a5e80 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -481,7 +481,6 @@
// Acknowledge the preemption by the receiving thread.
void ContextSwitcher::PreemptionReceived() {
- ASSERT(Locker::IsLocked(i::Isolate::GetDefaultIsolateForLocking()));
// There is currently no accounting being done for this, but there could be
// in the future, which is why we leave this in.
}
diff --git a/src/version.cc b/src/version.cc
index ce8c409..4467f6a 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 21
-#define BUILD_NUMBER 18
-#define PATCH_LEVEL 2
+#define MINOR_VERSION 22
+#define BUILD_NUMBER 0
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/src/win32-math.cc b/src/win32-math.cc
index 88fa3a6..8f6d077 100644
--- a/src/win32-math.cc
+++ b/src/win32-math.cc
@@ -29,7 +29,7 @@
// refer to The Open Group Base Specification for specification of the correct
// semantics for these functions.
// (http://www.opengroup.org/onlinepubs/000095399/)
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && (_MSC_VER < 1800)
#include "win32-headers.h"
#include <limits.h> // Required for INT_MAX etc.
diff --git a/src/win32-math.h b/src/win32-math.h
index 0397c7e..fd9312b 100644
--- a/src/win32-math.h
+++ b/src/win32-math.h
@@ -37,6 +37,8 @@
#error Wrong environment, expected MSVC.
#endif // _MSC_VER
+// MSVC 2013+ provides implementations of all standard math functions.
+#if (_MSC_VER < 1800)
enum {
FP_NAN,
FP_INFINITE,
@@ -58,4 +60,6 @@
} // namespace std
+#endif // _MSC_VER < 1800
+
#endif // V8_WIN32_MATH_H_
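
_MSC_VER 1800 is Visual Studio 2013, the first MSVC release whose C runtime provides the C99 classification functions, so the hand-written substitutes are now compiled only for older toolchains. The shape of the guard, sketched:

// Pre-2013 MSVC gets the replacements; newer toolchains use the
// standard library's own definitions.
#if defined(_MSC_VER) && (_MSC_VER < 1800)
namespace std {
int isnan(double x);     // declared here, defined in win32-math.cc
int signbit(double x);
}  // namespace std
#endif  // _MSC_VER < 1800
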
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index f2e37fe..f4cc4a3 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -504,6 +504,7 @@
static uint64_t found_by_runtime_probing_only_;
friend class ExternalReference;
+ friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 81721c2..a6dc003 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -600,6 +600,8 @@
// the stub returns.
__ subq(Operand(rsp, 0), Immediate(5));
__ Pushad();
+ __ movq(arg_reg_2,
+ ExternalReference::isolate_address(masm->isolate()));
__ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
@@ -1140,13 +1142,11 @@
// Lookup the argument in the number to string cache.
Label not_cached, argument_is_string;
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm,
- rax, // Input.
- rbx, // Result.
- rcx, // Scratch 1.
- rdx, // Scratch 2.
- &not_cached);
+ __ LookupNumberStringCache(rax, // Input.
+ rbx, // Result.
+ rcx, // Scratch 1.
+ rdx, // Scratch 2.
+ &not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1);
__ bind(&argument_is_string);
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 51e1a53..8f27374 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -78,7 +78,7 @@
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
@@ -1009,7 +1009,7 @@
__ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
// Convert, convert back, and compare the two doubles' bits.
__ cvttsd2siq(scratch2, xmm0);
- __ cvtlsi2sd(xmm1, scratch2);
+ __ Cvtlsi2sd(xmm1, scratch2);
__ movq(scratch1, xmm0);
__ movq(scratch2, xmm1);
__ cmpq(scratch1, scratch2);
@@ -1145,7 +1145,7 @@
// Then load the bits of the double into rbx.
__ SmiToInteger32(rax, rax);
__ subq(rsp, Immediate(kDoubleSize));
- __ cvtlsi2sd(xmm1, rax);
+ __ Cvtlsi2sd(xmm1, rax);
__ movsd(Operand(rsp, 0), xmm1);
__ movq(rbx, xmm1);
__ movq(rdx, xmm1);
@@ -1477,9 +1477,9 @@
void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
__ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ Cvtlsi2sd(xmm0, kScratchRegister);
__ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
+ __ Cvtlsi2sd(xmm1, kScratchRegister);
}
@@ -1503,12 +1503,12 @@
__ bind(&load_smi_rdx);
__ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ Cvtlsi2sd(xmm0, kScratchRegister);
__ JumpIfNotSmi(rax, &load_nonsmi_rax);
__ bind(&load_smi_rax);
__ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(xmm1, kScratchRegister);
+ __ Cvtlsi2sd(xmm1, kScratchRegister);
__ bind(&done);
}
@@ -1541,7 +1541,7 @@
__ cvttsd2siq(smi_result, xmm0);
// Check if conversion was successful by converting back and
// comparing to the original double's bits.
- __ cvtlsi2sd(xmm1, smi_result);
+ __ Cvtlsi2sd(xmm1, smi_result);
__ movq(kScratchRegister, xmm1);
__ cmpq(scratch2, kScratchRegister);
__ j(not_equal, on_not_smis);
@@ -1560,7 +1560,7 @@
__ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
__ cvttsd2siq(smi_result, xmm0);
- __ cvtlsi2sd(xmm1, smi_result);
+ __ Cvtlsi2sd(xmm1, smi_result);
__ movq(kScratchRegister, xmm1);
__ cmpq(scratch2, kScratchRegister);
__ j(not_equal, on_not_smis);
@@ -1603,7 +1603,7 @@
// Save 1 in double_result - we need this several times later on.
__ movq(scratch, Immediate(1));
- __ cvtlsi2sd(double_result, scratch);
+ __ Cvtlsi2sd(double_result, scratch);
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
@@ -1623,7 +1623,7 @@
__ bind(&base_is_smi);
__ SmiToInteger32(base, base);
- __ cvtlsi2sd(double_base, base);
+ __ Cvtlsi2sd(double_base, base);
__ bind(&unpack_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -1812,7 +1812,7 @@
// and may not have contained the exponent value in the first place when the
// input was a smi. We reset it with the exponent value before bailing out.
__ j(not_equal, &done);
- __ cvtlsi2sd(double_exponent, exponent);
+ __ Cvtlsi2sd(double_exponent, exponent);
// Returning or bailing out.
Counters* counters = masm->isolate()->counters();
@@ -1902,8 +1902,7 @@
receiver = rax;
}
- StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss,
- support_wrapper_);
+ StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
@@ -2649,7 +2648,7 @@
__ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(r11);
- __ LeaveApiExitFrame();
+ __ LeaveApiExitFrame(true);
// Check the result.
Label success;
@@ -2910,96 +2909,6 @@
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ SmiToInteger32(
- mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shrl(mask, Immediate(1));
- __ subq(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- Factory* factory = masm->isolate()->factory();
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- factory->heap_number_map(),
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- Register index = scratch;
- Register probe = mask;
- __ movq(probe,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm1);
- __ j(parity_even, not_found); // Bail out if NaN is involved.
- __ j(not_equal, not_found); // The cache did not contain this value.
- __ jmp(&load_result_from_cache);
-
- __ bind(&is_smi);
- __ SmiToInteger32(scratch, object);
- GenerateConvertHashCodeToIndex(masm, scratch, mask);
-
- // Check if the entry is the smi we are looking for.
- __ cmpq(object,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- __ j(not_equal, not_found);
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ movq(result,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize + kPointerSize));
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->number_to_string_native(), 1);
-}
-
-
-void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask) {
- __ and_(hash, mask);
- // Each entry in string cache consists of two pointer sized fields,
- // but times_twice_pointer_size (multiplication by 16) scale factor
- // is not supported by addrmode on x64 platform.
- // So we have to premultiply entry index before lookup.
- __ shl(hash, Immediate(kPointerSizeLog2 + 1));
-}
-
-
void NumberToStringStub::Generate(MacroAssembler* masm) {
Label runtime;
@@ -3007,7 +2916,7 @@
__ movq(rbx, args.GetArgumentOperand(0));
// Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, &runtime);
+ __ LookupNumberStringCache(rbx, rax, r8, r9, &runtime);
__ ret(1 * kPointerSize);
__ bind(&runtime);
@@ -3322,6 +3231,7 @@
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // rax : number of arguments to the construct function
// rbx : cache cell for call target
// rdi : the function to call
Isolate* isolate = masm->isolate();
@@ -3341,9 +3251,8 @@
// If we didn't have a matching function, and we didn't find the megamorphic
// sentinel, then the cell holds either some other function or an
// AllocationSite. Do a map check on the object in rcx.
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ Cmp(FieldOperand(rcx, 0), allocation_site_map);
__ j(not_equal, &miss);
@@ -3379,6 +3288,7 @@
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Arguments register must be smi-tagged to call out.
__ Integer32ToSmi(rax, rax);
__ push(rax);
__ push(rdi);
@@ -3619,6 +3529,7 @@
// PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
// stack is known to be aligned. The arguments are passed in registers.
+ __ movq(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
__ movq(arg_reg_1, rax);
__ movq(kScratchRegister,
ExternalReference::perform_gc_function(masm->isolate()));
@@ -4646,12 +4557,7 @@
// Check the number to string cache.
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- slow);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
__ movq(arg, scratch1);
__ movq(Operand(rsp, stack_offset), arg);
__ bind(&done);
@@ -5376,7 +5282,7 @@
__ jmp(&left, Label::kNear);
__ bind(&right_smi);
__ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
- __ cvtlsi2sd(xmm1, rcx);
+ __ Cvtlsi2sd(xmm1, rcx);
__ bind(&left);
__ JumpIfSmi(rdx, &left_smi, Label::kNear);
@@ -5386,7 +5292,7 @@
__ jmp(&done);
__ bind(&left_smi);
__ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
- __ cvtlsi2sd(xmm0, rcx);
+ __ Cvtlsi2sd(xmm0, rcx);
__ bind(&done);
// Compare operands
@@ -6392,9 +6298,8 @@
__ incl(rdx);
__ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
if (FLAG_debug_code) {
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
__ Cmp(FieldOperand(rcx, 0), allocation_site_map);
__ Assert(equal, kExpectedAllocationSiteInCell);
}
@@ -6541,7 +6446,7 @@
__ j(equal, &no_info);
__ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
__ Cmp(FieldOperand(rdx, 0),
- Handle<Map>(masm->isolate()->heap()->allocation_site_map()));
+ masm->isolate()->factory()->allocation_site_map());
__ j(not_equal, &no_info);
__ movq(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset));
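
The cvtlsi2sd → Cvtlsi2sd renames in this file switch to a macro-assembler wrapper that zeroes the destination first. cvtlsi2sd writes only the low 64 bits of its XMM destination and preserves the rest, so the raw instruction carries a false dependency on the register's previous contents; xorps is a recognized zero idiom that breaks that dependency. The wrapper, in sketch form:

void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
  xorps(dst, dst);      // dependency-breaking zero idiom
  cvtlsi2sd(dst, src);  // int32 -> double into the low quadword
}
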
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 41678ec..c3eac81 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -212,23 +212,7 @@
public:
NumberToStringStub() { }
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found);
-
private:
- static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
- Register hash,
- Register mask);
-
Major MajorKey() { return NumberToString; }
int MinorKey() { return 0; }
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 24773c2..b3f4eaf 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -386,7 +386,7 @@
// rbx: current element (smi-tagged)
__ JumpIfNotSmi(rbx, &convert_hole);
__ SmiToInteger32(rbx, rbx);
- __ cvtlsi2sd(xmm0, rbx);
+ __ Cvtlsi2sd(xmm0, rbx);
__ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
xmm0);
__ jmp(&entry);
@@ -723,7 +723,8 @@
}
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+ byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
@@ -732,7 +733,7 @@
CopyBytes(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length);
patcher.masm()->call(stub->instruction_start());
for (int i = 0;
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 303b756..a5e4583 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -82,87 +82,6 @@
}
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x1d;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-// The back edge bookkeeping code matches the pattern:
-//
-// add <profiling_counter>, <-delta>
-// jns ok
-// call <stack guard>
-// ok:
-//
-// We will patch away the branch so the code is:
-//
-// add <profiling_counter>, <-delta> ;; Not changed
-// nop
-// nop
-// call <on-stack replacment>
-// ok:
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* replacement_code) {
- // Turn the jump into nops.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kNopByteOne;
- *(call_target_address - 2) = kNopByteTwo;
- // Replace the call address.
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
-
- unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code) {
- // Restore the original jump.
- Address call_target_address = pc_after - kIntSize;
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- // Restore the original call address.
- Assembler::set_target_address_at(call_target_address,
- interrupt_code->entry());
-
- interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc_after) {
- Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- Code* osr_builtin =
- isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- ASSERT_EQ(osr_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- return PATCHED_FOR_OSR;
- } else {
- // Get the interrupt stub code object to match against from cache.
- Code* interrupt_builtin =
- isolate->builtins()->builtin(Builtins::kInterruptCheck);
- ASSERT_EQ(interrupt_builtin->entry(),
- Assembler::target_address_at(call_target_address));
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return NOT_PATCHED;
- }
-}
-#endif // DEBUG
-
-
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 9984a46..a1b019a 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -93,7 +93,7 @@
{ 0x39, OPER_REG_OP_ORDER, "cmp" },
{ 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" },
{ 0x3B, REG_OPER_OP_ORDER, "cmp" },
- { 0x63, REG_OPER_OP_ORDER, "movsxlq" },
+ { 0x63, REG_OPER_OP_ORDER, "movsxl" },
{ 0x84, BYTE_REG_OPER_OP_ORDER, "test" },
{ 0x85, REG_OPER_OP_ORDER, "test" },
{ 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" },
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index c24512e..f9d1ffa 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1609,21 +1609,15 @@
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1) {
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(constant_properties);
- __ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
+ expr->depth() > 1 || Serializer::enabled() ||
+ flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_properties);
__ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movq(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
@@ -4883,6 +4877,88 @@
#undef __
+
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x1d;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
+// The back edge bookkeeping code matches the pattern:
+//
+// add <profiling_counter>, <-delta>
+// jns ok
+// call <stack guard>
+// ok:
+//
+// We will patch away the branch so the code is:
+//
+// add <profiling_counter>, <-delta> ;; Not changed
+// nop
+// nop
+// call <on-stack replacement>
+// ok:
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* replacement_code) {
+ // Turn the jump into nops.
+ Address call_target_address = pc_after - kIntSize;
+ *(call_target_address - 3) = kNopByteOne;
+ *(call_target_address - 2) = kNopByteTwo;
+ // Replace the call address.
+ Assembler::set_target_address_at(call_target_address,
+ replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, replacement_code);
+}
+
+
+void BackEdgeTable::RevertAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* interrupt_code) {
+ // Restore the original jump.
+ Address call_target_address = pc_after - kIntSize;
+ *(call_target_address - 3) = kJnsInstruction;
+ *(call_target_address - 2) = kJnsOffset;
+ // Restore the original call address.
+ Assembler::set_target_address_at(call_target_address,
+ interrupt_code->entry());
+
+ interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, interrupt_code);
+}
+
+
+#ifdef DEBUG
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
+ Address call_target_address = pc_after - kIntSize;
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+ if (*(call_target_address - 3) == kNopByteOne) {
+ ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT_EQ(osr_builtin->entry(),
+ Assembler::target_address_at(call_target_address));
+ return ON_STACK_REPLACEMENT;
+ } else {
+ // Get the interrupt stub code object to match against from cache.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
+ ASSERT_EQ(interrupt_builtin->entry(),
+ Assembler::target_address_at(call_target_address));
+ ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ return INTERRUPT;
+ }
+}
+#endif // DEBUG
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
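
The relocated patching code toggles a back edge between its interrupt check and on-stack replacement by flipping two bytes: the jns over the call (0x79 0x1d) becomes a two-byte nop (0x66 0x90), so the call is always taken, and the call target is swapped alongside. A standalone sketch of the byte toggle, with PatchBackEdge as a hypothetical helper and call_operand pointing at the call's 32-bit operand, as pc_after - kIntSize does above:

void PatchBackEdge(byte* call_operand, bool for_osr) {
  if (for_osr) {
    *(call_operand - 3) = 0x66;  // first byte of the two-byte nop
    *(call_operand - 2) = 0x90;  // second byte: the branch is gone
  } else {
    *(call_operand - 3) = 0x79;  // jns opcode restored
    *(call_operand - 2) = 0x1d;  // offset that skips the call
  }
  // The call's target (interrupt stub vs. OSR builtin) is rewritten
  // separately via Assembler::set_target_address_at.
}
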
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 9dca6b3..83a8cb2 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -1832,7 +1832,7 @@
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
@@ -1878,15 +1878,17 @@
// when there is a mulsd depending on the result
__ movaps(left, left);
break;
- case Token::MOD:
+ case Token::MOD: {
+ XMMRegister xmm_scratch = double_scratch0();
__ PrepareCallCFunction(2);
- __ movaps(xmm0, left);
+ __ movaps(xmm_scratch, left);
ASSERT(right.is(xmm1));
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movaps(result, xmm0);
+ __ movaps(result, xmm_scratch);
break;
+ }
default:
UNREACHABLE();
break;
@@ -1947,25 +1949,6 @@
}
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
- Representation r = instr->hydrogen()->value()->representation();
- if (r.IsSmiOrInteger32() || r.IsDouble()) {
- EmitBranch(instr, no_condition);
- } else {
- ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->value());
- HType type = instr->hydrogen()->value()->type();
- if (type.IsTaggedNumber()) {
- EmitBranch(instr, no_condition);
- }
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- EmitBranch(instr, equal);
- }
-}
-
-
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
@@ -1981,8 +1964,9 @@
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
} else {
ASSERT(r.IsTagged());
@@ -2001,8 +1985,9 @@
EmitBranch(instr, no_condition);
} else if (type.IsHeapNumber()) {
ASSERT(!info()->IsStub());
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
@@ -2083,8 +2068,9 @@
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &not_heap_number, Label::kNear);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
__ j(zero, instr->FalseLabel(chunk_));
__ jmp(instr->TrueLabel(chunk_));
__ bind(&not_heap_number);
@@ -2682,7 +2668,7 @@
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- __ LoadGlobalCell(result, instr->hydrogen()->cell());
+ __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
@@ -2704,7 +2690,7 @@
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->value());
- Handle<Cell> cell_handle = instr->hydrogen()->cell();
+ Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
@@ -2799,6 +2785,7 @@
int offset = access.offset();
if (access.IsExternalMemory()) {
+ ASSERT(!access.representation().IsInteger32());
Register result = ToRegister(instr->result());
if (instr->object()->IsConstantOperand()) {
ASSERT(result.is(rax));
@@ -2820,10 +2807,18 @@
Register result = ToRegister(instr->result());
if (access.IsInobject()) {
- __ movq(result, FieldOperand(object, offset));
+ if (access.representation().IsInteger32()) {
+ __ movl(result, FieldOperand(object, offset));
+ } else {
+ __ movq(result, FieldOperand(object, offset));
+ }
} else {
__ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(result, FieldOperand(result, offset));
+ if (access.representation().IsInteger32()) {
+ __ movl(result, FieldOperand(result, offset));
+ } else {
+ __ movq(result, FieldOperand(result, offset));
+ }
}
}
@@ -2879,6 +2874,12 @@
}
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+ Register result = ToRegister(instr->result());
+ __ LoadRoot(result, instr->index());
+}
+
+
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
@@ -3451,7 +3452,7 @@
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
+ XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
@@ -3473,7 +3474,7 @@
void LCodeGen::DoMathFloor(LMathFloor* instr) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3520,7 +3521,7 @@
__ bind(&negative_sign);
// Truncate, then compare and compensate.
__ cvttsd2si(output_reg, input_reg);
- __ cvtlsi2sd(xmm_scratch, output_reg);
+ __ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1));
@@ -3532,7 +3533,7 @@
void LCodeGen::DoMathRound(LMathRound* instr) {
- const XMMRegister xmm_scratch = xmm0;
+ const XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
@@ -3569,7 +3570,7 @@
__ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
- __ cvtlsi2sd(xmm_scratch, output_reg);
+ __ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &restore, Label::kNear);
__ subl(output_reg, Immediate(1));
@@ -3600,7 +3601,7 @@
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
@@ -3717,8 +3718,7 @@
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - ( 1.0 x 2^20 ).
XMMRegister result = ToDoubleRegister(instr->result());
- // We use xmm0 as fixed scratch register here.
- XMMRegister scratch4 = xmm0;
+ XMMRegister scratch4 = double_scratch0();
__ movq(scratch3, V8_INT64_C(0x4130000000000000),
RelocInfo::NONE64); // 1.0 x 2^20 as double
__ movq(scratch4, scratch3);
@@ -3731,10 +3731,11 @@
void LCodeGen::DoMathExp(LMathExp* instr) {
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister temp0 = double_scratch0();
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
+ MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}
@@ -3936,6 +3937,7 @@
int offset = access.offset();
if (access.IsExternalMemory()) {
+ ASSERT(!access.representation().IsInteger32());
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
Register value = ToRegister(instr->value());
if (instr->object()->IsConstantOperand()) {
@@ -4013,15 +4015,24 @@
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
if (operand_value->IsRegister()) {
- __ movq(FieldOperand(write_register, offset),
- ToRegister(operand_value));
+ if (access.representation().IsInteger32()) {
+ __ movl(FieldOperand(write_register, offset),
+ ToRegister(operand_value));
+ } else {
+ __ movq(FieldOperand(write_register, offset),
+ ToRegister(operand_value));
+ }
} else {
Handle<Object> handle_value = ToHandle(operand_value);
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
__ Move(FieldOperand(write_register, offset), handle_value);
}
} else {
- __ movq(FieldOperand(write_register, offset), ToRegister(instr->value()));
+ if (access.representation().IsInteger32()) {
+ __ movl(FieldOperand(write_register, offset), ToRegister(instr->value()));
+ } else {
+ __ movq(FieldOperand(write_register, offset), ToRegister(instr->value()));
+ }
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
@@ -4449,9 +4460,9 @@
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
if (input->IsRegister()) {
- __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
+ __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
} else {
- __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
+ __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
}
}
@@ -4525,7 +4536,8 @@
// Load the value into xmm1, which will be preserved across a potential
// call to the runtime (MacroAssembler::EnterExitFrameEpilogue preserves
// only allocatable XMM registers on x64).
- __ LoadUint32(xmm1, reg, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ LoadUint32(xmm1, reg, xmm_scratch);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, &slow);
@@ -4623,7 +4635,7 @@
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode) {
- Label load_smi, done;
+ Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
@@ -4632,27 +4644,19 @@
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, env);
- } else {
- Label heap_number, convert;
- __ j(equal, &heap_number, Label::kNear);
- // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, env);
-
- __ bind(&convert);
- __ xorps(result_reg, result_reg);
- __ divsd(result_reg, result_reg);
- __ jmp(&done, Label::kNear);
-
- __ bind(&heap_number);
- }
- // Heap number to XMM conversion.
+ // On x64 it is safe to load at heap number offset before evaluating the map
+ // check, since all heap objects are at least two words long.
__ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
+ if (can_convert_undefined_to_nan) {
+ __ j(not_equal, &convert);
+ } else {
+ DeoptimizeIf(not_equal, env);
+ }
+
if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, result_reg);
__ j(not_equal, &done, Label::kNear);
@@ -4661,6 +4665,18 @@
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);
+
+ if (can_convert_undefined_to_nan) {
+ __ bind(&convert);
+
+ // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(not_equal, env);
+
+ __ xorps(result_reg, result_reg);
+ __ divsd(result_reg, result_reg);
+ __ jmp(&done, Label::kNear);
+ }
} else {
ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
@@ -4668,7 +4684,7 @@
// Smi to XMM conversion
__ bind(&load_smi);
__ SmiToInteger32(kScratchRegister, input_reg);
- __ cvtlsi2sd(result_reg, kScratchRegister);
+ __ Cvtlsi2sd(result_reg, kScratchRegister);
__ bind(&done);
}
@@ -4721,12 +4737,16 @@
LOperand* input = instr->value();
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
-
Register input_reg = ToRegister(input);
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- __ JumpIfNotSmi(input_reg, deferred->entry());
- __ SmiToInteger32(input_reg, input_reg);
- __ bind(deferred->exit());
+
+ if (instr->hydrogen()->value()->representation().IsSmi()) {
+ __ SmiToInteger32(input_reg, input_reg);
+ } else {
+ DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiToInteger32(input_reg, input_reg);
+ __ bind(deferred->exit());
+ }
}
@@ -4764,7 +4784,8 @@
__ TruncateDoubleToI(result_reg, input_reg);
} else {
Label bailout, done;
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
__ jmp(&done, Label::kNear);
@@ -4785,7 +4806,8 @@
Register result_reg = ToRegister(result);
Label bailout, done;
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
__ jmp(&done, Label::kNear);
@@ -4862,7 +4884,7 @@
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
- Handle<HeapObject> object = instr->hydrogen()->object();
+ Handle<HeapObject> object = instr->hydrogen()->object().handle();
__ CmpHeapObject(reg, object);
DeoptimizeIf(not_equal, instr->environment());
}
@@ -4903,22 +4925,21 @@
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- SmallMapList* map_set = instr->hydrogen()->map_set();
-
DeferredCheckMaps* deferred = NULL;
if (instr->hydrogen()->has_migration_target()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
__ bind(deferred->check_maps());
}
+ UniqueSet<Map> map_set = instr->hydrogen()->map_set();
Label success;
- for (int i = 0; i < map_set->length() - 1; i++) {
- Handle<Map> map = map_set->at(i);
+ for (int i = 0; i < map_set.size() - 1; i++) {
+ Handle<Map> map = map_set.at(i).handle();
__ CompareMap(reg, map, &success);
__ j(equal, &success);
}
- Handle<Map> map = map_set->last();
+ Handle<Map> map = map_set.at(map_set.size() - 1).handle();
__ CompareMap(reg, map, &success);
if (instr->hydrogen()->has_migration_target()) {
__ j(not_equal, deferred->entry());
@@ -4932,8 +4953,9 @@
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+ XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+ __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}
@@ -4948,6 +4970,7 @@
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
+ XMMRegister xmm_scratch = double_scratch0();
Label is_smi, done, heap_number;
__ JumpIfSmi(input_reg, &is_smi);
@@ -4966,8 +4989,8 @@
// Heap number
__ bind(&heap_number);
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg);
+ __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
// smi
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index f994645..8764d9d 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -166,6 +166,8 @@
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk()->graph(); }
+ XMMRegister double_scratch0() const { return xmm0; }
+
int GetNextEmittedBlock() const;
void EmitClassOfTest(Label* if_true,
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index d9daaac..ddaae82 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -719,46 +719,39 @@
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
- if (instr->representation().IsTagged()) {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
+ if (instr->representation().IsSmiOrInteger32()) {
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
- }
-
- ASSERT(instr->representation().IsSmiOrInteger32());
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
- LOperand* left = UseRegisterAtStart(instr->left());
-
- HValue* right_value = instr->right();
- LOperand* right = NULL;
- int constant_value = 0;
- if (right_value->IsConstant()) {
- HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
- constant_value = constant->Integer32Value() & 0x1f;
- } else {
- right = UseFixed(right_value, rcx);
- }
-
- // Shift operations can only deoptimize if we do a logical shift by 0 and
- // the result cannot be truncated to int32.
- bool does_deopt = false;
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ HValue* right_value = instr->right();
+ LOperand* right = NULL;
+ int constant_value = 0;
+ if (right_value->IsConstant()) {
+ HConstant* constant = HConstant::cast(right_value);
+ right = chunk_->DefineConstantOperand(constant);
+ constant_value = constant->Integer32Value() & 0x1f;
} else {
- does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ right = UseFixed(right_value, rcx);
}
- }
- LInstruction* result =
- DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
- return does_deopt ? AssignEnvironment(result) : result;
+ // Shift operations can only deoptimize if we do a logical shift by 0 and
+ // the result cannot be truncated to int32.
+ bool does_deopt = false;
+ if (op == Token::SHR && constant_value == 0) {
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+ }
+ }
+
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+ return does_deopt ? AssignEnvironment(result) : result;
+ } else {
+ return DoArithmeticT(op, instr);
+ }
}
@@ -767,21 +760,22 @@
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
- ASSERT(op != Token::MOD);
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
- LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ if (op == Token::MOD) {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseFixedDouble(instr->BetterRightOperand(), xmm1);
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return MarkAsCall(DefineSameAsFirst(result), instr);
+ } else {
+ LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+ LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
+ }
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr) {
- ASSERT(op == Token::ADD ||
- op == Token::DIV ||
- op == Token::MOD ||
- op == Token::MUL ||
- op == Token::SUB);
+ HBinaryOperation* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
@@ -1348,27 +1342,19 @@
if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
+ ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineSameAsFirst(new(zone()) LBitI(left, right));
} else {
- ASSERT(instr->representation().IsTagged());
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
-
- LOperand* left = UseFixed(instr->left(), rdx);
- LOperand* right = UseFixed(instr->right(), rax);
- LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
+ return DoArithmeticT(instr->op(), instr);
}
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
- if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::DIV, instr);
- } else if (instr->representation().IsSmiOrInteger32()) {
+ if (instr->representation().IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().Equals(instr->representation()));
ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->HasPowerOf2Divisor()) {
@@ -1385,8 +1371,9 @@
LOperand* divisor = UseRegister(instr->right());
LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, rax));
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::DIV, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::DIV, instr);
}
}
@@ -1485,17 +1472,10 @@
? AssignEnvironment(result)
: result;
}
- } else if (instr->representation().IsTagged()) {
- return DoArithmeticT(Token::MOD, instr);
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::MOD, instr);
} else {
- ASSERT(instr->representation().IsDouble());
- // We call a C function for double modulo. It can't trigger a GC. We need to
- // use fixed result register for the call.
- // TODO(fschneider): Allow any register as input registers.
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
- UseFixedDouble(left, xmm2),
- UseFixedDouble(right, xmm1));
- return MarkAsCall(DefineFixedDouble(mod, xmm1), instr);
+ return DoArithmeticT(Token::MOD, instr);
}
}
@@ -1515,7 +1495,6 @@
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::MUL, instr);
}
}
@@ -1536,7 +1515,6 @@
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::SUB, instr);
}
}
@@ -1568,7 +1546,6 @@
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else {
- ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
return NULL;
@@ -1670,8 +1647,8 @@
LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
HCompareHoleAndBranch* instr) {
- LOperand* object = UseRegisterAtStart(instr->object());
- return new(zone()) LCmpHoleAndBranch(object);
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new(zone()) LCmpHoleAndBranch(value);
}
@@ -1934,12 +1911,6 @@
}
-LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
- return new(zone()) LIsNumberAndBranch(
- UseRegisterOrConstantAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LCheckInstanceType* result = new(zone()) LCheckInstanceType(value);
@@ -2098,6 +2069,11 @@
}
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+ return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
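
Widening DoArithmeticT to take HBinaryOperation* lets all of the builders above share one dispatch shape: lower Smi/int32 representations inline, send doubles to DoArithmeticD, and fall through to the generic tagged path instead of asserting on it. Sketched with a subtraction builder (overflow handling elided):

LInstruction* LChunkBuilder::DoSub(HSub* instr) {
  if (instr->representation().IsSmiOrInteger32()) {
    LOperand* left = UseRegisterAtStart(instr->left());
    LOperand* right = UseOrConstantAtStart(instr->right());
    return DefineSameAsFirst(new(zone()) LSubI(left, right));
  } else if (instr->representation().IsDouble()) {
    return DoArithmeticD(Token::SUB, instr);  // register-based double op
  } else {
    return DoArithmeticT(Token::SUB, instr);  // tagged: generic stub call
  }
}
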
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index b3d08c8..9f45f97 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -114,12 +114,12 @@
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
- V(IsNumberAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
+ V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
@@ -883,19 +883,6 @@
};
-class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> {
- public:
- explicit LIsNumberAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
-};
-
-
class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
public:
explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1259,7 +1246,7 @@
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
- Handle<Map> map() const { return hydrogen()->map(); }
+ Handle<Map> map() const { return hydrogen()->map().handle(); }
};
@@ -1535,6 +1522,15 @@
};
+class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+ DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+ Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
class LLoadExternalArrayPointer V8_FINAL
: public LTemplateInstruction<1, 1, 0> {
public:
@@ -2049,7 +2045,7 @@
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+ DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -2205,8 +2201,10 @@
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- Handle<Map> original_map() { return hydrogen()->original_map(); }
- Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+ Handle<Map> transitioned_map() {
+ return hydrogen()->transitioned_map().handle();
+ }
ElementsKind from_kind() { return hydrogen()->from_kind(); }
ElementsKind to_kind() { return hydrogen()->to_kind(); }
};
@@ -2701,7 +2699,7 @@
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
- HArithmeticBinaryOperation* instr);
+ HBinaryOperation* instr);
LPlatformChunk* chunk_;
CompilationInfo* info_;
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 69abc54..96fa4fc 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -691,13 +691,16 @@
}
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
- Address thunk_address,
- Register thunk_last_arg,
- int stack_space,
- int return_value_offset) {
+void MacroAssembler::CallApiFunctionAndReturn(
+ Address function_address,
+ Address thunk_address,
+ Register thunk_last_arg,
+ int stack_space,
+ Operand return_value_operand,
+ Operand* context_restore_operand) {
Label prologue;
Label promote_scheduled_exception;
+ Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label write_back;
@@ -768,7 +771,7 @@
}
// Load the value from ReturnValue
- movq(rax, Operand(rbp, return_value_offset * kPointerSize));
+ movq(rax, return_value_operand);
bind(&prologue);
// No more valid handles (the result handle was the last one). Restore
@@ -783,6 +786,7 @@
movq(rsi, scheduled_exception_address);
Cmp(Operand(rsi, 0), factory->the_hole_value());
j(not_equal, &promote_scheduled_exception);
+ bind(&exception_handled);
#if ENABLE_EXTRA_CHECKS
// Check if the function returned a valid JavaScript value.
@@ -819,11 +823,19 @@
bind(&ok);
#endif
- LeaveApiExitFrame();
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ movq(rsi, *context_restore_operand);
+ }
+ LeaveApiExitFrame(!restore_context);
ret(stack_space * kPointerSize);
bind(&promote_scheduled_exception);
- TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ {
+ FrameScope frame(this, StackFrame::INTERNAL);
+ CallRuntime(Runtime::kPromoteScheduledException, 0);
+ }
+ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
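The scheduled-exception path is reshaped because the function may now have to restore the caller's context before returning: the old TailCallRuntime never came back, whereas the new sequence calls the runtime inside an internal frame and rejoins the normal epilogue. In outline (labels as in this hunk):

    bind(&promote_scheduled_exception);
    {
      FrameScope frame(this, StackFrame::INTERNAL);  // makes the call returnable
      CallRuntime(Runtime::kPromoteScheduledException, 0);
    }
    jmp(&exception_handled);  // fall back into the epilogue (context restore + ret)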
@@ -936,6 +948,18 @@
}
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
+ xorps(dst, dst);
+ cvtlsi2sd(dst, src);
+}
+
+
+void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
+ xorps(dst, dst);
+ cvtlsi2sd(dst, src);
+}
+
+
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xorl(dst, dst);
@@ -2240,6 +2264,90 @@
// ----------------------------------------------------------------------------
+void MacroAssembler::LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found) {
+ // Register usage: result is used as a temporary.
+ Register number_string_cache = result;
+ Register mask = scratch1;
+ Register scratch = scratch2;
+
+ // Load the number string cache.
+ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ SmiToInteger32(
+ mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ shrl(mask, Immediate(1));
+ subq(mask, Immediate(1)); // Make mask.
+
+ // Calculate the entry in the number string cache. The hash value in the
+ // number string cache for smis is just the smi value, and the hash for
+ // doubles is the xor of the upper and lower words. See
+ // Heap::GetNumberStringCache.
+ Label is_smi;
+ Label load_result_from_cache;
+ JumpIfSmi(object, &is_smi);
+ CheckMap(object,
+ isolate()->factory()->heap_number_map(),
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ and_(scratch, mask);
+ // Each entry in the string cache consists of two pointer-sized fields,
+ // but the times_twice_pointer_size (multiply by 16) scale factor is not
+ // supported by the addressing modes on the x64 platform, so we have to
+ // premultiply the entry index before the lookup.
+ shl(scratch, Immediate(kPointerSizeLog2 + 1));
+
+ Register index = scratch;
+ Register probe = mask;
+ movq(probe,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ JumpIfSmi(probe, not_found);
+ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
+ j(parity_even, not_found); // Bail out if NaN is involved.
+ j(not_equal, not_found); // The cache did not contain this value.
+ jmp(&load_result_from_cache);
+
+ bind(&is_smi);
+ SmiToInteger32(scratch, object);
+ and_(scratch, mask);
+ // Each entry in the string cache consists of two pointer-sized fields,
+ // but the times_twice_pointer_size (multiply by 16) scale factor is not
+ // supported by the addressing modes on the x64 platform, so we have to
+ // premultiply the entry index before the lookup.
+ shl(scratch, Immediate(kPointerSizeLog2 + 1));
+
+ // Check if the entry is the smi we are looking for.
+ cmpq(object,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize));
+ j(not_equal, not_found);
+
+ // Get the result from the cache.
+ bind(&load_result_from_cache);
+ movq(result,
+ FieldOperand(number_string_cache,
+ index,
+ times_1,
+ FixedArray::kHeaderSize + kPointerSize));
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+}
+
+
void MacroAssembler::JumpIfNotString(Register object,
Register object_map,
Label* not_string,
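The probe above mirrors Heap::GetNumberStringCache: a smi hashes to its own value, a double to the xor of its upper and lower 32-bit words, and the resulting index is premultiplied by 16 because x64 addressing modes stop at a scale factor of 8. A standalone sketch of the hash (plain C++ with stdint types standing in for V8's internals):

    #include <cstdint>
    #include <cstring>

    // mask == (number of cache entries) - 1, i.e. FixedArray length/2 - 1.
    uint32_t SmiNumberStringCacheHash(int32_t value, uint32_t mask) {
      return static_cast<uint32_t>(value) & mask;
    }

    uint32_t DoubleNumberStringCacheHash(double value, uint32_t mask) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // safe type pun
      uint32_t lower = static_cast<uint32_t>(bits);
      uint32_t upper = static_cast<uint32_t>(bits >> 32);
      return (lower ^ upper) & mask;
    }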
@@ -2917,7 +3025,7 @@
// Value is a smi. Convert to a double and store.
// Preserve original value.
SmiToInteger32(kScratchRegister, maybe_number);
- cvtlsi2sd(xmm_scratch, kScratchRegister);
+ Cvtlsi2sd(xmm_scratch, kScratchRegister);
movsd(FieldOperand(elements, index, times_8,
FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
@@ -3050,7 +3158,7 @@
Label* conversion_failed,
Label::Distance dst) {
cvttsd2si(result_reg, input_reg);
- cvtlsi2sd(xmm0, result_reg);
+ Cvtlsi2sd(xmm0, result_reg);
ucomisd(xmm0, input_reg);
j(not_equal, conversion_failed, dst);
j(parity_even, conversion_failed, dst); // NaN.
@@ -3087,7 +3195,7 @@
movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, xmm0);
- cvtlsi2sd(temp, result_reg);
+ Cvtlsi2sd(temp, result_reg);
ucomisd(xmm0, temp);
RecordComment("Deferred TaggedToI: lost precision");
j(not_equal, lost_precision, dst);
@@ -3683,23 +3791,25 @@
PushReturnAddressFrom(rcx);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(true);
}
-void MacroAssembler::LeaveApiExitFrame() {
+void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
movq(rsp, rbp);
pop(rbp);
- LeaveExitFrameEpilogue();
+ LeaveExitFrameEpilogue(restore_context);
}
-void MacroAssembler::LeaveExitFrameEpilogue() {
+void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Isolate::kContextAddress, isolate());
Operand context_operand = ExternalOperand(context_address);
- movq(rsi, context_operand);
+ if (restore_context) {
+ movq(rsi, context_operand);
+ }
#ifdef DEBUG
movq(context_operand, Immediate(0));
#endif
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 09c8a80..09dae3e 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -302,7 +302,7 @@
// Leave the current exit frame. Expects/provides the return value in
// register rax (untouched).
- void LeaveApiExitFrame();
+ void LeaveApiExitFrame(bool restore_context);
// Push and pop the registers that can hold pointers.
void PushSafepointRegisters() { Pushad(); }
@@ -739,6 +739,17 @@
// ---------------------------------------------------------------------------
// String macros.
+ // Generate code to look up a number in the number string cache. If the
+ // number in register object is found in the cache, the generated code falls
+ // through with the result in the result register. The object and result
+ // registers may be the same. If the number is not found in the cache, the
+ // code jumps to the label not_found, leaving only the contents of register
+ // object unchanged.
+ void LookupNumberStringCache(Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* not_found);
+
// If object is a string, its map is loaded into object_map.
void JumpIfNotString(Register object,
Register object_map,
@@ -784,6 +795,12 @@
void Set(Register dst, int64_t x);
void Set(const Operand& dst, int64_t x);
+ // The cvtsi2sd instruction only writes to the low 64 bits of the dst
+ // register, which hinders register renaming and lengthens dependence
+ // chains. We therefore use xorps to clear the dst register before cvtsi2sd.
+ void Cvtlsi2sd(XMMRegister dst, Register src);
+ void Cvtlsi2sd(XMMRegister dst, const Operand& src);
+
// Move if the registers are not identical.
void Move(Register target, Register source);
@@ -1274,7 +1291,8 @@
Address thunk_address,
Register thunk_last_arg,
int stack_space,
- int return_value_offset_from_rbp);
+ Operand return_value_operand,
+ Operand* context_restore_operand);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in rsp[0], rsp[8],
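Call sites migrate from passing an rbp-relative slot count to building the Operand themselves, optionally handing over a second operand naming the slot from which to restore the context; the getter stub in stub-cache-x64.cc below, for instance, turns its old '6' into an explicit operand and passes NULL for the restore. In outline (the first four arguments are placeholders):

    __ CallApiFunctionAndReturn(function_address,
                                thunk_address,
                                callback_arg,
                                stack_space,
                                Operand(rbp, 6 * kPointerSize),  // ReturnValue slot
                                NULL);  // no context restore requested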
@@ -1430,7 +1448,7 @@
// accessible via StackSpaceOperand.
void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
- void LeaveExitFrameEpilogue();
+ void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
// Loads the top of new-space into the result register.
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 95276d5..af8e55f 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -304,32 +304,28 @@
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss,
- bool support_wrappers) {
+ Label* miss) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss,
- support_wrappers ? &check_wrapper : miss);
+ GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
// Load length directly from the string.
__ movq(rax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
- if (support_wrappers) {
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
- __ j(not_equal, miss);
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
+ __ j(not_equal, miss);
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
- }
+ // Check if the wrapped value is a string and load the length
+ // directly if it is.
+ __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+ __ movq(rax, FieldOperand(scratch2, String::kLengthOffset));
+ __ ret(0);
}
@@ -447,65 +443,61 @@
// Generates call to API function.
static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
- int argc) {
+ int argc,
+ bool restore_context) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
- // -- rsp[8] : object passing the type check
+ // -- rsp[8] : context save
+ // -- rsp[16] : object passing the type check
// (last fast api call extra argument,
// set by CheckPrototypes)
- // -- rsp[16] : api function
+ // -- rsp[24] : api function
// (first fast api call extra argument)
- // -- rsp[24] : api call data
- // -- rsp[32] : isolate
- // -- rsp[40] : ReturnValue default value
- // -- rsp[48] : ReturnValue
+ // -- rsp[32] : api call data
+ // -- rsp[40] : isolate
+ // -- rsp[48] : ReturnValue default value
+ // -- rsp[56] : ReturnValue
//
- // -- rsp[56] : last argument
+ // -- rsp[64] : last argument
// -- ...
- // -- rsp[(argc + 6) * 8] : first argument
- // -- rsp[(argc + 7) * 8] : receiver
+ // -- rsp[(argc + 7) * 8] : first argument
+ // -- rsp[(argc + 8) * 8] : receiver
// -----------------------------------
+ int api_call_argc = argc + kFastApiCallArguments;
+ StackArgumentsAccessor args(rsp, api_call_argc);
+
+ // Save calling context.
+ __ movq(args.GetArgumentOperand(api_call_argc), rsi);
+
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
__ LoadHeapObject(rdi, function);
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- int api_call_argc = argc + kFastApiCallArguments;
- StackArgumentsAccessor args(rsp, api_call_argc);
-
// Pass the additional arguments.
- __ movq(args.GetArgumentOperand(api_call_argc - 1), rdi);
+ __ movq(args.GetArgumentOperand(api_call_argc - 2), rdi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ Move(rcx, api_call_info);
__ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
- __ movq(args.GetArgumentOperand(api_call_argc - 2), rbx);
+ __ movq(args.GetArgumentOperand(api_call_argc - 3), rbx);
} else {
- __ Move(args.GetArgumentOperand(api_call_argc - 2), call_data);
+ __ Move(args.GetArgumentOperand(api_call_argc - 3), call_data);
}
__ movq(kScratchRegister,
ExternalReference::isolate_address(masm->isolate()));
- __ movq(args.GetArgumentOperand(api_call_argc - 3), kScratchRegister);
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ movq(args.GetArgumentOperand(api_call_argc - 4), kScratchRegister);
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ movq(args.GetArgumentOperand(api_call_argc - 5), kScratchRegister);
+ __ movq(args.GetArgumentOperand(api_call_argc - 6), kScratchRegister);
// Prepare arguments.
- STATIC_ASSERT(kFastApiCallArguments == 6);
+ STATIC_ASSERT(kFastApiCallArguments == 7);
__ lea(rbx, Operand(rsp, kFastApiCallArguments * kPointerSize));
// Function address is a foreign pointer outside V8's heap.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
-#if defined(__MINGW64__) || defined(_WIN64)
- Register arguments_arg = rcx;
- Register callback_arg = rdx;
-#else
- Register arguments_arg = rdi;
- Register callback_arg = rsi;
-#endif
-
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
const int kApiStackSpace = 4;
@@ -519,16 +511,29 @@
// v8::Arguments::is_construct_call_.
__ Set(StackSpaceOperand(3), 0);
+#if defined(__MINGW64__) || defined(_WIN64)
+ Register arguments_arg = rcx;
+ Register callback_arg = rdx;
+#else
+ Register arguments_arg = rdi;
+ Register callback_arg = rsi;
+#endif
+
// v8::InvocationCallback's argument.
__ lea(arguments_arg, StackSpaceOperand(0));
Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ Operand context_restore_operand(rbp, 2 * kPointerSize);
+ Operand return_value_operand(
+ rbp, (kFastApiCallArguments + 1) * kPointerSize);
__ CallApiFunctionAndReturn(function_address,
thunk_address,
callback_arg,
api_call_argc + 1,
- kFastApiCallArguments + 1);
+ return_value_operand,
+ restore_context ?
+ &context_restore_operand : NULL);
}
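The restore_context plumbing threads through three places: GenerateFastApiCall stores the calling rsi into the newly added stack slot before switching to the callee's context, CallApiFunctionAndReturn reloads rsi from the rbp-relative operand it is handed, and LeaveApiExitFrame(!restore_context) then skips the usual restore from the isolate's context address. In outline (offsets as used in these hunks):

    // 1. Save the caller's context in the highest extra-argument slot.
    __ movq(args.GetArgumentOperand(api_call_argc), rsi);
    // 2. Inside the exit frame that slot sits two pointers above rbp.
    Operand context_restore_operand(rbp, 2 * kPointerSize);
    // 3. On the way out, reload rsi and suppress the default restore.
    __ movq(rsi, context_restore_operand);
    LeaveApiExitFrame(!restore_context);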
@@ -543,6 +548,8 @@
ASSERT(!receiver.is(scratch));
const int stack_space = kFastApiCallArguments + argc + 1;
+ const int kHolderIndex = kFastApiCallArguments +
+ FunctionCallbackArguments::kHolderIndex;
// Copy return value.
__ movq(scratch, Operand(rsp, 0));
// Assign stack space for the call arguments.
@@ -550,7 +557,7 @@
// Move the return address on top of the stack.
__ movq(Operand(rsp, 0), scratch);
// Write holder to stack frame.
- __ movq(Operand(rsp, 1 * kPointerSize), receiver);
+ __ movq(Operand(rsp, kHolderIndex * kPointerSize), receiver);
// Write receiver to stack frame.
int index = stack_space;
__ movq(Operand(rsp, index-- * kPointerSize), receiver);
@@ -561,7 +568,7 @@
__ movq(Operand(rsp, index-- * kPointerSize), values[i]);
}
- GenerateFastApiCall(masm, optimization, argc);
+ GenerateFastApiCall(masm, optimization, argc, true);
}
@@ -675,7 +682,7 @@
// Invoke function.
if (can_do_fast_api_call) {
- GenerateFastApiCall(masm, optimization, arguments_.immediate());
+ GenerateFastApiCall(masm, optimization, arguments_.immediate(), false);
} else {
CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
? CALL_AS_FUNCTION
@@ -842,7 +849,7 @@
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiToInteger32(scratch1, value_reg);
- __ cvtlsi2sd(xmm0, scratch1);
+ __ Cvtlsi2sd(xmm0, scratch1);
__ jmp(&do_store);
__ bind(&heap_number);
@@ -996,7 +1003,7 @@
Label do_store, heap_number;
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiToInteger32(scratch2, value_reg);
- __ cvtlsi2sd(xmm0, scratch2);
+ __ Cvtlsi2sd(xmm0, scratch2);
__ jmp(&do_store);
__ bind(&heap_number);
@@ -1091,6 +1098,8 @@
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
+ const int kHolderIndex = kFastApiCallArguments +
+ FunctionCallbackArguments::kHolderIndex;
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
__ Move(scratch1, Handle<Map>(object->map()));
@@ -1108,7 +1117,7 @@
int depth = 0;
if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPCOnStackSize), object_reg);
+ __ movq(Operand(rsp, kHolderIndex * kPointerSize), object_reg);
}
// Check the maps in the prototype chain.
@@ -1168,7 +1177,7 @@
}
if (save_at_depth == depth) {
- __ movq(Operand(rsp, kPCOnStackSize), reg);
+ __ movq(Operand(rsp, kHolderIndex * kPointerSize), reg);
}
// Go to the next object in the prototype chain.
@@ -1390,7 +1399,8 @@
thunk_address,
getter_arg,
kStackSpace,
- 6);
+ Operand(rbp, 6 * kPointerSize),
+ NULL);
}
@@ -2508,7 +2518,7 @@
StackOperandForReturnAddress(kFastApiCallArguments * kPointerSize));
__ movq(StackOperandForReturnAddress(0), rax);
- GenerateFastApiCall(masm(), optimization, argc);
+ GenerateFastApiCall(masm(), optimization, argc, false);
__ bind(&miss);
__ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));