Merge V8 at 3.8.9.11
Bug: 5688872
Change-Id: Ie3b1dd67a730ec5e82686b7b37dba26f6a9bb24f
diff --git a/src/accessors.cc b/src/accessors.cc
index e60f568..9b16525 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -26,15 +26,16 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
-
#include "accessors.h"
-#include "ast.h"
+
+#include "contexts.h"
#include "deoptimizer.h"
#include "execution.h"
#include "factory.h"
+#include "frames-inl.h"
+#include "isolate.h"
#include "list-inl.h"
-#include "safepoint-table.h"
-#include "scopeinfo.h"
+#include "property-details.h"
namespace v8 {
namespace internal {
@@ -574,11 +575,12 @@
Handle<JSFunction> inlined_function,
int inlined_frame_index) {
Factory* factory = Isolate::Current()->factory();
- int args_count = inlined_function->shared()->formal_parameter_count();
- ScopedVector<SlotRef> args_slots(args_count);
- SlotRef::ComputeSlotMappingForArguments(frame,
- inlined_frame_index,
- &args_slots);
+ Vector<SlotRef> args_slots =
+ SlotRef::ComputeSlotMappingForArguments(
+ frame,
+ inlined_frame_index,
+ inlined_function->shared()->formal_parameter_count());
+ int args_count = args_slots.length();
Handle<JSObject> arguments =
factory->NewArgumentsObject(inlined_function, args_count);
Handle<FixedArray> array = factory->NewFixedArray(args_count);
@@ -587,6 +589,7 @@
array->set(i, *value);
}
arguments->set_elements(*array);
+ args_slots.Dispose();
// Return the freshly allocated arguments object.
return *arguments;
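Note on the new contract: ComputeSlotMappingForArguments now allocates the
returned i::Vector's backing store itself, and the caller releases it with
Dispose() after copying the values out (hence the added args_slots.Dispose()
above). A minimal sketch of that caller-disposes convention, with Compute()
as a hypothetical stand-in for the real slot-mapping function:

    // Sketch only: Compute() stands in for ComputeSlotMappingForArguments.
    Vector<int> Compute(int n) {
      Vector<int> result = Vector<int>::New(n);  // heap-allocates the store
      for (int i = 0; i < n; i++) result[i] = i;
      return result;  // ownership transfers to the caller
    }

    void Use() {
      Vector<int> v = Compute(3);
      // ... copy elements out ...
      v.Dispose();  // frees the backing store exactly once
    }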
diff --git a/src/accessors.h b/src/accessors.h
index 385536d..36b9a99 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,6 +29,7 @@
#define V8_ACCESSORS_H_
#include "allocation.h"
+#include "v8globals.h"
namespace v8 {
namespace internal {
diff --git a/src/allocation.cc b/src/allocation.cc
index 119b087..6c7a08c 100644
--- a/src/allocation.cc
+++ b/src/allocation.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,10 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "../include/v8stdint.h"
-#include "globals.h"
-#include "checks.h"
#include "allocation.h"
+
+#include <stdlib.h> // For free, malloc.
+#include <string.h> // For memcpy.
+#include "checks.h"
#include "utils.h"
namespace v8 {
diff --git a/src/allocation.h b/src/allocation.h
index 00c5664..69e72bd 100644
--- a/src/allocation.h
+++ b/src/allocation.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,7 +28,6 @@
#ifndef V8_ALLOCATION_H_
#define V8_ALLOCATION_H_
-#include "checks.h"
#include "globals.h"
namespace v8 {
diff --git a/src/api.cc b/src/api.cc
index f60b2cd..ff7ab2d 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,34 +25,36 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
-
#include "api.h"
-#include "arguments.h"
+#include <math.h> // For isnan.
+#include <string.h> // For memcpy, strlen.
+#include "../include/v8-debug.h"
+#include "../include/v8-profiler.h"
+#include "../include/v8-testing.h"
#include "bootstrapper.h"
#include "compiler.h"
+#include "conversions-inl.h"
+#include "counters.h"
#include "debug.h"
#include "deoptimizer.h"
#include "execution.h"
-#include "flags.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "messages.h"
-#include "natives.h"
#include "parser.h"
#include "platform.h"
#include "profile-generator-inl.h"
+#include "property-details.h"
+#include "property.h"
#include "runtime-profiler.h"
#include "scanner-character-streams.h"
-#include "serialize.h"
#include "snapshot.h"
+#include "unicode-inl.h"
#include "v8threads.h"
#include "version.h"
#include "vm-state-inl.h"
-#include "../include/v8-profiler.h"
-#include "../include/v8-testing.h"
#define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
@@ -78,7 +80,7 @@
bool has_pending_exception = false
-#define EXCEPTION_BAILOUT_CHECK(isolate, value) \
+#define EXCEPTION_BAILOUT_CHECK_GENERIC(isolate, value, do_callback) \
do { \
i::HandleScopeImplementer* handle_scope_implementer = \
(isolate)->handle_scope_implementer(); \
@@ -91,11 +93,22 @@
} \
bool call_depth_is_zero = handle_scope_implementer->CallDepthIsZero(); \
(isolate)->OptionalRescheduleException(call_depth_is_zero); \
+ do_callback \
return value; \
} \
+ do_callback \
} while (false)
+#define EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, value) \
+ EXCEPTION_BAILOUT_CHECK_GENERIC( \
+ isolate, value, i::V8::FireCallCompletedCallback(isolate);)
+
+
+#define EXCEPTION_BAILOUT_CHECK(isolate, value) \
+ EXCEPTION_BAILOUT_CHECK_GENERIC(isolate, value, ;)
+
+
#define API_ENTRY_CHECK(isolate, msg) \
do { \
if (v8::Locker::IsActive()) { \
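The rewritten bailout macro above threads an arbitrary trailing statement
through both exit paths: the semicolon travels inside the do_callback
argument (`i::V8::FireCallCompletedCallback(isolate);` or the empty
statement `;`), so it is spliced verbatim before the early return and again
on the fall-through path. A reduced sketch of the pattern, with Failed()
and Hook() as hypothetical placeholders:

    #define BAILOUT_GENERIC(value, do_callback) \
      do {                                      \
        if (Failed()) {                         \
          do_callback                           \
          return value;                         \
        }                                       \
        do_callback                             \
      } while (false)

    // A whole statement, semicolon included, is passed as the argument.
    #define BAILOUT_WITH_HOOK(value) BAILOUT_GENERIC(value, Hook();)

    // The plain variant splices an empty statement.
    #define BAILOUT_PLAIN(value) BAILOUT_GENERIC(value, ;)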
@@ -730,6 +743,7 @@
i::Context* last_context =
isolate->handle_scope_implementer()->RestoreContext();
isolate->set_context(last_context);
+ isolate->set_context_exit_happened(true);
}
@@ -1568,7 +1582,7 @@
isolate->context()->global_proxy(), isolate);
i::Handle<i::Object> result =
i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+ EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
raw_result = *result;
}
i::Handle<i::Object> result(raw_result, isolate);
@@ -2154,6 +2168,11 @@
if (obj->IsSmi()) return true;
if (obj->IsNumber()) {
double value = obj->Number();
+ static const i::DoubleRepresentation minus_zero(-0.0);
+ i::DoubleRepresentation rep(value);
+ if (rep.bits == minus_zero.bits) {
+ return false;
+ }
return i::FastI2D(i::FastD2I(value)) == value;
}
return false;
@@ -2166,6 +2185,11 @@
if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
if (obj->IsNumber()) {
double value = obj->Number();
+ static const i::DoubleRepresentation minus_zero(-0.0);
+ i::DoubleRepresentation rep(value);
+ if (rep.bits == minus_zero.bits) {
+ return false;
+ }
return i::FastUI2D(i::FastD2UI(value)) == value;
}
return false;
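Both checks above reject minus zero by bit pattern because IEEE-754
comparison treats -0.0 and +0.0 as equal, so the round-trip test
FastI2D(FastD2I(value)) == value would wrongly accept -0.0 as an
Int32/Uint32. A standalone sketch of the same test:

    #include <stdint.h>
    #include <string.h>

    // Detect -0.0 by its bits: sign bit set, everything else clear.
    static bool IsMinusZero(double value) {
      uint64_t bits;
      uint64_t minus_zero_bits;
      const double minus_zero = -0.0;
      memcpy(&bits, &value, sizeof(bits));
      memcpy(&minus_zero_bits, &minus_zero, sizeof(minus_zero_bits));
      return bits == minus_zero_bits;
    }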
@@ -2728,7 +2752,7 @@
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj = i::SetElement(
+ i::Handle<i::Object> obj = i::JSObject::SetElement(
self,
index,
value_obj,
@@ -2834,7 +2858,7 @@
return Local<v8::Value>());
ENTER_V8(isolate);
i::Handle<i::Object> self = Utils::OpenHandle(this);
- i::Handle<i::Object> result = i::GetPrototype(self);
+ i::Handle<i::Object> result(self->GetPrototype());
return Utils::ToLocal(result);
}
@@ -2988,7 +3012,7 @@
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- return i::DeleteProperty(self, key_obj)->IsTrue();
+ return i::JSObject::DeleteProperty(self, key_obj)->IsTrue();
}
@@ -3009,7 +3033,7 @@
ENTER_V8(isolate);
HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return i::DeleteElement(self, index)->IsTrue();
+ return i::JSObject::DeleteElement(self, index)->IsTrue();
}
@@ -3214,7 +3238,7 @@
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return i::GetIdentityHash(self);
+ return i::JSObject::GetIdentityHash(self);
}
@@ -3227,7 +3251,8 @@
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- i::Handle<i::Object> result = i::SetHiddenProperty(self, key_obj, value_obj);
+ i::Handle<i::Object> result =
+ i::JSObject::SetHiddenProperty(self, key_obj, value_obj);
return *result == *self;
}
@@ -3494,7 +3519,7 @@
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned =
i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+ EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
return Utils::ToLocal(scope.CloseAndEscape(returned));
}
@@ -3515,7 +3540,7 @@
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned =
i::Execution::New(fun, argc, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+ EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
return Utils::ToLocal(scope.CloseAndEscape(
i::Handle<i::JSObject>::cast(returned)));
}
@@ -3528,7 +3553,7 @@
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned =
i::Execution::Call(fun, obj, argc, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+ EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
ASSERT(!delegate->IsUndefined());
return Utils::ToLocal(scope.CloseAndEscape(returned));
}
@@ -3555,7 +3580,7 @@
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned =
i::Execution::New(function, argc, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+ EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
return scope.Close(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
}
@@ -3576,7 +3601,7 @@
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned =
i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
+ EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>());
raw_result = *returned;
}
i::Handle<i::Object> result(raw_result);
@@ -3599,6 +3624,12 @@
}
+Handle<Value> Function::GetInferredName() const {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name()));
+}
+
+
ScriptOrigin Function::GetScriptOrigin() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
if (func->shared()->script()->IsScript()) {
@@ -4027,12 +4058,19 @@
}
-bool v8::V8::IdleNotification() {
+void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
+ i::Isolate* isolate = i::Isolate::Current();
+ IsDeadCheck(isolate, "v8::V8::VisitExternalResources");
+ isolate->heap()->VisitExternalResources(visitor);
+}
+
+
+bool v8::V8::IdleNotification(int hint) {
// Returning true tells the caller that it need not
// continue to call IdleNotification.
i::Isolate* isolate = i::Isolate::Current();
if (isolate == NULL || !isolate->IsInitialized()) return true;
- return i::V8::IdleNotification();
+ return i::V8::IdleNotification(hint);
}
@@ -5045,6 +5083,21 @@
}
+void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
+ if (callback == NULL) return;
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::AddCallCompletedCallback()")) return;
+ i::V8::AddCallCompletedCallback(callback);
+}
+
+
+void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::RemoveCallCompletedCallback()")) return;
+ i::V8::RemoveCallCompletedCallback(callback);
+}
+
+
void V8::PauseProfiler() {
i::Isolate* isolate = i::Isolate::Current();
isolate->logger()->PauseProfiler();
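The two new entry points let an embedder observe the completion of the
outermost API call into script; the callback is the one fired by
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK above. A hypothetical embedder sketch
(OnCallCompleted and the install/remove helpers are illustrative):

    #include <v8.h>

    static void OnCallCompleted() {
      // Runs when the outermost Function::Call / Script::Run returns.
    }

    void InstallCallHook() {
      v8::V8::AddCallCompletedCallback(OnCallCompleted);
    }

    void RemoveCallHook() {
      v8::V8::RemoveCallCompletedCallback(OnCallCompleted);
    }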
@@ -5516,7 +5569,7 @@
void Debug::ProcessDebugMessages() {
- i::Execution::ProcessDebugMesssages(true);
+ i::Execution::ProcessDebugMessages(true);
}
Local<Context> Debug::GetDebugContext() {
diff --git a/src/api.h b/src/api.h
index a825dd7..89cf0c8 100644
--- a/src/api.h
+++ b/src/api.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,10 +28,14 @@
#ifndef V8_API_H_
#define V8_API_H_
-#include "apiutils.h"
-#include "factory.h"
+#include "v8.h"
#include "../include/v8-testing.h"
+#include "apiutils.h"
+#include "contexts.h"
+#include "factory.h"
+#include "isolate.h"
+#include "list-inl.h"
namespace v8 {
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 2ec6c7c..dd8ffcd 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -38,6 +38,7 @@
#define V8_ARM_ASSEMBLER_ARM_INL_H_
#include "arm/assembler-arm.h"
+
#include "cpu.h"
#include "debug.h"
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 329493a..2592236 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -317,7 +317,7 @@
own_buffer_ = false;
}
- // Setup buffer pointers.
+ // Set up buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -349,7 +349,7 @@
CheckConstPool(true, false);
ASSERT(num_pending_reloc_info_ == 0);
- // Setup code descriptor.
+ // Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
@@ -2446,7 +2446,7 @@
}
CHECK_GT(desc.buffer_size, 0); // no overflow
- // Setup new buffer.
+ // Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index e88739e..11e39df 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -300,11 +300,13 @@
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
-// Aliases for double registers.
-static const DwVfpRegister& kFirstCalleeSavedDoubleReg = d8;
-static const DwVfpRegister& kLastCalleeSavedDoubleReg = d15;
-static const DwVfpRegister& kDoubleRegZero = d14;
-static const DwVfpRegister& kScratchDoubleReg = d15;
+// Aliases for double registers. Defined using #define instead of
+// "static const DwVfpRegister&" because Clang complains otherwise when a
+// compilation unit that includes this header doesn't use the variables.
+#define kFirstCalleeSavedDoubleReg d8
+#define kLastCalleeSavedDoubleReg d15
+#define kDoubleRegZero d14
+#define kScratchDoubleReg d15
// Coprocessor register
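The rationale in the new comment, reduced to a sketch: a namespace-scope
`static const T&` in a header gives every including translation unit its own
internal-linkage reference, and Clang warns (-Wunused-variable) in any unit
that never reads it, while a macro alias expands only at its use sites and
leaves nothing to warn about. Illustrative only:

    // regs.h (sketch)
    struct Reg { int code; };
    const Reg d14 = { 14 };

    // Clang: "unused variable 'kZeroRef'" in every TU that includes this
    // header but never touches the alias.
    static const Reg& kZeroRef = d14;

    // No object is emitted; nothing to warn about in unreferencing TUs.
    #define kZeroMacro d14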
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index d0136f5..b461b45 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -72,6 +72,22 @@
}
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+ Register result) {
+ // Load the global context.
+ __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(result,
+ FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+ // Load the InternalArray function from the global context.
+ __ ldr(result,
+ MemOperand(result,
+ Context::SlotOffset(
+ Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+}
+
+
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
// Load the global context.
@@ -300,7 +316,8 @@
static void ArrayNativeCode(MacroAssembler* masm,
Label* call_generic_code) {
Counters* counters = masm->isolate()->counters();
- Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array;
+ Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
+ has_non_smi_element;
// Check for array construction with zero arguments or one.
__ cmp(r0, Operand(0, RelocInfo::NONE));
@@ -316,7 +333,7 @@
r5,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, r3, r4);
- // Setup return value, remove receiver from stack and return.
+ // Set up return value, remove receiver from stack and return.
__ mov(r0, r2);
__ add(sp, sp, Operand(kPointerSize));
__ Jump(lr);
@@ -359,7 +376,7 @@
true,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, r2, r4);
- // Setup return value, remove receiver and argument from stack and return.
+ // Set up return value, remove receiver and argument from stack and return.
__ mov(r0, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Jump(lr);
@@ -394,13 +411,18 @@
// r5: elements_array_end (untagged)
// sp[0]: last argument
Label loop, entry;
+ __ mov(r7, sp);
__ jmp(&entry);
__ bind(&loop);
- __ ldr(r2, MemOperand(sp, kPointerSize, PostIndex));
+ __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
+ if (FLAG_smi_only_arrays) {
+ __ JumpIfNotSmi(r2, &has_non_smi_element);
+ }
__ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
__ bind(&entry);
__ cmp(r4, r5);
__ b(lt, &loop);
+ __ mov(sp, r7);
// Remove caller arguments and receiver from the stack, setup return value and
// return.
@@ -410,6 +432,44 @@
__ add(sp, sp, Operand(kPointerSize));
__ mov(r0, r3);
__ Jump(lr);
+
+ __ bind(&has_non_smi_element);
+ __ UndoAllocationInNewSpace(r3, r4);
+ __ b(call_generic_code);
+}
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the InternalArray function.
+ GenerateLoadInternalArrayFunction(masm, r1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin InternalArray functions should be maps.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ tst(r2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for InternalArray function");
+ __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ Assert(eq, "Unexpected initial map for InternalArray function");
+ }
+
+ // Run the native code for the InternalArray function called as a normal
+ // function.
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code if the specialized code cannot handle the
+ // construction.
+ __ bind(&generic_array_code);
+
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->InternalArrayCodeGeneric();
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
}
@@ -891,10 +951,10 @@
// sp[4]: number of arguments (smi-tagged)
__ ldr(r3, MemOperand(sp, 4 * kPointerSize));
- // Setup pointer to last argument.
+ // Set up pointer to last argument.
__ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- // Setup number of arguments for function call below
+ // Set up number of arguments for function call below
__ mov(r0, Operand(r3, LSR, kSmiTagSize));
// Copy arguments and receiver to the expression stack.
@@ -1022,10 +1082,7 @@
// Set up the context from the function argument.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // Set up the roots register.
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
- __ mov(r10, Operand(roots_array_start));
+ __ InitializeRootRegister();
// Push the function and the receiver onto the stack.
__ push(r1);
@@ -1703,6 +1760,7 @@
__ bind(&invoke);
__ Call(r3);
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
// Exit frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ Jump(lr);
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index f2c0f99..c33df5c 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -156,13 +156,13 @@
// Load the function from the stack.
__ ldr(r3, MemOperand(sp, 0));
- // Setup the object header.
+ // Set up the object header.
__ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
- // Setup the fixed slots.
+ // Set up the fixed slots.
__ mov(r1, Operand(Smi::FromInt(0)));
__ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
@@ -207,7 +207,7 @@
// Load the serialized scope info from the stack.
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- // Setup the object header.
+ // Set up the object header.
__ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r2, Operand(Smi::FromInt(length)));
@@ -229,7 +229,7 @@
__ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
- // Setup the fixed slots.
+ // Set up the fixed slots.
__ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
__ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
__ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
@@ -717,7 +717,7 @@
// Get the absolute value of the object (as an unsigned integer).
__ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
- // Get mantisssa[51:20].
+ // Get mantissa[51:20].
// Get the position of the first set bit.
__ CountLeadingZeros(dst1, int_scratch, scratch2);
@@ -951,7 +951,7 @@
// non zero bits left. So we need the (30 - exponent) last bits of the
// 31 higher bits of the mantissa to be null.
// Because bits [21:0] are null, we can check instead that the
- // (32 - exponent) last bits of the 32 higher bits of the mantisssa are null.
+ // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
// Get the 32 higher bits of the mantissa in dst.
__ Ubfx(dst,
@@ -3455,110 +3455,202 @@
void MathPowStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
+ CpuFeatures::Scope vfp3_scope(VFP3);
+ const Register base = r1;
+ const Register exponent = r2;
+ const Register heapnumbermap = r5;
+ const Register heapnumber = r0;
+ const DoubleRegister double_base = d1;
+ const DoubleRegister double_exponent = d2;
+ const DoubleRegister double_result = d3;
+ const DoubleRegister double_scratch = d0;
+ const SwVfpRegister single_scratch = s0;
+ const Register scratch = r9;
+ const Register scratch2 = r7;
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
-
- Label base_not_smi;
- Label exponent_not_smi;
- Label convert_exponent;
-
- const Register base = r0;
- const Register exponent = r1;
- const Register heapnumbermap = r5;
- const Register heapnumber = r6;
- const DoubleRegister double_base = d0;
- const DoubleRegister double_exponent = d1;
- const DoubleRegister double_result = d2;
- const SwVfpRegister single_scratch = s0;
- const Register scratch = r9;
- const Register scratch2 = r7;
-
- __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+ Label call_runtime, done, exponent_not_smi, int_exponent;
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi, unpack_exponent;
+ // The exponent and base are supplied as arguments on the stack.
+ // This can only happen if the stub is called from non-optimized code.
+ // Load input parameters from stack to double registers.
__ ldr(base, MemOperand(sp, 1 * kPointerSize));
__ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
- // Convert base to double value and store it in d0.
- __ JumpIfNotSmi(base, &base_not_smi);
- // Base is a Smi. Untag and convert it.
- __ SmiUntag(base);
- __ vmov(single_scratch, base);
- __ vcvt_f64_s32(double_base, single_scratch);
- __ b(&convert_exponent);
+ __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
- __ bind(&base_not_smi);
+ __ JumpIfSmi(base, &base_is_smi);
__ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
__ cmp(scratch, heapnumbermap);
__ b(ne, &call_runtime);
- // Base is a heapnumber. Load it into double register.
- __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
- __ bind(&convert_exponent);
+ __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+ __ jmp(&unpack_exponent);
+
+ __ bind(&base_is_smi);
+ __ SmiUntag(base);
+ __ vmov(single_scratch, base);
+ __ vcvt_f64_s32(double_base, single_scratch);
+ __ bind(&unpack_exponent);
+
__ JumpIfNotSmi(exponent, &exponent_not_smi);
__ SmiUntag(exponent);
-
- // The base is in a double register and the exponent is
- // an untagged smi. Allocate a heap number and call a
- // C function for integer exponents. The register containing
- // the heap number is callee-saved.
- __ AllocateHeapNumber(heapnumber,
- scratch,
- scratch2,
- heapnumbermap,
- &call_runtime);
- __ push(lr);
- __ PrepareCallCFunction(1, 1, scratch);
- __ SetCallCDoubleArguments(double_base, exponent);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::power_double_int_function(masm->isolate()),
- 1, 1);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
- }
- __ vstr(double_result,
- FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- __ mov(r0, heapnumber);
- __ Ret(2 * kPointerSize);
+ __ jmp(&int_exponent);
__ bind(&exponent_not_smi);
__ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
__ cmp(scratch, heapnumbermap);
__ b(ne, &call_runtime);
- // Exponent is a heapnumber. Load it into double register.
__ vldr(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ // Base is already in double_base.
+ __ JumpIfNotSmi(exponent, &exponent_not_smi);
+ __ SmiUntag(exponent);
+ __ jmp(&int_exponent);
- // The base and the exponent are in double registers.
- // Allocate a heap number and call a C function for
- // double exponents. The register containing
- // the heap number is callee-saved.
- __ AllocateHeapNumber(heapnumber,
- scratch,
- scratch2,
- heapnumbermap,
- &call_runtime);
+ __ bind(&exponent_not_smi);
+ __ vldr(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+ }
+
+ if (exponent_type_ != INTEGER) {
+ Label int_exponent_convert;
+ // Detect integer exponents stored as double.
+ __ vcvt_u32_f64(single_scratch, double_exponent);
+ // We do not check for NaN or Infinity here because comparing numbers on
+ // ARM correctly distinguishes NaNs. We end up calling the built-in.
+ __ vcvt_f64_u32(double_scratch, single_scratch);
+ __ VFPCompareAndSetFlags(double_scratch, double_exponent);
+ __ b(eq, &int_exponent_convert);
+
+ if (exponent_type_ == ON_STACK) {
+ // Detect square root case. Crankshaft detects constant +/-0.5 at
+ // compile time and uses DoMathPowHalf instead. We then skip this check
+ // for non-constant cases of +/-0.5 as these hardly occur.
+ Label not_plus_half;
+
+ // Test for 0.5.
+ __ vmov(double_scratch, 0.5);
+ __ VFPCompareAndSetFlags(double_exponent, double_scratch);
+ __ b(ne, &not_plus_half);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+ __ vmov(double_scratch, -V8_INFINITY);
+ __ VFPCompareAndSetFlags(double_base, double_scratch);
+ __ vneg(double_result, double_scratch, eq);
+ __ b(eq, &done);
+
+ // Add +0 to convert -0 to +0.
+ __ vadd(double_scratch, double_base, kDoubleRegZero);
+ __ vsqrt(double_result, double_scratch);
+ __ jmp(&done);
+
+ __ bind(&not_plus_half);
+ __ vmov(double_scratch, -0.5);
+ __ VFPCompareAndSetFlags(double_exponent, double_scratch);
+ __ b(ne, &call_runtime);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+ __ vmov(double_scratch, -V8_INFINITY);
+ __ VFPCompareAndSetFlags(double_base, double_scratch);
+ __ vmov(double_result, kDoubleRegZero, eq);
+ __ b(eq, &done);
+
+ // Add +0 to convert -0 to +0.
+ __ vadd(double_scratch, double_base, kDoubleRegZero);
+ __ vmov(double_result, 1);
+ __ vsqrt(double_scratch, double_scratch);
+ __ vdiv(double_result, double_result, double_scratch);
+ __ jmp(&done);
+ }
+
__ push(lr);
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
{
AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
0, 2);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
}
- __ vstr(double_result,
- FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- __ mov(r0, heapnumber);
- __ Ret(2 * kPointerSize);
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+ __ jmp(&done);
+
+ __ bind(&int_exponent_convert);
+ __ vcvt_u32_f64(single_scratch, double_exponent);
+ __ vmov(exponent, single_scratch);
}
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ // Calculate power with integer exponent.
+ __ bind(&int_exponent);
+
+ __ mov(scratch, exponent); // Back up exponent.
+ __ vmov(double_scratch, double_base); // Back up base.
+ __ vmov(double_result, 1.0);
+
+ // Get absolute value of exponent.
+ __ cmp(scratch, Operand(0));
+ __ mov(scratch2, Operand(0), LeaveCC, mi);
+ __ sub(scratch, scratch2, scratch, LeaveCC, mi);
+
+ Label while_true;
+ __ bind(&while_true);
+ __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
+ __ vmul(double_result, double_result, double_scratch, cs);
+ __ vmul(double_scratch, double_scratch, double_scratch, ne);
+ __ b(ne, &while_true);
+
+ __ cmp(exponent, Operand(0));
+ __ b(ge, &done);
+ __ vmov(double_scratch, 1.0);
+ __ vdiv(double_result, double_scratch, double_result);
+ // Test whether result is zero. Bail out to check for subnormal result.
+ // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+ __ VFPCompareAndSetFlags(double_result, 0.0);
+ __ b(ne, &done);
+ // double_exponent may not contain the exponent value if the input was a
+ // smi. We set it with the exponent value before bailing out.
+ __ vmov(single_scratch, exponent);
+ __ vcvt_f64_s32(double_exponent, single_scratch);
+
+ // Returning or bailing out.
+ Counters* counters = masm->isolate()->counters();
+ if (exponent_type_ == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
+ // The stub is called from non-optimized code, which expects the result
+ // as heap number in exponent.
+ __ bind(&done);
+ __ AllocateHeapNumber(
+ heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
+ __ vstr(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ ASSERT(heapnumber.is(r0));
+ __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+ __ Ret(2);
+ } else {
+ __ push(lr);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ }
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+
+ __ bind(&done);
+ __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+ __ Ret();
+ }
}
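The int_exponent path above is binary exponentiation: each ASR shifts the
next exponent bit into the carry flag, the carry-conditional vmul folds the
base into the result when that bit was set, and the base is squared every
round; a negative exponent takes a final reciprocal, re-checking for a zero
result because x^-y and (1/x)^y can disagree once subnormals are involved
(hence the bail-out to the C function). A scalar sketch of the loop:

    // Sketch of the square-and-multiply loop; mirrors the asm, not a
    // drop-in replacement (the stub's subnormal bail-out is only noted).
    static double PowInt(double base, int exponent) {
      double result = 1.0;
      unsigned bits = exponent < 0 ? -(unsigned)exponent : (unsigned)exponent;
      while (bits != 0) {
        if (bits & 1) result *= base;  // vmul(double_result, ..., cs)
        base *= base;                  // vmul(double_scratch, ..., ne)
        bits >>= 1;                    // mov(scratch, ASR 1, SetCC)
      }
      // Negative exponent: invert; if this underflows to 0.0 the stub
      // bails out to power_double_double_function instead.
      return exponent < 0 ? 1.0 / result : result;
    }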
@@ -3750,7 +3842,7 @@
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
- // Setup argc and the builtin function in callee-saved registers.
+ // Set up argc and the builtin function in callee-saved registers.
__ mov(r4, Operand(r0));
__ mov(r5, Operand(r1));
@@ -3827,7 +3919,7 @@
// r2: receiver
// r3: argc
- // Setup argv in r4.
+ // Set up argv in r4.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
if (CpuFeatures::IsSupported(VFP3)) {
offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
@@ -3850,7 +3942,7 @@
__ ldr(r5, MemOperand(r5));
__ Push(r8, r7, r6, r5);
- // Setup frame pointer for the frame to be pushed.
+ // Set up frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
// If this is the outermost JS call, set js_entry_sp value.
@@ -4310,7 +4402,7 @@
__ str(r3, FieldMemOperand(r0, i));
}
- // Setup the callee in-object property.
+ // Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
__ ldr(r3, MemOperand(sp, 2 * kPointerSize));
const int kCalleeOffset = JSObject::kHeaderSize +
@@ -4323,7 +4415,7 @@
Heap::kArgumentsLengthIndex * kPointerSize;
__ str(r2, FieldMemOperand(r0, kLengthOffset));
- // Setup the elements pointer in the allocated arguments object.
+ // Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, r4 will point there, otherwise
// it will point to the backing store.
__ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
@@ -4418,7 +4510,7 @@
__ Ret();
// Do the runtime call to allocate the arguments object.
- // r2 = argument count (taggged)
+ // r2 = argument count (tagged)
__ bind(&runtime);
__ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@@ -4491,7 +4583,7 @@
// Get the parameters pointer from the stack.
__ ldr(r2, MemOperand(sp, 1 * kPointerSize));
- // Setup the elements pointer in the allocated arguments object and
+ // Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
@@ -4503,7 +4595,7 @@
// Copy the fixed array slots.
Label loop;
- // Setup r4 to point to the first array slot.
+ // Set up r4 to point to the first array slot.
__ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
// Pre-decrement r2 with kPointerSize on each iteration.
@@ -5118,7 +5210,7 @@
// of the original receiver from the call site).
__ bind(&non_function);
__ str(r1, MemOperand(sp, argc_ * kPointerSize));
- __ mov(r0, Operand(argc_)); // Setup the number of arguments.
+ __ mov(r0, Operand(argc_)); // Set up the number of arguments.
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
__ SetCallKind(r5, CALL_AS_METHOD);
@@ -5639,7 +5731,11 @@
Register hash,
Register character) {
// hash = character + (character << 10);
- __ add(hash, character, Operand(character, LSL, 10));
+ __ LoadRoot(hash, Heap::kHashSeedRootIndex);
+ // Untag smi seed and add the character.
+ __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
+ // hash += hash << 10;
+ __ add(hash, hash, Operand(hash, LSL, 10));
// hash ^= hash >> 6;
__ eor(hash, hash, Operand(hash, LSR, 6));
}
@@ -5664,13 +5760,12 @@
// hash ^= hash >> 11;
__ eor(hash, hash, Operand(hash, LSR, 11));
// hash += hash << 15;
- __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
+ __ add(hash, hash, Operand(hash, LSL, 15));
- uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
- __ and_(hash, hash, Operand(kHashShiftCutOffMask));
+ __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
// if (hash == 0) hash = 27;
- __ mov(hash, Operand(27), LeaveCC, eq);
+ __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
}
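The hashing changes above seed V8's one-at-a-time string hash with the
heap's hash-seed root (untagged from its smi) and finish by masking with
String::kHashBitMask instead of the old shift-cutoff mask, substituting
StringHasher::kZeroHash (the old literal 27) when the masked value is zero.
A scalar sketch; the two constants are written out as assumptions, and the
per-character step is the standard one-at-a-time round (only the init and
finalize lines appear in the hunks above):

    #include <stdint.h>

    static const uint32_t kHashBitMask = (1u << 30) - 1;  // assumed width
    static const uint32_t kZeroHash = 27;

    // Assumes length >= 1.
    static uint32_t SeededStringHash(uint32_t seed,
                                     const uint8_t* chars,
                                     int length) {
      uint32_t hash = seed + chars[0];    // init: seed folded in first
      hash += hash << 10;
      hash ^= hash >> 6;
      for (int i = 1; i < length; i++) {  // standard accumulation round
        hash += chars[i];
        hash += hash << 10;
        hash ^= hash >> 6;
      }
      // (an earlier hash += hash << 3 finalize step is outside this hunk)
      hash ^= hash >> 11;                 // finalize, as in the hunk above
      hash += hash << 15;
      hash &= kHashBitMask;
      return hash == 0 ? kZeroHash : hash;
    }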
@@ -5694,37 +5789,23 @@
static const int kFromOffset = 1 * kPointerSize;
static const int kStringOffset = 2 * kPointerSize;
- // Check bounds and smi-ness.
- Register to = r6;
- Register from = r7;
-
- __ Ldrd(to, from, MemOperand(sp, kToOffset));
+ __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
STATIC_ASSERT(kFromOffset == kToOffset + 4);
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
// I.e., arithmetic shift right by one un-smi-tags.
- __ mov(r2, Operand(to, ASR, 1), SetCC);
- __ mov(r3, Operand(from, ASR, 1), SetCC, cc);
+ __ mov(r2, Operand(r2, ASR, 1), SetCC);
+ __ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
// If either to or from had the smi tag bit set, then carry is set now.
__ b(cs, &runtime); // Either "from" or "to" is not a smi.
__ b(mi, &runtime); // From is negative.
- // Both to and from are smis.
+ // Both r2 and r3 are untagged integers.
__ sub(r2, r2, Operand(r3), SetCC);
__ b(mi, &runtime); // Fail if from > to.
- // Special handling of sub-strings of length 1 and 2. One character strings
- // are handled in the runtime system (looked up in the single character
- // cache). Two character strings are looked for in the symbol cache in
- // generated code.
- __ cmp(r2, Operand(2));
- __ b(lt, &runtime);
- // r2: result string length
- // r3: from index (untagged smi)
- // r6 (a.k.a. to): to (smi)
- // r7 (a.k.a. from): from offset (smi)
- // Make sure first argument is a sequential (or flat) string.
+ // Make sure first argument is a string.
__ ldr(r0, MemOperand(sp, kStringOffset));
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(r0, &runtime);
@@ -5739,67 +5820,15 @@
__ cmp(r2, Operand(r4, ASR, 1));
__ b(eq, &return_r0);
- Label create_slice;
- if (FLAG_string_slices) {
- __ cmp(r2, Operand(SlicedString::kMinLength));
- __ b(ge, &create_slice);
- }
-
- // r0: original string
- // r1: instance type
- // r2: result string length
- // r3: from index (untagged smi)
- // r6 (a.k.a. to): to (smi)
- // r7 (a.k.a. from): from offset (smi)
- Label seq_string;
- __ and_(r4, r1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(kSeqStringTag < kConsStringTag);
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kConsStringTag < kSlicedStringTag);
- __ cmp(r4, Operand(kConsStringTag));
- __ b(gt, &runtime); // Slices and external strings go to runtime.
- __ b(lt, &seq_string); // Sequential strings are handled directly.
-
- // Cons string. Try to recurse (once) on the first substring.
- // (This adds a little more generality than necessary to handle flattened
- // cons strings, but not much).
- __ ldr(r0, FieldMemOperand(r0, ConsString::kFirstOffset));
- __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ tst(r1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(kSeqStringTag == 0);
- __ b(ne, &runtime); // Cons, slices and external strings go to runtime.
-
- // Definitly a sequential string.
- __ bind(&seq_string);
-
- // r0: original string
- // r1: instance type
- // r2: result string length
- // r3: from index (untagged smi)
- // r6 (a.k.a. to): to (smi)
- // r7 (a.k.a. from): from offset (smi)
- __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
- __ cmp(r4, Operand(to));
- __ b(lt, &runtime); // Fail if to > length.
- to = no_reg;
-
- // r0: original string or left hand side of the original cons string.
- // r1: instance type
- // r2: result string length
- // r3: from index (untagged smi)
- // r7 (a.k.a. from): from offset (smi)
- // Check for flat ASCII string.
- Label non_ascii_flat;
- __ tst(r1, Operand(kStringEncodingMask));
- STATIC_ASSERT(kTwoByteStringTag == 0);
- __ b(eq, &non_ascii_flat);
-
Label result_longer_than_two;
+ // Check for special case of two character ASCII string, in which case
+ // we do a lookup in the symbol table first.
__ cmp(r2, Operand(2));
__ b(gt, &result_longer_than_two);
+ __ b(lt, &runtime);
- // Sub string of length 2 requested.
+ __ JumpIfInstanceTypeIsNotSequentialAscii(r1, r1, &runtime);
+
// Get the two characters forming the sub string.
__ add(r0, r0, Operand(r3));
__ ldrb(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
@@ -5809,7 +5838,6 @@
Label make_two_character_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
- Counters* counters = masm->isolate()->counters();
__ jmp(&return_r0);
// r2: result string length.
@@ -5820,18 +5848,114 @@
__ jmp(&return_r0);
__ bind(&result_longer_than_two);
+ // Deal with different string types: update the index if necessary
+ // and put the underlying string into r5.
+ // r0: original string
+ // r1: instance type
+ // r2: length
+ // r3: from index (untagged)
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+ __ tst(r1, Operand(kIsIndirectStringMask));
+ __ b(eq, &seq_or_external_string);
- // Locate 'from' character of string.
- __ add(r5, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ add(r5, r5, Operand(from, ASR, 1));
+ __ tst(r1, Operand(kSlicedNotConsMask));
+ __ b(ne, &sliced_string);
+ // Cons string. Check whether it is flat, then fetch first part.
+ __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
+ __ CompareRoot(r5, Heap::kEmptyStringRootIndex);
+ __ b(ne, &runtime);
+ __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
+ // Update instance type.
+ __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked);
- // Allocate the result.
- __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
+ __ bind(&sliced_string);
+ // Sliced string. Fetch parent and correct start index by offset.
+ __ ldr(r5, FieldMemOperand(r0, SlicedString::kOffsetOffset));
+ __ add(r3, r3, Operand(r5, ASR, 1));
+ __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
+ // Update instance type.
+ __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked);
- // r0: result string
- // r2: result string length
- // r5: first character of substring to copy
- // r7 (a.k.a. from): from offset (smi)
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the expected register.
+ __ mov(r5, r0);
+
+ __ bind(&underlying_unpacked);
+
+ if (FLAG_string_slices) {
+ Label copy_routine;
+ // r5: underlying subject string
+ // r1: instance type of underlying subject string
+ // r2: length
+ // r3: adjusted start index (untagged)
+ __ cmp(r2, Operand(SlicedString::kMinLength));
+ // Short slice. Copy instead of slicing.
+ __ b(lt, &copy_routine);
+ // Allocate new sliced string. At this point we do not reload the instance
+ // type including the string encoding because we simply rely on the info
+ // provided by the original string. It does not matter if the original
+ // string's encoding is wrong because we always have to recheck encoding of
+ // the newly created string's parent anyways due to externalized strings.
+ Label two_byte_slice, set_slice_header;
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ tst(r1, Operand(kStringEncodingMask));
+ __ b(eq, &two_byte_slice);
+ __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
+ __ jmp(&set_slice_header);
+ __ bind(&two_byte_slice);
+ __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
+ __ bind(&set_slice_header);
+ __ mov(r3, Operand(r3, LSL, 1));
+ __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
+ __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
+ __ jmp(&return_r0);
+
+ __ bind(&copy_routine);
+ }
+
+ // r5: underlying subject string
+ // r1: instance type of underlying subject string
+ // r2: length
+ // r3: adjusted start index (untagged)
+ Label two_byte_sequential, sequential_string, allocate_result;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(r1, Operand(kExternalStringTag));
+ __ b(eq, &sequential_string);
+
+ // Handle external string.
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ tst(r1, Operand(kShortExternalStringTag));
+ __ b(ne, &runtime);
+ __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
+ // r5 already points to the first character of underlying string.
+ __ jmp(&allocate_result);
+
+ __ bind(&sequential_string);
+ // Locate first character of underlying subject string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&allocate_result);
+ // Sequential ASCII string. Allocate the result.
+ STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ __ tst(r1, Operand(kStringEncodingMask));
+ __ b(eq, &two_byte_sequential);
+
+ // Allocate and copy the resulting ASCII string.
+ __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
+
+ // Locate first character of substring to copy.
+ __ add(r5, r5, r3);
// Locate first character of result.
__ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
@@ -5844,30 +5968,16 @@
COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_r0);
- __ bind(&non_ascii_flat);
- // r0: original string
- // r2: result string length
- // r7 (a.k.a. from): from offset (smi)
- // Check for flat two byte string.
+ // Allocate and copy the resulting two-byte string.
+ __ bind(&two_byte_sequential);
+ __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);
- // Locate 'from' character of string.
- __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // As "from" is a smi it is 2 times the value which matches the size of a two
- // byte character.
+ // Locate first character of substring to copy.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ add(r5, r5, Operand(from));
-
- // Allocate the result.
- __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
-
- // r0: result string
- // r2: result string length
- // r5: first character of substring to copy
+ __ add(r5, r5, Operand(r3, LSL, 1));
// Locate first character of result.
__ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- from = no_reg;
-
// r0: result string.
// r1: first character of result.
// r2: result length.
@@ -5875,69 +5985,9 @@
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
- __ jmp(&return_r0);
-
- if (FLAG_string_slices) {
- __ bind(&create_slice);
- // r0: original string
- // r1: instance type
- // r2: length
- // r3: from index (untagged smi)
- // r6 (a.k.a. to): to (smi)
- // r7 (a.k.a. from): from offset (smi)
- Label allocate_slice, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ tst(r1, Operand(kIsIndirectStringMask));
- __ b(eq, &seq_or_external_string);
-
- __ tst(r1, Operand(kSlicedNotConsMask));
- __ b(ne, &sliced_string);
- // Cons string. Check whether it is flat, then fetch first part.
- __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
- __ LoadRoot(r9, Heap::kEmptyStringRootIndex);
- __ cmp(r5, r9);
- __ b(ne, &runtime);
- __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
- __ jmp(&allocate_slice);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ ldr(r5, FieldMemOperand(r0, SlicedString::kOffsetOffset));
- __ add(r7, r7, r5);
- __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
- __ jmp(&allocate_slice);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the correct register.
- __ mov(r5, r0);
-
- __ bind(&allocate_slice);
- // r1: instance type of original string
- // r2: length
- // r5: underlying subject string
- // r7 (a.k.a. from): from offset (smi)
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ tst(r1, Operand(kStringEncodingMask));
- __ b(eq, &two_byte_slice);
- __ AllocateAsciiSlicedString(r0, r2, r3, r4, &runtime);
- __ jmp(&set_slice_header);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(r0, r2, r3, r4, &runtime);
- __ bind(&set_slice_header);
- __ str(r7, FieldMemOperand(r0, SlicedString::kOffsetOffset));
- __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
- }
__ bind(&return_r0);
+ Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
@@ -6094,7 +6144,7 @@
void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime, call_builtin;
+ Label call_runtime, call_builtin;
Builtins::JavaScript builtin_id = Builtins::ADD;
Counters* counters = masm->isolate()->counters();
@@ -6109,7 +6159,7 @@
// Make sure that both arguments are strings if not known in advance.
if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
+ __ JumpIfEitherSmi(r0, r1, &call_runtime);
// Load instance types.
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -6119,7 +6169,7 @@
// If either is not a string, go to runtime.
__ tst(r4, Operand(kIsNotStringMask));
__ tst(r5, Operand(kIsNotStringMask), eq);
- __ b(ne, &string_add_runtime);
+ __ b(ne, &call_runtime);
} else {
// Here at least one of the arguments is definitely a string.
// We convert the one that is not known to be a string.
@@ -6188,7 +6238,7 @@
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
}
__ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
- &string_add_runtime);
+ &call_runtime);
// Get the two characters forming the sub string.
__ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
@@ -6210,7 +6260,7 @@
// halfword store instruction (which assumes that processor is
// in a little endian mode)
__ mov(r6, Operand(2));
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
+ __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
__ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
@@ -6218,14 +6268,14 @@
__ bind(&longer_than_two);
// Check if resulting string will be flat.
- __ cmp(r6, Operand(String::kMinNonFlatLength));
+ __ cmp(r6, Operand(ConsString::kMinLength));
__ b(lt, &string_add_flat_result);
// Handle exceptionally long strings in the runtime system.
STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
ASSERT(IsPowerOf2(String::kMaxLength + 1));
// kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
__ cmp(r6, Operand(String::kMaxLength + 1));
- __ b(hs, &string_add_runtime);
+ __ b(hs, &call_runtime);
// If result is not supposed to be flat, allocate a cons string object.
// If both strings are ASCII the result is an ASCII cons string.
@@ -6243,7 +6293,7 @@
// Allocate an ASCII cons string.
__ bind(&ascii_data);
- __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
+ __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
__ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
@@ -6268,11 +6318,13 @@
__ b(eq, &ascii_data);
// Allocate a two byte cons string.
- __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
+ __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
__ jmp(&allocated);
- // Handle creating a flat result. First check that both strings are
- // sequential and that they have the same encoding.
+ // We cannot encounter sliced strings or cons strings here since:
+ STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
+ // Handle creating a flat result from either external or sequential strings.
+ // Locate the first characters' locations.
// r0: first string
// r1: second string
// r2: length of first string
@@ -6280,6 +6332,7 @@
// r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// r6: sum of lengths.
+ Label first_prepared, second_prepared;
__ bind(&string_add_flat_result);
if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -6287,97 +6340,88 @@
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
}
- // Check that both strings are sequential.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ tst(r4, Operand(kStringRepresentationMask));
- __ tst(r5, Operand(kStringRepresentationMask), eq);
- __ b(ne, &string_add_runtime);
- // Now check if both strings have the same encoding (ASCII/Two-byte).
- // r0: first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: sum of lengths..
- Label non_ascii_string_add_flat_result;
- ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
+
+ // Check whether both strings have the same encoding.
__ eor(r7, r4, Operand(r5));
__ tst(r7, Operand(kStringEncodingMask));
- __ b(ne, &string_add_runtime);
- // And see if it's ASCII or two-byte.
- __ tst(r4, Operand(kStringEncodingMask));
+ __ b(ne, &call_runtime);
+
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(r4, Operand(kStringRepresentationMask));
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ add(r7,
+ r0,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
+ LeaveCC,
+ eq);
+ __ b(eq, &first_prepared);
+ // External string: rule out short external string and load string resource.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ tst(r4, Operand(kShortExternalStringMask));
+ __ b(ne, &call_runtime);
+ __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
+ __ bind(&first_prepared);
+
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(r5, Operand(kStringRepresentationMask));
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ add(r1,
+ r1,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
+ LeaveCC,
+ eq);
+ __ b(eq, &second_prepared);
+ // External string: rule out short external string and load string resource.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ tst(r5, Operand(kShortExternalStringMask));
+ __ b(ne, &call_runtime);
+ __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
+ __ bind(&second_prepared);
+
+ Label non_ascii_string_add_flat_result;
+ // r7: first character of first string
+ // r1: first character of second string
+ // r2: length of first string.
+ // r3: length of second string.
+ // r6: sum of lengths.
+ // Both strings have the same encoding.
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ tst(r5, Operand(kStringEncodingMask));
__ b(eq, &non_ascii_string_add_flat_result);
- // Both strings are sequential ASCII strings. We also know that they are
- // short (since the sum of the lengths is less than kMinNonFlatLength).
- // r6: length of resulting flat string
- __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
- // Locate first character of result.
- __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // r0: first character of first string.
- // r1: second string.
+ __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
+ __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // r0: result string.
+ // r7: first character of first string.
+ // r1: first character of second string.
// r2: length of first string.
// r3: length of second string.
// r6: first character of result.
- // r7: result string.
- StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
-
- // Load second argument and locate first character.
- __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // r1: first character of second string.
- // r3: length of second string.
+ StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true);
// r6: next character of result.
- // r7: result string.
StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
- __ mov(r0, Operand(r7));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
__ bind(&non_ascii_string_add_flat_result);
- // Both strings are sequential two byte strings.
- // r0: first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r6: sum of length of strings.
- __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
- // r0: first string.
- // r1: second string.
- // r2: length of first string.
- // r3: length of second string.
- // r7: result string.
-
- // Locate first character of result.
- __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // r0: first character of first string.
- // r1: second string.
+ __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime);
+ __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // r0: result string.
+ // r7: first character of first string.
+ // r1: first character of second string.
// r2: length of first string.
// r3: length of second string.
// r6: first character of result.
- // r7: result string.
- StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
-
- // Locate first character of second argument.
- __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // r1: first character of second string.
- // r3: length of second string.
- // r6: next character of result (after copy of first string).
- // r7: result string.
+ StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false);
+ // r6: next character of result.
StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
-
- __ mov(r0, Operand(r7));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
// Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
+ __ bind(&call_runtime);
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
@@ -6629,26 +6673,47 @@
}
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- __ Push(r1, r0);
- __ push(lr);
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ Label miss;
+ __ and_(r2, r1, Operand(r0));
+ __ JumpIfSmi(r2, &miss);
+ __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ cmp(r2, Operand(known_map_));
+ __ b(ne, &miss);
+ __ cmp(r3, Operand(known_map_));
+ __ b(ne, &miss);
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+ __ sub(r0, r0, Operand(r1));
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
+ __ push(lr);
+ __ Push(r1, r0);
__ mov(ip, Operand(Smi::FromInt(op_)));
__ push(ip);
__ CallExternalReference(miss, 3);
+ // Compute the entry point of the rewritten stub.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore registers.
+ __ pop(lr);
+ __ pop(r0);
+ __ pop(r1);
}
- // Compute the entry point of the rewritten stub.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore registers.
- __ pop(lr);
- __ pop(r0);
- __ pop(r1);
+
__ Jump(r2);
}
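A note on the sub at the end of GenerateKnownObjects above: once both operands are heap objects carrying the same known_map_, only identity can make them equal, so a pointer difference is a complete answer (zero means "same object"). A minimal stand-in sketch, not V8 code:

    #include <cstdint>
    #include <cstdio>

    intptr_t CompareKnownObjects(const void* lhs, const void* rhs) {
      return reinterpret_cast<intptr_t>(lhs) - reinterpret_cast<intptr_t>(rhs);
    }

    int main() {
      int a = 0, b = 0;
      printf("%d\n", CompareKnownObjects(&a, &a) == 0);  // 1: identical object
      printf("%d\n", CompareKnownObjects(&a, &b) == 0);  // 0: distinct objects
      return 0;
    }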
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 49b8db7..e767001 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -573,13 +573,13 @@
// The naming of these accessors corresponds to figure A3-1.
//
// Two kinds of accessors are declared:
- // - <Name>Field() will return the raw field, ie the field's bits at their
+ // - <Name>Field() will return the raw field, i.e. the field's bits at their
// original place in the instruction encoding.
- // eg. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
- // ConditionField(instr) will return 0xC0000000.
+ // e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+ // 0xC0810002 ConditionField(instr) will return 0xC0000000.
// - <Name>Value() will return the field value, shifted back to bit 0.
- // eg. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
- // ConditionField(instr) will return 0xC.
+ // e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+ // 0xC0810002 ConditionField(instr) will return 0xC.
// Generally applicable fields
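The distinction the comment above draws is easy to mirror in portable code. A sketch using the comment's own example ('addgt r0, r1, r2' encodes as 0xC0810002, with the condition in bits 31..28):

    #include <cstdint>
    #include <cstdio>

    uint32_t ConditionField(uint32_t instr) { return instr & 0xF0000000u; }
    uint32_t ConditionValue(uint32_t instr) { return instr >> 28; }

    int main() {
      uint32_t addgt = 0xC0810002u;
      printf("0x%08X\n", ConditionField(addgt));  // 0xC0000000
      printf("0x%X\n", ConditionValue(addgt));    // 0xC
      return 0;
    }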
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index 51cfeb6..7b08ed8 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -41,7 +41,7 @@
namespace v8 {
namespace internal {
-void CPU::Setup() {
+void CPU::SetUp() {
CpuFeatures::Probe();
}
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 4b54b6d..76d8954 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -211,12 +211,13 @@
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
+ iterator.Skip(1); // Drop JS frame count.
ASSERT(count == 1);
USE(count);
opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
- ASSERT(Translation::FRAME == opcode);
+ ASSERT(Translation::JS_FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
@@ -252,9 +253,7 @@
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
-#ifdef DEBUG
- output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
-#endif
+ output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -319,7 +318,7 @@
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
- // Setup the frame pointer and the context pointer.
+ // Set up the frame pointer and the context pointer.
output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
@@ -342,15 +341,115 @@
}
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
+ int frame_index) {
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
+ }
+
+ unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
+
+ // An arguments adaptor frame cannot be topmost or bottommost.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous
+ // frame's top and this frame's size.
+ uint32_t top_address;
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = height;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ input_offset -= (parameter_count * kPointerSize);
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // A marker value is used in place of the context.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t context = reinterpret_cast<intptr_t>(
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ output_frame->SetFrameSlot(output_offset, context);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
+ top_address + output_offset, output_offset, context);
+ }
+
+ // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Number of incoming arguments.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
+ top_address + output_offset, output_offset, value, height - 1);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Builtins* builtins = isolate_->builtins();
+ Code* adaptor_trampoline =
+ builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
+ uint32_t pc = reinterpret_cast<uint32_t>(
+ adaptor_trampoline->instruction_start() +
+ isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
+ output_frame->SetPc(pc);
+}
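For orientation, here is the slot layout the writes above produce, sketched as a C++ struct (lowest address, i.e. the frame top, first; the height-sized block of translated arguments sits just above callers_pc). Field names are illustrative, not V8 identifiers:

    #include <cstdint>

    struct AdaptorFrameFixedPart {
      uint32_t argc;              // Smi(height - 1): incoming argument count
      uint32_t function;          // the JSFunction being adapted
      uint32_t context_sentinel;  // Smi(StackFrame::ARGUMENTS_ADAPTOR) marker
      uint32_t callers_fp;        // saved fp; this frame's fp points here
      uint32_t callers_pc;        // return address from the previous frame
    };

    // Five fixed words, matching ArgumentsAdaptorFrameConstants::kFrameSize
    // on 32-bit ARM: kFixedFrameSize (4 words) plus the extra length slot.
    static_assert(sizeof(AdaptorFrameFixedPart) == 5 * sizeof(uint32_t),
                  "five fixed words");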
+
+
// This code is very similar to ia32 code, but relies on register names (fp, sp)
// and how the frame is laid out.
-void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
- int frame_index) {
+void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
+ int frame_index) {
// Read the ast node id, function, and frame height for this output frame.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- USE(opcode);
- ASSERT(Translation::FRAME == opcode);
int node_id = iterator->Next();
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
@@ -370,9 +469,7 @@
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
-#ifdef DEBUG
- output_frame->SetKind(Code::FUNCTION);
-#endif
+ output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
@@ -723,10 +820,7 @@
__ pop(ip); // remove sp
__ pop(ip); // remove lr
- // Set up the roots register.
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate);
- __ mov(r10, Operand(roots_array_start));
+ __ InitializeRootRegister();
__ pop(ip); // remove pc
__ pop(r7); // get continuation, leave pc on stack
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 603b3cf..96a7d3c 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -473,7 +473,7 @@
return 1;
}
case 'i': { // 'i: immediate value from adjacent bits.
- // Expects tokens in the form imm%02d@%02d, ie. imm05@07, imm10@16
+ // Expects tokens in the form imm%02d@%02d, i.e. imm05@07, imm10@16
int width = (format[3] - '0') * 10 + (format[4] - '0');
int lsb = (format[6] - '0') * 10 + (format[7] - '0');
@@ -662,6 +662,15 @@
}
+// The disassembler may end up decoding data inlined in the code. We do not want
+// it to crash if the data does not resemble any known instruction.
+#define VERIFY(condition) \
+if (!(condition)) { \
+ Unknown(instr); \
+ return; \
+}
+
+
// For currently unimplemented decodings the disassembler calls Unknown(instr)
// which will just print "unknown" for the instruction bits.
void Decoder::Unknown(Instruction* instr) {
@@ -947,13 +956,13 @@
void Decoder::DecodeType3(Instruction* instr) {
switch (instr->PUField()) {
case da_x: {
- ASSERT(!instr->HasW());
+ VERIFY(!instr->HasW());
Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
break;
}
case ia_x: {
if (instr->HasW()) {
- ASSERT(instr->Bits(5, 4) == 0x1);
+ VERIFY(instr->Bits(5, 4) == 0x1);
if (instr->Bit(22) == 0x1) {
Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
} else {
@@ -1074,8 +1083,8 @@
// vmsr
// Dd = vsqrt(Dm)
void Decoder::DecodeTypeVFP(Instruction* instr) {
- ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
- ASSERT(instr->Bits(11, 9) == 0x5);
+ VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
+ VERIFY(instr->Bits(11, 9) == 0x5);
if (instr->Bit(4) == 0) {
if (instr->Opc1Value() == 0x7) {
@@ -1166,7 +1175,7 @@
void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
Instruction* instr) {
- ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
+ VERIFY((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0));
bool to_arm_register = (instr->VLValue() == 0x1);
@@ -1180,8 +1189,8 @@
void Decoder::DecodeVCMP(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+ VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ VERIFY(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
(instr->Opc3Value() & 0x1));
// Comparison.
@@ -1203,8 +1212,8 @@
void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
+ VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ VERIFY((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
bool double_to_single = (instr->SzValue() == 1);
@@ -1217,8 +1226,8 @@
void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
- ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
+ VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ VERIFY(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
(((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
bool to_integer = (instr->Bit(18) == 1);
@@ -1265,7 +1274,7 @@
// Ddst = MEM(Rbase + 4*offset).
// MEM(Rbase + 4*offset) = Dsrc.
void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
- ASSERT(instr->TypeValue() == 6);
+ VERIFY(instr->TypeValue() == 6);
if (instr->CoprocessorValue() == 0xA) {
switch (instr->OpcodeValue()) {
@@ -1347,6 +1356,7 @@
}
}
+#undef VERIFY
bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 1844149..a10acd0 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -136,6 +136,9 @@
class StandardFrameConstants : public AllStatic {
public:
+ // The fixed part of the frame consists of the return address, caller fp,
+ // context and function.
+ static const int kFixedFrameSize = 4 * kPointerSize;
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
@@ -161,6 +164,8 @@
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index fdd3266..6654263 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -115,7 +115,7 @@
// function.
//
// The live registers are:
-// o r1: the JS function object being called (ie, ourselves)
+// o r1: the JS function object being called (i.e., ourselves)
// o cp: our context
// o fp: our caller's frame pointer
// o sp: stack pointer
@@ -1009,7 +1009,7 @@
__ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
__ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
- // Setup the four remaining stack slots.
+ // Set up the four remaining stack slots.
__ push(r0); // Map.
__ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ mov(r0, Operand(Smi::FromInt(0)));
@@ -2938,8 +2938,12 @@
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- MathPowStub stub;
- __ CallStub(&stub);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ MathPowStub stub(MathPowStub::ON_STACK);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kMath_pow, 2);
+ }
context()->Plug(r0);
}
@@ -3614,7 +3618,7 @@
// One-character separator case
__ bind(&one_char_separator);
- // Replace separator with its ascii character value.
+ // Replace separator with its ASCII character value.
__ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
@@ -3625,7 +3629,7 @@
// result_pos: the position to which we are currently copying characters.
// element: Current array element.
// elements_end: Array end.
- // separator: Single separator ascii char (in lower byte).
+ // separator: Single separator ASCII char (in lower byte).
// Copy the separator character to the result.
__ strb(separator, MemOperand(result_pos, 1, PostIndex));
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index f8e4bbb..dfd4d2e 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1031,15 +1031,34 @@
__ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
__ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
__ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
- __ And(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
+ int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+ __ And(r3, r3, Operand(mask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
ExternalReference cache_keys =
ExternalReference::keyed_lookup_cache_keys(isolate);
+
__ mov(r4, Operand(cache_keys));
__ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
- __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); // Move r4 to symbol.
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+ // Load map and move r4 to next entry.
+ __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
+ __ cmp(r2, r5);
+ __ b(ne, &try_next_entry);
+ __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load symbol.
+ __ cmp(r0, r5);
+ __ b(eq, &hit_on_nth_entry[i]);
+ __ bind(&try_next_entry);
+ }
+
+ // Last entry: Load map and move r4 to symbol.
+ __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
__ cmp(r2, r5);
__ b(ne, &slow);
__ ldr(r5, MemOperand(r4));
@@ -1053,13 +1072,25 @@
// r3 : lookup cache index
ExternalReference cache_field_offsets =
ExternalReference::keyed_lookup_cache_field_offsets(isolate);
- __ mov(r4, Operand(cache_field_offsets));
- __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
- __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
- __ sub(r5, r5, r6, SetCC);
- __ b(ge, &property_array_property);
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ bind(&hit_on_nth_entry[i]);
+ __ mov(r4, Operand(cache_field_offsets));
+ if (i != 0) {
+ __ add(r3, r3, Operand(i));
+ }
+ __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
+ __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
+ __ sub(r5, r5, r6, SetCC);
+ __ b(ge, &property_array_property);
+ if (i != 0) {
+ __ jmp(&load_in_object_property);
+ }
+ }
// Load in-object property.
+ __ bind(&load_in_object_property);
__ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ add(r6, r6, r5); // Index from start of object.
__ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag.
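Restated in plain C++, the unrolled probe above does the following (illustrative types; a bucket size of two is assumed here for kEntriesPerBucket): each hash bucket holds (map, symbol) key pairs that are tried in order, and only a miss on every entry falls through to the slow path.

    static const int kEntriesPerBucket = 2;  // assumed bucket size

    struct Key {
      const void* map;
      const void* symbol;
    };

    // Returns the global entry index on a hit, or -1 to take the slow path.
    int ProbeBucket(const Key* keys, int bucket,
                    const void* map, const void* symbol) {
      for (int i = 0; i < kEntriesPerBucket; i++) {
        int entry = bucket * kEntriesPerBucket + i;
        if (keys[entry].map == map && keys[entry].symbol == symbol) {
          return entry;
        }
      }
      return -1;
    }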
@@ -1469,11 +1500,10 @@
// -- lr : return address
// -----------------------------------
//
- // This accepts as a receiver anything JSObject::SetElementsLength accepts
- // (currently anything except for external and pixel arrays which means
- // anything with elements of FixedArray type.), but currently is restricted
- // to JSArray.
- // Value must be a number, but only smis are accepted as the most common case.
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
Label miss;
@@ -1495,6 +1525,13 @@
__ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
__ b(ne, &miss);
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
+ __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
+ __ b(eq, &miss);
+
// Check that value is a smi.
__ JumpIfNotSmi(value, &miss);
@@ -1587,6 +1624,9 @@
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
rewritten = stub.GetCode();
}
set_target(*rewritten);
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 2341774..846680f 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -1005,14 +1005,16 @@
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber);
+ ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor());
int value_count = hydrogen_env->length();
LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+ hydrogen_env->is_arguments_adaptor(),
ast_id,
hydrogen_env->parameter_count(),
argument_count_,
value_count,
outer);
+ int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1021,13 +1023,17 @@
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument((*argument_index_accumulator)++);
+ op = new LArgument(argument_index++);
} else {
op = UseAny(value);
}
result->AddValue(op, value->representation());
}
+ if (!hydrogen_env->is_arguments_adaptor()) {
+ *argument_index_accumulator = argument_index;
+ }
+
return result;
}
@@ -1038,14 +1044,23 @@
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* v = instr->value();
- if (v->EmitAtUses()) {
- HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+ HValue* value = instr->value();
+ if (value->EmitAtUses()) {
+ HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
}
- return AssignEnvironment(new LBranch(UseRegister(v)));
+
+ LBranch* result = new LBranch(UseRegister(value));
+ // Tagged values that are not known smis or booleans require a
+ // deoptimization environment.
+ Representation rep = value->representation();
+ HType type = value->type();
+ if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+ return AssignEnvironment(result);
+ }
+ return result;
}
@@ -1153,6 +1168,11 @@
LOperand* input = UseFixedDouble(instr->value(), d2);
LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
+ } else if (op == kMathPowHalf) {
+ LOperand* input = UseFixedDouble(instr->value(), d2);
+ LOperand* temp = FixedTemp(d3);
+ LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
+ return DefineFixedDouble(result, d2);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
@@ -1166,8 +1186,6 @@
return DefineAsRegister(result);
case kMathRound:
return AssignEnvironment(DefineAsRegister(result));
- case kMathPowHalf:
- return DefineAsRegister(result);
default:
UNREACHABLE();
return NULL;
@@ -1341,7 +1359,12 @@
} else {
left = UseRegisterAtStart(instr->LeastConstantOperand());
}
- return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp)));
+ LMulI* mul = new LMulI(left, right, temp);
+ if (instr->CheckFlag(HValue::kCanOverflow) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ AssignEnvironment(mul);
+ }
+ return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
@@ -1402,7 +1425,7 @@
LOperand* left = UseFixedDouble(instr->left(), d1);
LOperand* right = exponent_type.IsDouble() ?
UseFixedDouble(instr->right(), d2) :
- UseFixed(instr->right(), r0);
+ UseFixed(instr->right(), r2);
LPower* result = new LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, d3),
instr,
@@ -1410,6 +1433,15 @@
}
+LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->global_object()->representation().IsTagged());
+ LOperand* global_object = UseFixed(instr->global_object(), r0);
+ LRandom* result = new LRandom(global_object);
+ return MarkAsCall(DefineFixedDouble(result, d7), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
@@ -1526,7 +1558,7 @@
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ return new LClassOfTestAndBranch(UseRegister(instr->value()),
TempRegister());
}
@@ -1553,7 +1585,7 @@
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new LValueOf(object, TempRegister());
- return AssignEnvironment(DefineAsRegister(result));
+ return DefineAsRegister(result);
}
@@ -1776,11 +1808,12 @@
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LOperand* temp = TempRegister();
- LOperand* value = UseTempRegister(instr->value());
- LInstruction* result = new LStoreGlobalCell(value, temp);
- if (instr->RequiresHoleCheck()) result = AssignEnvironment(result);
- return result;
+ LOperand* value = UseRegister(instr->value());
+ // Use a temp to check the value in the cell in the case where we perform
+ // a hole check.
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(new LStoreGlobalCell(value, TempRegister()))
+ : new LStoreGlobalCell(value, NULL);
}
@@ -1795,7 +1828,8 @@
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadContextSlot(context));
+ LInstruction* result = DefineAsRegister(new LLoadContextSlot(context));
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1809,7 +1843,8 @@
context = UseRegister(instr->context());
value = UseRegister(instr->value());
}
- return new LStoreContextSlot(context, value);
+ LInstruction* result = new LStoreContextSlot(context, value);
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1868,7 +1903,8 @@
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
- return AssignEnvironment(DefineAsRegister(result));
+ if (instr->RequiresHoleCheck()) AssignEnvironment(result);
+ return DefineAsRegister(result);
}
@@ -1887,12 +1923,11 @@
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Representation representation(instr->representation());
ASSERT(
- (representation.IsInteger32() &&
+ (instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (representation.IsDouble() &&
+ (instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
@@ -1932,8 +1967,7 @@
LOperand* key = needs_write_barrier
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
-
- return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+ return new LStoreKeyedFastElement(obj, key, val);
}
@@ -1953,13 +1987,12 @@
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
- Representation representation(instr->value()->representation());
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
- (representation.IsInteger32() &&
+ (instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (representation.IsDouble() &&
+ (instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
@@ -2215,6 +2248,7 @@
HEnvironment* outer = current_block_->last_environment();
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->arguments_count(),
instr->function(),
undefined,
instr->call_kind());
@@ -2225,7 +2259,8 @@
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- HEnvironment* outer = current_block_->last_environment()->outer();
+ HEnvironment* outer = current_block_->last_environment()->
+ DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
return NULL;
}
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 6051ad9..d3aff76 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -141,6 +141,7 @@
V(Parameter) \
V(Power) \
V(PushArgument) \
+ V(Random) \
V(RegExpLiteral) \
V(Return) \
V(ShiftI) \
@@ -1026,6 +1027,17 @@
};
+class LRandom: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LRandom(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Random, "random")
+ DECLARE_HYDROGEN_ACCESSOR(Random)
+};
+
+
class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1242,6 +1254,8 @@
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+
+ LOperand* value() { return inputs_[0]; }
};
@@ -1793,6 +1807,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return InputAt(0); }
+
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 0a4a691..76c8443 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -262,7 +262,7 @@
bool LCodeGen::GenerateDeoptJumpTable() {
// Check that the jump table is accessible from everywhere in the function
- // code, ie that offsets to the table can be encoded in the 24bit signed
+ // code, i.e. that offsets to the table can be encoded in the 24-bit signed
// immediate of a branch instruction.
// To simplify we consider the code size from the first instruction to the
// end of the jump table. We also don't consider the pc load delta.
@@ -321,7 +321,22 @@
if (op->IsRegister()) {
return ToRegister(op->index());
} else if (op->IsConstantOperand()) {
- __ mov(scratch, ToOperand(op));
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(literal->IsNumber());
+ __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
+ } else if (r.IsDouble()) {
+ Abort("EmitLoadRegister: Unsupported double immediate.");
+ } else {
+ ASSERT(r.IsTagged());
+ if (literal->IsSmi()) {
+ __ mov(scratch, Operand(literal));
+ } else {
+ __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
+ }
+ }
return scratch;
} else if (op->IsStackSlot() || op->IsArgument()) {
__ ldr(scratch, ToMemOperand(op));
@@ -370,6 +385,18 @@
}
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ Handle<Object> literal = chunk_->LookupLiteral(op);
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
+ return literal;
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsInteger32();
+}
+
+
int LCodeGen::ToInteger32(LConstantOperand* op) const {
Handle<Object> value = chunk_->LookupLiteral(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
@@ -452,7 +479,11 @@
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->BeginFrame(environment->ast_id(), closure_id, height);
+ if (environment->is_arguments_adaptor()) {
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ } else {
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ }
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
@@ -585,10 +616,14 @@
// |>------------ translation_size ------------<|
int frame_count = 0;
+ int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
+ if (!e->is_arguments_adaptor()) {
+ ++jsframe_count;
+ }
}
- Translation translation(&translations_, frame_count);
+ Translation translation(&translations_, frame_count, jsframe_count);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
@@ -1337,8 +1372,13 @@
void LCodeGen::DoConstantT(LConstantT* instr) {
- ASSERT(instr->result()->IsRegister());
- __ mov(ToRegister(instr->result()), Operand(instr->value()));
+ Handle<Object> value = instr->value();
+ if (value->IsSmi()) {
+ __ mov(ToRegister(instr->result()), Operand(value));
+ } else {
+ __ LoadHeapObject(ToRegister(instr->result()),
+ Handle<HeapObject>::cast(value));
+ }
}
@@ -1962,7 +2002,7 @@
// Branches to a label or falls through with the answer in flags. Trashes
-// the temp registers, but not the input. Only input and temp2 may alias.
+// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
Handle<String> class_name,
@@ -1970,7 +2010,9 @@
Register temp,
Register temp2) {
ASSERT(!input.is(temp));
- ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
+ ASSERT(!input.is(temp2));
+ ASSERT(!temp.is(temp2));
+
__ JumpIfSmi(input, is_false);
if (class_name->IsEqualTo(CStrVector("Function"))) {
@@ -2167,7 +2209,7 @@
// offset to the location of the map check.
Register temp = ToRegister(instr->TempAt(0));
ASSERT(temp.is(r4));
- __ mov(InstanceofStub::right(), Operand(instr->function()));
+ __ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 4;
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
@@ -2245,27 +2287,26 @@
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->InputAt(0));
- Register scratch = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
+ Register value = ToRegister(instr->value());
+ Register cell = scratch0();
// Load the cell.
- __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
+ __ mov(cell, Operand(instr->hydrogen()->cell()));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted.
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch2,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch2, ip);
+ // We use a temp to check the payload (CompareRoot might clobber ip).
+ Register payload = ToRegister(instr->TempAt(0));
+ __ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
+ __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(eq, instr->environment());
}
// Store the value.
- __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ str(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
// Cells are always rescanned, so no write barrier here.
}
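The hole check above guards against stores to deleted globals. A hedged sketch of the semantics with stand-in values (not V8's actual hole sentinel):

    #include <cstdio>

    struct Cell { const void* value; };
    static const char kTheHole = 0;  // stand-in for the hole sentinel

    // Returns false when the optimized store must deoptimize because the
    // property behind the cell has been deleted.
    bool TryStore(Cell* cell, const void* new_value, bool requires_hole_check) {
      if (requires_hole_check && cell->value == &kTheHole) return false;
      cell->value = new_value;
      return true;
    }

    int main() {
      int v = 0;
      Cell live = { &v };
      Cell dead = { &kTheHole };
      printf("%d %d\n", TryStore(&live, &v, true), TryStore(&dead, &v, true));
      return 0;  // prints: 1 0
    }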
@@ -2286,13 +2327,37 @@
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ ldr(result, ContextOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(result, ip);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(eq, instr->environment());
+ } else {
+ __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
+ }
+ }
}
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
+ Register scratch = scratch0();
MemOperand target = ContextOperand(context, instr->slot_index());
+
+ Label skip_assignment;
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ ldr(scratch, target);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch, ip);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(eq, instr->environment());
+ } else {
+ __ b(ne, &skip_assignment);
+ }
+ }
+
__ str(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
@@ -2301,12 +2366,14 @@
__ RecordWriteContextSlot(context,
target.offset(),
value,
- scratch0(),
+ scratch,
kLRHasBeenSaved,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
}
+
+ __ bind(&skip_assignment);
}
@@ -2328,7 +2395,7 @@
Handle<String> name) {
LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsProperty() &&
+ ASSERT(lookup.IsFound() &&
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
if (lookup.type() == FIELD) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
@@ -2344,7 +2411,7 @@
}
} else {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ __ LoadHeapObject(result, function);
}
}
@@ -2769,7 +2836,7 @@
this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is r0, as expected
// by InvokeFunction.
- v8::internal::ParameterCount actual(receiver);
+ ParameterCount actual(receiver);
__ InvokeFunction(function, actual, CALL_FUNCTION,
safepoint_generator, CALL_AS_METHOD);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2789,7 +2856,7 @@
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- LoadHeapObject(result, instr->hydrogen()->closure());
+ __ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -2824,31 +2891,41 @@
int arity,
LInstruction* instr,
CallKind call_kind) {
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- }
-
- // Set r0 to arguments count if adaption is not needed. Assumes that r0
- // is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ mov(r0, Operand(arity));
- }
+ bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
+ function->shared()->formal_parameter_count() == arity;
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- // Invoke function.
- __ SetCallKind(r5, call_kind);
- __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ Call(ip);
+ if (can_invoke_directly) {
+ __ LoadHeapObject(r1, function);
+ // Change context if needed.
+ bool change_context =
+ (info()->closure()->context() != function->context()) ||
+ scope()->contains_with() ||
+ (scope()->num_heap_slots() > 0);
+ if (change_context) {
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ }
- // Setup deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ // Set r0 to arguments count if adaption is not needed. Assumes that r0
+ // is available to write to at this point.
+ if (!function->NeedsArgumentsAdaption()) {
+ __ mov(r0, Operand(arity));
+ }
+
+ // Invoke function.
+ __ SetCallKind(r5, call_kind);
+ __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ Call(ip);
+
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ } else {
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(arity);
+ __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+ }
// Restore context.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2857,7 +2934,6 @@
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
- __ mov(r1, Operand(instr->function()));
CallKnownFunction(instr->function(),
instr->arity(),
instr,
@@ -3086,68 +3162,77 @@
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
+
+ // Note that according to ECMA-262 15.8.2.13:
+ // Math.pow(-Infinity, 0.5) == Infinity
+ // Math.sqrt(-Infinity) == NaN
+ Label done;
+ __ vmov(temp, -V8_INFINITY);
+ __ VFPCompareAndSetFlags(input, temp);
+ __ vneg(result, temp, eq);
+ __ b(&done, eq);
+
// Add +0 to convert -0 to +0.
__ vadd(result, input, kDoubleRegZero);
__ vsqrt(result, result);
+ __ bind(&done);
}
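The vmov/vneg dance above exists purely for the ECMA-262 15.8.2.13 corner case. In portable terms (a sketch of the idea, not the stub's actual code path):

    #include <cmath>
    #include <cstdio>

    // Math.pow(-Infinity, 0.5) is +Infinity, but sqrt(-Infinity) is NaN, so
    // the sqrt fast path needs an explicit check first. Adding +0 beforehand
    // maps -0 to +0, since sqrt(-0) would yield -0.
    double PowHalf(double x) {
      if (x == -INFINITY) return INFINITY;
      return std::sqrt(x + 0.0);
    }

    int main() {
      printf("%f %f %f\n", PowHalf(-INFINITY), PowHalf(4.0), PowHalf(-0.0));
      return 0;  // prints: inf 2.000000 0.000000
    }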
void LCodeGen::DoPower(LPower* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- Register scratch = scratch0();
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
- if (exponent_type.IsDouble()) {
- // Prepare arguments and call C function.
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(ToDoubleRegister(left),
- ToDoubleRegister(right));
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 0, 2);
- } else if (exponent_type.IsInteger32()) {
- ASSERT(ToRegister(right).is(r0));
- // Prepare arguments and call C function.
- __ PrepareCallCFunction(1, 1, scratch);
- __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
- __ CallCFunction(
- ExternalReference::power_double_int_function(isolate()), 1, 1);
- } else {
- ASSERT(exponent_type.IsTagged());
- ASSERT(instr->hydrogen()->left()->representation().IsDouble());
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+ ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+ ToDoubleRegister(instr->InputAt(1)).is(d2));
+ ASSERT(!instr->InputAt(1)->IsRegister() ||
+ ToRegister(instr->InputAt(1)).is(r2));
+ ASSERT(ToDoubleRegister(instr->InputAt(0)).is(d1));
+ ASSERT(ToDoubleRegister(instr->result()).is(d3));
- Register right_reg = ToRegister(right);
-
- // Check for smi on the right hand side.
- Label non_smi, call;
- __ JumpIfNotSmi(right_reg, &non_smi);
-
- // Untag smi and convert it to a double.
- __ SmiUntag(right_reg);
- SwVfpRegister single_scratch = double_scratch0().low();
- __ vmov(single_scratch, right_reg);
- __ vcvt_f64_s32(result_reg, single_scratch);
- __ jmp(&call);
-
- // Heap number map check.
- __ bind(&non_smi);
- __ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
+ if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(r2, &no_deopt);
+ __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
+ __ cmp(r7, Operand(ip));
DeoptimizeIf(ne, instr->environment());
- int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag;
- __ add(scratch, right_reg, Operand(value_offset));
- __ vldr(result_reg, scratch, 0);
-
- // Prepare arguments and call C function.
- __ bind(&call);
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 0, 2);
+ __ bind(&no_deopt);
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ MathPowStub stub(MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(MathPowStub::DOUBLE);
+ __ CallStub(&stub);
}
- // Store the result in the result register.
- __ GetCFunctionDoubleResult(result_reg);
+}
+
+
+void LCodeGen::DoRandom(LRandom* instr) {
+ // Having marked this instruction as a call, we can use any
+ // registers.
+ ASSERT(ToDoubleRegister(instr->result()).is(d7));
+ ASSERT(ToRegister(instr->InputAt(0)).is(r0));
+
+ __ PrepareCallCFunction(1, scratch0());
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+
+ // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+ // Create this constant using mov/orr to avoid PC relative load.
+ __ mov(r1, Operand(0x41000000));
+ __ orr(r1, r1, Operand(0x300000));
+ // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
+ __ vmov(d7, r0, r1);
+ // Move 0x4130000000000000 to VFP.
+ __ mov(r0, Operand(0, RelocInfo::NONE));
+ __ vmov(d8, r0, r1);
+ // Subtract and store the result in the heap number.
+ __ vsub(d7, d7, d8);
}
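The constant juggling above is the classic exponent-splicing trick, shown here in portable C++ (a sketch of the idea, not the generated code): with the high word fixed at 0x41300000 — the double 2^20 — the 32 random bits land in the low mantissa, giving 2^20 + r * 2^-32, and subtracting 2^20 leaves a uniform double in [0, 1).

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    double RandomBitsToDouble(uint32_t random_bits) {
      uint64_t bits = (static_cast<uint64_t>(0x41300000u) << 32) | random_bits;
      double d;
      std::memcpy(&d, &bits, sizeof(d));  // reinterpret the assembled pattern
      return d - 1048576.0;               // subtract 2^20
    }

    int main() {
      printf("%.10f\n", RandomBitsToDouble(0));            // 0.0000000000
      printf("%.10f\n", RandomBitsToDouble(0xFFFFFFFFu));  // 0.9999999998
      return 0;
    }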
@@ -3283,7 +3368,6 @@
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
- __ mov(r1, Operand(instr->target()));
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -3377,13 +3461,6 @@
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Register scratch = scratch0();
- // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
- // conversion, so it deopts in that case.
- if (instr->hydrogen()->ValueNeedsSmiCheck()) {
- __ tst(value, Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment());
- }
-
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3842,6 +3919,7 @@
void LCodeGen::EmitNumberUntagD(Register input_reg,
DoubleRegister result_reg,
bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
LEnvironment* env) {
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
@@ -3877,6 +3955,14 @@
// Heap number to double register conversion.
__ sub(ip, input_reg, Operand(kHeapObjectTag));
__ vldr(result_reg, ip, HeapNumber::kValueOffset);
+ if (deoptimize_on_minus_zero) {
+ __ vmov(ip, result_reg.low());
+ __ cmp(ip, Operand(0));
+ __ b(ne, &done);
+ __ vmov(ip, result_reg.high());
+ __ cmp(ip, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(eq, env);
+ }
__ jmp(&done);
// Smi to double register conversion
@@ -4010,6 +4096,7 @@
EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
+ instr->hydrogen()->deoptimize_on_minus_zero(),
instr->environment());
}
@@ -4107,21 +4194,42 @@
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- ASSERT(instr->InputAt(0)->IsRegister());
- Register reg = ToRegister(instr->InputAt(0));
- __ cmp(reg, Operand(instr->hydrogen()->target()));
+ Register reg = ToRegister(instr->value());
+ Handle<JSFunction> target = instr->hydrogen()->target();
+ if (isolate()->heap()->InNewSpace(*target)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(target);
+ __ mov(ip, Operand(Handle<Object>(cell)));
+ __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+ __ cmp(reg, ip);
+ } else {
+ __ cmp(reg, Operand(target));
+ }
DeoptimizeIf(ne, instr->environment());
}
+void LCodeGen::DoCheckMapCommon(Register reg,
+ Register scratch,
+ Handle<Map> map,
+ CompareMapMode mode,
+ LEnvironment* env) {
+ Label success;
+ __ CompareMap(reg, scratch, map, &success, mode);
+ DeoptimizeIf(ne, env);
+ __ bind(&success);
+}
+
+
void LCodeGen::DoCheckMap(LCheckMap* instr) {
Register scratch = scratch0();
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(instr->hydrogen()->map()));
- DeoptimizeIf(ne, instr->environment());
+ Handle<Map> map = instr->hydrogen()->map();
+ DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(),
+ instr->environment());
}
@@ -4178,19 +4286,6 @@
}
-void LCodeGen::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- if (heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(object);
- __ mov(result, Operand(cell));
- __ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
- } else {
- __ mov(result, Operand(object));
- }
-}
-
-
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register temp1 = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));
@@ -4199,36 +4294,53 @@
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
- LoadHeapObject(temp1, current_prototype);
+ __ LoadHeapObject(temp1, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
- __ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
- __ cmp(temp2, Operand(Handle<Map>(current_prototype->map())));
- DeoptimizeIf(ne, instr->environment());
+ DoCheckMapCommon(temp1, temp2,
+ Handle<Map>(current_prototype->map()),
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
- LoadHeapObject(temp1, current_prototype);
+ __ LoadHeapObject(temp1, current_prototype);
}
// Check the holder map.
- __ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
- __ cmp(temp2, Operand(Handle<Map>(current_prototype->map())));
+ DoCheckMapCommon(temp1, temp2,
+ Handle<Map>(current_prototype->map()),
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
DeoptimizeIf(ne, instr->environment());
}
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ Heap* heap = isolate()->heap();
+ ElementsKind boilerplate_elements_kind =
+ instr->hydrogen()->boilerplate_elements_kind();
+
+ // Deopt if the array literal boilerplate ElementsKind differs from the
+ // expected one. The check isn't necessary if the boilerplate has
+ // already been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
+ // Load map into r2.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ // Load the map's "bit field 2".
+ __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ __ cmp(r2, Operand(boilerplate_elements_kind));
+ DeoptimizeIf(ne, instr->environment());
+ }
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r1, Operand(constant_elements));
+ // Boilerplate already exists; constant elements are never accessed.
+ // Pass an empty fixed array.
+ __ mov(r1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
__ Push(r3, r2, r1);
// Pick the right runtime function or stub to call.
@@ -4245,9 +4357,9 @@
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
- constant_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4286,10 +4398,10 @@
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ add(r2, result, Operand(*offset));
__ str(r2, FieldMemOperand(result, total_offset));
- LoadHeapObject(source, value_object);
+ __ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
- LoadHeapObject(r2, Handle<HeapObject>::cast(value));
+ __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
__ str(r2, FieldMemOperand(result, total_offset));
} else {
__ mov(r2, Operand(value));
@@ -4315,7 +4427,7 @@
__ bind(&allocated);
int offset = 0;
- LoadHeapObject(r1, instr->hydrogen()->boilerplate());
+ __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
ASSERT_EQ(size, offset);
}
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index e9dd149..00823e1 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -93,6 +93,9 @@
// Returns a MemOperand pointing to the high word of a DoubleStackSlot.
MemOperand ToHighMemOperand(LOperand* op) const;
+ bool IsInteger32(LConstantOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
@@ -116,6 +119,9 @@
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
+ void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
+ CompareMapMode mode, LEnvironment* env);
+
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
@@ -267,6 +273,7 @@
void EmitNumberUntagD(Register input,
DoubleRegister result,
bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
LEnvironment* env);
// Emits optimized code for typeof x == "y". Modifies input register.
@@ -405,7 +412,7 @@
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
- void SetExit(Label *exit) { external_exit_ = exit; }
+ void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
diff --git a/src/arm/lithium-gap-resolver-arm.cc b/src/arm/lithium-gap-resolver-arm.cc
index 1cfdc79..cefca47 100644
--- a/src/arm/lithium-gap-resolver-arm.cc
+++ b/src/arm/lithium-gap-resolver-arm.cc
@@ -245,13 +245,24 @@
}
} else if (source->IsConstantOperand()) {
- Operand source_operand = cgen_->ToOperand(source);
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
- __ mov(cgen_->ToRegister(destination), source_operand);
+ Register dst = cgen_->ToRegister(destination);
+ if (cgen_->IsInteger32(constant_source)) {
+ __ mov(dst, Operand(cgen_->ToInteger32(constant_source)));
+ } else {
+ __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ }
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
- __ mov(kSavedValueRegister, source_operand);
+ if (cgen_->IsInteger32(constant_source)) {
+ __ mov(kSavedValueRegister,
+ Operand(cgen_->ToInteger32(constant_source)));
+ } else {
+ __ LoadObject(kSavedValueRegister,
+ cgen_->ToHandle(constant_source));
+ }
__ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
}
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 4fc3b03..9894ff2 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -396,14 +396,27 @@
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
- ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
+ ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}
void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index,
Condition cond) {
- str(source, MemOperand(roots, index << kPointerSizeLog2), cond);
+ str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
+}
+
+
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ mov(result, Operand(cell));
+ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
+ } else {
+ mov(result, Operand(object));
+ }
}
@@ -483,13 +496,10 @@
// registers are cp.
ASSERT(!address.is(cp) && !value.is(cp));
- if (FLAG_debug_code) {
- Label ok;
+ if (emit_debug_code()) {
ldr(ip, MemOperand(address));
cmp(ip, value);
- b(eq, &ok);
- stop("Wrong address or value passed to RecordWrite");
- bind(&ok);
+ Check(eq, "Wrong address or value passed to RecordWrite");
}
Label done;
@@ -538,7 +548,7 @@
SaveFPRegsMode fp_mode,
RememberedSetFinalAction and_then) {
Label done;
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok);
stop("Remembered set pointer is in new space");
@@ -807,12 +817,12 @@
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
- // Setup the frame structure on the stack.
+ // Set up the frame structure on the stack.
ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
Push(lr, fp);
- mov(fp, Operand(sp)); // Setup new frame pointer.
+ mov(fp, Operand(sp)); // Set up new frame pointer.
// Reserve room for saved entry sp and code object.
sub(sp, sp, Operand(2 * kPointerSize));
if (emit_debug_code()) {
@@ -947,10 +957,12 @@
Handle<Code> code_constant,
Register code_reg,
Label* done,
+ bool* definitely_mismatches,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
bool definitely_matches = false;
+ *definitely_mismatches = false;
Label regular_invoke;
// Check whether the expected and actual arguments count match. If not,
@@ -981,6 +993,7 @@
// arguments.
definitely_matches = true;
} else {
+ *definitely_mismatches = true;
mov(r2, Operand(expected.immediate()));
}
}
@@ -1008,7 +1021,9 @@
SetCallKind(r5, call_kind);
Call(adaptor);
call_wrapper.AfterCall();
- b(done);
+ if (!*definitely_mismatches) {
+ b(done);
+ }
} else {
SetCallKind(r5, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
@@ -1028,23 +1043,26 @@
ASSERT(flag == JUMP_FUNCTION || has_frame());
Label done;
-
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, Handle<Code>::null(), code,
+ &done, &definitely_mismatches, flag,
call_wrapper, call_kind);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(r5, call_kind);
- Call(code);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(r5, call_kind);
- Jump(code);
- }
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ SetCallKind(r5, call_kind);
+ Call(code);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(r5, call_kind);
+ Jump(code);
+ }
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+ }
}
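
Stripped of assembler detail, the new control flow is: if the prologue can prove at compile time that the argument counts never match, it emits an unconditional jump to the arguments adaptor, so the straight-line call, jump, and done label below it would be unreachable. A sketch with hypothetical Emit*/Bind helpers:

    void EmitInvoke(InvokeFlag flag) {
      Label done;
      bool definitely_mismatches = false;
      // May emit an unconditional jump to the arguments adaptor.
      EmitInvokePrologue(&done, &definitely_mismatches);
      if (definitely_mismatches) return;  // the rest would be dead code
      if (flag == CALL_FUNCTION) {
        EmitCall();
      } else {
        EmitJump();  // JUMP_FUNCTION
      }
      Bind(&done);  // prologue branches here when the counts match after all
    }
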
@@ -1058,20 +1076,23 @@
ASSERT(flag == JUMP_FUNCTION || has_frame());
Label done;
-
- InvokePrologue(expected, actual, code, no_reg, &done, flag,
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, code, no_reg,
+ &done, &definitely_mismatches, flag,
NullCallWrapper(), call_kind);
- if (flag == CALL_FUNCTION) {
- SetCallKind(r5, call_kind);
- Call(code, rmode);
- } else {
- SetCallKind(r5, call_kind);
- Jump(code, rmode);
- }
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ SetCallKind(r5, call_kind);
+ Call(code, rmode);
+ } else {
+ SetCallKind(r5, call_kind);
+ Jump(code, rmode);
+ }
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+ }
}
@@ -1106,12 +1127,13 @@
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
+ const CallWrapper& call_wrapper,
CallKind call_kind) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
- mov(r1, Operand(function));
+ LoadHeapObject(r1, function);
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -1119,7 +1141,7 @@
// allow recompilation to take effect without changing any of the
// call sites.
ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
+ InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
}
@@ -1401,6 +1423,35 @@
}
+void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
+ // First of all we assign the hash seed to scratch.
+ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+ SmiUntag(scratch);
+
+ // Xor original key with a seed.
+ eor(t0, t0, Operand(scratch));
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ mvn(scratch, Operand(t0));
+ add(t0, scratch, Operand(t0, LSL, 15));
+ // hash = hash ^ (hash >> 12);
+ eor(t0, t0, Operand(t0, LSR, 12));
+ // hash = hash + (hash << 2);
+ add(t0, t0, Operand(t0, LSL, 2));
+ // hash = hash ^ (hash >> 4);
+ eor(t0, t0, Operand(t0, LSR, 4));
+ // hash = hash * 2057;
+ mov(scratch, Operand(t0, LSL, 11));
+ add(t0, t0, Operand(t0, LSL, 3));
+ add(t0, t0, scratch);
+ // hash = hash ^ (hash >> 16);
+ eor(t0, t0, Operand(t0, LSR, 16));
+}
+
+
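
Spelled out in C++, this is the seeded hash the assembly must stay in sync with; the comments above give every step, and the signature here is an assumption (cf. ComputeIntegerHash in utils.h):

    static inline uint32_t ComputeSeededIntegerHash(uint32_t key,
                                                    uint32_t seed) {
      uint32_t hash = key ^ seed;   // xor original key with a seed
      hash = ~hash + (hash << 15);  // the mvn/add pair above
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // == hash + (hash << 3) + (hash << 11)
      hash = hash ^ (hash >> 16);
      return hash;
    }
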
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
@@ -1430,26 +1481,10 @@
// t2 - used for the index into the dictionary.
Label done;
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- mvn(t1, Operand(t0));
- add(t0, t1, Operand(t0, LSL, 15));
- // hash = hash ^ (hash >> 12);
- eor(t0, t0, Operand(t0, LSR, 12));
- // hash = hash + (hash << 2);
- add(t0, t0, Operand(t0, LSL, 2));
- // hash = hash ^ (hash >> 4);
- eor(t0, t0, Operand(t0, LSR, 4));
- // hash = hash * 2057;
- mov(t1, Operand(2057));
- mul(t0, t0, t1);
- // hash = hash ^ (hash >> 16);
- eor(t0, t0, Operand(t0, LSR, 16));
+ GetNumberHash(t0, t1);
// Compute the capacity mask.
- ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
+ ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
sub(t1, t1, Operand(1));
@@ -1460,17 +1495,17 @@
mov(t2, t0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
- add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
+ add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
}
and_(t2, t2, Operand(t1));
// Scale the index by multiplying by the element size.
- ASSERT(NumberDictionary::kEntrySize == 3);
+ ASSERT(SeededNumberDictionary::kEntrySize == 3);
add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
// Check if the key is identical to the name.
add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
- ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
+ ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
cmp(key, Operand(ip));
if (i != kProbes - 1) {
b(eq, &done);
@@ -1483,14 +1518,14 @@
// Check that the value is a normal property.
// t2: elements + (index * kPointerSize)
const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ldr(t1, FieldMemOperand(t2, kDetailsOffset));
tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
b(ne, miss);
// Get the value at the masked, scaled index and return.
const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
+ SeededNumberDictionary::kElementsStartOffset + kPointerSize;
ldr(result, FieldMemOperand(t2, kValueOffset));
}
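
The lookup itself is quadratic probing over a power-of-two table with three-word entries (key, value, details). A sketch, taking the probe offset i + i*i from the comment above; the probe count and the raw-array view are stand-ins, and where this sketch returns -1 the real code falls through to the miss handling on the last probe:

    static const int kEntrySize = 3;  // SeededNumberDictionary::kEntrySize

    int FindNumberEntry(uint32_t hash, uint32_t capacity, uint32_t key,
                        const uint32_t* elements, int probes) {
      const uint32_t mask = capacity - 1;  // capacity is a power of two
      for (int i = 0; i < probes; i++) {
        // Masked index: (hash + i + i * i) & mask.
        uint32_t index = (hash + i + i * i) & mask;
        if (elements[index * kEntrySize] == key) {
          return static_cast<int>(index);  // entry found
        }
      }
      return -1;  // miss
    }
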
@@ -1979,18 +2014,49 @@
}
+void MacroAssembler::CompareMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* early_success,
+ CompareMapMode mode) {
+ ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ cmp(scratch, Operand(map));
+ if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
+ Map* transitioned_fast_element_map(
+ map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+ ASSERT(transitioned_fast_element_map == NULL ||
+ map->elements_kind() != FAST_ELEMENTS);
+ if (transitioned_fast_element_map != NULL) {
+ b(eq, early_success);
+ cmp(scratch, Operand(Handle<Map>(transitioned_fast_element_map)));
+ }
+
+ Map* transitioned_double_map(
+ map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+ ASSERT(transitioned_double_map == NULL ||
+ map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+ if (transitioned_double_map != NULL) {
+ b(eq, early_success);
+ cmp(scratch, Operand(Handle<Map>(transitioned_double_map)));
+ }
+ }
+}
+
+
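
Read as a predicate, a successful CompareMap in ALLOW_ELEMENT_TRANSITION_MAPS mode accepts the given map or either of its more general ElementsKind transitions. A sketch mirroring the compares above (the NULL argument to LookupElementsTransitionMap follows the diff; the function name is hypothetical):

    bool MatchesMapAllowingTransitions(Map* actual, Handle<Map> expected) {
      if (actual == *expected) return true;  // the early_success case
      Map* to_fast =
          expected->LookupElementsTransitionMap(FAST_ELEMENTS, NULL);
      if (to_fast != NULL && actual == to_fast) return true;
      Map* to_double =
          expected->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL);
      return to_double != NULL && actual == to_double;
    }
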
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type) {
+ SmiCheckType smi_check_type,
+ CompareMapMode mode) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
- ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- mov(ip, Operand(map));
- cmp(scratch, ip);
+
+ Label success;
+ CompareMap(obj, scratch, map, &success, mode);
b(ne, fail);
+ bind(&success);
}
@@ -2333,7 +2399,7 @@
b(gt, not_int32);
// We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
// it rounds to zero.
const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
@@ -3447,7 +3513,7 @@
tst(mask_scratch, load_scratch);
b(ne, &done);
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
// LSL may overflow, making the check conservative.
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 2725883..60c2e6f 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -52,7 +52,7 @@
// Give alias names to registers
const Register cp = { 8 }; // JavaScript context pointer
-const Register roots = { 10 }; // Roots array pointer.
+const Register kRootRegister = { 10 }; // Roots array pointer.
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
@@ -166,6 +166,16 @@
Heap::RootListIndex index,
Condition cond = al);
+ void LoadHeapObject(Register dst, Handle<HeapObject> object);
+
+ void LoadObject(Register result, Handle<Object> object) {
+ if (object->IsHeapObject()) {
+ LoadHeapObject(result, Handle<HeapObject>::cast(object));
+ } else {
+ Move(result, object);
+ }
+ }
+
// ---------------------------------------------------------------------------
// GC Support
@@ -233,7 +243,7 @@
Register scratch3,
Label* object_is_white_and_not_data);
- // Detects conservatively whether an object is data-only, ie it does need to
+ // Detects conservatively whether an object is data-only, i.e. it does not need to
// be scanned by the garbage collector.
void JumpIfDataObject(Register value,
Register scratch,
@@ -489,10 +499,16 @@
Register map,
Register scratch);
+ void InitializeRootRegister() {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(kRootRegister, Operand(roots_array_start));
+ }
+
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Setup call kind marking in ecx. The method takes ecx as an
+ // Set up call kind marking in r5. The method takes r5 as an
// explicit first parameter to make the code more readable at the
// call sites.
void SetCallKind(Register dst, CallKind kind);
@@ -523,6 +539,7 @@
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
+ const CallWrapper& call_wrapper,
CallKind call_kind);
void IsObjectJSObjectType(Register heap_object,
@@ -574,6 +591,7 @@
Register scratch,
Label* miss);
+ void GetNumberHash(Register t0, Register scratch);
void LoadFromNumberDictionary(Label* miss,
Register elements,
@@ -589,7 +607,7 @@
}
// Check if the given instruction is a 'type' marker.
- // ie. check if is is a mov r<type>, r<type> (referenced as nop(type))
+ // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
// These instructions are generated to mark special location in the code,
// like some special IC code.
static inline bool IsMarkedCode(Instr instr, int type) {
@@ -780,15 +798,26 @@
Register scratch4,
Label* fail);
- // Check if the map of an object is equal to a specified map (either
- // given directly or as an index into the root list) and branch to
- // label if not. Skip the smi check if not required (object is known
- // to be a heap object)
+ // Compare an object's map with the specified map and its transitioned
+ // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
+ // set with the result of the map compare. If multiple map compares are
+ // required, the compare sequence branches to early_success.
+ void CompareMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* early_success,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
+
+ // Check if the map of an object is equal to a specified map and branch to
+ // label if not. Skip the smi check if not required (object is known to be a
+ // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+ // against maps that are ElementsKind transition maps of the specified map.
void CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type);
+ SmiCheckType smi_check_type,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
void CheckMap(Register obj,
@@ -880,7 +909,7 @@
// Truncates a double using a specific rounding mode.
// Clears the z flag (ne condition) if an overflow occurs.
// If exact_conversion is true, the z flag is also cleared if the conversion
- // was inexact, ie. if the double value could not be converted exactly
+ // was inexact, i.e. if the double value could not be converted exactly
// to a 32-bit integer.
void EmitVFPTruncate(VFPRoundingMode rounding_mode,
SwVfpRegister result,
@@ -997,7 +1026,7 @@
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
- // - space to be unwound on exit (includes the call js arguments space and
+ // - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
@@ -1220,6 +1249,7 @@
Handle<Code> code_constant,
Register code_reg,
Label* done,
+ bool* definitely_mismatches,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind);
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 0525529..1ae172c 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -741,7 +741,7 @@
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize(isolate);
- // Setup simulator support first. Some of this information is needed to
+ // Set up simulator support first. Some of this information is needed to
// set up the architecture state.
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
stack_ = reinterpret_cast<char*>(malloc(stack_size));
@@ -750,7 +750,7 @@
break_pc_ = NULL;
break_instr_ = 0;
- // Setup architecture state.
+ // Set up architecture state.
// All registers are initialized to zero to start with.
for (int i = 0; i < num_registers; i++) {
registers_[i] = 0;
@@ -3324,7 +3324,7 @@
int32_t Simulator::Call(byte* entry, int argument_count, ...) {
va_list parameters;
va_start(parameters, argument_count);
- // Setup arguments
+ // Set up arguments
// First four arguments passed in registers.
ASSERT(argument_count >= 4);
@@ -3367,7 +3367,7 @@
int32_t r10_val = get_register(r10);
int32_t r11_val = get_register(r11);
- // Setup the callee-saved registers with a known value. To be able to check
+ // Set up the callee-saved registers with a known value. To be able to check
// that they are preserved properly across JS execution.
int32_t callee_saved_value = icount_;
set_register(r4, callee_saved_value);
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index d229ae6..33fbee5 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -376,13 +376,9 @@
// r0 : value
Label exit;
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver_reg, miss_label);
-
- // Check that the map of the receiver hasn't changed.
- __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(Handle<Map>(object->map())));
- __ b(ne, miss_label);
+ // Check that the map of the object hasn't changed.
+ __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
+ DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -566,16 +562,16 @@
int argc) {
// ----------- S t a t e -------------
// -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee js function
+ // -- sp[4] : callee JS function
// -- sp[8] : call data
- // -- sp[12] : last js argument
+ // -- sp[12] : last JS argument
// -- ...
- // -- sp[(argc + 3) * 4] : first js argument
+ // -- sp[(argc + 3) * 4] : first JS argument
// -- sp[(argc + 4) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
- __ mov(r5, Operand(function));
+ __ LoadHeapObject(r5, function);
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
@@ -587,7 +583,7 @@
} else {
__ Move(r6, call_data);
}
- // Store js function and call data.
+ // Store JS function and call data.
__ stm(ib, sp, r5.bit() | r6.bit());
// r2 points to call data as expected by Arguments
@@ -742,7 +738,7 @@
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION, call_kind);
+ JUMP_FUNCTION, NullCallWrapper(), call_kind);
}
// Deferred code for fast API call case---clean preallocated space.
@@ -1019,10 +1015,9 @@
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
Handle<Map> current_map(current->map());
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Operand(current_map));
- // Branch on the result of the map check.
- __ b(ne, miss);
+ __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
+ ALLOW_ELEMENT_TRANSITION_MAPS);
+
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
@@ -1053,9 +1048,8 @@
LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
// Check the holder map.
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Operand(Handle<Map>(current->map())));
- __ b(ne, miss);
+ __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
+ DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -1099,7 +1093,7 @@
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1110,7 +1104,7 @@
object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ mov(r0, Operand(value));
+ __ LoadHeapObject(r0, value);
__ Ret();
}
@@ -1150,7 +1144,7 @@
__ EnterExitFrame(false, kApiStackSpace);
// Create AccessorInfo instance on the stack above the exit frame with
- // scratch2 (internal::Object **args_) as the data.
+ // scratch2 (internal::Object** args_) as the data.
__ str(scratch2, MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
@@ -1185,7 +1179,7 @@
// and CALLBACKS, so inline only them, other cases may be added
// later.
bool compile_followup_inline = false;
- if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->IsFound() && lookup->IsCacheable()) {
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
@@ -1910,7 +1904,8 @@
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// r2: function name.
@@ -1989,7 +1984,7 @@
__ vmrs(r3);
// Set custom FPCSR:
// - Set rounding mode to "Round towards Minus Infinity"
- // (ie bits [23:22] = 0b10).
+ // (i.e. bits [23:22] = 0b10).
// - Clear vfp cumulative exception flags (bits [3:0]).
// - Make sure Flush-to-zero mode control bit is unset (bit 22).
__ bic(r9, r3,
@@ -2055,7 +2050,8 @@
__ bind(&slow);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// r2: function name.
@@ -2153,7 +2149,8 @@
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// r2: function name.
@@ -2331,7 +2328,8 @@
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
// Handle call cache miss.
__ bind(&miss);
@@ -2411,7 +2409,7 @@
__ str(r3, MemOperand(sp, argc * kPointerSize));
}
- // Setup the context (function already in r1).
+ // Set up the context (function already in r1).
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
@@ -2472,13 +2470,9 @@
// -----------------------------------
Label miss;
- // Check that the object isn't a smi.
- __ JumpIfSmi(r1, &miss);
-
// Check that the map of the object hasn't changed.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r3, Operand(Handle<Map>(object->map())));
- __ b(ne, &miss);
+ __ CheckMap(r1, r3, Handle<Map>(object->map()), &miss,
+ DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -2520,13 +2514,9 @@
// -----------------------------------
Label miss;
- // Check that the object isn't a smi.
- __ JumpIfSmi(r1, &miss);
-
// Check that the map of the object hasn't changed.
- __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r3, Operand(Handle<Map>(receiver->map())));
- __ b(ne, &miss);
+ __ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss,
+ DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {
@@ -2682,7 +2672,7 @@
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : receiver
@@ -2822,7 +2812,7 @@
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value) {
+ Handle<JSFunction> value) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
diff --git a/src/array.js b/src/array.js
index 3d8e278..16e37c5 100644
--- a/src/array.js
+++ b/src/array.js
@@ -204,7 +204,7 @@
if (IS_NULL_OR_UNDEFINED(e)) {
return '';
} else {
- // According to ES5, seciton 15.4.4.3, the toLocaleString conversion
+ // According to ES5, section 15.4.4.3, the toLocaleString conversion
// must throw a TypeError if ToObject(e).toLocaleString isn't
// callable.
var e_obj = ToObject(e);
diff --git a/src/assembler.cc b/src/assembler.cc
index fd8c75e..0bec577 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 Sun Microsystems Inc.
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -30,25 +30,42 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
-#include "v8.h"
+#include "assembler.h"
-#include "arguments.h"
+#include <math.h> // For cos, log, pow, sin, tan, etc.
+#include "api.h"
+#include "builtins.h"
+#include "counters.h"
+#include "cpu.h"
+#include "debug.h"
#include "deoptimizer.h"
#include "execution.h"
-#include "ic-inl.h"
-#include "incremental-marking.h"
-#include "factory.h"
-#include "runtime.h"
-#include "runtime-profiler.h"
-#include "serialize.h"
-#include "stub-cache.h"
-#include "regexp-stack.h"
-#include "ast.h"
-#include "regexp-macro-assembler.h"
+#include "ic.h"
+#include "isolate.h"
+#include "jsregexp.h"
#include "platform.h"
-#include "store-buffer.h"
+#include "regexp-macro-assembler.h"
+#include "regexp-stack.h"
+#include "runtime.h"
+#include "serialize.h"
+#include "store-buffer-inl.h"
+#include "stub-cache.h"
+#include "token.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/assembler-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/assembler-arm-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/assembler-mips-inl.h"
+#else
+#error "Unknown architecture."
+#endif
+
// Include native regexp-macro-assembler.
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
@@ -1108,17 +1125,9 @@
double power_double_double(double x, double y) {
- int y_int = static_cast<int>(y);
- if (y == y_int) {
- return power_double_int(x, y_int); // Returns 1.0 for exponent 0.
- }
- if (!isinf(x)) {
- if (y == 0.5) return sqrt(x + 0.0); // -0 must be converted to +0.
- if (y == -0.5) return 1.0 / sqrt(x + 0.0);
- }
- if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
- return OS::nan_value();
- }
+ // The checks for special cases can be dropped in ia32 because they have
+ // already been done in generated code before bailing out here.
+ if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value();
return pow(x, y);
}
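
The surviving guard is there because libm and ECMAScript disagree on exactly these inputs. A standalone illustration of the C99 behavior the runtime cannot rely on (plain C++, nothing V8-specific assumed):

    #include <cmath>
    #include <cstdio>
    #include <limits>

    int main() {
      const double inf = std::numeric_limits<double>::infinity();
      const double nan = std::numeric_limits<double>::quiet_NaN();
      // C99 pow: pow(+/-1, +/-inf) == 1 and pow(1, NaN) == 1, but ES5
      // 15.8.2.13 requires NaN in all three cases; hence the explicit
      // isnan/isinf check before deferring to pow().
      std::printf("%f\n", std::pow(1.0, inf));   // 1.0 in C, NaN in JS
      std::printf("%f\n", std::pow(-1.0, inf));  // 1.0 in C, NaN in JS
      std::printf("%f\n", std::pow(1.0, nan));   // 1.0 in C, NaN in JS
      return 0;
    }
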
diff --git a/src/assembler.h b/src/assembler.h
index cec20fc..e7c92b4 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -30,19 +30,27 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_
+#include "v8.h"
+
#include "allocation.h"
+#include "builtins.h"
#include "gdb-jit.h"
+#include "isolate.h"
#include "runtime.h"
#include "token.h"
namespace v8 {
+
+class ApiFunction;
+
namespace internal {
+struct StatsCounter;
const unsigned kNoASTId = -1;
// -----------------------------------------------------------------------------
// Platform independent assembler base class.
@@ -271,7 +279,7 @@
INLINE(void apply(intptr_t delta));
// Is the pointer this relocation info refers to coded like a plain pointer
- // or is it strange in some way (eg relative or patched into a series of
+ // or is it strange in some way (e.g. relative or patched into a series of
// instructions).
bool IsCodedSpecially();
@@ -371,7 +379,7 @@
// routines expect to access these pointers indirectly. The following
// location provides a place for these pointers to exist naturally
// when accessed via the Iterator.
- Object *reconstructed_obj_ptr_;
+ Object* reconstructed_obj_ptr_;
// External-reference pointers are also split across instruction-pairs
// in mips, but are accessed via indirect pointers. This location
// provides a place for that pointer to exist naturally. Its address
diff --git a/src/ast.cc b/src/ast.cc
index 13e5589..811193b 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,10 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
-
#include "ast.h"
+
+#include <math.h> // For isfinite.
+#include "builtins.h"
+#include "conversions.h"
+#include "hashmap.h"
#include "parser.h"
+#include "property-details.h"
+#include "property.h"
#include "scopes.h"
#include "string-stream.h"
#include "type-info.h"
@@ -70,6 +75,7 @@
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
is_trivial_(false),
+ is_lvalue_(false),
position_(RelocInfo::kNoPosition) {
BindTo(var);
}
@@ -84,6 +90,7 @@
var_(NULL),
is_this_(is_this),
is_trivial_(false),
+ is_lvalue_(false),
position_(position) {
// Names must be canonicalized for fast equality checks.
ASSERT(name->IsSymbol());
@@ -722,17 +729,11 @@
}
-static bool CanCallWithoutIC(Handle<JSFunction> target, int arity) {
- SharedFunctionInfo* info = target->shared();
- // If the number of formal parameters of the target function does
- // not match the number of arguments we're passing, we don't want to
- // deal with it. Otherwise, we can call it directly.
- return !target->NeedsArgumentsAdaption() ||
- info->formal_parameter_count() == arity;
-}
-
-
bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
+ // If there is an interceptor, we can't compute the target for
+ // a direct call.
+ if (type->has_named_interceptor()) return false;
+
if (check_type_ == RECEIVER_MAP_CHECK) {
// For primitive checks the holder is set up to point to the
// corresponding prototype object, i.e. one step of the algorithm
@@ -746,12 +747,13 @@
type->LookupInDescriptors(NULL, *name, &lookup);
// If the function wasn't found directly in the map, we start
// looking upwards through the prototype chain.
- if (!lookup.IsFound() && type->prototype()->IsJSObject()) {
+ if ((!lookup.IsFound() || IsTransitionType(lookup.type()))
+ && type->prototype()->IsJSObject()) {
holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
type = Handle<Map>(holder()->map());
- } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
+ } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
target_ = Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
- return CanCallWithoutIC(target_, arguments()->length());
+ return true;
} else {
return false;
}
@@ -763,7 +765,7 @@
LookupResult* lookup) {
target_ = Handle<JSFunction>::null();
cell_ = Handle<JSGlobalPropertyCell>::null();
- ASSERT(lookup->IsProperty() &&
+ ASSERT(lookup->IsFound() &&
lookup->type() == NORMAL &&
lookup->holder() == *global);
cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(lookup));
@@ -771,8 +773,7 @@
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
- if (!HEAP->InNewSpace(*candidate) &&
- CanCallWithoutIC(candidate, arguments()->length())) {
+ if (!HEAP->InNewSpace(*candidate)) {
target_ = candidate;
return true;
}
diff --git a/src/ast.h b/src/ast.h
index 805526a..34fadab 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,14 +28,19 @@
#ifndef V8_AST_H_
#define V8_AST_H_
-#include "allocation.h"
-#include "execution.h"
+#include "v8.h"
+
+#include "assembler.h"
#include "factory.h"
+#include "isolate.h"
#include "jsregexp.h"
+#include "list-inl.h"
#include "runtime.h"
#include "small-pointer-list.h"
+#include "smart-array-pointer.h"
#include "token.h"
#include "variables.h"
+#include "zone-inl.h"
namespace v8 {
namespace internal {
@@ -98,12 +103,28 @@
EXPRESSION_NODE_LIST(V)
// Forward declarations
-class BitVector;
-class DefinitionInfo;
+class AstVisitor;
+class BreakableStatement;
+class Expression;
+class IterationStatement;
class MaterializedLiteral;
+class Statement;
class TargetCollector;
class TypeFeedbackOracle;
+class RegExpAlternative;
+class RegExpAssertion;
+class RegExpAtom;
+class RegExpBackReference;
+class RegExpCapture;
+class RegExpCharacterClass;
+class RegExpCompiler;
+class RegExpDisjunction;
+class RegExpEmpty;
+class RegExpLookahead;
+class RegExpQuantifier;
+class RegExpText;
+
#define DEF_FORWARD_DECLARATION(type) class type;
AST_NODE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION
@@ -115,11 +136,6 @@
typedef ZoneList<Handle<Object> > ZoneObjectList;
-#define DECLARE_NODE_TYPE(type) \
- virtual void Accept(AstVisitor* v); \
- virtual AstNode::Type node_type() const { return AstNode::k##type; } \
-
-
class AstNode: public ZoneObject {
public:
#define DECLARE_TYPE_ENUM(type) k##type,
@@ -190,6 +206,11 @@
};
+#define DECLARE_NODE_TYPE(type) \
+ virtual void Accept(AstVisitor* v); \
+ virtual AstNode::Type node_type() const { return AstNode::k##type; } \
+
+
class Statement: public AstNode {
public:
Statement() : statement_pos_(RelocInfo::kNoPosition) {}
@@ -1159,12 +1180,17 @@
bool IsArguments() { return var_ != NULL && var_->is_arguments(); }
+ bool IsLValue() {
+ return is_lvalue_;
+ }
+
Handle<String> name() const { return name_; }
Variable* var() const { return var_; }
bool is_this() const { return is_this_; }
int position() const { return position_; }
void MarkAsTrivial() { is_trivial_ = true; }
+ void MarkAsLValue() { is_lvalue_ = true; }
// Bind this proxy to the variable var.
void BindTo(Variable* var);
@@ -1174,6 +1200,9 @@
Variable* var_; // resolved variable, or NULL
bool is_this_;
bool is_trivial_;
+ // True if this variable proxy is being used in an assignment
+ // or with an increment/decrement operator.
+ bool is_lvalue_;
int position_;
};
diff --git a/src/atomicops_internals_x86_macosx.h b/src/atomicops_internals_x86_macosx.h
index 2bac006..bfb02b3 100644
--- a/src/atomicops_internals_x86_macosx.h
+++ b/src/atomicops_internals_x86_macosx.h
@@ -35,7 +35,7 @@
namespace v8 {
namespace internal {
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
@@ -49,7 +49,7 @@
return prev_value;
}
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
@@ -59,12 +59,12 @@
return old_value;
}
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}
@@ -73,7 +73,7 @@
OSMemoryBarrier();
}
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
@@ -87,7 +87,7 @@
return prev_value;
}
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return Acquire_CompareAndSwap(ptr, old_value, new_value);
@@ -97,12 +97,12 @@
*ptr = value;
}
-inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
-inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
@@ -111,13 +111,13 @@
return *ptr;
}
-inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
-inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
@@ -126,7 +126,7 @@
// 64-bit implementation on 64-bit platform
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
@@ -140,7 +140,7 @@
return prev_value;
}
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
@@ -150,17 +150,17 @@
return old_value;
}
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
}
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
}
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
@@ -174,7 +174,7 @@
return prev_value;
}
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
// The lib kern interface does not distinguish between
@@ -186,12 +186,12 @@
*ptr = value;
}
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
-inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
@@ -200,13 +200,13 @@
return *ptr;
}
-inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
@@ -264,7 +264,7 @@
old_value, new_value);
}
-inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
NoBarrier_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
@@ -279,7 +279,7 @@
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
-inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
return NoBarrier_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}
diff --git a/src/bignum-dtoa.h b/src/bignum-dtoa.h
index ea1acbb..93ec1f7 100644
--- a/src/bignum-dtoa.h
+++ b/src/bignum-dtoa.h
@@ -44,7 +44,7 @@
BIGNUM_DTOA_PRECISION
};
-// Converts the given double 'v' to ascii.
+// Converts the given double 'v' to ASCII.
// The result should be interpreted as buffer * 10^(point-length).
// The buffer will be null-terminated.
//
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 29c16ee..cedb0ef 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -264,13 +264,13 @@
Handle<Map> CreateStrictModeFunctionMap(
PrototypePropertyMode prototype_mode,
Handle<JSFunction> empty_function,
- Handle<FixedArray> arguments_callbacks,
- Handle<FixedArray> caller_callbacks);
+ Handle<AccessorPair> arguments_callbacks,
+ Handle<AccessorPair> caller_callbacks);
Handle<DescriptorArray> ComputeStrictFunctionInstanceDescriptor(
PrototypePropertyMode propertyMode,
- Handle<FixedArray> arguments,
- Handle<FixedArray> caller);
+ Handle<AccessorPair> arguments,
+ Handle<AccessorPair> caller);
static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
@@ -299,7 +299,7 @@
void Bootstrapper::Iterate(ObjectVisitor* v) {
extensions_cache_.Iterate(v);
- v->Synchronize("Extensions");
+ v->Synchronize(VisitorSynchronization::kExtensions);
}
@@ -378,7 +378,9 @@
} else {
attributes = DONT_ENUM;
}
- SetLocalPropertyNoThrow(target, symbol, function, attributes);
+ CHECK_NOT_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ target, symbol, function, attributes));
if (is_ecma_native) {
function->shared()->set_instance_class_name(*symbol);
}
@@ -538,8 +540,8 @@
Handle<DescriptorArray> Genesis::ComputeStrictFunctionInstanceDescriptor(
PrototypePropertyMode prototypeMode,
- Handle<FixedArray> arguments,
- Handle<FixedArray> caller) {
+ Handle<AccessorPair> arguments,
+ Handle<AccessorPair> caller) {
Handle<DescriptorArray> descriptors =
factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
? 4
@@ -600,7 +602,7 @@
throw_type_error_function->shared()->set_code(*code);
throw_type_error_function->shared()->DontAdaptArguments();
- PreventExtensions(throw_type_error_function);
+ JSObject::PreventExtensions(throw_type_error_function);
}
return throw_type_error_function;
}
@@ -609,8 +611,8 @@
Handle<Map> Genesis::CreateStrictModeFunctionMap(
PrototypePropertyMode prototype_mode,
Handle<JSFunction> empty_function,
- Handle<FixedArray> arguments_callbacks,
- Handle<FixedArray> caller_callbacks) {
+ Handle<AccessorPair> arguments_callbacks,
+ Handle<AccessorPair> caller_callbacks) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
Handle<DescriptorArray> descriptors =
ComputeStrictFunctionInstanceDescriptor(prototype_mode,
@@ -627,8 +629,8 @@
// Create the callbacks arrays for ThrowTypeError functions.
// The get/set callbacks are filled in after the maps are created below.
Factory* factory = empty->GetIsolate()->factory();
- Handle<FixedArray> arguments = factory->NewFixedArray(2, TENURED);
- Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
+ Handle<AccessorPair> arguments(factory->NewAccessorPair());
+ Handle<AccessorPair> caller(factory->NewAccessorPair());
// Allocate map for the strict mode function instances.
Handle<Map> strict_mode_function_instance_map =
@@ -663,11 +665,11 @@
Handle<JSFunction> throw_function =
GetThrowTypeErrorFunction();
- // Complete the callback fixed arrays.
- arguments->set(0, *throw_function);
- arguments->set(1, *throw_function);
- caller->set(0, *throw_function);
- caller->set(1, *throw_function);
+ // Complete the callbacks.
+ arguments->set_getter(*throw_function);
+ arguments->set_setter(*throw_function);
+ caller->set_getter(*throw_function);
+ caller->set_setter(*throw_function);
}
@@ -753,11 +755,10 @@
Handle<JSObject> prototype =
Handle<JSObject>(
JSObject::cast(js_global_function->instance_prototype()));
- SetLocalPropertyNoThrow(
- prototype,
- factory()->constructor_symbol(),
- isolate()->object_function(),
- NONE);
+ CHECK_NOT_EMPTY_HANDLE(isolate(),
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ prototype, factory()->constructor_symbol(),
+ isolate()->object_function(), NONE));
} else {
Handle<FunctionTemplateInfo> js_global_constructor(
FunctionTemplateInfo::cast(js_global_template->constructor()));
@@ -834,7 +835,7 @@
factory()->LookupAsciiSymbol("global"),
inner_global,
attributes);
- // Setup the reference from the global object to the builtins object.
+ // Set up the reference from the global object to the builtins object.
JSGlobalObject::cast(*inner_global)->set_builtins(*builtins_global);
TransferNamedProperties(inner_global_from_snapshot, inner_global);
TransferIndexedProperties(inner_global_from_snapshot, inner_global);
@@ -863,8 +864,10 @@
Heap* heap = isolate->heap();
Handle<String> object_name = Handle<String>(heap->Object_symbol());
- SetLocalPropertyNoThrow(inner_global, object_name,
- isolate->object_function(), DONT_ENUM);
+ CHECK_NOT_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ inner_global, object_name,
+ isolate->object_function(), DONT_ENUM));
Handle<JSObject> global = Handle<JSObject>(global_context()->global());
@@ -1046,14 +1049,15 @@
{ // -- J S O N
Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
- Handle<JSFunction> cons = factory->NewFunction(
- name,
- factory->the_hole_value());
+ Handle<JSFunction> cons = factory->NewFunction(name,
+ factory->the_hole_value());
cons->SetInstancePrototype(global_context()->initial_object_prototype());
cons->SetInstanceClassName(*name);
Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
ASSERT(json_object->IsJSObject());
- SetLocalPropertyNoThrow(global, name, json_object, DONT_ENUM);
+ CHECK_NOT_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ global, name, json_object, DONT_ENUM));
global_context()->set_json_object(*json_object);
}
@@ -1083,21 +1087,23 @@
global_context()->set_arguments_boilerplate(*result);
// Note: length must be added as the first property and
// callee must be added as the second property.
- SetLocalPropertyNoThrow(result, factory->length_symbol(),
- factory->undefined_value(),
- DONT_ENUM);
- SetLocalPropertyNoThrow(result, factory->callee_symbol(),
- factory->undefined_value(),
- DONT_ENUM);
+ CHECK_NOT_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ result, factory->length_symbol(),
+ factory->undefined_value(), DONT_ENUM));
+ CHECK_NOT_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ result, factory->callee_symbol(),
+ factory->undefined_value(), DONT_ENUM));
#ifdef DEBUG
LookupResult lookup(isolate);
result->LocalLookup(heap->callee_symbol(), &lookup);
- ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
+ ASSERT(lookup.IsFound() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
result->LocalLookup(heap->length_symbol(), &lookup);
- ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
+ ASSERT(lookup.IsFound() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex);
@@ -1136,17 +1142,17 @@
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
// Create the ThrowTypeError functions.
- Handle<FixedArray> callee = factory->NewFixedArray(2, TENURED);
- Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
+ Handle<AccessorPair> callee = factory->NewAccessorPair();
+ Handle<AccessorPair> caller = factory->NewAccessorPair();
Handle<JSFunction> throw_function =
GetThrowTypeErrorFunction();
// Install the ThrowTypeError functions.
- callee->set(0, *throw_function);
- callee->set(1, *throw_function);
- caller->set(0, *throw_function);
- caller->set(1, *throw_function);
+ callee->set_getter(*throw_function);
+ callee->set_setter(*throw_function);
+ caller->set_getter(*throw_function);
+ caller->set_setter(*throw_function);
// Create the descriptor array for the arguments object.
Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
@@ -1183,14 +1189,15 @@
global_context()->set_strict_mode_arguments_boilerplate(*result);
// Add length property only for strict mode boilerplate.
- SetLocalPropertyNoThrow(result, factory->length_symbol(),
- factory->undefined_value(),
- DONT_ENUM);
+ CHECK_NOT_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ result, factory->length_symbol(),
+ factory->undefined_value(), DONT_ENUM));
#ifdef DEBUG
LookupResult lookup(isolate);
result->LocalLookup(heap->length_symbol(), &lookup);
- ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
+ ASSERT(lookup.IsFound() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
@@ -1353,7 +1360,7 @@
if (cache != NULL) cache->Add(name, function_info);
}
- // Setup the function context. Conceptually, we should clone the
+ // Set up the function context. Conceptually, we should clone the
// function before overwriting the context but since we're in a
// single-threaded environment it is not strictly necessary.
ASSERT(top_context->IsGlobalContext());
@@ -1440,7 +1447,7 @@
builtins->set_global_context(*global_context());
builtins->set_global_receiver(*builtins);
- // Setup the 'global' properties of the builtins object. The
+ // Set up the 'global' properties of the builtins object. The
// 'global' property that refers to the global object is the only
// way to get from code running in the builtins context to the
// global object.
@@ -1448,9 +1455,11 @@
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
Handle<String> global_symbol = factory()->LookupAsciiSymbol("global");
Handle<Object> global_obj(global_context()->global());
- SetLocalPropertyNoThrow(builtins, global_symbol, global_obj, attributes);
+ CHECK_NOT_EMPTY_HANDLE(isolate(),
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ builtins, global_symbol, global_obj, attributes));
- // Setup the reference from the global object to the builtins object.
+ // Set up the reference from the global object to the builtins object.
JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);
// Create a bridge function that has context in the global context.
@@ -1613,16 +1622,13 @@
// doesn't inherit from Object.prototype.
// To be used only for internal work by builtins. Instances
// must not be leaked to user code.
- // Only works correctly when called as a constructor. The normal
- // Array code uses Array.prototype as prototype when called as
- // a function.
Handle<JSFunction> array_function =
InstallFunction(builtins,
"InternalArray",
JS_ARRAY_TYPE,
JSArray::kSize,
isolate()->initial_object_prototype(),
- Builtins::kArrayCode,
+ Builtins::kInternalArrayCode,
true);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
@@ -1654,6 +1660,8 @@
array_function->initial_map()->set_instance_descriptors(
*array_descriptors);
+
+ global_context()->set_internal_array_function(*array_function);
}
if (FLAG_disable_native_files) {
@@ -1675,7 +1683,7 @@
InstallNativeFunctions();
// Store the map for the string prototype after the natives has been compiled
- // and the String function has been setup.
+ // and the String function has been set up.
Handle<JSFunction> string_function(global_context()->string_function());
ASSERT(JSObject::cast(
string_function->initial_map()->prototype())->HasFastProperties());
@@ -1912,25 +1920,28 @@
void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
- Factory* factory = global_context->GetIsolate()->factory();
+ Isolate* isolate = global_context->GetIsolate();
+ Factory* factory = isolate->factory();
HandleScope scope;
- Handle<JSGlobalObject> js_global(
- JSGlobalObject::cast(global_context->global()));
+ Handle<JSGlobalObject> global(JSGlobalObject::cast(global_context->global()));
// Expose the natives in global if a name for it is specified.
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
- Handle<String> natives_string =
- factory->LookupAsciiSymbol(FLAG_expose_natives_as);
- SetLocalPropertyNoThrow(js_global, natives_string,
- Handle<JSObject>(js_global->builtins()), DONT_ENUM);
+ Handle<String> natives = factory->LookupAsciiSymbol(FLAG_expose_natives_as);
+ CHECK_NOT_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ global, natives,
+ Handle<JSObject>(global->builtins()),
+ DONT_ENUM));
}
- Handle<Object> Error = GetProperty(js_global, "Error");
+ Handle<Object> Error = GetProperty(global, "Error");
if (Error->IsJSObject()) {
Handle<String> name = factory->LookupAsciiSymbol("stackTraceLimit");
- SetLocalPropertyNoThrow(Handle<JSObject>::cast(Error),
- name,
- Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
- NONE);
+ Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit));
+ CHECK_NOT_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ Handle<JSObject>::cast(Error), name,
+ stack_trace_limit, NONE));
}
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1949,7 +1960,9 @@
Handle<String> debug_string =
factory->LookupAsciiSymbol(FLAG_expose_debug_as);
Handle<Object> global_proxy(debug->debug_context()->global_proxy());
- SetLocalPropertyNoThrow(js_global, debug_string, global_proxy, DONT_ENUM);
+ CHECK_NOT_EMPTY_HANDLE(isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ global, debug_string, global_proxy, DONT_ENUM));
}
#endif
}
@@ -2165,7 +2178,9 @@
Handle<String> key = Handle<String>(descs->GetKey(i));
int index = descs->GetFieldIndex(i);
Handle<Object> value = Handle<Object>(from->FastPropertyAt(index));
- SetLocalPropertyNoThrow(to, key, value, details.attributes());
+ CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ to, key, value, details.attributes()));
break;
}
case CONSTANT_FUNCTION: {
@@ -2173,7 +2188,9 @@
Handle<String> key = Handle<String>(descs->GetKey(i));
Handle<JSFunction> fun =
Handle<JSFunction>(descs->GetConstantFunction(i));
- SetLocalPropertyNoThrow(to, key, fun, details.attributes());
+ CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ to, key, fun, details.attributes()));
break;
}
case CALLBACKS: {
@@ -2188,7 +2205,7 @@
Handle<Object> callbacks(descs->GetCallbacksObject(i));
PropertyDetails d =
PropertyDetails(details.attributes(), CALLBACKS, details.index());
- SetNormalizedProperty(to, key, callbacks, d);
+ JSObject::SetNormalizedProperty(to, key, callbacks, d);
break;
}
case MAP_TRANSITION:
@@ -2225,7 +2242,9 @@
value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value());
}
PropertyDetails details = properties->DetailsAt(i);
- SetLocalPropertyNoThrow(to, key, value, details.attributes());
+ CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ to, key, value, details.attributes()));
}
}
}
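
The bootstrapper.cc hunks above replace every SetLocalPropertyNoThrow call with CHECK_NOT_EMPTY_HANDLE wrapped around JSObject::SetLocalPropertyIgnoreAttributes, so a failed property installation during bootstrap now fails loudly instead of being silently dropped. A simplified, self-contained C++ model of the pattern follows; the handle type and macro are illustrative stand-ins, not V8's real definitions.

    #include <cstdio>
    #include <cstdlib>

    // Toy handle: a null location signals a failed installation.
    template <typename T>
    struct ToyHandle {
      T* location;
      bool is_null() const { return location == NULL; }
    };

    // Model of CHECK_NOT_EMPTY_HANDLE: bootstrap-time property
    // installation must succeed, so an empty result handle is fatal.
    #define CHECK_NOT_EMPTY_HANDLE_MODEL(call)                    \
      do {                                                        \
        if ((call).is_null()) {                                   \
          fprintf(stderr, "bootstrap: installation failed\n");    \
          abort();                                                \
        }                                                         \
      } while (false)

    struct ToyObject {};

    // Stand-in for JSObject::SetLocalPropertyIgnoreAttributes: returns
    // an empty handle on failure rather than throwing.
    static ToyHandle<ToyObject> SetLocalPropertyModel(bool succeeds) {
      static ToyObject object;
      ToyHandle<ToyObject> result = { succeeds ? &object : NULL };
      return result;
    }

    int main() {
      CHECK_NOT_EMPTY_HANDLE_MODEL(SetLocalPropertyModel(true));  // passes
      return 0;
    }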
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index abf61b9..101c2e1 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -88,7 +88,7 @@
// context.
class Bootstrapper {
public:
- // Requires: Heap::Setup has been called.
+ // Requires: Heap::SetUp has been called.
void Initialize(bool create_heap_objects);
void TearDown();
diff --git a/src/builtins.cc b/src/builtins.cc
index 43cf358..90a8d3e 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -184,17 +184,17 @@
}
-BUILTIN(ArrayCodeGeneric) {
+static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
+ Isolate* isolate,
+ JSFunction* constructor) {
Heap* heap = isolate->heap();
isolate->counters()->array_function_runtime()->Increment();
JSArray* array;
if (CalledAsConstructor(isolate)) {
- array = JSArray::cast(*args.receiver());
+ array = JSArray::cast((*args)[0]);
} else {
// Allocate the JS Array
- JSFunction* constructor =
- isolate->context()->global_context()->array_function();
Object* obj;
{ MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -202,13 +202,10 @@
array = JSArray::cast(obj);
}
- // 'array' now contains the JSArray we should initialize.
- ASSERT(array->HasFastTypeElements());
-
// Optimize the case where there is one argument and the argument is a
// small smi.
- if (args.length() == 2) {
- Object* obj = args[1];
+ if (args->length() == 2) {
+ Object* obj = (*args)[1];
if (obj->IsSmi()) {
int len = Smi::cast(obj)->value();
if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
@@ -225,42 +222,85 @@
{ MaybeObject* maybe_obj = array->Initialize(0);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- return array->SetElementsLength(args[1]);
+ return array->SetElementsLength((*args)[1]);
}
// Optimize the case where there are no parameters passed.
- if (args.length() == 1) {
+ if (args->length() == 1) {
return array->Initialize(JSArray::kPreallocatedArrayElements);
}
- // Take the arguments as elements.
- int number_of_elements = args.length() - 1;
- Smi* len = Smi::FromInt(number_of_elements);
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len->value());
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
// Set length and elements on the array.
+ int number_of_elements = args->length() - 1;
MaybeObject* maybe_object =
- array->EnsureCanContainElements(FixedArray::cast(obj));
+ array->EnsureCanContainElements(args, 1, number_of_elements,
+ ALLOW_CONVERTED_DOUBLE_ELEMENTS);
if (maybe_object->IsFailure()) return maybe_object;
- AssertNoAllocation no_gc;
- FixedArray* elms = FixedArray::cast(obj);
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ // Allocate an appropriately typed elements array.
+ MaybeObject* maybe_elms;
+ ElementsKind elements_kind = array->GetElementsKind();
+ if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
+ number_of_elements);
+ } else {
+ maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
+ }
+ FixedArrayBase* elms;
+ if (!maybe_elms->To<FixedArrayBase>(&elms)) return maybe_elms;
+
// Fill in the content
- for (int index = 0; index < number_of_elements; index++) {
- elms->set(index, args[index+1], mode);
+ switch (array->GetElementsKind()) {
+ case FAST_SMI_ONLY_ELEMENTS: {
+ FixedArray* smi_elms = FixedArray::cast(elms);
+ for (int index = 0; index < number_of_elements; index++) {
+ smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER);
+ }
+ break;
+ }
+ case FAST_ELEMENTS: {
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ FixedArray* object_elms = FixedArray::cast(elms);
+ for (int index = 0; index < number_of_elements; index++) {
+ object_elms->set(index, (*args)[index+1], mode);
+ }
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS: {
+ FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
+ for (int index = 0; index < number_of_elements; index++) {
+ double_elms->set(index, (*args)[index+1]->Number());
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
- array->set_elements(FixedArray::cast(obj));
- array->set_length(len);
-
+ array->set_elements(elms);
+ array->set_length(Smi::FromInt(number_of_elements));
return array;
}
+BUILTIN(InternalArrayCodeGeneric) {
+ return ArrayCodeGenericCommon(
+ &args,
+ isolate,
+ isolate->context()->global_context()->internal_array_function());
+}
+
+
+BUILTIN(ArrayCodeGeneric) {
+ return ArrayCodeGenericCommon(
+ &args,
+ isolate,
+ isolate->context()->global_context()->array_function());
+}
+
+
MUST_USE_RESULT static MaybeObject* AllocateJSArray(Heap* heap) {
JSFunction* array_function =
heap->isolate()->context()->global_context()->array_function();
@@ -371,7 +411,7 @@
int size_delta = to_trim * kPointerSize;
if (heap->marking()->TransferMark(elms->address(),
elms->address() + size_delta)) {
- MemoryChunk::IncrementLiveBytes(elms->address(), -size_delta);
+ MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
}
return FixedArray::cast(HeapObject::FromAddress(
@@ -424,7 +464,8 @@
MaybeObject* maybe_array = array->EnsureCanContainElements(
args,
first_added_arg,
- args_length - first_added_arg);
+ args_length - first_added_arg,
+ DONT_ALLOW_DOUBLE_ELEMENTS);
if (maybe_array->IsFailure()) return maybe_array;
return array->elements();
}
@@ -627,7 +668,8 @@
ASSERT(to_add <= (Smi::kMaxValue - len));
MaybeObject* maybe_object =
- array->EnsureCanContainElements(&args, 1, to_add);
+ array->EnsureCanContainElements(&args, 1, to_add,
+ DONT_ALLOW_DOUBLE_ELEMENTS);
if (maybe_object->IsFailure()) return maybe_object;
if (new_length > elms->length()) {
@@ -758,7 +800,8 @@
FixedArray* result_elms = FixedArray::cast(result);
MaybeObject* maybe_object =
- result_array->EnsureCanContainElements(result_elms);
+ result_array->EnsureCanContainElements(result_elms,
+ DONT_ALLOW_DOUBLE_ELEMENTS);
if (maybe_object->IsFailure()) return maybe_object;
AssertNoAllocation no_gc;
@@ -1022,7 +1065,7 @@
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
if (!array->HasFastSmiOnlyElements()) {
- result_array->EnsureCanContainNonSmiElements();
+ result_array->EnsureCanContainHeapObjectElements();
break;
}
}
@@ -1676,7 +1719,7 @@
#undef DEF_FUNCTION_PTR_A
}
-void Builtins::Setup(bool create_heap_objects) {
+void Builtins::SetUp(bool create_heap_objects) {
ASSERT(!initialized_);
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
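
The builtins.cc rework above factors Array construction into ArrayCodeGenericCommon, shared by the new InternalArray builtin, and replaces the single FixedArray fill loop with a dispatch on the array's elements kind, so smi-only, object, and double arrays each get an appropriately typed backing store. A condensed, runnable C++ model of that dispatch, with print statements standing in for V8's typed stores:

    #include <cstdio>
    #include <vector>

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS
    };

    struct Arg { int smi; double number; };

    // Models filling a freshly allocated backing store: smi-only stores
    // skip the write barrier, tagged stores use it, double stores unbox.
    static void FillElements(ElementsKind kind, const std::vector<Arg>& args) {
      switch (kind) {
        case FAST_SMI_ONLY_ELEMENTS:
          for (size_t i = 0; i < args.size(); i++)
            printf("smi store, no write barrier: %d\n", args[i].smi);
          break;
        case FAST_ELEMENTS:
          for (size_t i = 0; i < args.size(); i++)
            printf("tagged store, with write barrier\n");
          break;
        case FAST_DOUBLE_ELEMENTS:
          for (size_t i = 0; i < args.size(); i++)
            printf("unboxed double store: %f\n", args[i].number);
          break;
      }
    }

    int main() {
      std::vector<Arg> args;
      Arg a = { 7, 7.0 };
      args.push_back(a);
      FillElements(FAST_SMI_ONLY_ELEMENTS, args);
      FillElements(FAST_DOUBLE_ELEMENTS, args);
      return 0;
    }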
diff --git a/src/builtins.h b/src/builtins.h
index 3659f99..f20d97d 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -44,6 +44,7 @@
\
V(EmptyFunction, NO_EXTRA_ARGUMENTS) \
\
+ V(InternalArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
V(ArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
\
V(ArrayPush, NO_EXTRA_ARGUMENTS) \
@@ -178,6 +179,8 @@
V(FunctionApply, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
+ V(InternalArrayCode, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(ArrayCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \
@@ -262,7 +265,7 @@
// Generate all builtin code objects. Should be called once during
// isolate initialization.
- void Setup(bool create_heap_objects);
+ void SetUp(bool create_heap_objects);
void TearDown();
// Garbage collection support.
@@ -359,6 +362,7 @@
static void Generate_FunctionCall(MacroAssembler* masm);
static void Generate_FunctionApply(MacroAssembler* masm);
+ static void Generate_InternalArrayCode(MacroAssembler* masm);
static void Generate_ArrayCode(MacroAssembler* masm);
static void Generate_ArrayConstructCode(MacroAssembler* masm);
diff --git a/src/checks.h b/src/checks.h
index 8608b0e..d93d451 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -265,16 +265,16 @@
// The ASSERT macro is equivalent to CHECK except that it only
// generates code in debug builds.
#ifdef DEBUG
-#define ASSERT_RESULT(expr) CHECK(expr)
-#define ASSERT(condition) CHECK(condition)
-#define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2)
-#define ASSERT_NE(v1, v2) CHECK_NE(v1, v2)
-#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
-#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
-#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
-#define SLOW_ASSERT(condition) if (FLAG_enable_slow_asserts) CHECK(condition)
+#define ASSERT_RESULT(expr) CHECK(expr)
+#define ASSERT(condition) CHECK(condition)
+#define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2)
+#define ASSERT_NE(v1, v2) CHECK_NE(v1, v2)
+#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
+#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
+#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
+#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition))
#else
-#define ASSERT_RESULT(expr) (expr)
+#define ASSERT_RESULT(expr) (expr)
#define ASSERT(condition) ((void) 0)
#define ASSERT_EQ(v1, v2) ((void) 0)
#define ASSERT_NE(v1, v2) ((void) 0)
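
The SLOW_ASSERT rewrite above is more than whitespace alignment: the old expansion was a bare 'if', which can capture a following 'else' when the macro is used as the body of another 'if'; the new single-expression form cannot. A minimal demonstration of that hazard, which is a plausible motivation inferred from the shape of the change (plain assert stands in for CHECK):

    #include <cassert>
    #include <cstdio>

    static bool FLAG_enable_slow_asserts = false;

    // Old shape: expands to a bare `if` statement.
    #define SLOW_ASSERT_OLD(cond) if (FLAG_enable_slow_asserts) assert(cond)
    // New shape: one expression statement, immune to dangling-else capture.
    #define SLOW_ASSERT_NEW(cond) assert(!FLAG_enable_slow_asserts || (cond))

    static int classify_old(int x) {
      if (x > 0)
        SLOW_ASSERT_OLD(x != 42);
      else           // binds to the macro's hidden `if`, not to `x > 0`!
        return -1;   // so this runs for x > 0 when slow asserts are off
      return 1;
    }

    int main() {
      SLOW_ASSERT_NEW(1 + 1 == 2);  // safe in any statement position
      printf("classify_old(5) = %d, expected 1\n", classify_old(5));  // -1
      return 0;
    }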
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index ba7df80..5fa9a2b 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -40,7 +40,7 @@
bool CodeStub::FindCodeInCache(Code** code_out) {
Heap* heap = Isolate::Current()->heap();
int index = heap->code_stubs()->FindEntry(GetKey());
- if (index != NumberDictionary::kNotFound) {
+ if (index != UnseededNumberDictionary::kNotFound) {
*code_out = Code::cast(heap->code_stubs()->ValueAt(index));
return true;
}
@@ -101,7 +101,14 @@
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
Code* code;
- if (!FindCodeInCache(&code)) {
+ if (UseSpecialCache()
+ ? FindCodeInSpecialCache(&code)
+ : FindCodeInCache(&code)) {
+ ASSERT(IsPregenerated() == code->is_pregenerated());
+ return Handle<Code>(code);
+ }
+
+ {
HandleScope scope(isolate);
// Generate the new code.
@@ -121,19 +128,21 @@
RecordCodeGeneration(*new_object, &masm);
FinishCode(new_object);
- // Update the dictionary and the root in Heap.
- Handle<NumberDictionary> dict =
- factory->DictionaryAtNumberPut(
- Handle<NumberDictionary>(heap->code_stubs()),
- GetKey(),
- new_object);
- heap->public_set_code_stubs(*dict);
+ if (UseSpecialCache()) {
+ AddToSpecialCache(new_object);
+ } else {
+ // Update the dictionary and the root in Heap.
+ Handle<UnseededNumberDictionary> dict =
+ factory->DictionaryAtNumberPut(
+ Handle<UnseededNumberDictionary>(heap->code_stubs()),
+ GetKey(),
+ new_object);
+ heap->public_set_code_stubs(*dict);
+ }
code = *new_object;
- Activate(code);
- } else {
- CHECK(IsPregenerated() == code->is_pregenerated());
}
+ Activate(code);
ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
return Handle<Code>(code, isolate);
}
@@ -159,6 +168,32 @@
}
+void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
+ ASSERT(*known_map_ != NULL);
+ Isolate* isolate = new_object->GetIsolate();
+ Factory* factory = isolate->factory();
+ return Map::UpdateCodeCache(known_map_,
+ factory->compare_ic_symbol(),
+ new_object);
+}
+
+
+bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
+ Isolate* isolate = known_map_->GetIsolate();
+ Factory* factory = isolate->factory();
+ Code::Flags flags = Code::ComputeFlags(
+ static_cast<Code::Kind>(GetCodeKind()),
+ UNINITIALIZED);
+ Handle<Object> probe(
+ known_map_->FindInCodeCache(*factory->compare_ic_symbol(), flags));
+ if (probe->IsCode()) {
+ *code_out = Code::cast(*probe);
+ return true;
+ }
+ return false;
+}
+
+
int ICCompareStub::MinorKey() {
return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
}
@@ -184,6 +219,10 @@
case CompareIC::OBJECTS:
GenerateObjects(masm);
break;
+ case CompareIC::KNOWN_OBJECTS:
+ ASSERT(*known_map_ != NULL);
+ GenerateKnownObjects(masm);
+ break;
default:
UNREACHABLE();
}
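
The GetCode changes above add a per-stub "special cache" hook: a stub that opts in (here ICCompareStub in the KNOWN_OBJECTS state) stores its code in the known map's code cache under compare_ic_symbol instead of the shared code_stubs dictionary, and Activate now runs on both the hit and miss paths. A stripped-down sketch of the hook shape, with toy types in place of V8's:

    #include <cstdio>
    #include <map>
    #include <string>

    struct Code { const char* desc; };

    class StubModel {
     public:
      virtual ~StubModel() {}
      Code* GetCode() {
        Code* code = NULL;
        // Consult the stub-specific cache when the stub opts in; the
        // shared dictionary path is omitted here for brevity.
        if (UseSpecialCache() && FindCodeInSpecialCache(&code)) return code;
        code = new Code();  // stands in for generating the stub code
        code->desc = "freshly generated";
        if (UseSpecialCache()) AddToSpecialCache(code);
        return code;
      }
      virtual bool UseSpecialCache() { return false; }
      virtual bool FindCodeInSpecialCache(Code** code_out) { return false; }
      virtual void AddToSpecialCache(Code* code) {}
    };

    // Models ICCompareStub with a known map: one cached code per map key.
    class CompareStubModel : public StubModel {
     public:
      explicit CompareStubModel(const std::string& known_map)
          : known_map_(known_map) {}
      virtual bool UseSpecialCache() { return true; }
      virtual bool FindCodeInSpecialCache(Code** code_out) {
        std::map<std::string, Code*>::iterator it = cache_.find(known_map_);
        if (it == cache_.end()) return false;
        *code_out = it->second;
        return true;
      }
      virtual void AddToSpecialCache(Code* code) { cache_[known_map_] = code; }
     private:
      std::string known_map_;
      static std::map<std::string, Code*> cache_;
    };

    std::map<std::string, Code*> CompareStubModel::cache_;

    int main() {
      CompareStubModel stub("MapA");
      Code* first = stub.GetCode();
      Code* second = stub.GetCode();  // hits the special cache
      printf("same code object: %s\n", first == second ? "yes" : "no");
      return 0;
    }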
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 6bda5da..34da148 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -194,6 +194,17 @@
return UNINITIALIZED;
}
+ // Add the code to a specialized cache, specific to an individual
+ // stub type. Please note, this method must add the code object to a
+ // roots object, otherwise we will remove the code during GC.
+ virtual void AddToSpecialCache(Handle<Code> new_object) { }
+
+ // Find code in a specialized cache, work is delegated to the specific stub.
+ virtual bool FindCodeInSpecialCache(Code** code_out) { return false; }
+
+ // If a stub uses a special cache override this.
+ virtual bool UseSpecialCache() { return false; }
+
// Returns a name for logging/debugging purposes.
SmartArrayPointer<const char> GetName();
virtual void PrintName(StringStream* stream);
@@ -442,12 +453,17 @@
class MathPowStub: public CodeStub {
public:
- MathPowStub() {}
+ enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK};
+
+ explicit MathPowStub(ExponentType exponent_type)
+ : exponent_type_(exponent_type) { }
virtual void Generate(MacroAssembler* masm);
private:
virtual CodeStub::Major MajorKey() { return MathPow; }
- virtual int MinorKey() { return 0; }
+ virtual int MinorKey() { return exponent_type_; }
+
+ ExponentType exponent_type_;
};
@@ -460,6 +476,8 @@
virtual void Generate(MacroAssembler* masm);
+ void set_known_map(Handle<Map> map) { known_map_ = map; }
+
private:
class OpField: public BitField<int, 0, 3> { };
class StateField: public BitField<int, 3, 5> { };
@@ -479,12 +497,18 @@
void GenerateStrings(MacroAssembler* masm);
void GenerateObjects(MacroAssembler* masm);
void GenerateMiss(MacroAssembler* masm);
+ void GenerateKnownObjects(MacroAssembler* masm);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
+ virtual void AddToSpecialCache(Handle<Code> new_object);
+ virtual bool FindCodeInSpecialCache(Code** code_out);
+ virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; }
+
Token::Value op_;
CompareIC::State state_;
+ Handle<Map> known_map_;
};
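
MathPowStub's MinorKey above now encodes the exponent type, so the INTEGER, DOUBLE, TAGGED, and ON_STACK variants are cached as four distinct code objects rather than colliding on a single key; ICCompareStub similarly packs op and state through BitField. A small round-trip model of that bit packing, with field widths mirroring the OpField and StateField declarations in the hunk:

    #include <cassert>
    #include <cstdio>

    // Minimal BitField in the spirit of V8's: a value of <size> bits
    // stored at bit offset <shift> of an integer key.
    template <int shift, int size>
    struct BitFieldModel {
      static int encode(int value) { return value << shift; }
      static int decode(int key) { return (key >> shift) & ((1 << size) - 1); }
    };

    typedef BitFieldModel<0, 3> OpField;     // 3 bits for the token op
    typedef BitFieldModel<3, 5> StateField;  // 5 bits for the IC state

    int main() {
      int minor_key = OpField::encode(4) | StateField::encode(9);
      assert(OpField::decode(minor_key) == 4);
      assert(StateField::decode(minor_key) == 9);
      printf("minor key 0x%x round-trips both fields\n", minor_key);
      return 0;
    }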
diff --git a/src/collection.js b/src/collection.js
index d116126..fcf4d38 100644
--- a/src/collection.js
+++ b/src/collection.js
@@ -47,6 +47,10 @@
function SetAdd(key) {
+ if (!IS_SET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Set.prototype.add', this]);
+ }
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
@@ -55,6 +59,10 @@
function SetHas(key) {
+ if (!IS_SET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Set.prototype.has', this]);
+ }
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
@@ -63,6 +71,10 @@
function SetDelete(key) {
+ if (!IS_SET(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Set.prototype.delete', this]);
+ }
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
@@ -80,6 +92,10 @@
function MapGet(key) {
+ if (!IS_MAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Map.prototype.get', this]);
+ }
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
@@ -88,6 +104,10 @@
function MapSet(key, value) {
+ if (!IS_MAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Map.prototype.set', this]);
+ }
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
@@ -96,6 +116,10 @@
function MapHas(key) {
+ if (!IS_MAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Map.prototype.has', this]);
+ }
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
@@ -104,6 +128,10 @@
function MapDelete(key) {
+ if (!IS_MAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['Map.prototype.delete', this]);
+ }
if (IS_UNDEFINED(key)) {
key = undefined_sentinel;
}
@@ -126,6 +154,10 @@
function WeakMapGet(key) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.get', this]);
+ }
if (!IS_SPEC_OBJECT(key)) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
@@ -134,6 +166,10 @@
function WeakMapSet(key, value) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.set', this]);
+ }
if (!IS_SPEC_OBJECT(key)) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
@@ -142,6 +178,10 @@
function WeakMapHas(key) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.has', this]);
+ }
if (!IS_SPEC_OBJECT(key)) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
@@ -150,6 +190,10 @@
function WeakMapDelete(key) {
+ if (!IS_WEAKMAP(this)) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ['WeakMap.prototype.delete', this]);
+ }
if (!IS_SPEC_OBJECT(key)) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
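
The collection.js hunks above give every Set, Map, and WeakMap method an explicit receiver check, so invoking them on a foreign receiver now throws a TypeError instead of proceeding against the wrong object. An embedder-side sketch of the new observable behavior using the public API of this era; the --harmony-collections flag name is an assumption, since collections were still flag-guarded around this revision:

    #include <v8.h>
    #include <cstdio>
    #include <cstring>

    using namespace v8;

    int main() {
      // Assumed flag name for this era; collections were experimental.
      const char flags[] = "--harmony-collections";
      V8::SetFlagsFromString(flags, static_cast<int>(strlen(flags)));

      HandleScope scope;
      Persistent<Context> context = Context::New();
      Context::Scope context_scope(context);

      TryCatch try_catch;
      // A plain object is not a Set, so the new receiver check fires.
      Handle<Script> script = Script::Compile(
          String::New("Set.prototype.add.call({}, 1)"));
      script->Run();
      if (try_catch.HasCaught()) {
        String::Utf8Value exception(try_catch.Exception());
        printf("caught: %s\n", *exception);  // a TypeError about the receiver
      }

      context.Dispose();
      return 0;
    }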
diff --git a/src/compiler.cc b/src/compiler.cc
index 16ccfa0..362273e 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -398,7 +398,7 @@
FunctionLiteral* lit = info->function();
LiveEditFunctionTracker live_edit_tracker(isolate, lit);
if (!MakeCode(info)) {
- isolate->StackOverflow();
+ if (!isolate->has_pending_exception()) isolate->StackOverflow();
return Handle<SharedFunctionInfo>::null();
}
@@ -628,7 +628,7 @@
// scope info. Please note, that the order of the shared function
// info initialization is important since set_scope_info might
// trigger a GC, causing the ASSERT below to be invalid if the code
- // was flushed. By settting the code object last we avoid this.
+ // was flushed. By setting the code object last we avoid this.
Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
shared->set_scope_info(*scope_info);
shared->set_code(*code);
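
The one-line compiler.cc fix above keeps a genuine pending exception, for example one raised inside MakeCode, from being clobbered by an unconditional StackOverflow when compilation fails. The guard in isolation:

    #include <cstdio>

    struct IsolateModel {
      bool pending;
      bool has_pending_exception() const { return pending; }
      void StackOverflow() {
        pending = true;
        printf("scheduled generic stack-overflow error\n");
      }
    };

    // Only schedule the generic error when compilation failed without
    // leaving a more specific exception behind.
    static void ReportCompileFailure(IsolateModel* isolate) {
      if (!isolate->has_pending_exception()) isolate->StackOverflow();
    }

    int main() {
      IsolateModel isolate = { true };  // a real exception is pending
      ReportCompileFailure(&isolate);   // must not overwrite it
      printf("pending exception preserved: %d\n", isolate.pending ? 1 : 0);
      return 0;
    }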
diff --git a/src/contexts.h b/src/contexts.h
index 10ef33d..eec86f1 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -104,6 +104,7 @@
V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
+ V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(JSON_OBJECT_INDEX, JSObject, json_object) \
@@ -244,6 +245,7 @@
STRING_FUNCTION_INDEX,
STRING_FUNCTION_PROTOTYPE_MAP_INDEX,
OBJECT_FUNCTION_INDEX,
+ INTERNAL_ARRAY_FUNCTION_INDEX,
ARRAY_FUNCTION_INDEX,
DATE_FUNCTION_INDEX,
JSON_OBJECT_INDEX,
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index d74c034..953952a 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -39,13 +39,14 @@
namespace v8 {
namespace internal {
-static const int kEventsBufferSize = 256*KB;
-static const int kTickSamplesBufferChunkSize = 64*KB;
+static const int kEventsBufferSize = 256 * KB;
+static const int kTickSamplesBufferChunkSize = 64 * KB;
static const int kTickSamplesBufferChunksCount = 16;
+static const int kProfilerStackSize = 32 * KB;
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
- : Thread("v8:ProfEvntProc"),
+ : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
running_(true),
ticks_buffer_(sizeof(TickSampleEventRecord),
@@ -493,7 +494,7 @@
NoBarrier_Store(&is_profiling_, true);
processor_->Start();
// Enumerate stuff we already have in the heap.
- if (isolate->heap()->HasBeenSetup()) {
+ if (isolate->heap()->HasBeenSetUp()) {
if (!FLAG_prof_browser_mode) {
bool saved_log_code_flag = FLAG_log_code;
FLAG_log_code = true;
@@ -562,7 +563,7 @@
}
-void CpuProfiler::Setup() {
+void CpuProfiler::SetUp() {
Isolate* isolate = Isolate::Current();
if (isolate->cpu_profiler() == NULL) {
isolate->set_cpu_profiler(new CpuProfiler());
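
The profiler thread above is now created with an explicit 32 KB stack via Thread::Options rather than the platform default. A plain-POSIX sketch of what an explicit stack size amounts to underneath such a wrapper; the event loop body is a placeholder:

    #include <pthread.h>
    #include <cstdio>

    static void* ProfilerEventsLoop(void* arg) {
      (void) arg;
      // Drain tick samples and code events here; a small fixed stack
      // suffices because this loop keeps its frames shallow.
      return NULL;
    }

    int main() {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      // Mirrors kProfilerStackSize = 32 * KB from the hunk above; POSIX
      // may round this up to PTHREAD_STACK_MIN on some systems.
      pthread_attr_setstacksize(&attr, 32 * 1024);
      pthread_t thread;
      if (pthread_create(&thread, &attr, ProfilerEventsLoop, NULL) != 0) {
        perror("pthread_create");
        return 1;
      }
      pthread_join(thread, NULL);
      pthread_attr_destroy(&attr);
      printf("profiler loop ran on a 32 KB stack\n");
      return 0;
    }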
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index a71c0e0..3f4fec5 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -204,7 +204,7 @@
// TODO(isolates): isolatify this class.
class CpuProfiler {
public:
- static void Setup();
+ static void SetUp();
static void TearDown();
static void StartProfiling(const char* title);
@@ -230,11 +230,11 @@
Code* code, String* name);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
- SharedFunctionInfo *shared,
+ SharedFunctionInfo* shared,
String* name);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
- SharedFunctionInfo *shared,
+ SharedFunctionInfo* shared,
String* source, int line);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count);
diff --git a/src/cpu.h b/src/cpu.h
index 2525484..247af71 100644
--- a/src/cpu.h
+++ b/src/cpu.h
@@ -53,7 +53,7 @@
class CPU : public AllStatic {
public:
// Initializes the cpu architecture support. Called once at VM startup.
- static void Setup();
+ static void SetUp();
static bool SupportsCrankshaft();
diff --git a/src/d8-debug.cc b/src/d8-debug.cc
index 8fbc876..de0faa8 100644
--- a/src/d8-debug.cc
+++ b/src/d8-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -169,7 +169,7 @@
bool ok;
// Make sure that socket support is initialized.
- ok = i::Socket::Setup();
+ ok = i::Socket::SetUp();
if (!ok) {
printf("Unable to initialize socket support %d\n", i::Socket::LastError());
return;
@@ -310,9 +310,7 @@
Handle<Value> request =
Shell::DebugCommandToJSONRequest(String::New(command));
if (try_catch.HasCaught()) {
- v8::String::Utf8Value exception(try_catch.Exception());
- const char* exception_string = Shell::ToCString(exception);
- printf("%s\n", exception_string);
+ Shell::ReportException(&try_catch);
PrintPrompt();
return;
}
diff --git a/src/d8-posix.cc b/src/d8-posix.cc
index 289c3b0..8a278e4 100644
--- a/src/d8-posix.cc
+++ b/src/d8-posix.cc
@@ -366,7 +366,8 @@
// We're disabling usage of waitid in Mac OS X because it doesn't work for us:
// a parent process hangs waiting while a child process is already a zombie.
// See http://code.google.com/p/v8/issues/detail?id=401.
-#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__)
+#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__) \
+ && !defined(__NetBSD__)
#if !defined(__FreeBSD__)
#define HAS_WAITID 1
#endif
diff --git a/src/d8-readline.cc b/src/d8-readline.cc
index 71be933..ed7721c 100644
--- a/src/d8-readline.cc
+++ b/src/d8-readline.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -49,10 +49,14 @@
class ReadLineEditor: public LineEditor {
public:
ReadLineEditor() : LineEditor(LineEditor::READLINE, "readline") { }
- virtual i::SmartArrayPointer<char> Prompt(const char* prompt);
+ virtual Handle<String> Prompt(const char* prompt);
virtual bool Open();
virtual bool Close();
virtual void AddHistory(const char* str);
+
+ static const char* kHistoryFileName;
+ static const int kMaxHistoryEntries;
+
private:
static char** AttemptedCompletion(const char* text, int start, int end);
static char* CompletionGenerator(const char* text, int state);
@@ -66,25 +70,38 @@
'\0'};
+const char* ReadLineEditor::kHistoryFileName = ".d8_history";
+const int ReadLineEditor::kMaxHistoryEntries = 1000;
+
+
bool ReadLineEditor::Open() {
rl_initialize();
rl_attempted_completion_function = AttemptedCompletion;
rl_completer_word_break_characters = kWordBreakCharacters;
rl_bind_key('\t', rl_complete);
using_history();
- stifle_history(Shell::kMaxHistoryEntries);
- return read_history(Shell::kHistoryFileName) == 0;
+ stifle_history(kMaxHistoryEntries);
+ return read_history(kHistoryFileName) == 0;
}
bool ReadLineEditor::Close() {
- return write_history(Shell::kHistoryFileName) == 0;
+ return write_history(kHistoryFileName) == 0;
}
-i::SmartArrayPointer<char> ReadLineEditor::Prompt(const char* prompt) {
- char* result = readline(prompt);
- return i::SmartArrayPointer<char>(result);
+Handle<String> ReadLineEditor::Prompt(const char* prompt) {
+ char* result = NULL;
+ { // Release lock for blocking input.
+ Unlocker unlock(Isolate::GetCurrent());
+ result = readline(prompt);
+ }
+ if (result != NULL) {
+ AddHistory(result);
+ } else {
+ return Handle<String>();
+ }
+ return String::New(result);
}
@@ -118,10 +135,10 @@
static unsigned current_index;
static Persistent<Array> current_completions;
if (state == 0) {
- i::SmartArrayPointer<char> full_text(i::StrNDup(rl_line_buffer, rl_point));
HandleScope scope;
+ Local<String> full_text = String::New(rl_line_buffer, rl_point);
Handle<Array> completions =
- Shell::GetCompletions(String::New(text), String::New(*full_text));
+ Shell::GetCompletions(String::New(text), full_text);
current_completions = Persistent<Array>::New(completions);
current_index = 0;
}
diff --git a/src/d8.cc b/src/d8.cc
index 9eccc7e..e555c15 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -66,11 +66,7 @@
namespace v8 {
-
-#ifndef V8_SHARED
LineEditor *LineEditor::first_ = NULL;
-const char* Shell::kHistoryFileName = ".d8_history";
-const int Shell::kMaxHistoryEntries = 1000;
LineEditor::LineEditor(Type type, const char* name)
@@ -96,36 +92,37 @@
class DumbLineEditor: public LineEditor {
public:
DumbLineEditor() : LineEditor(LineEditor::DUMB, "dumb") { }
- virtual i::SmartArrayPointer<char> Prompt(const char* prompt);
+ virtual Handle<String> Prompt(const char* prompt);
};
static DumbLineEditor dumb_line_editor;
-i::SmartArrayPointer<char> DumbLineEditor::Prompt(const char* prompt) {
- static const int kBufferSize = 256;
- char buffer[kBufferSize];
+Handle<String> DumbLineEditor::Prompt(const char* prompt) {
printf("%s", prompt);
- char* str = fgets(buffer, kBufferSize, stdin);
- return i::SmartArrayPointer<char>(str ? i::StrDup(str) : str);
+ return Shell::ReadFromStdin();
}
+#ifndef V8_SHARED
CounterMap* Shell::counter_map_;
i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
Persistent<Context> Shell::utility_context_;
-LineEditor* Shell::console = NULL;
#endif // V8_SHARED
+LineEditor* Shell::console = NULL;
Persistent<Context> Shell::evaluation_context_;
ShellOptions Shell::options;
const char* Shell::kPrompt = "d8> ";
+const int MB = 1024 * 1024;
+
+
#ifndef V8_SHARED
bool CounterMap::Match(void* key1, void* key2) {
const char* name1 = reinterpret_cast<const char*>(key1);
@@ -238,7 +235,7 @@
}
-Handle<Value> Shell::ReadLine(const Arguments& args) {
+Handle<String> Shell::ReadFromStdin() {
static const int kBufferSize = 256;
char buffer[kBufferSize];
Handle<String> accumulator = String::New("");
@@ -247,7 +244,12 @@
// Continue reading if the line ends with an escape '\\' or the line has
// not been fully read into the buffer yet (does not end with '\n').
// If fgets gets an error, just give up.
- if (fgets(buffer, kBufferSize, stdin) == NULL) return Null();
+ char* input = NULL;
+ { // Release lock for blocking input.
+ Unlocker unlock(Isolate::GetCurrent());
+ input = fgets(buffer, kBufferSize, stdin);
+ }
+ if (input == NULL) return Handle<String>();
length = static_cast<int>(strlen(buffer));
if (length == 0) {
return accumulator;
@@ -281,51 +283,161 @@
return Undefined();
}
-
-Handle<Value> Shell::CreateExternalArray(const Arguments& args,
- ExternalArrayType type,
- size_t element_size) {
- ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
- element_size == 8);
- if (args.Length() != 1) {
- return ThrowException(
- String::New("Array constructor needs one parameter."));
+static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
+ if (value_in->IsUint32()) {
+ return value_in->Uint32Value();
}
+
+ Local<Value> number = value_in->ToNumber();
+ if (try_catch->HasCaught()) return 0;
+
+ ASSERT(number->IsNumber());
+ Local<Int32> int32 = number->ToInt32();
+ if (try_catch->HasCaught() || int32.IsEmpty()) return 0;
+
+ int32_t raw_value = int32->Int32Value();
+ if (try_catch->HasCaught()) return 0;
+
+ if (raw_value < 0) {
+ ThrowException(String::New("Array length must not be negative."));
+ return 0;
+ }
+
static const int kMaxLength = 0x3fffffff;
#ifndef V8_SHARED
ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
#endif // V8_SHARED
- size_t length = 0;
- if (args[0]->IsUint32()) {
- length = args[0]->Uint32Value();
- } else {
- Local<Number> number = args[0]->ToNumber();
- if (number.IsEmpty() || !number->IsNumber()) {
- return ThrowException(String::New("Array length must be a number."));
- }
- int32_t raw_length = number->ToInt32()->Int32Value();
- if (raw_length < 0) {
- return ThrowException(String::New("Array length must not be negative."));
- }
- if (raw_length > static_cast<int32_t>(kMaxLength)) {
- return ThrowException(
- String::New("Array length exceeds maximum length."));
- }
- length = static_cast<size_t>(raw_length);
+ if (raw_value > static_cast<int32_t>(kMaxLength)) {
+ ThrowException(
+ String::New("Array length exceeds maximum length."));
}
- if (length > static_cast<size_t>(kMaxLength)) {
- return ThrowException(String::New("Array length exceeds maximum length."));
+ return static_cast<size_t>(raw_value);
+}
+
+
+const char kArrayBufferReferencePropName[] = "_is_array_buffer_";
+const char kArrayBufferMarkerPropName[] = "_array_buffer_ref_";
+
+
+Handle<Value> Shell::CreateExternalArray(const Arguments& args,
+ ExternalArrayType type,
+ size_t element_size) {
+ TryCatch try_catch;
+ bool is_array_buffer_construct = element_size == 0;
+ if (is_array_buffer_construct) {
+ type = v8::kExternalByteArray;
+ element_size = 1;
}
- void* data = calloc(length, element_size);
- if (data == NULL) {
- return ThrowException(String::New("Memory allocation failed."));
+ ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
+ element_size == 8);
+ if (args.Length() == 0) {
+ return ThrowException(
+ String::New("Array constructor must have at least one "
+ "parameter."));
}
+ bool first_arg_is_array_buffer =
+ args[0]->IsObject() &&
+ args[0]->ToObject()->Get(
+ String::New(kArrayBufferMarkerPropName))->IsTrue();
+ // Currently, only the following constructors are supported:
+ // TypedArray(unsigned long length)
+ // TypedArray(ArrayBuffer buffer,
+ // optional unsigned long byteOffset,
+ // optional unsigned long length)
+ if (args.Length() > 3) {
+ return ThrowException(
+ String::New("Array constructor from ArrayBuffer must "
+ "have 1-3 parameters."));
+ }
+
+ Local<Value> length_value = (args.Length() < 3)
+ ? (first_arg_is_array_buffer
+ ? args[0]->ToObject()->Get(String::New("length"))
+ : args[0])
+ : args[2];
+ size_t length = convertToUint(length_value, &try_catch);
+ if (try_catch.HasCaught()) return try_catch.Exception();
+
+ void* data = NULL;
+ size_t offset = 0;
+
Handle<Object> array = Object::New();
+ if (first_arg_is_array_buffer) {
+ Handle<Object> derived_from = args[0]->ToObject();
+ data = derived_from->GetIndexedPropertiesExternalArrayData();
+
+ size_t array_buffer_length = convertToUint(
+ derived_from->Get(String::New("length")),
+ &try_catch);
+ if (try_catch.HasCaught()) return try_catch.Exception();
+
+ if (data == NULL && array_buffer_length != 0) {
+ return ThrowException(
+ String::New("ArrayBuffer doesn't have data"));
+ }
+
+ if (args.Length() > 1) {
+ offset = convertToUint(args[1], &try_catch);
+ if (try_catch.HasCaught()) return try_catch.Exception();
+
+ // The given byteOffset must be a multiple of the element size of the
+ // specific type, otherwise an exception is raised.
+ if (offset % element_size != 0) {
+ return ThrowException(
+ String::New("offset must be multiple of element_size"));
+ }
+ }
+
+ if (offset > array_buffer_length) {
+ return ThrowException(
+ String::New("byteOffset must be less than ArrayBuffer length."));
+ }
+
+ if (args.Length() == 2) {
+ // If length is not explicitly specified, the length of the ArrayBuffer
+ // minus the byteOffset must be a multiple of the element size of the
+ // specific type, or an exception is raised.
+ length = array_buffer_length - offset;
+ }
+
+ if (args.Length() != 3) {
+ if (length % element_size != 0) {
+ return ThrowException(
+ String::New("ArrayBuffer length minus the byteOffset must be a "
+ "multiple of the element size"));
+ }
+ length /= element_size;
+ }
+
+ // If a given byteOffset and length references an area beyond the end of
+ // the ArrayBuffer an exception is raised.
+ if (offset + (length * element_size) > array_buffer_length) {
+ return ThrowException(
+ String::New("length references an area beyond the end of the "
+ "ArrayBuffer"));
+ }
+
+ // Hold a reference to the ArrayBuffer so its buffer doesn't get collected.
+ array->Set(String::New(kArrayBufferReferencePropName), args[0], ReadOnly);
+ }
+
+ if (is_array_buffer_construct) {
+ array->Set(String::New(kArrayBufferMarkerPropName), True(), ReadOnly);
+ }
+
Persistent<Object> persistent_array = Persistent<Object>::New(array);
persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
persistent_array.MarkIndependent();
- array->SetIndexedPropertiesToExternalArrayData(data, type,
- static_cast<int>(length));
+ if (data == NULL && length != 0) {
+ data = calloc(length, element_size);
+ if (data == NULL) {
+ return ThrowException(String::New("Memory allocation failed."));
+ }
+ }
+
+ array->SetIndexedPropertiesToExternalArrayData(
+ reinterpret_cast<uint8_t*>(data) + offset, type,
+ static_cast<int>(length));
array->Set(String::New("length"),
Int32::New(static_cast<int32_t>(length)), ReadOnly);
array->Set(String::New("BYTES_PER_ELEMENT"),
@@ -335,11 +447,22 @@
void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
- free(data);
+ HandleScope scope;
+ Handle<String> prop_name = String::New(kArrayBufferReferencePropName);
+ Handle<Object> converted_object = object->ToObject();
+ Local<Value> prop_value = converted_object->Get(prop_name);
+ if (data != NULL && !prop_value->IsObject()) {
+ free(data);
+ }
object.Dispose();
}
+Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
+ return CreateExternalArray(args, v8::kExternalByteArray, 0);
+}
+
+
Handle<Value> Shell::Int8Array(const Arguments& args) {
return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
}
@@ -411,6 +534,10 @@
void Shell::ReportException(v8::TryCatch* try_catch) {
HandleScope handle_scope;
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+ bool enter_context = !Context::InContext();
+ if (enter_context) utility_context_->Enter();
+#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
v8::String::Utf8Value exception(try_catch->Exception());
const char* exception_string = ToCString(exception);
Handle<Message> message = try_catch->Message();
@@ -445,6 +572,9 @@
}
}
printf("\n");
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+ if (enter_context) utility_context_->Exit();
+#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
}
@@ -482,6 +612,12 @@
Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
return val;
}
+
+
+void Shell::DispatchDebugMessages() {
+ v8::Context::Scope scope(Shell::evaluation_context_);
+ v8::Debug::ProcessDebugMessages();
+}
#endif // ENABLE_DEBUGGER_SUPPORT
#endif // V8_SHARED
@@ -670,6 +806,8 @@
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("write"), FunctionTemplate::New(Write));
global_template->Set(String::New("read"), FunctionTemplate::New(Read));
+ global_template->Set(String::New("readbinary"),
+ FunctionTemplate::New(ReadBinary));
global_template->Set(String::New("readline"),
FunctionTemplate::New(ReadLine));
global_template->Set(String::New("load"), FunctionTemplate::New(Load));
@@ -681,6 +819,8 @@
FunctionTemplate::New(DisableProfiler));
// Bind the handlers for external arrays.
+ global_template->Set(String::New("ArrayBuffer"),
+ FunctionTemplate::New(ArrayBuffer));
global_template->Set(String::New("Int8Array"),
FunctionTemplate::New(Int8Array));
global_template->Set(String::New("Uint8Array"),
@@ -749,6 +889,7 @@
// Start the debugger agent if requested.
if (i::FLAG_debugger_agent) {
v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
+ v8::Debug::SetDebugMessageDispatchHandler(DispatchDebugMessages, true);
}
#endif // ENABLE_DEBUGGER_SUPPORT
#endif // V8_SHARED
@@ -885,6 +1026,23 @@
}
+Handle<Value> Shell::ReadBinary(const Arguments& args) {
+ String::Utf8Value filename(args[0]);
+ int size;
+ if (*filename == NULL) {
+ return ThrowException(String::New("Error loading file"));
+ }
+ char* chars = ReadChars(*filename, &size);
+ if (chars == NULL) {
+ return ThrowException(String::New("Error reading file"));
+ }
+ // We skip checking the string for UTF8 characters and use it raw as
+ // backing store for the external string with 8-bit characters.
+ BinaryResource* resource = new BinaryResource(chars, size);
+ return String::NewExternal(resource);
+}
+
+
#ifndef V8_SHARED
static char* ReadToken(char* data, char token) {
char* next = i::OS::StrChr(data, token);
@@ -924,28 +1082,15 @@
Context::Scope context_scope(evaluation_context_);
HandleScope outer_scope;
Handle<String> name = String::New("(d8)");
-#ifndef V8_SHARED
console = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
console->Open();
while (true) {
- i::SmartArrayPointer<char> input = console->Prompt(Shell::kPrompt);
- if (input.is_empty()) break;
- console->AddHistory(*input);
HandleScope inner_scope;
- ExecuteString(String::New(*input), name, true, true);
+ Handle<String> input = console->Prompt(Shell::kPrompt);
+ if (input.IsEmpty()) break;
+ ExecuteString(input, name, true, true);
}
-#else
- printf("V8 version %s [D8 light using shared library]\n", V8::GetVersion());
- static const int kBufferSize = 256;
- while (true) {
- char buffer[kBufferSize];
- printf("%s", Shell::kPrompt);
- if (fgets(buffer, kBufferSize, stdin) == NULL) break;
- HandleScope inner_scope;
- ExecuteString(String::New(buffer), name, true, true);
- }
-#endif // V8_SHARED
printf("\n");
}
@@ -1068,14 +1213,11 @@
#ifndef V8_SHARED
i::Thread::Options SourceGroup::GetThreadOptions() {
- i::Thread::Options options;
- options.name = "IsolateThread";
// On some systems (OSX 10.6) the stack size default is 0.5Mb or less
// which is not enough to parse the big literal expressions used in tests.
// The stack size should be at least StackGuard::kLimitSize + some
- // OS-specific padding for thread startup code.
- options.stack_size = 2 << 20; // 2 Mb seems to be enough
- return options;
+ // OS-specific padding for thread startup code. 2Mbytes seems to be enough.
+ return i::Thread::Options("IsolateThread", 2 * MB);
}
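
The rewritten CreateExternalArray above lets d8 emulate TypedArray(buffer, byteOffset, length) views over its ArrayBuffer stand-in, enforcing alignment and bounds before aliasing the buffer's storage. The validation rules, condensed into a standalone checker whose error strings paraphrase the ones in the hunk:

    #include <cstddef>
    #include <cstdio>

    // Returns NULL on success, otherwise a static error message. Mirrors
    // the byteOffset/length checks in Shell::CreateExternalArray.
    static const char* CheckView(size_t buffer_length, size_t element_size,
                                 size_t byte_offset, bool explicit_length,
                                 size_t* length_out) {
      if (byte_offset % element_size != 0)
        return "offset must be multiple of element_size";
      if (byte_offset > buffer_length)
        return "byteOffset must be less than ArrayBuffer length";
      size_t length = *length_out;
      if (!explicit_length) {
        size_t remaining = buffer_length - byte_offset;
        if (remaining % element_size != 0)
          return "remaining bytes must be a multiple of the element size";
        length = remaining / element_size;
      }
      if (byte_offset + length * element_size > buffer_length)
        return "length references an area beyond the end of the ArrayBuffer";
      *length_out = length;
      return NULL;
    }

    int main() {
      size_t length = 0;
      const char* err = CheckView(16, 4, 4, false, &length);  // view at +4
      printf("%s, length=%d\n", err ? err : "ok",
             static_cast<int>(length));  // prints: ok, length=3
      return 0;
    }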
diff --git a/src/d8.gyp b/src/d8.gyp
index bdc23a2..3b92d03 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -1,4 +1,4 @@
-# Copyright 2010 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@@ -64,8 +64,8 @@
'libraries': [ '-lreadline', ],
'sources': [ 'd8-readline.cc' ],
}],
- [ '(OS=="linux" or OS=="mac" or OS=="freebsd" \
- or OS=="openbsd" or OS=="solaris" or OS=="android")', {
+ ['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
+ or OS=="openbsd" or OS=="solaris" or OS=="android")', {
'sources': [ 'd8-posix.cc', ]
}],
[ 'OS=="win"', {
diff --git a/src/d8.h b/src/d8.h
index 15d8d5d..c872f90 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -116,14 +116,13 @@
#endif // V8_SHARED
-#ifndef V8_SHARED
class LineEditor {
public:
enum Type { DUMB = 0, READLINE = 1 };
LineEditor(Type type, const char* name);
virtual ~LineEditor() { }
- virtual i::SmartArrayPointer<char> Prompt(const char* prompt) = 0;
+ virtual Handle<String> Prompt(const char* prompt) = 0;
virtual bool Open() { return true; }
virtual bool Close() { return true; }
virtual void AddHistory(const char* str) { }
@@ -136,7 +135,6 @@
LineEditor* next_;
static LineEditor* first_;
};
-#endif // V8_SHARED
class SourceGroup {
@@ -197,6 +195,27 @@
};
+class BinaryResource : public v8::String::ExternalAsciiStringResource {
+ public:
+ BinaryResource(const char* string, int length)
+ : data_(string),
+ length_(length) { }
+
+ ~BinaryResource() {
+ delete[] data_;
+ data_ = NULL;
+ length_ = 0;
+ }
+
+ virtual const char* data() const { return data_; }
+ virtual size_t length() const { return length_; }
+
+ private:
+ const char* data_;
+ size_t length_;
+};
+
+
class ShellOptions {
public:
ShellOptions() :
@@ -268,12 +287,13 @@
size_t buckets);
static void AddHistogramSample(void* histogram, int sample);
static void MapCounters(const char* name);
-#endif // V8_SHARED
#ifdef ENABLE_DEBUGGER_SUPPORT
static Handle<Object> DebugMessageDetails(Handle<String> message);
static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
-#endif
+ static void DispatchDebugMessages();
+#endif // ENABLE_DEBUGGER_SUPPORT
+#endif // V8_SHARED
#ifdef WIN32
#undef Yield
@@ -287,8 +307,13 @@
static Handle<Value> EnableProfiler(const Arguments& args);
static Handle<Value> DisableProfiler(const Arguments& args);
static Handle<Value> Read(const Arguments& args);
- static Handle<Value> ReadLine(const Arguments& args);
+ static Handle<Value> ReadBinary(const Arguments& args);
+ static Handle<String> ReadFromStdin();
+ static Handle<Value> ReadLine(const Arguments& args) {
+ return ReadFromStdin();
+ }
static Handle<Value> Load(const Arguments& args);
+ static Handle<Value> ArrayBuffer(const Arguments& args);
static Handle<Value> Int8Array(const Arguments& args);
static Handle<Value> Uint8Array(const Arguments& args);
static Handle<Value> Int16Array(const Arguments& args);
@@ -334,11 +359,8 @@
static Handle<Value> RemoveDirectory(const Arguments& args);
static void AddOSMethods(Handle<ObjectTemplate> os_template);
-#ifndef V8_SHARED
- static const char* kHistoryFileName;
- static const int kMaxHistoryEntries;
+
static LineEditor* console;
-#endif // V8_SHARED
static const char* kPrompt;
static ShellOptions options;
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
index 591d0b3..c30afa8 100644
--- a/src/debug-agent.cc
+++ b/src/debug-agent.cc
@@ -229,8 +229,6 @@
const char* const DebuggerAgentUtil::kContentLength = "Content-Length";
-const int DebuggerAgentUtil::kContentLengthSize =
- StrLength(kContentLength);
SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
diff --git a/src/debug-agent.h b/src/debug-agent.h
index a07fb0f..6115190 100644
--- a/src/debug-agent.h
+++ b/src/debug-agent.h
@@ -115,7 +115,6 @@
class DebuggerAgentUtil {
public:
static const char* const kContentLength;
- static const int kContentLengthSize;
static SmartArrayPointer<char> ReceiveMessage(const Socket* conn);
static bool SendConnectMessage(const Socket* conn,
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index 8cbe0b3..120a297 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -1547,7 +1547,7 @@
}
}
- // Setup the VM for stepping.
+ // Set up the VM for stepping.
this.exec_state_.prepareStep(action, count);
}
diff --git a/src/debug.cc b/src/debug.cc
index c654dfb..01c4dba 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -682,7 +682,7 @@
}
-void Debug::Setup(bool create_heap_objects) {
+void Debug::SetUp(bool create_heap_objects) {
ThreadInit();
if (create_heap_objects) {
// Get code to handle debug break on return.
@@ -827,8 +827,8 @@
Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate_,
- SetProperty(global, key, Handle<Object>(global->builtins()),
- NONE, kNonStrictMode),
+ JSReceiver::SetProperty(global, key, Handle<Object>(global->builtins()),
+ NONE, kNonStrictMode),
false);
// Compile the JavaScript for the debugger in the debugger context.
@@ -1146,7 +1146,7 @@
Handle<DebugInfo> debug_info = GetDebugInfo(shared);
// Source positions start with zero.
- ASSERT(source_position >= 0);
+ ASSERT(*source_position >= 0);
// Find the break point and change it.
BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
@@ -1213,7 +1213,7 @@
void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
PrepareForBreakPoints();
- // Make sure the function has setup the debug info.
+ // Make sure the function has set up the debug info.
if (!EnsureDebugInfo(shared)) {
// Return if we failed to retrieve the debug info.
return;
@@ -1758,6 +1758,135 @@
}
+static void CollectActiveFunctionsFromThread(
+ Isolate* isolate,
+ ThreadLocalTop* top,
+ List<Handle<JSFunction> >* active_functions,
+ Object* active_code_marker) {
+ // Find all non-optimized code functions with activation frames
+ // on the stack. This includes functions which have optimized
+ // activations (including inlined functions) on the stack as the
+ // non-optimized code is needed for the lazy deoptimization.
+ for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+ JavaScriptFrame* frame = it.frame();
+ if (frame->is_optimized()) {
+ List<JSFunction*> functions(Compiler::kMaxInliningLevels + 1);
+ frame->GetFunctions(&functions);
+ for (int i = 0; i < functions.length(); i++) {
+ JSFunction* function = functions[i];
+ active_functions->Add(Handle<JSFunction>(function));
+ function->shared()->code()->set_gc_metadata(active_code_marker);
+ }
+ } else if (frame->function()->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(frame->function());
+ ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
+ active_functions->Add(Handle<JSFunction>(function));
+ function->shared()->code()->set_gc_metadata(active_code_marker);
+ }
+ }
+}
+
+
+static void RedirectActivationsToRecompiledCodeOnThread(
+ Isolate* isolate,
+ ThreadLocalTop* top) {
+ for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+ JavaScriptFrame* frame = it.frame();
+
+ if (frame->is_optimized() || !frame->function()->IsJSFunction()) continue;
+
+ JSFunction* function = JSFunction::cast(frame->function());
+
+ ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
+
+ Handle<Code> frame_code(frame->LookupCode());
+ if (frame_code->has_debug_break_slots()) continue;
+
+ Handle<Code> new_code(function->shared()->code());
+ if (new_code->kind() != Code::FUNCTION ||
+ !new_code->has_debug_break_slots()) {
+ continue;
+ }
+
+ intptr_t delta = frame->pc() - frame_code->instruction_start();
+ int debug_break_slot_count = 0;
+ int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
+ for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
+ // Check if the pc in the new code with debug break
+ // slots is before this slot.
+ RelocInfo* info = it.rinfo();
+ int debug_break_slot_bytes =
+ debug_break_slot_count * Assembler::kDebugBreakSlotLength;
+ intptr_t new_delta =
+ info->pc() -
+ new_code->instruction_start() -
+ debug_break_slot_bytes;
+ if (new_delta > delta) {
+ break;
+ }
+
+ // Passed a debug break slot in the full code with debug
+ // break slots.
+ debug_break_slot_count++;
+ }
+ int debug_break_slot_bytes =
+ debug_break_slot_count * Assembler::kDebugBreakSlotLength;
+ if (FLAG_trace_deopt) {
+ PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
+ "with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
+ "for debugging, "
+ "changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n",
+ reinterpret_cast<intptr_t>(
+ frame_code->instruction_start()),
+ reinterpret_cast<intptr_t>(
+ frame_code->instruction_start()) +
+ frame_code->instruction_size(),
+ frame_code->instruction_size(),
+ reinterpret_cast<intptr_t>(new_code->instruction_start()),
+ reinterpret_cast<intptr_t>(new_code->instruction_start()) +
+ new_code->instruction_size(),
+ new_code->instruction_size(),
+ reinterpret_cast<intptr_t>(frame->pc()),
+ reinterpret_cast<intptr_t>(new_code->instruction_start()) +
+ delta + debug_break_slot_bytes);
+ }
+
+ // Patch the return address to return into the code with
+ // debug break slots.
+ frame->set_pc(
+ new_code->instruction_start() + delta + debug_break_slot_bytes);
+ }
+}
+
+
+class ActiveFunctionsCollector : public ThreadVisitor {
+ public:
+ explicit ActiveFunctionsCollector(List<Handle<JSFunction> >* active_functions,
+ Object* active_code_marker)
+ : active_functions_(active_functions),
+ active_code_marker_(active_code_marker) { }
+
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+ CollectActiveFunctionsFromThread(isolate,
+ top,
+ active_functions_,
+ active_code_marker_);
+ }
+
+ private:
+ List<Handle<JSFunction> >* active_functions_;
+ Object* active_code_marker_;
+};
+
+
+class ActiveFunctionsRedirector : public ThreadVisitor {
+ public:
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+ RedirectActivationsToRecompiledCodeOnThread(isolate, top);
+ }
+};
+
+
void Debug::PrepareForBreakPoints() {
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
@@ -1776,70 +1905,59 @@
// debug break slots.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
- // Ensure no GC in this scope as we are comparing raw pointer
- // values and performing a heap iteration.
+ // Ensure no GC in this scope as we are going to use gc_metadata
+ // field in the Code object to mark active functions.
AssertNoAllocation no_allocation;
- // Find all non-optimized code functions with activation frames
- // on the stack. This includes functions which have optimized
- // activations (including inlined functions) on the stack as the
- // non-optimized code is needed for the lazy deoptimization.
- for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->is_optimized()) {
- List<JSFunction*> functions(Compiler::kMaxInliningLevels + 1);
- frame->GetFunctions(&functions);
- for (int i = 0; i < functions.length(); i++) {
- if (!functions[i]->shared()->code()->has_debug_break_slots()) {
- active_functions.Add(Handle<JSFunction>(functions[i]));
- }
- }
- } else if (frame->function()->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(frame->function());
- if (function->code()->kind() == Code::FUNCTION &&
- !function->code()->has_debug_break_slots()) {
- active_functions.Add(Handle<JSFunction>(function));
- }
- }
- }
+ Object* active_code_marker = isolate_->heap()->the_hole_value();
- // Sort the functions on the object pointer value to prepare for
- // the binary search below.
- active_functions.Sort(HandleObjectPointerCompare<JSFunction>);
+ CollectActiveFunctionsFromThread(isolate_,
+ isolate_->thread_local_top(),
+ &active_functions,
+ active_code_marker);
+ ActiveFunctionsCollector active_functions_collector(&active_functions,
+ active_code_marker);
+ isolate_->thread_manager()->IterateArchivedThreads(
+ &active_functions_collector);
- // Scan the heap for all non-optimized functions which has no
- // debug break slots.
+ // Scan the heap for all non-optimized functions which have no
+ // debug break slots and are not active or inlined into an active
+ // function and mark them for lazy compilation.
HeapIterator iterator;
HeapObject* obj = NULL;
while (((obj = iterator.next()) != NULL)) {
if (obj->IsJSFunction()) {
JSFunction* function = JSFunction::cast(obj);
- if (function->shared()->allows_lazy_compilation() &&
- function->shared()->script()->IsScript() &&
+ SharedFunctionInfo* shared = function->shared();
+ if (shared->allows_lazy_compilation() &&
+ shared->script()->IsScript() &&
function->code()->kind() == Code::FUNCTION &&
- !function->code()->has_debug_break_slots()) {
- bool has_activation =
- SortedListBSearch<Handle<JSFunction> >(
- active_functions,
- Handle<JSFunction>(function),
- HandleObjectPointerCompare<JSFunction>) != -1;
- if (!has_activation) {
- function->set_code(*lazy_compile);
- function->shared()->set_code(*lazy_compile);
- }
+ !function->code()->has_debug_break_slots() &&
+ shared->code()->gc_metadata() != active_code_marker) {
+ function->set_code(*lazy_compile);
+ function->shared()->set_code(*lazy_compile);
}
}
}
- }
- // Now the non-GC scope is left, and the sorting of the functions
- // in active_function is not ensured any more. The code below does
- // not rely on it.
+ // Clear gc_metadata field.
+ for (int i = 0; i < active_functions.length(); i++) {
+ Handle<JSFunction> function = active_functions[i];
+ function->shared()->code()->set_gc_metadata(Smi::FromInt(0));
+ }
+ }
// Now recompile all functions with activation frames and
// patch the return address to run in the new compiled code.
for (int i = 0; i < active_functions.length(); i++) {
Handle<JSFunction> function = active_functions[i];
+
+ if (function->code()->kind() == Code::FUNCTION &&
+ function->code()->has_debug_break_slots()) {
+ // Nothing to do. Function code already had debug break slots.
+ continue;
+ }
+
Handle<SharedFunctionInfo> shared(function->shared());
// If recompilation is not possible just skip it.
if (shared->is_toplevel() ||
@@ -1850,23 +1968,16 @@
// Make sure that the shared full code is compiled with debug
// break slots.
- if (function->code() == *lazy_compile) {
- function->set_code(shared->code());
- }
- Handle<Code> current_code(function->code());
- if (shared->code()->has_debug_break_slots()) {
- // if the code is already recompiled to have break slots skip
- // recompilation.
- ASSERT(!function->code()->has_debug_break_slots());
- } else {
+ if (!shared->code()->has_debug_break_slots()) {
// Try to compile the full code with debug break slots. If it
// fails just keep the current code.
- ASSERT(shared->code() == *current_code);
+ Handle<Code> current_code(function->shared()->code());
ZoneScope zone_scope(isolate_, DELETE_ON_EXIT);
shared->set_code(*lazy_compile);
bool prev_force_debugger_active =
isolate_->debugger()->force_debugger_active();
isolate_->debugger()->set_force_debugger_active(true);
+ ASSERT(current_code->kind() == Code::FUNCTION);
CompileFullCodeForDebugging(shared, current_code);
isolate_->debugger()->set_force_debugger_active(
prev_force_debugger_active);
@@ -1875,67 +1986,17 @@
continue;
}
}
- Handle<Code> new_code(shared->code());
- // Find the function and patch the return address.
- for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- // If the current frame is for this function in its
- // non-optimized form rewrite the return address to continue
- // in the newly compiled full code with debug break slots.
- if (frame->function()->IsJSFunction() &&
- frame->function() == *function &&
- frame->LookupCode()->kind() == Code::FUNCTION) {
- intptr_t delta = frame->pc() - current_code->instruction_start();
- int debug_break_slot_count = 0;
- int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
- for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
- // Check if the pc in the new code with debug break
- // slots is before this slot.
- RelocInfo* info = it.rinfo();
- int debug_break_slot_bytes =
- debug_break_slot_count * Assembler::kDebugBreakSlotLength;
- intptr_t new_delta =
- info->pc() -
- new_code->instruction_start() -
- debug_break_slot_bytes;
- if (new_delta > delta) {
- break;
- }
-
- // Passed a debug break slot in the full code with debug
- // break slots.
- debug_break_slot_count++;
- }
- int debug_break_slot_bytes =
- debug_break_slot_count * Assembler::kDebugBreakSlotLength;
- if (FLAG_trace_deopt) {
- PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
- "with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
- "for debugging, "
- "changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n",
- reinterpret_cast<intptr_t>(
- current_code->instruction_start()),
- reinterpret_cast<intptr_t>(
- current_code->instruction_start()) +
- current_code->instruction_size(),
- current_code->instruction_size(),
- reinterpret_cast<intptr_t>(new_code->instruction_start()),
- reinterpret_cast<intptr_t>(new_code->instruction_start()) +
- new_code->instruction_size(),
- new_code->instruction_size(),
- reinterpret_cast<intptr_t>(frame->pc()),
- reinterpret_cast<intptr_t>(new_code->instruction_start()) +
- delta + debug_break_slot_bytes);
- }
-
- // Patch the return address to return into the code with
- // debug break slots.
- frame->set_pc(
- new_code->instruction_start() + delta + debug_break_slot_bytes);
- }
- }
+ // Keep function code in sync with shared function info.
+ function->set_code(shared->code());
}
+
+ RedirectActivationsToRecompiledCodeOnThread(isolate_,
+ isolate_->thread_local_top());
+
+ ActiveFunctionsRedirector active_functions_redirector;
+ isolate_->thread_manager()->IterateArchivedThreads(
+ &active_functions_redirector);
}
}
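
Editorial note (illustrative sketch, not part of the diff): the rewritten
PrepareForBreakPoints logic above replaces the old sort-and-binary-search over
active functions with a three-phase marking scheme: tag the code of every
active (or inlined-into-active) function through its gc_metadata field, make
one heap pass that sends every untagged, lazily-compilable function back to
lazy compilation, then clear the tags. A minimal stand-alone C++ model of that
control flow, with hypothetical Function/Code stand-ins for the V8 heap
objects:

    #include <vector>

    struct Code {
      const void* gc_metadata = nullptr;
      bool has_debug_break_slots = false;
    };

    struct Function {
      Code* code;
      bool allows_lazy_compilation;
      bool lazy_recompile = false;
    };

    // Stand-in for isolate->heap()->the_hole_value().
    static const char kActiveCodeMarker = 0;

    void MarkInactiveFunctionsForLazyRecompile(
        const std::vector<Function*>& heap_functions,
        const std::vector<Function*>& active_functions) {
      // Phase 1: mark the code of all active functions.
      for (Function* f : active_functions)
        f->code->gc_metadata = &kActiveCodeMarker;
      // Phase 2: one heap pass; unmarked functions without debug break
      // slots are flagged to go through lazy compilation again.
      for (Function* f : heap_functions) {
        if (f->allows_lazy_compilation &&
            !f->code->has_debug_break_slots &&
            f->code->gc_metadata != &kActiveCodeMarker) {
          f->lazy_recompile = true;
        }
      }
      // Phase 3: clear the marks again.
      for (Function* f : active_functions) f->code->gc_metadata = nullptr;
    }
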
@@ -2855,7 +2916,7 @@
command.Dispose();
// Return from debug event processing if either the VM is put into the
- // runnning state (through a continue command) or auto continue is active
+ // running state (through a continue command) or auto continue is active
// and there are no more commands queued.
if (running && !HasCommands()) {
return;
@@ -3065,7 +3126,7 @@
v8::Debug::DebugBreak();
}
- if (Socket::Setup()) {
+ if (Socket::SetUp()) {
if (agent_ == NULL) {
agent_ = new DebuggerAgent(name, port);
agent_->Start();
diff --git a/src/debug.h b/src/debug.h
index a39d801..582aada 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -178,7 +178,9 @@
private:
// Calculate the hash value from the key (script id).
- static uint32_t Hash(int key) { return ComputeIntegerHash(key); }
+ static uint32_t Hash(int key) {
+ return ComputeIntegerHash(key, v8::internal::kZeroHashSeed);
+ }
// Scripts match if their keys (script id) match.
static bool ScriptMatch(void* key1, void* key2) { return key1 == key2; }
@@ -222,7 +224,7 @@
// DebugInfo.
class Debug {
public:
- void Setup(bool create_heap_objects);
+ void SetUp(bool create_heap_objects);
bool Load();
void Unload();
bool IsLoaded() { return !debug_context_.is_null(); }
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index cb24b10..68f82ce 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -104,10 +104,27 @@
return result;
}
+
+int Deoptimizer::ConvertJSFrameIndexToFrameIndex(int jsframe_index) {
+ if (jsframe_index == 0) return 0;
+
+ int frame_index = 0;
+ while (jsframe_index >= 0) {
+ FrameDescription* frame = output_[frame_index];
+ if (frame->GetFrameType() == StackFrame::JAVA_SCRIPT) {
+ jsframe_index--;
+ }
+ frame_index++;
+ }
+
+ return frame_index - 1;
+}
+
+
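
Editorial note (illustrative sketch, not part of the diff):
ConvertJSFrameIndexToFrameIndex maps a debugger-visible JS frame index onto a
physical output-frame index, skipping the ARGUMENTS_ADAPTOR frames that this
patch interleaves between JS frames. A toy stand-alone version with a checked
example: for the layout [JS, ADAPTOR, JS], JS frame 1 lives at physical
index 2.

    #include <cassert>
    #include <vector>

    enum class FrameType { JS, ARGUMENTS_ADAPTOR };

    int ConvertJSFrameIndexToFrameIndex(const std::vector<FrameType>& output,
                                        int jsframe_index) {
      if (jsframe_index == 0) return 0;
      int frame_index = 0;
      while (jsframe_index >= 0) {
        // Only JS frames consume the JS frame index.
        if (output[frame_index] == FrameType::JS) jsframe_index--;
        frame_index++;
      }
      return frame_index - 1;
    }

    int main() {
      const std::vector<FrameType> output = {
          FrameType::JS, FrameType::ARGUMENTS_ADAPTOR, FrameType::JS};
      assert(ConvertJSFrameIndexToFrameIndex(output, 0) == 0);
      assert(ConvertJSFrameIndexToFrameIndex(output, 1) == 2);  // adaptor skipped
      return 0;
    }
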
#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
JavaScriptFrame* frame,
- int frame_index,
+ int jsframe_index,
Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
ASSERT(frame->is_optimized());
@@ -143,22 +160,40 @@
// Create the GC safe output frame information and register it for GC
// handling.
- ASSERT_LT(frame_index, deoptimizer->output_count());
+ ASSERT_LT(jsframe_index, deoptimizer->jsframe_count());
+
+ // Convert JS frame index into frame index.
+ int frame_index = deoptimizer->ConvertJSFrameIndexToFrameIndex(jsframe_index);
+
+ bool has_arguments_adaptor =
+ frame_index > 0 &&
+ deoptimizer->output_[frame_index - 1]->GetFrameType() ==
+ StackFrame::ARGUMENTS_ADAPTOR;
+
DeoptimizedFrameInfo* info =
- new DeoptimizedFrameInfo(deoptimizer, frame_index);
+ new DeoptimizedFrameInfo(deoptimizer, frame_index, has_arguments_adaptor);
isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
// Get the "simulated" top and size for the requested frame.
- Address top =
- reinterpret_cast<Address>(deoptimizer->output_[frame_index]->GetTop());
- uint32_t size = deoptimizer->output_[frame_index]->GetFrameSize();
+ FrameDescription* parameters_frame =
+ deoptimizer->output_[
+ has_arguments_adaptor ? (frame_index - 1) : frame_index];
+
+ uint32_t parameters_size = (info->parameters_count() + 1) * kPointerSize;
+ Address parameters_top = reinterpret_cast<Address>(
+ parameters_frame->GetTop() + (parameters_frame->GetFrameSize() -
+ parameters_size));
+
+ uint32_t expressions_size = info->expression_count() * kPointerSize;
+ Address expressions_top = reinterpret_cast<Address>(
+ deoptimizer->output_[frame_index]->GetTop());
// Done with the GC-unsafe frame descriptions. This re-enables allocation.
deoptimizer->DeleteFrameDescriptions();
// Allocate a heap number for the doubles belonging to this frame.
deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
- top, size, info);
+ parameters_top, parameters_size, expressions_top, expressions_size, info);
// Finished using the deoptimizer instance.
delete deoptimizer;
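
Editorial note (illustrative sketch, not part of the diff): when the inspected
frame sits behind an arguments adaptor, the parameters are read from the
adaptor frame while the expression stack is read from the JS frame itself. The
parameter area (arguments plus receiver) occupies the high-address end of its
frame, which is what the parameters_top arithmetic above computes. A
stand-alone check with invented numbers (4-byte pointers, a 32-byte adaptor
frame, 3 parameters):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kPointerSize = 4;     // assumed 32-bit target
      const uintptr_t frame_top = 0x2000;  // invented frame top address
      const uint32_t frame_size = 32;      // invented frame size
      const int parameters_count = 3;
      // Parameters plus the receiver sit at the high-address end.
      uint32_t parameters_size = (parameters_count + 1) * kPointerSize;
      uintptr_t parameters_top = frame_top + (frame_size - parameters_size);
      printf("parameters occupy [%#lx, %#lx)\n",
             static_cast<unsigned long>(parameters_top),
             static_cast<unsigned long>(parameters_top + parameters_size));
      // Prints: parameters occupy [0x2010, 0x2020)
      return 0;
    }
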
@@ -313,6 +348,7 @@
fp_to_sp_delta_(fp_to_sp_delta),
input_(NULL),
output_count_(0),
+ jsframe_count_(0),
output_(NULL),
frame_alignment_marker_(isolate->heap()->frame_alignment_marker()),
has_alignment_padding_(0),
@@ -377,9 +413,7 @@
ASSERT(HEAP->allow_allocation(false));
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
-#ifdef DEBUG
- input_->SetKind(Code::OPTIMIZED_FUNCTION);
-#endif
+ input_->SetFrameType(StackFrame::JAVA_SCRIPT);
}
@@ -515,6 +549,7 @@
// Read the number of output frames and allocate an array for their
// descriptions.
int count = iterator.Next();
+ iterator.Next(); // Drop JS frames count.
ASSERT(output_ == NULL);
output_ = new FrameDescription*[count];
for (int i = 0; i < count; ++i) {
@@ -524,7 +559,21 @@
// Translate each output frame.
for (int i = 0; i < count; ++i) {
- DoComputeFrame(&iterator, i);
+ // Read the ast node id, function, and frame height for this output frame.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator.Next());
+ switch (opcode) {
+ case Translation::JS_FRAME:
+ DoComputeJSFrame(&iterator, i);
+ jsframe_count_++;
+ break;
+ case Translation::ARGUMENTS_ADAPTOR_FRAME:
+ DoComputeArgumentsAdaptorFrame(&iterator, i);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
}
// Print some helpful diagnostic information.
@@ -565,39 +614,52 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
- Address top, uint32_t size, DeoptimizedFrameInfo* info) {
+ Address parameters_top,
+ uint32_t parameters_size,
+ Address expressions_top,
+ uint32_t expressions_size,
+ DeoptimizedFrameInfo* info) {
ASSERT_EQ(DEBUGGER, bailout_type_);
+ Address parameters_bottom = parameters_top + parameters_size;
+ Address expressions_bottom = expressions_top + expressions_size;
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
    // Check if the heap number to materialize actually belongs to the frame
    // being extracted.
Address slot = d.slot_address();
- if (top <= slot && slot < top + size) {
+ if (parameters_top <= slot && slot < parameters_bottom) {
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
- // Calculate the index with the botton of the expression stack
- // at index 0, and the fixed part (including incoming arguments)
- // at negative indexes.
- int index = static_cast<int>(
- info->expression_count_ - (slot - top) / kPointerSize - 1);
+
+ int index = (info->parameters_count() - 1) -
+ static_cast<int>(slot - parameters_top) / kPointerSize;
+
if (FLAG_trace_deopt) {
PrintF("Materializing a new heap number %p [%e] in slot %p"
- "for stack index %d\n",
+               " for parameter slot #%d\n",
reinterpret_cast<void*>(*num),
d.value(),
d.slot_address(),
index);
}
- if (index >=0) {
- info->SetExpression(index, *num);
- } else {
- // Calculate parameter index subtracting one for the receiver.
- int parameter_index =
- index +
- static_cast<int>(size) / kPointerSize -
- info->expression_count_ - 1;
- info->SetParameter(parameter_index, *num);
+
+ info->SetParameter(index, *num);
+ } else if (expressions_top <= slot && slot < expressions_bottom) {
+ Handle<Object> num = isolate_->factory()->NewNumber(d.value());
+
+ int index = info->expression_count() - 1 -
+ static_cast<int>(slot - expressions_top) / kPointerSize;
+
+ if (FLAG_trace_deopt) {
+ PrintF("Materializing a new heap number %p [%e] in slot %p"
+                 " for expression slot #%d\n",
+ reinterpret_cast<void*>(*num),
+ d.value(),
+ d.slot_address(),
+ index);
}
+
+ info->SetExpression(index, *num);
}
}
}
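
Editorial note (illustrative sketch, not part of the diff): the two index
computations above map a slot address back to a parameter or expression
number. Because arguments are pushed toward lower addresses, the slot at
parameters_top holds the last parameter, not the first. A worked stand-alone
check with invented addresses (4-byte pointers, 3 parameters):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kPointerSize = 4;               // assumed 32-bit target
      const int parameters_count = 3;
      const uintptr_t parameters_top = 0x1000;  // invented lowest slot address
      for (int i = 0; i < parameters_count; i++) {
        uintptr_t slot = parameters_top + i * kPointerSize;
        int index = (parameters_count - 1) -
                    static_cast<int>(slot - parameters_top) / kPointerSize;
        printf("slot %#lx -> parameter #%d\n",
               static_cast<unsigned long>(slot), index);
      }
      // Prints parameters #2, #1, #0: the lowest address holds the last one.
      return 0;
    }
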
@@ -622,7 +684,8 @@
switch (opcode) {
case Translation::BEGIN:
- case Translation::FRAME:
+ case Translation::JS_FRAME:
+ case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::DUPLICATE:
UNREACHABLE();
return;
@@ -691,7 +754,7 @@
case Translation::STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset =
- input_->GetOffsetFromSlotIndex(this, input_slot_index);
+ input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
@@ -710,7 +773,7 @@
case Translation::INT32_STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset =
- input_->GetOffsetFromSlotIndex(this, input_slot_index);
+ input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t value = input_->GetFrameSlot(input_offset);
bool is_smi = Smi::IsValid(value);
if (FLAG_trace_deopt) {
@@ -739,7 +802,7 @@
case Translation::DOUBLE_STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset =
- input_->GetOffsetFromSlotIndex(this, input_slot_index);
+ input_->GetOffsetFromSlotIndex(input_slot_index);
double value = input_->GetDoubleFrameSlot(input_offset);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
@@ -808,7 +871,8 @@
switch (opcode) {
case Translation::BEGIN:
- case Translation::FRAME:
+ case Translation::JS_FRAME:
+ case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::DUPLICATE:
UNREACHABLE(); // Malformed input.
return false;
@@ -871,7 +935,7 @@
case Translation::STACK_SLOT: {
int output_index = iterator->Next();
unsigned output_offset =
- output->GetOffsetFromSlotIndex(this, output_index);
+ output->GetOffsetFromSlotIndex(output_index);
if (FLAG_trace_osr) {
PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
output_offset,
@@ -890,7 +954,7 @@
int output_index = iterator->Next();
unsigned output_offset =
- output->GetOffsetFromSlotIndex(this, output_index);
+ output->GetOffsetFromSlotIndex(output_index);
int int32_value = input_object->IsSmi()
? Smi::cast(input_object)->value()
: DoubleToInt32(input_object->Number());
@@ -922,7 +986,7 @@
int output_index = iterator->Next();
unsigned output_offset =
- output->GetOffsetFromSlotIndex(this, output_index);
+ output->GetOffsetFromSlotIndex(output_index);
double double_value = input_object->Number();
uint64_t int_value = BitCast<uint64_t, double>(double_value);
int32_t lower = static_cast<int32_t>(int_value);
@@ -1033,8 +1097,8 @@
unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
// The fixed part of the frame consists of the return address, frame
// pointer, function, context, and all the incoming arguments.
- static const unsigned kFixedSlotSize = 4 * kPointerSize;
- return ComputeIncomingArgumentSize(function) + kFixedSlotSize;
+ return ComputeIncomingArgumentSize(function) +
+ StandardFrameConstants::kFixedFrameSize;
}
@@ -1155,49 +1219,62 @@
}
-unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer,
- int slot_index) {
+int FrameDescription::ComputeFixedSize() {
+ return StandardFrameConstants::kFixedFrameSize +
+ (ComputeParametersCount() + 1) * kPointerSize;
+}
+
+
+unsigned FrameDescription::GetOffsetFromSlotIndex(int slot_index) {
if (slot_index >= 0) {
// Local or spill slots. Skip the fixed part of the frame
// including all arguments.
- unsigned base =
- GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
+ unsigned base = GetFrameSize() - ComputeFixedSize();
return base - ((slot_index + 1) * kPointerSize);
} else {
// Incoming parameter.
- unsigned base = GetFrameSize() -
- deoptimizer->ComputeIncomingArgumentSize(GetFunction());
+ int arg_size = (ComputeParametersCount() + 1) * kPointerSize;
+ unsigned base = GetFrameSize() - arg_size;
return base - ((slot_index + 1) * kPointerSize);
}
}
int FrameDescription::ComputeParametersCount() {
- return function_->shared()->formal_parameter_count();
+ switch (type_) {
+ case StackFrame::JAVA_SCRIPT:
+ return function_->shared()->formal_parameter_count();
+ case StackFrame::ARGUMENTS_ADAPTOR: {
+      // Last slot contains the number of incoming arguments as a smi.
+ // Can't use GetExpression(0) because it would cause infinite recursion.
+ return reinterpret_cast<Smi*>(*GetFrameSlotPointer(0))->value();
+ }
+ default:
+ UNREACHABLE();
+ return 0;
+ }
}
-Object* FrameDescription::GetParameter(Deoptimizer* deoptimizer, int index) {
- ASSERT_EQ(Code::FUNCTION, kind_);
+Object* FrameDescription::GetParameter(int index) {
ASSERT(index >= 0);
ASSERT(index < ComputeParametersCount());
// The slot indexes for incoming arguments are negative.
- unsigned offset = GetOffsetFromSlotIndex(deoptimizer,
- index - ComputeParametersCount());
+ unsigned offset = GetOffsetFromSlotIndex(index - ComputeParametersCount());
return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
}
-unsigned FrameDescription::GetExpressionCount(Deoptimizer* deoptimizer) {
- ASSERT_EQ(Code::FUNCTION, kind_);
- unsigned size = GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
+unsigned FrameDescription::GetExpressionCount() {
+ ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
+ unsigned size = GetFrameSize() - ComputeFixedSize();
return size / kPointerSize;
}
-Object* FrameDescription::GetExpression(Deoptimizer* deoptimizer, int index) {
- ASSERT_EQ(Code::FUNCTION, kind_);
- unsigned offset = GetOffsetFromSlotIndex(deoptimizer, index);
+Object* FrameDescription::GetExpression(int index) {
+ ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
+ unsigned offset = GetOffsetFromSlotIndex(index);
return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
}
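
Editorial note (illustrative sketch, not part of the diff): with the
Deoptimizer parameter gone, GetOffsetFromSlotIndex derives everything from the
frame itself: non-negative slot indexes are locals and spill slots below the
fixed part, negative indexes are incoming parameters above it. A stand-alone
model with invented numbers (4-byte pointers, 2 parameters, a 40-byte frame,
and a 4-slot fixed part for return address, fp, context, and function):

    #include <cstdio>

    const int kPointerSize = 4;
    const int kFixedFrameSize = 4 * kPointerSize;

    struct FrameModel {
      unsigned frame_size;
      int parameters_count;
      int FixedSize() const {
        return kFixedFrameSize + (parameters_count + 1) * kPointerSize;
      }
      unsigned OffsetFromSlotIndex(int slot_index) const {
        if (slot_index >= 0) {  // local or spill slot
          unsigned base = frame_size - FixedSize();
          return base - (slot_index + 1) * kPointerSize;
        }
        // Incoming parameter (negative slot index).
        int arg_size = (parameters_count + 1) * kPointerSize;
        unsigned base = frame_size - arg_size;
        return base - (slot_index + 1) * kPointerSize;
      }
    };

    int main() {
      FrameModel f = {40, 2};
      printf("local slot 0 -> offset %u\n", f.OffsetFromSlotIndex(0));   // 8
      printf("parameter 0  -> offset %u\n", f.OffsetFromSlotIndex(-2));  // 32
      printf("parameter 1  -> offset %u\n", f.OffsetFromSlotIndex(-1));  // 28
      printf("receiver     -> offset %u\n", f.OffsetFromSlotIndex(-3));  // 36
      return 0;
    }
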
@@ -1243,8 +1320,15 @@
}
-void Translation::BeginFrame(int node_id, int literal_id, unsigned height) {
- buffer_->Add(FRAME);
+void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
+ buffer_->Add(ARGUMENTS_ADAPTOR_FRAME);
+ buffer_->Add(literal_id);
+ buffer_->Add(height);
+}
+
+
+void Translation::BeginJSFrame(int node_id, int literal_id, unsigned height) {
+ buffer_->Add(JS_FRAME);
buffer_->Add(node_id);
buffer_->Add(literal_id);
buffer_->Add(height);
@@ -1308,7 +1392,6 @@
case ARGUMENTS_OBJECT:
case DUPLICATE:
return 0;
- case BEGIN:
case REGISTER:
case INT32_REGISTER:
case DOUBLE_REGISTER:
@@ -1317,7 +1400,10 @@
case DOUBLE_STACK_SLOT:
case LITERAL:
return 1;
- case FRAME:
+ case BEGIN:
+ case ARGUMENTS_ADAPTOR_FRAME:
+ return 2;
+ case JS_FRAME:
return 3;
}
UNREACHABLE();
@@ -1331,8 +1417,10 @@
switch (opcode) {
case BEGIN:
return "BEGIN";
- case FRAME:
- return "FRAME";
+ case JS_FRAME:
+ return "JS_FRAME";
+ case ARGUMENTS_ADAPTOR_FRAME:
+ return "ARGUMENTS_ADAPTOR_FRAME";
case REGISTER:
return "REGISTER";
case INT32_REGISTER:
@@ -1386,7 +1474,8 @@
switch (opcode) {
case Translation::BEGIN:
- case Translation::FRAME:
+ case Translation::JS_FRAME:
+ case Translation::ARGUMENTS_ADAPTOR_FRAME:
// Peeled off before getting here.
break;
@@ -1432,9 +1521,27 @@
}
-void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame,
- int inlined_frame_index,
- Vector<SlotRef>* args_slots) {
+void SlotRef::ComputeSlotsForArguments(Vector<SlotRef>* args_slots,
+ TranslationIterator* it,
+ DeoptimizationInputData* data,
+ JavaScriptFrame* frame) {
+ // Process the translation commands for the arguments.
+
+ // Skip the translation command for the receiver.
+ it->Skip(Translation::NumberOfOperandsFor(
+ static_cast<Translation::Opcode>(it->Next())));
+
+ // Compute slots for arguments.
+ for (int i = 0; i < args_slots->length(); ++i) {
+ (*args_slots)[i] = ComputeSlotForNextArgument(it, data, frame);
+ }
+}
+
+
+Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
+ JavaScriptFrame* frame,
+ int inlined_jsframe_index,
+ int formal_parameter_count) {
AssertNoAllocation no_gc;
int deopt_index = AstNode::kNoNumber;
DeoptimizationInputData* data =
@@ -1443,51 +1550,73 @@
data->TranslationIndex(deopt_index)->value());
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::BEGIN);
- int frame_count = it.Next();
- USE(frame_count);
- ASSERT(frame_count > inlined_frame_index);
- int frames_to_skip = inlined_frame_index;
+ it.Next(); // Drop frame count.
+ int jsframe_count = it.Next();
+ USE(jsframe_count);
+ ASSERT(jsframe_count > inlined_jsframe_index);
+ int jsframes_to_skip = inlined_jsframe_index;
while (true) {
opcode = static_cast<Translation::Opcode>(it.Next());
- // Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- if (opcode == Translation::FRAME) {
- if (frames_to_skip == 0) {
+ if (opcode == Translation::ARGUMENTS_ADAPTOR_FRAME) {
+ if (jsframes_to_skip == 0) {
+ ASSERT(Translation::NumberOfOperandsFor(opcode) == 2);
+
+ it.Skip(1); // literal id
+ int height = it.Next();
+
+ // We reached the arguments adaptor frame corresponding to the
+ // inlined function in question. Number of arguments is height - 1.
+ Vector<SlotRef> args_slots =
+ Vector<SlotRef>::New(height - 1); // Minus receiver.
+ ComputeSlotsForArguments(&args_slots, &it, data, frame);
+ return args_slots;
+ }
+ } else if (opcode == Translation::JS_FRAME) {
+ if (jsframes_to_skip == 0) {
+ // Skip over operands to advance to the next opcode.
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+
// We reached the frame corresponding to the inlined function
// in question. Process the translation commands for the
- // arguments.
- //
- // Skip the translation command for the receiver.
- it.Skip(Translation::NumberOfOperandsFor(
- static_cast<Translation::Opcode>(it.Next())));
- // Compute slots for arguments.
- for (int i = 0; i < args_slots->length(); ++i) {
- (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
- }
- return;
+        // arguments. The number of arguments equals the formal
+        // parameter count.
+ Vector<SlotRef> args_slots =
+ Vector<SlotRef>::New(formal_parameter_count);
+ ComputeSlotsForArguments(&args_slots, &it, data, frame);
+ return args_slots;
}
- frames_to_skip--;
+ jsframes_to_skip--;
}
+
+ // Skip over operands to advance to the next opcode.
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
}
UNREACHABLE();
+ return Vector<SlotRef>();
}
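
Editorial note (illustrative sketch, not part of the diff): the rewritten walk
above distinguishes two cases: an ARGUMENTS_ADAPTOR_FRAME in front of the
inlined frame, where the argument count is height - 1 (the receiver is
excluded), and a bare JS_FRAME, where it is the formal parameter count. A toy
translation-stream walker mirroring that control flow, using the operand
counts this patch assigns (BEGIN and ARGUMENTS_ADAPTOR_FRAME take 2, JS_FRAME
takes 3):

    #include <cstddef>
    #include <vector>

    enum Op { BEGIN, JS_FRAME, ARGUMENTS_ADAPTOR_FRAME, LITERAL };

    static int OperandsFor(Op op) {
      switch (op) {
        case LITERAL: return 1;
        case BEGIN: return 2;                    // frame count, jsframe count
        case ARGUMENTS_ADAPTOR_FRAME: return 2;  // literal id, height
        case JS_FRAME: return 3;                 // ast id, literal id, height
      }
      return 0;
    }

    // Returns the argument count of the requested inlined JS frame, or -1
    // if the stream is malformed.
    int ArgumentsCountForInlinedFrame(const std::vector<int>& stream,
                                      int inlined_jsframe_index,
                                      int formal_parameter_count) {
      size_t pos = 1 + OperandsFor(BEGIN);  // skip BEGIN and its operands
      int jsframes_to_skip = inlined_jsframe_index;
      while (pos < stream.size()) {
        Op op = static_cast<Op>(stream[pos++]);
        if (op == ARGUMENTS_ADAPTOR_FRAME && jsframes_to_skip == 0) {
          int height = stream[pos + 1];  // second operand
          return height - 1;             // minus receiver
        }
        if (op == JS_FRAME) {
          if (jsframes_to_skip == 0) return formal_parameter_count;
          jsframes_to_skip--;
        }
        pos += OperandsFor(op);  // skip operands to the next opcode
      }
      return -1;
    }
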
#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo::DeoptimizedFrameInfo(
- Deoptimizer* deoptimizer, int frame_index) {
+ Deoptimizer* deoptimizer, int frame_index, bool has_arguments_adaptor) {
FrameDescription* output_frame = deoptimizer->output_[frame_index];
SetFunction(output_frame->GetFunction());
- expression_count_ = output_frame->GetExpressionCount(deoptimizer);
+ expression_count_ = output_frame->GetExpressionCount();
+ expression_stack_ = new Object*[expression_count_];
+ for (int i = 0; i < expression_count_; i++) {
+ SetExpression(i, output_frame->GetExpression(i));
+ }
+
+ if (has_arguments_adaptor) {
+ output_frame = deoptimizer->output_[frame_index - 1];
+ ASSERT(output_frame->GetFrameType() == StackFrame::ARGUMENTS_ADAPTOR);
+ }
+
parameters_count_ = output_frame->ComputeParametersCount();
parameters_ = new Object*[parameters_count_];
for (int i = 0; i < parameters_count_; i++) {
- SetParameter(i, output_frame->GetParameter(deoptimizer, i));
- }
- expression_stack_ = new Object*[expression_count_];
- for (int i = 0; i < expression_count_; i++) {
- SetExpression(i, output_frame->GetExpression(deoptimizer, i));
+ SetParameter(i, output_frame->GetParameter(i));
}
}
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 284676c..8b1152d 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -119,6 +119,9 @@
int output_count() const { return output_count_; }
+ // Number of created JS frames. Not all created frames are necessarily JS.
+ int jsframe_count() const { return jsframe_count_; }
+
static Deoptimizer* New(JSFunction* function,
BailoutType type,
unsigned bailout_id,
@@ -131,7 +134,7 @@
// The returned object with information on the optimized frame needs to be
// freed before another one can be generated.
static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame,
- int frame_index,
+ int jsframe_index,
Isolate* isolate);
static void DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
Isolate* isolate);
@@ -196,7 +199,11 @@
void MaterializeHeapNumbers();
#ifdef ENABLE_DEBUGGER_SUPPORT
void MaterializeHeapNumbersForDebuggerInspectableFrame(
- Address top, uint32_t size, DeoptimizedFrameInfo* info);
+ Address parameters_top,
+ uint32_t parameters_size,
+ Address expressions_top,
+ uint32_t expressions_size,
+ DeoptimizedFrameInfo* info);
#endif
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
@@ -257,8 +264,14 @@
int count_;
};
+ int ConvertJSFrameIndexToFrameIndex(int jsframe_index);
+
private:
+#ifdef V8_TARGET_ARCH_MIPS
static const int kNumberOfEntries = 4096;
+#else
+ static const int kNumberOfEntries = 8192;
+#endif
Deoptimizer(Isolate* isolate,
JSFunction* function,
@@ -271,7 +284,9 @@
void DoComputeOutputFrames();
void DoComputeOsrOutputFrame();
- void DoComputeFrame(TranslationIterator* iterator, int frame_index);
+ void DoComputeJSFrame(TranslationIterator* iterator, int frame_index);
+ void DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
+ int frame_index);
void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
@@ -319,6 +334,8 @@
FrameDescription* input_;
// Number of output frames.
int output_count_;
+  // Number of output JS frames.
+ int jsframe_count_;
// Array of output frame descriptions.
FrameDescription** output_;
@@ -362,7 +379,7 @@
JSFunction* GetFunction() const { return function_; }
- unsigned GetOffsetFromSlotIndex(Deoptimizer* deoptimizer, int slot_index);
+ unsigned GetOffsetFromSlotIndex(int slot_index);
intptr_t GetFrameSlot(unsigned offset) {
return *GetFrameSlotPointer(offset);
@@ -423,22 +440,20 @@
void SetContinuation(intptr_t pc) { continuation_ = pc; }
-#ifdef DEBUG
- Code::Kind GetKind() const { return kind_; }
- void SetKind(Code::Kind kind) { kind_ = kind; }
-#endif
+ StackFrame::Type GetFrameType() const { return type_; }
+ void SetFrameType(StackFrame::Type type) { type_ = type; }
// Get the incoming arguments count.
int ComputeParametersCount();
// Get a parameter value for an unoptimized frame.
- Object* GetParameter(Deoptimizer* deoptimizer, int index);
+ Object* GetParameter(int index);
// Get the expression stack height for a unoptimized frame.
- unsigned GetExpressionCount(Deoptimizer* deoptimizer);
+ unsigned GetExpressionCount();
// Get the expression stack value for an unoptimized frame.
- Object* GetExpression(Deoptimizer* deoptimizer, int index);
+ Object* GetExpression(int index);
static int registers_offset() {
return OFFSET_OF(FrameDescription, registers_);
@@ -481,6 +496,7 @@
intptr_t top_;
intptr_t pc_;
intptr_t fp_;
+ StackFrame::Type type_;
Smi* state_;
#ifdef DEBUG
Code::Kind kind_;
@@ -499,6 +515,8 @@
return reinterpret_cast<intptr_t*>(
reinterpret_cast<Address>(this) + frame_content_offset() + offset);
}
+
+ int ComputeFixedSize();
};
@@ -541,7 +559,8 @@
public:
enum Opcode {
BEGIN,
- FRAME,
+ JS_FRAME,
+ ARGUMENTS_ADAPTOR_FRAME,
REGISTER,
INT32_REGISTER,
DOUBLE_REGISTER,
@@ -556,17 +575,19 @@
DUPLICATE
};
- Translation(TranslationBuffer* buffer, int frame_count)
+ Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count)
: buffer_(buffer),
index_(buffer->CurrentIndex()) {
buffer_->Add(BEGIN);
buffer_->Add(frame_count);
+ buffer_->Add(jsframe_count);
}
int index() const { return index_; }
// Commands.
- void BeginFrame(int node_id, int literal_id, unsigned height);
+ void BeginJSFrame(int node_id, int literal_id, unsigned height);
+ void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void StoreRegister(Register reg);
void StoreInt32Register(Register reg);
void StoreDoubleRegister(DoubleRegister reg);
@@ -656,9 +677,10 @@
}
}
- static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
- int inlined_frame_index,
- Vector<SlotRef>* args_slots);
+ static Vector<SlotRef> ComputeSlotMappingForArguments(
+ JavaScriptFrame* frame,
+ int inlined_frame_index,
+ int formal_parameter_count);
private:
Address addr_;
@@ -678,6 +700,12 @@
static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
DeoptimizationInputData* data,
JavaScriptFrame* frame);
+
+ static void ComputeSlotsForArguments(
+ Vector<SlotRef>* args_slots,
+ TranslationIterator* iterator,
+ DeoptimizationInputData* data,
+ JavaScriptFrame* frame);
};
@@ -686,9 +714,13 @@
// needs to inspect a frame that is part of an optimized frame. The
// internally used FrameDescription objects are not GC safe so for use
// by the debugger frame information is copied to an object of this type.
+// Represents parameters in unadapted form, so their number may not match the
+// formal parameter count.
class DeoptimizedFrameInfo : public Malloced {
public:
- DeoptimizedFrameInfo(Deoptimizer* deoptimizer, int frame_index);
+ DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
+ int frame_index,
+ bool has_arguments_adaptor);
virtual ~DeoptimizedFrameInfo();
// GC support.
diff --git a/src/dtoa.h b/src/dtoa.h
index a2d6fde..948a079 100644
--- a/src/dtoa.h
+++ b/src/dtoa.h
@@ -49,7 +49,7 @@
// be at least kBase10MaximalLength + 1 characters long.
const int kBase10MaximalLength = 17;
-// Converts the given double 'v' to ascii.
+// Converts the given double 'v' to ASCII.
// The result should be interpreted as buffer * 10^(point-length).
//
// The output depends on the given mode:
diff --git a/src/elements.cc b/src/elements.cc
index 49ecd88..b62e144 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -133,6 +133,22 @@
JSObject* obj,
Object* length);
+ virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+ int capacity,
+ int length) {
+ return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
+ array,
+ capacity,
+ length);
+ }
+
+ static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+ int capacity,
+ int length) {
+ UNIMPLEMENTED();
+ return obj;
+ }
+
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
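
Editorial note (illustrative sketch, not part of the diff): the virtual
SetCapacityAndLength added above forwards through the ElementsAccessorSubclass
template parameter to a static SetFastElementsCapacityAndLength, and the
UNIMPLEMENTED() default in the base is shadowed by subclasses that support the
operation (the curiously recurring template pattern). A minimal stand-alone
model of that dispatch, with invented names:

    #include <cstdio>

    template <typename Subclass>
    struct AccessorBase {
      virtual ~AccessorBase() {}
      virtual void SetCapacityAndLength(int capacity, int length) {
        // Statically dispatches to the subclass's shadowing definition.
        Subclass::SetFastCapacityAndLength(capacity, length);
      }
      static void SetFastCapacityAndLength(int, int) {
        printf("UNIMPLEMENTED\n");  // default, like UNIMPLEMENTED() above
      }
    };

    struct FastAccessor : AccessorBase<FastAccessor> {
      static void SetFastCapacityAndLength(int capacity, int length) {
        printf("resize to capacity=%d length=%d\n", capacity, length);
      }
    };

    int main() {
      FastAccessor fast;
      AccessorBase<FastAccessor>& base = fast;
      base.SetCapacityAndLength(8, 4);  // prints the FastAccessor message
      return 0;
    }
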
@@ -371,11 +387,6 @@
return heap->true_value();
}
- protected:
- friend class FastElementsAccessor<FastObjectElementsAccessor,
- FixedArray,
- kPointerSize>;
-
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
@@ -388,6 +399,11 @@
set_capacity_mode);
}
+ protected:
+ friend class FastElementsAccessor<FastObjectElementsAccessor,
+ FixedArray,
+ kPointerSize>;
+
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
@@ -400,6 +416,12 @@
: public FastElementsAccessor<FastDoubleElementsAccessor,
FixedDoubleArray,
kDoubleSize> {
+ static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+ uint32_t capacity,
+ uint32_t length) {
+ return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
+ }
+
protected:
friend class ElementsAccessorBase<FastDoubleElementsAccessor,
FixedDoubleArray>;
@@ -407,12 +429,6 @@
FixedDoubleArray,
kDoubleSize>;
- static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
- uint32_t capacity,
- uint32_t length) {
- return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
- }
-
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
@@ -527,11 +543,11 @@
class DictionaryElementsAccessor
: public ElementsAccessorBase<DictionaryElementsAccessor,
- NumberDictionary> {
+ SeededNumberDictionary> {
public:
// Adjusts the length of the dictionary backing store and returns the new
// length according to ES5 section 15.4.5.2 behavior.
- static MaybeObject* SetLengthWithoutNormalize(NumberDictionary* dict,
+ static MaybeObject* SetLengthWithoutNormalize(SeededNumberDictionary* dict,
JSArray* array,
Object* length_object,
uint32_t length) {
@@ -597,9 +613,10 @@
if (is_arguments) {
backing_store = FixedArray::cast(backing_store->get(1));
}
- NumberDictionary* dictionary = NumberDictionary::cast(backing_store);
+ SeededNumberDictionary* dictionary =
+ SeededNumberDictionary::cast(backing_store);
int entry = dictionary->FindEntry(key);
- if (entry != NumberDictionary::kNotFound) {
+ if (entry != SeededNumberDictionary::kNotFound) {
Object* result = dictionary->DeleteProperty(entry, mode);
if (result == heap->true_value()) {
MaybeObject* maybe_elements = dictionary->Shrink(key);
@@ -632,7 +649,7 @@
protected:
friend class ElementsAccessorBase<DictionaryElementsAccessor,
- NumberDictionary>;
+ SeededNumberDictionary>;
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
@@ -640,12 +657,12 @@
return DeleteCommon(obj, key, mode);
}
- static MaybeObject* GetImpl(NumberDictionary* backing_store,
+ static MaybeObject* GetImpl(SeededNumberDictionary* backing_store,
uint32_t key,
JSObject* obj,
Object* receiver) {
int entry = backing_store->FindEntry(key);
- if (entry != NumberDictionary::kNotFound) {
+ if (entry != SeededNumberDictionary::kNotFound) {
Object* element = backing_store->ValueAt(entry);
PropertyDetails details = backing_store->DetailsAt(entry);
if (details.type() == CALLBACKS) {
@@ -660,7 +677,7 @@
return obj->GetHeap()->the_hole_value();
}
- static uint32_t GetKeyForIndexImpl(NumberDictionary* dict,
+ static uint32_t GetKeyForIndexImpl(SeededNumberDictionary* dict,
uint32_t index) {
Object* key = dict->KeyAt(index);
return Smi::cast(key)->value();
@@ -873,7 +890,7 @@
if (length->IsNumber()) {
uint32_t value;
if (length->ToArrayIndex(&value)) {
- NumberDictionary* dictionary;
+ SeededNumberDictionary* dictionary;
MaybeObject* maybe_object = array->NormalizeElements();
if (!maybe_object->To(&dictionary)) return maybe_object;
Object* new_length;
diff --git a/src/elements.h b/src/elements.h
index ed1ca5e..a2a184d 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -44,11 +44,24 @@
JSObject* holder,
Object* receiver) = 0;
- // Modifies the length data property as specified for JSArrays and resizes
- // the underlying backing store accordingly.
+  // Modifies the length data property as specified for JSArrays and resizes
+  // the underlying backing store accordingly. The method honors the semantics
+  // of changing array sizes as defined in ECMAScript 5.1 15.4.5.2, i.e.
+  // arrays that have non-deletable elements can only be shrunk to the size
+  // of the highest non-deletable element.
virtual MaybeObject* SetLength(JSObject* holder,
Object* new_length) = 0;
+  // Modifies both the length and capacity of a JSArray, resizing the
+  // underlying backing store as necessary. This method does NOT honor the
+  // semantics of ECMAScript 5.1 15.4.5.2: arrays can be shrunk beyond
+  // non-deletable elements. It should only be called for array expansion, or
+  // by runtime JavaScript code that uses InternalArrays and doesn't care
+  // about ECMAScript 5.1 semantics.
+ virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+ int capacity,
+ int length) = 0;
+
virtual MaybeObject* Delete(JSObject* holder,
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
diff --git a/src/execution.cc b/src/execution.cc
index b16e739..8a0242f 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -356,7 +356,7 @@
void StackGuard::SetStackLimit(uintptr_t limit) {
ExecutionAccess access(isolate_);
- // If the current limits are special (eg due to a pending interrupt) then
+ // If the current limits are special (e.g. due to a pending interrupt) then
// leave them alone.
uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
if (thread_local_.jslimit_ == thread_local_.real_jslimit_) {
@@ -845,13 +845,13 @@
// Clear the debug break request flag.
isolate->stack_guard()->Continue(DEBUGBREAK);
- ProcessDebugMesssages(debug_command_only);
+ ProcessDebugMessages(debug_command_only);
// Return to continue execution.
return isolate->heap()->undefined_value();
}
-void Execution::ProcessDebugMesssages(bool debug_command_only) {
+void Execution::ProcessDebugMessages(bool debug_command_only) {
Isolate* isolate = Isolate::Current();
// Clear the debug command request flag.
isolate->stack_guard()->Continue(DEBUGCOMMAND);
diff --git a/src/execution.h b/src/execution.h
index f2d17d0..014736e 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -136,7 +136,7 @@
Handle<Object> is_global);
#ifdef ENABLE_DEBUGGER_SUPPORT
static Object* DebugBreakHelper();
- static void ProcessDebugMesssages(bool debug_command_only);
+ static void ProcessDebugMessages(bool debug_command_only);
#endif
// If the stack guard is triggered, but it is not an actual
diff --git a/src/extensions/experimental/break-iterator.cc b/src/extensions/experimental/break-iterator.cc
deleted file mode 100644
index e695a3e..0000000
--- a/src/extensions/experimental/break-iterator.cc
+++ /dev/null
@@ -1,252 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/extensions/experimental/break-iterator.h"
-
-#include <string.h>
-
-#include "unicode/brkiter.h"
-#include "unicode/locid.h"
-#include "unicode/rbbi.h"
-
-namespace v8 {
-namespace internal {
-
-v8::Persistent<v8::FunctionTemplate> BreakIterator::break_iterator_template_;
-
-icu::BreakIterator* BreakIterator::UnpackBreakIterator(
- v8::Handle<v8::Object> obj) {
- if (break_iterator_template_->HasInstance(obj)) {
- return static_cast<icu::BreakIterator*>(
- obj->GetPointerFromInternalField(0));
- }
-
- return NULL;
-}
-
-icu::UnicodeString* BreakIterator::ResetAdoptedText(
- v8::Handle<v8::Object> obj, v8::Handle<v8::Value> value) {
- // Get the previous value from the internal field.
- icu::UnicodeString* text = static_cast<icu::UnicodeString*>(
- obj->GetPointerFromInternalField(1));
- delete text;
-
- // Assign new value to the internal pointer.
- v8::String::Value text_value(value);
- text = new icu::UnicodeString(
- reinterpret_cast<const UChar*>(*text_value), text_value.length());
- obj->SetPointerInInternalField(1, text);
-
- // Return new unicode string pointer.
- return text;
-}
-
-void BreakIterator::DeleteBreakIterator(v8::Persistent<v8::Value> object,
- void* param) {
- v8::Persistent<v8::Object> persistent_object =
- v8::Persistent<v8::Object>::Cast(object);
-
- // First delete the hidden C++ object.
- // Unpacking should never return NULL here. That would only happen if
- // this method is used as the weak callback for persistent handles not
- // pointing to a break iterator.
- delete UnpackBreakIterator(persistent_object);
-
- delete static_cast<icu::UnicodeString*>(
- persistent_object->GetPointerFromInternalField(1));
-
- // Then dispose of the persistent handle to JS object.
- persistent_object.Dispose();
-}
-
-// Throws a JavaScript exception.
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
- // Returns undefined, and schedules an exception to be thrown.
- return v8::ThrowException(v8::Exception::Error(
- v8::String::New("BreakIterator method called on an object "
- "that is not a BreakIterator.")));
-}
-
-v8::Handle<v8::Value> BreakIterator::BreakIteratorAdoptText(
- const v8::Arguments& args) {
- if (args.Length() != 1 || !args[0]->IsString()) {
- return v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Text input is required.")));
- }
-
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
- if (!break_iterator) {
- return ThrowUnexpectedObjectError();
- }
-
- break_iterator->setText(*ResetAdoptedText(args.Holder(), args[0]));
-
- return v8::Undefined();
-}
-
-v8::Handle<v8::Value> BreakIterator::BreakIteratorFirst(
- const v8::Arguments& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
- if (!break_iterator) {
- return ThrowUnexpectedObjectError();
- }
-
- return v8::Int32::New(break_iterator->first());
-}
-
-v8::Handle<v8::Value> BreakIterator::BreakIteratorNext(
- const v8::Arguments& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
- if (!break_iterator) {
- return ThrowUnexpectedObjectError();
- }
-
- return v8::Int32::New(break_iterator->next());
-}
-
-v8::Handle<v8::Value> BreakIterator::BreakIteratorCurrent(
- const v8::Arguments& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
- if (!break_iterator) {
- return ThrowUnexpectedObjectError();
- }
-
- return v8::Int32::New(break_iterator->current());
-}
-
-v8::Handle<v8::Value> BreakIterator::BreakIteratorBreakType(
- const v8::Arguments& args) {
- icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
- if (!break_iterator) {
- return ThrowUnexpectedObjectError();
- }
-
- // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
- icu::RuleBasedBreakIterator* rule_based_iterator =
- static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
- int32_t status = rule_based_iterator->getRuleStatus();
- // Keep return values in sync with JavaScript BreakType enum.
- if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
- return v8::Int32::New(UBRK_WORD_NONE);
- } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
- return v8::Int32::New(UBRK_WORD_NUMBER);
- } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
- return v8::Int32::New(UBRK_WORD_LETTER);
- } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
- return v8::Int32::New(UBRK_WORD_KANA);
- } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
- return v8::Int32::New(UBRK_WORD_IDEO);
- } else {
- return v8::Int32::New(-1);
- }
-}
-
-v8::Handle<v8::Value> BreakIterator::JSBreakIterator(
- const v8::Arguments& args) {
- v8::HandleScope handle_scope;
-
- if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
- return v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Locale and iterator type are required.")));
- }
-
- v8::String::Utf8Value locale(args[0]);
- icu::Locale icu_locale(*locale);
-
- UErrorCode status = U_ZERO_ERROR;
- icu::BreakIterator* break_iterator = NULL;
- v8::String::Utf8Value type(args[1]);
- if (!strcmp(*type, "character")) {
- break_iterator =
- icu::BreakIterator::createCharacterInstance(icu_locale, status);
- } else if (!strcmp(*type, "word")) {
- break_iterator =
- icu::BreakIterator::createWordInstance(icu_locale, status);
- } else if (!strcmp(*type, "sentence")) {
- break_iterator =
- icu::BreakIterator::createSentenceInstance(icu_locale, status);
- } else if (!strcmp(*type, "line")) {
- break_iterator =
- icu::BreakIterator::createLineInstance(icu_locale, status);
- } else {
- return v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Invalid iterator type.")));
- }
-
- if (U_FAILURE(status)) {
- delete break_iterator;
- return v8::ThrowException(v8::Exception::Error(
- v8::String::New("Failed to create break iterator.")));
- }
-
- if (break_iterator_template_.IsEmpty()) {
- v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
-
- raw_template->SetClassName(v8::String::New("v8Locale.v8BreakIterator"));
-
- // Define internal field count on instance template.
- v8::Local<v8::ObjectTemplate> object_template =
- raw_template->InstanceTemplate();
-
- // Set aside internal fields for icu break iterator and adopted text.
- object_template->SetInternalFieldCount(2);
-
- // Define all of the prototype methods on prototype template.
- v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
- proto->Set(v8::String::New("adoptText"),
- v8::FunctionTemplate::New(BreakIteratorAdoptText));
- proto->Set(v8::String::New("first"),
- v8::FunctionTemplate::New(BreakIteratorFirst));
- proto->Set(v8::String::New("next"),
- v8::FunctionTemplate::New(BreakIteratorNext));
- proto->Set(v8::String::New("current"),
- v8::FunctionTemplate::New(BreakIteratorCurrent));
- proto->Set(v8::String::New("breakType"),
- v8::FunctionTemplate::New(BreakIteratorBreakType));
-
- break_iterator_template_ =
- v8::Persistent<v8::FunctionTemplate>::New(raw_template);
- }
-
- // Create an empty object wrapper.
- v8::Local<v8::Object> local_object =
- break_iterator_template_->GetFunction()->NewInstance();
- v8::Persistent<v8::Object> wrapper =
- v8::Persistent<v8::Object>::New(local_object);
-
- // Set break iterator as internal field of the resulting JS object.
- wrapper->SetPointerInInternalField(0, break_iterator);
- // Make sure that the pointer to adopted text is NULL.
- wrapper->SetPointerInInternalField(1, NULL);
-
- // Make object handle weak so we can delete iterator once GC kicks in.
- wrapper.MakeWeak(NULL, DeleteBreakIterator);
-
- return wrapper;
-}
-
-} } // namespace v8::internal
diff --git a/src/extensions/experimental/break-iterator.h b/src/extensions/experimental/break-iterator.h
deleted file mode 100644
index 73b9bbd..0000000
--- a/src/extensions/experimental/break-iterator.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
-
-#include "include/v8.h"
-
-#include "unicode/uversion.h"
-
-namespace U_ICU_NAMESPACE {
-class BreakIterator;
-class UnicodeString;
-}
-
-namespace v8 {
-namespace internal {
-
-class BreakIterator {
- public:
- static v8::Handle<v8::Value> JSBreakIterator(const v8::Arguments& args);
-
- // Helper methods for various bindings.
-
- // Unpacks break iterator object from corresponding JavaScript object.
- static icu::BreakIterator* UnpackBreakIterator(v8::Handle<v8::Object> obj);
-
- // Deletes the old value and sets the adopted text in
- // corresponding JavaScript object.
- static icu::UnicodeString* ResetAdoptedText(v8::Handle<v8::Object> obj,
- v8::Handle<v8::Value> text_value);
-
- // Release memory we allocated for the BreakIterator once the JS object that
- // holds the pointer gets garbage collected.
- static void DeleteBreakIterator(v8::Persistent<v8::Value> object,
- void* param);
-
- // Assigns new text to the iterator.
- static v8::Handle<v8::Value> BreakIteratorAdoptText(
- const v8::Arguments& args);
-
- // Moves iterator to the beginning of the string and returns new position.
- static v8::Handle<v8::Value> BreakIteratorFirst(const v8::Arguments& args);
-
- // Moves iterator to the next position and returns it.
- static v8::Handle<v8::Value> BreakIteratorNext(const v8::Arguments& args);
-
- // Returns current iterator's current position.
- static v8::Handle<v8::Value> BreakIteratorCurrent(
- const v8::Arguments& args);
-
- // Returns type of the item from current position.
- // This call is only valid for word break iterators. Others just return 0.
- static v8::Handle<v8::Value> BreakIteratorBreakType(
- const v8::Arguments& args);
-
- private:
- BreakIterator() {}
-
- static v8::Persistent<v8::FunctionTemplate> break_iterator_template_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
diff --git a/src/extensions/experimental/collator.cc b/src/extensions/experimental/collator.cc
deleted file mode 100644
index 5cf2192..0000000
--- a/src/extensions/experimental/collator.cc
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/extensions/experimental/collator.h"
-
-#include "unicode/coll.h"
-#include "unicode/locid.h"
-#include "unicode/ucol.h"
-
-namespace v8 {
-namespace internal {
-
-v8::Persistent<v8::FunctionTemplate> Collator::collator_template_;
-
-icu::Collator* Collator::UnpackCollator(v8::Handle<v8::Object> obj) {
- if (collator_template_->HasInstance(obj)) {
- return static_cast<icu::Collator*>(obj->GetPointerFromInternalField(0));
- }
-
- return NULL;
-}
-
-void Collator::DeleteCollator(v8::Persistent<v8::Value> object, void* param) {
- v8::Persistent<v8::Object> persistent_object =
- v8::Persistent<v8::Object>::Cast(object);
-
- // First delete the hidden C++ object.
- // Unpacking should never return NULL here. That would only happen if
- // this method is used as the weak callback for persistent handles not
- // pointing to a collator.
- delete UnpackCollator(persistent_object);
-
- // Then dispose of the persistent handle to JS object.
- persistent_object.Dispose();
-}
-
-// Throws a JavaScript exception.
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
- // Returns undefined, and schedules an exception to be thrown.
- return v8::ThrowException(v8::Exception::Error(
- v8::String::New("Collator method called on an object "
- "that is not a Collator.")));
-}
-
-// Extract a boolean option named in |option| and set it to |result|.
-// Return true if it's specified. Otherwise, return false.
-static bool ExtractBooleanOption(const v8::Local<v8::Object>& options,
- const char* option,
- bool* result) {
- v8::HandleScope handle_scope;
- v8::TryCatch try_catch;
- v8::Handle<v8::Value> value = options->Get(v8::String::New(option));
- if (try_catch.HasCaught()) {
- return false;
- }
- // No need to check if |value| is empty because it's taken care of
- // by TryCatch above.
- if (!value->IsUndefined() && !value->IsNull()) {
- if (value->IsBoolean()) {
- *result = value->BooleanValue();
- return true;
- }
- }
- return false;
-}
-
-// When there's an ICU error, throw a JavaScript error with |message|.
-static v8::Handle<v8::Value> ThrowExceptionForICUError(const char* message) {
- return v8::ThrowException(v8::Exception::Error(v8::String::New(message)));
-}
-
-v8::Handle<v8::Value> Collator::CollatorCompare(const v8::Arguments& args) {
- if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
- return v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Two string arguments are required.")));
- }
-
- icu::Collator* collator = UnpackCollator(args.Holder());
- if (!collator) {
- return ThrowUnexpectedObjectError();
- }
-
- v8::String::Value string_value1(args[0]);
- v8::String::Value string_value2(args[1]);
- const UChar* string1 = reinterpret_cast<const UChar*>(*string_value1);
- const UChar* string2 = reinterpret_cast<const UChar*>(*string_value2);
- UErrorCode status = U_ZERO_ERROR;
- UCollationResult result = collator->compare(
- string1, string_value1.length(), string2, string_value2.length(), status);
-
- if (U_FAILURE(status)) {
- return ThrowExceptionForICUError(
- "Unexpected failure in Collator.compare.");
- }
-
- return v8::Int32::New(result);
-}
-
-v8::Handle<v8::Value> Collator::JSCollator(const v8::Arguments& args) {
- v8::HandleScope handle_scope;
-
- if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsObject()) {
- return v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Locale and collation options are required.")));
- }
-
- v8::String::AsciiValue locale(args[0]);
- icu::Locale icu_locale(*locale);
-
- icu::Collator* collator = NULL;
- UErrorCode status = U_ZERO_ERROR;
- collator = icu::Collator::createInstance(icu_locale, status);
-
- if (U_FAILURE(status)) {
- delete collator;
- return ThrowExceptionForICUError("Failed to create collator.");
- }
-
- v8::Local<v8::Object> options(args[1]->ToObject());
-
- // Below, we change collation options that are explicitly specified
- // by a caller in JavaScript. Otherwise, we don't touch because
- // we don't want to change the locale-dependent default value.
- // The three options below are very likely to have the same default
- // across locales, but I haven't checked them all. Others we may add
- // in the future have certainly locale-dependent default (e.g.
- // caseFirst is upperFirst for Danish while is off for most other locales).
-
- bool ignore_case, ignore_accents, numeric;
-
- if (ExtractBooleanOption(options, "ignoreCase", &ignore_case)) {
- // We need to explicitly set the level to secondary to get case ignored.
- // The default L3 ignores UCOL_CASE_LEVEL == UCOL_OFF !
- if (ignore_case) {
- collator->setStrength(icu::Collator::SECONDARY);
- }
- collator->setAttribute(UCOL_CASE_LEVEL, ignore_case ? UCOL_OFF : UCOL_ON,
- status);
- if (U_FAILURE(status)) {
- delete collator;
- return ThrowExceptionForICUError("Failed to set ignoreCase.");
- }
- }
-
- // Accents are taken into account with strength secondary or higher.
- if (ExtractBooleanOption(options, "ignoreAccents", &ignore_accents)) {
- if (!ignore_accents) {
- collator->setStrength(icu::Collator::SECONDARY);
- } else {
- collator->setStrength(icu::Collator::PRIMARY);
- }
- }
-
- if (ExtractBooleanOption(options, "numeric", &numeric)) {
- collator->setAttribute(UCOL_NUMERIC_COLLATION,
- numeric ? UCOL_ON : UCOL_OFF, status);
- if (U_FAILURE(status)) {
- delete collator;
- return ThrowExceptionForICUError("Failed to set numeric sort option.");
- }
- }
-
- if (collator_template_.IsEmpty()) {
- v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
- raw_template->SetClassName(v8::String::New("v8Locale.Collator"));
-
- // Define internal field count on instance template.
- v8::Local<v8::ObjectTemplate> object_template =
- raw_template->InstanceTemplate();
-
- // Set aside internal fields for icu collator.
- object_template->SetInternalFieldCount(1);
-
- // Define all of the prototype methods on prototype template.
- v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
- proto->Set(v8::String::New("compare"),
- v8::FunctionTemplate::New(CollatorCompare));
-
- collator_template_ =
- v8::Persistent<v8::FunctionTemplate>::New(raw_template);
- }
-
- // Create an empty object wrapper.
- v8::Local<v8::Object> local_object =
- collator_template_->GetFunction()->NewInstance();
- v8::Persistent<v8::Object> wrapper =
- v8::Persistent<v8::Object>::New(local_object);
-
- // Set collator as internal field of the resulting JS object.
- wrapper->SetPointerInInternalField(0, collator);
-
- // Make object handle weak so we can delete the collator once GC kicks in.
- wrapper.MakeWeak(NULL, DeleteCollator);
-
- return wrapper;
-}
-
-} } // namespace v8::internal
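Note on the wrapper pattern removed above (and repeated in the other deleted bindings below): it reduces to a short sketch against the V8 3.x-era handle API. The names here are illustrative, not from the tree; the native object lives in an internal field and is freed from a weak-handle callback once the GC collects the JS wrapper.

static void DeleteNativeObject(v8::Persistent<v8::Value> object,
                               void* param) {
  v8::Persistent<v8::Object> wrapper =
      v8::Persistent<v8::Object>::Cast(object);
  // Free the ICU object stored in the internal field, then release
  // the persistent handle itself.
  delete static_cast<icu::Collator*>(
      wrapper->GetPointerFromInternalField(0));
  wrapper.Dispose();
}

static v8::Handle<v8::Value> WrapNativeObject(
    icu::Collator* collator, v8::Handle<v8::FunctionTemplate> tmpl) {
  v8::Local<v8::Object> local_object = tmpl->GetFunction()->NewInstance();
  v8::Persistent<v8::Object> wrapper =
      v8::Persistent<v8::Object>::New(local_object);
  wrapper->SetPointerInInternalField(0, collator);
  // Weak handle: the GC invokes DeleteNativeObject once the JS object
  // becomes unreachable.
  wrapper.MakeWeak(NULL, DeleteNativeObject);
  return wrapper;
}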
diff --git a/src/extensions/experimental/collator.h b/src/extensions/experimental/collator.h
deleted file mode 100644
index ca7e4dc..0000000
--- a/src/extensions/experimental/collator.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
-
-#include "include/v8.h"
-
-#include "unicode/uversion.h"
-
-namespace U_ICU_NAMESPACE {
-class Collator;
-class UnicodeString;
-}
-
-namespace v8 {
-namespace internal {
-
-class Collator {
- public:
- static v8::Handle<v8::Value> JSCollator(const v8::Arguments& args);
-
- // Helper methods for various bindings.
-
- // Unpacks collator object from corresponding JavaScript object.
- static icu::Collator* UnpackCollator(v8::Handle<v8::Object> obj);
-
- // Release memory we allocated for the Collator once the JS object that
- // holds the pointer gets garbage collected.
- static void DeleteCollator(v8::Persistent<v8::Value> object, void* param);
-
- // Compares two strings and returns -1, 0 or 1 depending on
- // whether string1 is smaller than, equal to or larger than string2.
- static v8::Handle<v8::Value> CollatorCompare(const v8::Arguments& args);
-
- private:
- Collator() {}
-
- static v8::Persistent<v8::FunctionTemplate> collator_template_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
diff --git a/src/extensions/experimental/datetime-format.cc b/src/extensions/experimental/datetime-format.cc
deleted file mode 100644
index 94a29ac..0000000
--- a/src/extensions/experimental/datetime-format.cc
+++ /dev/null
@@ -1,384 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/extensions/experimental/datetime-format.h"
-
-#include <string.h>
-
-#include "src/extensions/experimental/i18n-utils.h"
-#include "unicode/dtfmtsym.h"
-#include "unicode/dtptngen.h"
-#include "unicode/locid.h"
-#include "unicode/smpdtfmt.h"
-
-namespace v8 {
-namespace internal {
-
-v8::Persistent<v8::FunctionTemplate> DateTimeFormat::datetime_format_template_;
-
-static icu::DateFormat* CreateDateTimeFormat(v8::Handle<v8::String>,
- v8::Handle<v8::Object>);
-static v8::Handle<v8::Value> GetSymbols(
- const v8::Arguments&,
- const icu::UnicodeString*, int32_t,
- const icu::UnicodeString*, int32_t,
- const icu::UnicodeString*, int32_t);
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
-static icu::DateFormat::EStyle GetDateTimeStyle(const icu::UnicodeString&);
-
-icu::SimpleDateFormat* DateTimeFormat::UnpackDateTimeFormat(
- v8::Handle<v8::Object> obj) {
- if (datetime_format_template_->HasInstance(obj)) {
- return static_cast<icu::SimpleDateFormat*>(
- obj->GetPointerFromInternalField(0));
- }
-
- return NULL;
-}
-
-void DateTimeFormat::DeleteDateTimeFormat(v8::Persistent<v8::Value> object,
- void* param) {
- v8::Persistent<v8::Object> persistent_object =
- v8::Persistent<v8::Object>::Cast(object);
-
- // First delete the hidden C++ object.
- // Unpacking should never return NULL here. That would only happen if
- // this method is used as the weak callback for persistent handles not
- // pointing to a date time formatter.
- delete UnpackDateTimeFormat(persistent_object);
-
- // Then dispose of the persistent handle to JS object.
- persistent_object.Dispose();
-}
-
-v8::Handle<v8::Value> DateTimeFormat::Format(const v8::Arguments& args) {
- v8::HandleScope handle_scope;
-
- double millis = 0.0;
- if (args.Length() != 1 || !args[0]->IsDate()) {
- // Create a new date.
- v8::TryCatch try_catch;
- v8::Local<v8::Script> date_script =
- v8::Script::Compile(v8::String::New("eval('new Date()')"));
- v8::Local<v8::Value> date_value = date_script->Run();
- if (try_catch.HasCaught()) {
- return try_catch.ReThrow();
- }
- millis = date_value->NumberValue();
- } else {
- millis = v8::Date::Cast(*args[0])->NumberValue();
- }
-
- icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
- if (!date_format) {
- return ThrowUnexpectedObjectError();
- }
-
- icu::UnicodeString result;
- date_format->format(millis, result);
-
- return v8::String::New(
- reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
-}
-
-v8::Handle<v8::Value> DateTimeFormat::GetMonths(const v8::Arguments& args) {
- icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
- if (!date_format) {
- return ThrowUnexpectedObjectError();
- }
-
- const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
-
- int32_t narrow_count;
- const icu::UnicodeString* narrow = symbols->getMonths(
- narrow_count,
- icu::DateFormatSymbols::STANDALONE,
- icu::DateFormatSymbols::NARROW);
- int32_t abbrev_count;
- const icu::UnicodeString* abbrev = symbols->getMonths(
- abbrev_count,
- icu::DateFormatSymbols::STANDALONE,
- icu::DateFormatSymbols::ABBREVIATED);
- int32_t wide_count;
- const icu::UnicodeString* wide = symbols->getMonths(
- wide_count,
- icu::DateFormatSymbols::STANDALONE,
- icu::DateFormatSymbols::WIDE);
-
- return GetSymbols(
- args, narrow, narrow_count, abbrev, abbrev_count, wide, wide_count);
-}
-
-v8::Handle<v8::Value> DateTimeFormat::GetWeekdays(const v8::Arguments& args) {
- icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
- if (!date_format) {
- return ThrowUnexpectedObjectError();
- }
-
- const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
-
- int32_t narrow_count;
- const icu::UnicodeString* narrow = symbols->getWeekdays(
- narrow_count,
- icu::DateFormatSymbols::STANDALONE,
- icu::DateFormatSymbols::NARROW);
- int32_t abbrev_count;
- const icu::UnicodeString* abbrev = symbols->getWeekdays(
- abbrev_count,
- icu::DateFormatSymbols::STANDALONE,
- icu::DateFormatSymbols::ABBREVIATED);
- int32_t wide_count;
- const icu::UnicodeString* wide = symbols->getWeekdays(
- wide_count,
- icu::DateFormatSymbols::STANDALONE,
- icu::DateFormatSymbols::WIDE);
-
- // getXXXWeekdays always returns 8 elements - ICU stable API.
- // We can't use ASSERT_EQ(8, narrow_count) because ASSERT is internal to v8.
- if (narrow_count != 8 || abbrev_count != 8 || wide_count != 8) {
- return v8::ThrowException(v8::Exception::Error(
- v8::String::New("Failed to get weekday information.")));
- }
-
- // ICU documentation says we should ignore element 0 of the returned array.
- return GetSymbols(args, narrow + 1, narrow_count - 1, abbrev + 1,
- abbrev_count - 1, wide + 1, wide_count - 1);
-}
-
-v8::Handle<v8::Value> DateTimeFormat::GetEras(const v8::Arguments& args) {
- icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
- if (!date_format) {
- return ThrowUnexpectedObjectError();
- }
-
- const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
-
- int32_t narrow_count;
- const icu::UnicodeString* narrow = symbols->getNarrowEras(narrow_count);
- int32_t abbrev_count;
- const icu::UnicodeString* abbrev = symbols->getEras(abbrev_count);
- int32_t wide_count;
- const icu::UnicodeString* wide = symbols->getEraNames(wide_count);
-
- return GetSymbols(
- args, narrow, narrow_count, abbrev, abbrev_count, wide, wide_count);
-}
-
-v8::Handle<v8::Value> DateTimeFormat::GetAmPm(const v8::Arguments& args) {
- icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
- if (!date_format) {
- return ThrowUnexpectedObjectError();
- }
-
- const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
-
- // In this case narrow == abbreviated == wide
- int32_t count;
- const icu::UnicodeString* wide = symbols->getAmPmStrings(count);
-
- return GetSymbols(args, wide, count, wide, count, wide, count);
-}
-
-v8::Handle<v8::Value> DateTimeFormat::JSDateTimeFormat(
- const v8::Arguments& args) {
- v8::HandleScope handle_scope;
-
- if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsObject()) {
- return v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Locale and date/time options are required.")));
- }
-
- icu::SimpleDateFormat* date_format = static_cast<icu::SimpleDateFormat*>(
- CreateDateTimeFormat(args[0]->ToString(), args[1]->ToObject()));
-
- if (datetime_format_template_.IsEmpty()) {
- v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
-
- raw_template->SetClassName(v8::String::New("v8Locale.DateTimeFormat"));
-
- // Define internal field count on instance template.
- v8::Local<v8::ObjectTemplate> object_template =
- raw_template->InstanceTemplate();
-
- // Set aside internal field for icu date time formatter.
- object_template->SetInternalFieldCount(1);
-
- // Define all of the prototype methods on prototype template.
- v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
- proto->Set(v8::String::New("format"),
- v8::FunctionTemplate::New(Format));
- proto->Set(v8::String::New("getMonths"),
- v8::FunctionTemplate::New(GetMonths));
- proto->Set(v8::String::New("getWeekdays"),
- v8::FunctionTemplate::New(GetWeekdays));
- proto->Set(v8::String::New("getEras"),
- v8::FunctionTemplate::New(GetEras));
- proto->Set(v8::String::New("getAmPm"),
- v8::FunctionTemplate::New(GetAmPm));
-
- datetime_format_template_ =
- v8::Persistent<v8::FunctionTemplate>::New(raw_template);
- }
-
- // Create an empty object wrapper.
- v8::Local<v8::Object> local_object =
- datetime_format_template_->GetFunction()->NewInstance();
- v8::Persistent<v8::Object> wrapper =
- v8::Persistent<v8::Object>::New(local_object);
-
- // Set date time formatter as internal field of the resulting JS object.
- wrapper->SetPointerInInternalField(0, date_format);
-
- // Set resolved pattern in options.pattern.
- icu::UnicodeString pattern;
- date_format->toPattern(pattern);
- v8::Local<v8::Object> options = v8::Object::New();
- options->Set(v8::String::New("pattern"),
- v8::String::New(reinterpret_cast<const uint16_t*>(
- pattern.getBuffer()), pattern.length()));
- wrapper->Set(v8::String::New("options"), options);
-
- // Make object handle weak so we can delete the formatter once GC kicks in.
- wrapper.MakeWeak(NULL, DeleteDateTimeFormat);
-
- return wrapper;
-}
-
-// Returns SimpleDateFormat.
-static icu::DateFormat* CreateDateTimeFormat(
- v8::Handle<v8::String> locale, v8::Handle<v8::Object> settings) {
- v8::HandleScope handle_scope;
-
- v8::String::AsciiValue ascii_locale(locale);
- icu::Locale icu_locale(*ascii_locale);
-
- // Make formatter from skeleton.
- icu::SimpleDateFormat* date_format = NULL;
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString skeleton;
- if (I18NUtils::ExtractStringSetting(settings, "skeleton", &skeleton)) {
- icu::DateTimePatternGenerator* generator =
- icu::DateTimePatternGenerator::createInstance(icu_locale, status);
- icu::UnicodeString pattern =
- generator->getBestPattern(skeleton, status);
- delete generator;
-
- date_format = new icu::SimpleDateFormat(pattern, icu_locale, status);
- if (U_SUCCESS(status)) {
- return date_format;
- } else {
- delete date_format;
- }
- }
-
- // Extract date style and time style from settings.
- icu::UnicodeString date_style;
- icu::DateFormat::EStyle icu_date_style = icu::DateFormat::kNone;
- if (I18NUtils::ExtractStringSetting(settings, "dateStyle", &date_style)) {
- icu_date_style = GetDateTimeStyle(date_style);
- }
-
- icu::UnicodeString time_style;
- icu::DateFormat::EStyle icu_time_style = icu::DateFormat::kNone;
- if (I18NUtils::ExtractStringSetting(settings, "timeStyle", &time_style)) {
- icu_time_style = GetDateTimeStyle(time_style);
- }
-
- // Try all combinations of date/time styles.
- if (icu_date_style == icu::DateFormat::kNone &&
- icu_time_style == icu::DateFormat::kNone) {
- // Return default short date, short time format.
- return icu::DateFormat::createDateTimeInstance(
- icu::DateFormat::kShort, icu::DateFormat::kShort, icu_locale);
- } else if (icu_date_style != icu::DateFormat::kNone &&
- icu_time_style != icu::DateFormat::kNone) {
- return icu::DateFormat::createDateTimeInstance(
- icu_date_style, icu_time_style, icu_locale);
- } else if (icu_date_style != icu::DateFormat::kNone) {
- return icu::DateFormat::createDateInstance(icu_date_style, icu_locale);
- } else {
- // icu_time_style != icu::DateFormat::kNone
- return icu::DateFormat::createTimeInstance(icu_time_style, icu_locale);
- }
-}
-
-// Creates a v8::Array of narrow, abbrev or wide symbols.
-static v8::Handle<v8::Value> GetSymbols(const v8::Arguments& args,
- const icu::UnicodeString* narrow,
- int32_t narrow_count,
- const icu::UnicodeString* abbrev,
- int32_t abbrev_count,
- const icu::UnicodeString* wide,
- int32_t wide_count) {
- v8::HandleScope handle_scope;
-
- // Make wide width default.
- const icu::UnicodeString* result = wide;
- int32_t count = wide_count;
-
- if (args.Length() == 1 && args[0]->IsString()) {
- v8::String::AsciiValue ascii_value(args[0]);
- if (strcmp(*ascii_value, "abbreviated") == 0) {
- result = abbrev;
- count = abbrev_count;
- } else if (strcmp(*ascii_value, "narrow") == 0) {
- result = narrow;
- count = narrow_count;
- }
- }
-
- v8::Handle<v8::Array> symbols = v8::Array::New();
- for (int32_t i = 0; i < count; ++i) {
- symbols->Set(i, v8::String::New(
- reinterpret_cast<const uint16_t*>(result[i].getBuffer()),
- result[i].length()));
- }
-
- return handle_scope.Close(symbols);
-}
-
-// Throws a JavaScript exception.
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
- // Returns undefined, and schedules an exception to be thrown.
- return v8::ThrowException(v8::Exception::Error(
- v8::String::New("DateTimeFormat method called on an object "
- "that is not a DateTimeFormat.")));
-}
-
-// Returns icu date/time style.
-static icu::DateFormat::EStyle GetDateTimeStyle(
- const icu::UnicodeString& type) {
- if (type == UNICODE_STRING_SIMPLE("medium")) {
- return icu::DateFormat::kMedium;
- } else if (type == UNICODE_STRING_SIMPLE("long")) {
- return icu::DateFormat::kLong;
- } else if (type == UNICODE_STRING_SIMPLE("full")) {
- return icu::DateFormat::kFull;
- }
-
- return icu::DateFormat::kShort;
-}
-
-} } // namespace v8::internal
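The skeleton path in CreateDateTimeFormat above boils down to the following ICU idiom. A minimal sketch, assuming the same ICU headers the deleted file included; the "yMMMd" skeleton is just an example value.

UErrorCode status = U_ZERO_ERROR;
icu::DateTimePatternGenerator* generator =
    icu::DateTimePatternGenerator::createInstance(icu_locale, status);
// Expand a skeleton such as "yMMMd" into the locale's preferred
// pattern, e.g. "MMM d, y" for en-US.
icu::UnicodeString pattern =
    generator->getBestPattern(UNICODE_STRING_SIMPLE("yMMMd"), status);
delete generator;
icu::SimpleDateFormat* date_format =
    new icu::SimpleDateFormat(pattern, icu_locale, status);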
diff --git a/src/extensions/experimental/datetime-format.h b/src/extensions/experimental/datetime-format.h
deleted file mode 100644
index a6a228c..0000000
--- a/src/extensions/experimental/datetime-format.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
-
-#include "include/v8.h"
-
-#include "unicode/uversion.h"
-
-namespace U_ICU_NAMESPACE {
-class SimpleDateFormat;
-}
-
-namespace v8 {
-namespace internal {
-
-class DateTimeFormat {
- public:
- static v8::Handle<v8::Value> JSDateTimeFormat(const v8::Arguments& args);
-
- // Helper methods for various bindings.
-
- // Unpacks date format object from corresponding JavaScript object.
- static icu::SimpleDateFormat* UnpackDateTimeFormat(
- v8::Handle<v8::Object> obj);
-
- // Release memory we allocated for the DateFormat once the JS object that
- // holds the pointer gets garbage collected.
- static void DeleteDateTimeFormat(v8::Persistent<v8::Value> object,
- void* param);
-
- // Formats date and returns corresponding string.
- static v8::Handle<v8::Value> Format(const v8::Arguments& args);
-
- // All date time symbol methods below return stand-alone names in
- // either narrow, abbreviated or wide width.
-
- // Get list of months.
- static v8::Handle<v8::Value> GetMonths(const v8::Arguments& args);
-
- // Get list of weekdays.
- static v8::Handle<v8::Value> GetWeekdays(const v8::Arguments& args);
-
- // Get list of eras.
- static v8::Handle<v8::Value> GetEras(const v8::Arguments& args);
-
- // Get list of day periods.
- static v8::Handle<v8::Value> GetAmPm(const v8::Arguments& args);
-
- private:
- DateTimeFormat();
-
- static v8::Persistent<v8::FunctionTemplate> datetime_format_template_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
diff --git a/src/extensions/experimental/experimental.gyp b/src/extensions/experimental/experimental.gyp
deleted file mode 100644
index 24fb683..0000000
--- a/src/extensions/experimental/experimental.gyp
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
- 'variables': {
- # TODO(cira): Find out how to pass this value for arbitrary embedder.
- # Chromium sets it in common.gypi and does force include of that file for
- # all sub projects.
- 'icu_src_dir%': '../../../../third_party/icu',
- },
- 'targets': [
- {
- 'target_name': 'i18n_api',
- 'type': 'static_library',
- 'sources': [
- 'break-iterator.cc',
- 'break-iterator.h',
- 'collator.cc',
- 'collator.h',
- 'datetime-format.cc',
- 'datetime-format.h',
- 'i18n-extension.cc',
- 'i18n-extension.h',
- 'i18n-locale.cc',
- 'i18n-locale.h',
- 'i18n-natives.h',
- 'i18n-utils.cc',
- 'i18n-utils.h',
- 'language-matcher.cc',
- 'language-matcher.h',
- 'number-format.cc',
- 'number-format.h',
- '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
- ],
- 'include_dirs': [
- '<(icu_src_dir)/public/common',
- # v8/ is root for all includes.
- '../../..'
- ],
- 'dependencies': [
- '<(icu_src_dir)/icu.gyp:*',
- 'js2c_i18n#host',
- '../../../tools/gyp/v8.gyp:v8',
- ],
- 'direct_dependent_settings': {
- # Adds -Iv8 for embedders.
- 'include_dirs': [
- '../../..'
- ],
- },
- },
- {
- 'target_name': 'js2c_i18n',
- 'type': 'none',
- 'toolsets': ['host'],
- 'variables': {
- 'js_files': [
- 'i18n.js'
- ],
- },
- 'actions': [
- {
- 'action_name': 'js2c_i18n',
- 'inputs': [
- 'i18n-js2c.py',
- '<@(js_files)',
- ],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
- ],
- 'action': [
- 'python',
- 'i18n-js2c.py',
- '<@(_outputs)',
- '<@(js_files)'
- ],
- },
- ],
- },
- ], # targets
-}
diff --git a/src/extensions/experimental/i18n-extension.cc b/src/extensions/experimental/i18n-extension.cc
deleted file mode 100644
index c5afcf0..0000000
--- a/src/extensions/experimental/i18n-extension.cc
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/extensions/experimental/i18n-extension.h"
-
-#include "src/extensions/experimental/break-iterator.h"
-#include "src/extensions/experimental/collator.h"
-#include "src/extensions/experimental/datetime-format.h"
-#include "src/extensions/experimental/i18n-locale.h"
-#include "src/extensions/experimental/i18n-natives.h"
-#include "src/extensions/experimental/number-format.h"
-
-namespace v8 {
-namespace internal {
-
-I18NExtension* I18NExtension::extension_ = NULL;
-
-I18NExtension::I18NExtension()
- : v8::Extension("v8/i18n", I18Natives::GetScriptSource()) {
-}
-
-v8::Handle<v8::FunctionTemplate> I18NExtension::GetNativeFunction(
- v8::Handle<v8::String> name) {
- if (name->Equals(v8::String::New("NativeJSLocale"))) {
- return v8::FunctionTemplate::New(I18NLocale::JSLocale);
- } else if (name->Equals(v8::String::New("NativeJSBreakIterator"))) {
- return v8::FunctionTemplate::New(BreakIterator::JSBreakIterator);
- } else if (name->Equals(v8::String::New("NativeJSCollator"))) {
- return v8::FunctionTemplate::New(Collator::JSCollator);
- } else if (name->Equals(v8::String::New("NativeJSDateTimeFormat"))) {
- return v8::FunctionTemplate::New(DateTimeFormat::JSDateTimeFormat);
- } else if (name->Equals(v8::String::New("NativeJSNumberFormat"))) {
- return v8::FunctionTemplate::New(NumberFormat::JSNumberFormat);
- }
-
- return v8::Handle<v8::FunctionTemplate>();
-}
-
-I18NExtension* I18NExtension::get() {
- if (!extension_) {
- extension_ = new I18NExtension();
- }
- return extension_;
-}
-
-void I18NExtension::Register() {
- static v8::DeclareExtension i18n_extension_declaration(I18NExtension::get());
-}
-
-} } // namespace v8::internal
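An embedder that does not go through Register() could enable the extension explicitly. A sketch against the V8 3.x-era embedding API (v8::RegisterExtension / v8::ExtensionConfiguration); treat the exact wiring as illustrative.

// Register the extension, then list it by name when creating a context.
v8::RegisterExtension(v8::internal::I18NExtension::get());
const char* extension_names[] = { "v8/i18n" };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Persistent<v8::Context> context = v8::Context::New(&extensions);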
diff --git a/src/extensions/experimental/i18n-extension.h b/src/extensions/experimental/i18n-extension.h
deleted file mode 100644
index 5401f25..0000000
--- a/src/extensions/experimental/i18n-extension.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
-
-#include "include/v8.h"
-
-namespace v8 {
-namespace internal {
-
-
-class I18NExtension : public v8::Extension {
- public:
- I18NExtension();
-
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
- v8::Handle<v8::String> name);
-
- // V8 code prefers Register(), while Chrome and WebKit use get()-style
- // methods.
- static void Register();
- static I18NExtension* get();
-
- private:
- static I18NExtension* extension_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
diff --git a/src/extensions/experimental/i18n-js2c.py b/src/extensions/experimental/i18n-js2c.py
deleted file mode 100644
index 9c3128b..0000000
--- a/src/extensions/experimental/i18n-js2c.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# This is a utility for converting I18N JavaScript source code into C-style
-# char arrays. It is used for embedded JavaScript code in the V8
-# library.
-# This is a pared down copy of v8/tools/js2c.py that avoids use of
-# v8/src/natives.h and produces different cc template.
-
-import os, re, sys, string
-
-
-def ToCArray(lines):
- result = []
- for ch in lines: # Avoid shadowing the chr() builtin.
- value = ord(ch)
- assert value < 128
- result.append(str(value))
- result.append("0")
- return ", ".join(result)
-
-
-def RemoveCommentsAndTrailingWhitespace(lines):
- lines = re.sub(r'//.*\n', '\n', lines) # end-of-line comments
- lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
- lines = re.sub(r'\s+\n+', '\n', lines) # trailing whitespace
- return lines
-
-
-def ReadFile(filename):
- file = open(filename, "rt")
- try:
- lines = file.read()
- finally:
- file.close()
- return lines
-
-
-EVAL_PATTERN = re.compile(r'\beval\s*\(')
-WITH_PATTERN = re.compile(r'\bwith\s*\(')
-
-
-def Validate(lines, file):
- lines = RemoveCommentsAndTrailingWhitespace(lines)
- # Because of the simplified context setup, eval and with are not
- # allowed in the natives files.
- eval_match = EVAL_PATTERN.search(lines)
- if eval_match:
- raise ("Eval disallowed in natives: %s" % file)
- with_match = WITH_PATTERN.search(lines)
- if with_match:
- raise ("With statements disallowed in natives: %s" % file)
-
-
-HEADER_TEMPLATE = """\
-// Copyright 2011 Google Inc. All Rights Reserved.
-
-// This file was generated from .js source files by gyp. If you
-// want to make changes to this file you should either change the
-// javascript source files or the i18n-js2c.py script.
-
-#include "src/extensions/experimental/i18n-natives.h"
-
-namespace v8 {
-namespace internal {
-
-// static
-const char* I18Natives::GetScriptSource() {
- // JavaScript source gets injected here.
- static const char i18n_source[] = {%s};
-
- return i18n_source;
-}
-
-} // internal
-} // v8
-"""
-
-
-def JS2C(source, target):
- filename = str(source)
-
- lines = ReadFile(filename)
- Validate(lines, filename)
- data = ToCArray(lines)
-
- # Emit result
- output = open(target, "w")
- output.write(HEADER_TEMPLATE % data)
- output.close()
-
-
-def main():
- target = sys.argv[1]
- source = sys.argv[2]
- JS2C(source, target)
-
-
-if __name__ == "__main__":
- main()
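Given the HEADER_TEMPLATE above, the script embeds the JS source as a NUL-terminated array of decimal char codes. A sketch of the interesting part of the generated i18n-js.cc for a hypothetical one-line input "var x;":

// static
const char* I18Natives::GetScriptSource() {
  // "var x;\n" as decimal char codes; the trailing 0 terminates it.
  static const char i18n_source[] = {118, 97, 114, 32, 120, 59, 10, 0};
  return i18n_source;
}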
diff --git a/src/extensions/experimental/i18n-locale.cc b/src/extensions/experimental/i18n-locale.cc
deleted file mode 100644
index 46a5f87..0000000
--- a/src/extensions/experimental/i18n-locale.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/extensions/experimental/i18n-locale.h"
-
-#include "src/extensions/experimental/i18n-utils.h"
-#include "src/extensions/experimental/language-matcher.h"
-#include "unicode/locid.h"
-#include "unicode/uloc.h"
-
-namespace v8 {
-namespace internal {
-
-const char* const I18NLocale::kLocaleID = "localeID";
-const char* const I18NLocale::kRegionID = "regionID";
-const char* const I18NLocale::kICULocaleID = "icuLocaleID";
-
-v8::Handle<v8::Value> I18NLocale::JSLocale(const v8::Arguments& args) {
- v8::HandleScope handle_scope;
-
- if (args.Length() != 1 || !args[0]->IsObject()) {
- return v8::Undefined();
- }
-
- v8::Local<v8::Object> settings = args[0]->ToObject();
-
- // Get best match for locale.
- v8::TryCatch try_catch;
- v8::Handle<v8::Value> locale_id = settings->Get(v8::String::New(kLocaleID));
- if (try_catch.HasCaught()) {
- return v8::Undefined();
- }
-
- LocaleIDMatch result;
- if (locale_id->IsArray()) {
- LanguageMatcher::GetBestMatchForPriorityList(
- v8::Handle<v8::Array>::Cast(locale_id), &result);
- } else if (locale_id->IsString()) {
- LanguageMatcher::GetBestMatchForString(locale_id->ToString(), &result);
- } else {
- LanguageMatcher::GetBestMatchForString(v8::String::New(""), &result);
- }
-
- // Get best match for region.
- char region_id[ULOC_COUNTRY_CAPACITY];
- I18NUtils::StrNCopy(region_id, ULOC_COUNTRY_CAPACITY, "");
-
- v8::Handle<v8::Value> region = settings->Get(v8::String::New(kRegionID));
- if (try_catch.HasCaught()) {
- return v8::Undefined();
- }
-
- if (!GetBestMatchForRegionID(result.icu_id, region, region_id)) {
- // Set region id to empty string because region couldn't be inferred.
- I18NUtils::StrNCopy(region_id, ULOC_COUNTRY_CAPACITY, "");
- }
-
- // Build JavaScript object that contains bcp and icu locale ID and region ID.
- v8::Handle<v8::Object> locale = v8::Object::New();
- locale->Set(v8::String::New(kLocaleID), v8::String::New(result.bcp47_id));
- locale->Set(v8::String::New(kICULocaleID), v8::String::New(result.icu_id));
- locale->Set(v8::String::New(kRegionID), v8::String::New(region_id));
-
- return handle_scope.Close(locale);
-}
-
-bool I18NLocale::GetBestMatchForRegionID(
- const char* locale_id, v8::Handle<v8::Value> region_id, char* result) {
- if (region_id->IsString() && region_id->ToString()->Length() != 0) {
- icu::Locale user_locale(
- icu::Locale("und", *v8::String::Utf8Value(region_id->ToString())));
- I18NUtils::StrNCopy(
- result, ULOC_COUNTRY_CAPACITY, user_locale.getCountry());
- return true;
- }
- // Maximize locale_id to infer the region (e.g. expand "de" to "de-Latn-DE"
- // and grab "DE" from the result).
- UErrorCode status = U_ZERO_ERROR;
- char maximized_locale[ULOC_FULLNAME_CAPACITY];
- uloc_addLikelySubtags(
- locale_id, maximized_locale, ULOC_FULLNAME_CAPACITY, &status);
- uloc_getCountry(maximized_locale, result, ULOC_COUNTRY_CAPACITY, &status);
-
- return !U_FAILURE(status);
-}
-
-} } // namespace v8::internal
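The fallback path in GetBestMatchForRegionID above is the standard ICU likely-subtags idiom. A standalone sketch, assuming <unicode/uloc.h>:

UErrorCode status = U_ZERO_ERROR;
char maximized[ULOC_FULLNAME_CAPACITY];
// "de" maximizes to "de_Latn_DE"; the country part gives the region.
uloc_addLikelySubtags("de", maximized, ULOC_FULLNAME_CAPACITY, &status);
char region[ULOC_COUNTRY_CAPACITY];
uloc_getCountry(maximized, region, ULOC_COUNTRY_CAPACITY, &status);
// On success, region now holds "DE".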
diff --git a/src/extensions/experimental/i18n-locale.h b/src/extensions/experimental/i18n-locale.h
deleted file mode 100644
index 607818c..0000000
--- a/src/extensions/experimental/i18n-locale.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
-
-#include "include/v8.h"
-
-namespace v8 {
-namespace internal {
-
-class I18NLocale {
- public:
- I18NLocale() {}
-
- // Implementations of window.Locale methods.
- static v8::Handle<v8::Value> JSLocale(const v8::Arguments& args);
-
- // Infers region id given the locale id, or uses user specified region id.
- // Result is canonicalized.
- // Returns status of ICU operation (maximizing locale or get region call).
- static bool GetBestMatchForRegionID(
- const char* locale_id, v8::Handle<v8::Value> region_id, char* result);
-
- private:
- // Key name for localeID parameter.
- static const char* const kLocaleID;
- // Key name for regionID parameter.
- static const char* const kRegionID;
- // Key name for the icuLocaleID result.
- static const char* const kICULocaleID;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
diff --git a/src/extensions/experimental/i18n-natives.h b/src/extensions/experimental/i18n-natives.h
deleted file mode 100644
index 37362d0..0000000
--- a/src/extensions/experimental/i18n-natives.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
-
-namespace v8 {
-namespace internal {
-
-class I18Natives {
- public:
- // Gets script source from generated file.
- // Source is statically allocated string.
- static const char* GetScriptSource();
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
diff --git a/src/extensions/experimental/i18n-utils.cc b/src/extensions/experimental/i18n-utils.cc
deleted file mode 100644
index dc2be1a..0000000
--- a/src/extensions/experimental/i18n-utils.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/extensions/experimental/i18n-utils.h"
-
-#include <string.h>
-
-#include "unicode/unistr.h"
-
-namespace v8 {
-namespace internal {
-
-// static
-void I18NUtils::StrNCopy(char* dest, int length, const char* src) {
- if (!dest || !src) return;
-
- strncpy(dest, src, length);
- dest[length - 1] = '\0';
-}
-
-// static
-bool I18NUtils::ExtractStringSetting(const v8::Handle<v8::Object>& settings,
- const char* setting,
- icu::UnicodeString* result) {
- if (!setting || !result) return false;
-
- v8::HandleScope handle_scope;
- v8::TryCatch try_catch;
- v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
- if (try_catch.HasCaught()) {
- return false;
- }
- // No need to check if |value| is empty because it's taken care of
- // by TryCatch above.
- if (!value->IsUndefined() && !value->IsNull() && value->IsString()) {
- v8::String::Utf8Value utf8_value(value);
- if (*utf8_value == NULL) return false;
- result->setTo(icu::UnicodeString::fromUTF8(*utf8_value));
- return true;
- }
- return false;
-}
-
-// static
-void I18NUtils::AsciiToUChar(const char* source,
- int32_t source_length,
- UChar* target,
- int32_t target_length) {
- int32_t length =
- source_length < target_length ? source_length : target_length;
-
- if (length <= 0) {
- return;
- }
-
- for (int32_t i = 0; i < length - 1; ++i) {
- target[i] = static_cast<UChar>(source[i]);
- }
-
- target[length - 1] = 0x0u;
-}
-
-} } // namespace v8::internal
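The contract of StrNCopy above (copy at most length - 1 bytes, always NUL-terminate) differs from bare strncpy; a short usage sketch:

char buffer[4];
// strncpy alone would leave buffer unterminated here; StrNCopy
// truncates to "loc" and writes the trailing NUL.
I18NUtils::StrNCopy(buffer, sizeof(buffer), "locale");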
diff --git a/src/extensions/experimental/i18n-utils.h b/src/extensions/experimental/i18n-utils.h
deleted file mode 100644
index 7c31528..0000000
--- a/src/extensions/experimental/i18n-utils.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
-
-#include "include/v8.h"
-
-#include "unicode/uversion.h"
-
-namespace U_ICU_NAMESPACE {
-class UnicodeString;
-}
-
-namespace v8 {
-namespace internal {
-
-class I18NUtils {
- public:
- // Safe string copy. Null terminates the destination. Copies at most
- // (length - 1) bytes.
- // We can't use snprintf since it's not supported on all relevant platforms.
- // We can't use OS::SNPrintF, it's only for internal code.
- static void StrNCopy(char* dest, int length, const char* src);
-
- // Extract a string setting named in |settings| and set it to |result|.
- // Return true if it's specified. Otherwise, return false.
- static bool ExtractStringSetting(const v8::Handle<v8::Object>& settings,
- const char* setting,
- icu::UnicodeString* result);
-
- // Converts ASCII array into UChar array.
- // Target is always \0 terminated.
- static void AsciiToUChar(const char* source,
- int32_t source_length,
- UChar* target,
- int32_t target_length);
-
- private:
- I18NUtils() {}
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
diff --git a/src/extensions/experimental/i18n.js b/src/extensions/experimental/i18n.js
deleted file mode 100644
index 56bcf9e..0000000
--- a/src/extensions/experimental/i18n.js
+++ /dev/null
@@ -1,380 +0,0 @@
-// Copyright 2006-2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// TODO(cira): Rename v8Locale into LocaleInfo once we have stable API.
-/**
- * LocaleInfo class is an aggregate class of all i18n API calls.
- * @param {Object} settings - localeID and regionID to create LocaleInfo from.
- * {Array.<string>|string} settings.localeID -
- * Unicode identifier of the locale.
- * See http://unicode.org/reports/tr35/#BCP_47_Conformance
- * {string} settings.regionID - ISO3166 region ID with addition of
- * invalid, undefined and reserved region codes.
- * @constructor
- */
-v8Locale = function(settings) {
- native function NativeJSLocale();
-
- // Assume user wanted to do v8Locale("sr");
- if (typeof(settings) === "string") {
- settings = {'localeID': settings};
- }
-
- var properties = NativeJSLocale(
- v8Locale.__createSettingsOrDefault(settings, {'localeID': 'root'}));
-
- // Keep the resolved ICU locale ID around to avoid resolving localeID to
- // ICU locale ID every time BreakIterator, Collator and so forth are called.
- this.__icuLocaleID = properties.icuLocaleID;
- this.options = {'localeID': properties.localeID,
- 'regionID': properties.regionID};
-};
-
-/**
- * Clones existing locale with possible overrides for some of the options.
- * @param {!Object} settings - overrides for current locale settings.
- * @returns {Object} - new LocaleInfo object.
- */
-v8Locale.prototype.derive = function(settings) {
- return new v8Locale(
- v8Locale.__createSettingsOrDefault(settings, this.options));
-};
-
-/**
- * v8BreakIterator class implements locale-aware segmentation.
- * It is not part of EcmaScript proposal.
- * @param {Object} locale - locale object to pass to break
- * iterator implementation.
- * @param {string} type - type of segmentation:
- * - character
- * - word
- * - sentence
- * - line
- * @private
- * @constructor
- */
-v8Locale.v8BreakIterator = function(locale, type) {
- native function NativeJSBreakIterator();
-
- locale = v8Locale.__createLocaleOrDefault(locale);
- // BCP47 ID would work in this case, but we use ICU locale for consistency.
- var iterator = NativeJSBreakIterator(locale.__icuLocaleID, type);
- iterator.type = type;
- return iterator;
-};
-
-/**
- * Type of the break we encountered during previous iteration.
- * @type{Enum}
- */
-v8Locale.v8BreakIterator.BreakType = {
- 'unknown': -1,
- 'none': 0,
- 'number': 100,
- 'word': 200,
- 'kana': 300,
- 'ideo': 400
-};
-
-/**
- * Creates new v8BreakIterator based on current locale.
- * @param {string} - type of segmentation. See constructor.
- * @returns {Object} - new v8BreakIterator object.
- */
-v8Locale.prototype.v8CreateBreakIterator = function(type) {
- return new v8Locale.v8BreakIterator(this, type);
-};
-
-// TODO(jungshik): Set |collator.options| to actually recognized / resolved
-// values.
-/**
- * Collator class implements locale-aware sort.
- * @param {Object} locale - locale object to pass to collator implementation.
- * @param {Object} settings - collation flags:
- * - ignoreCase
- * - ignoreAccents
- * - numeric
- * @private
- * @constructor
- */
-v8Locale.Collator = function(locale, settings) {
- native function NativeJSCollator();
-
- locale = v8Locale.__createLocaleOrDefault(locale);
- var collator = NativeJSCollator(
- locale.__icuLocaleID, v8Locale.__createSettingsOrDefault(settings, {}));
- return collator;
-};
-
-/**
- * Creates new Collator based on current locale.
- * @param {Object} - collation flags. See constructor.
- * @returns {Object} - new Collator object.
- */
-v8Locale.prototype.createCollator = function(settings) {
- return new v8Locale.Collator(this, settings);
-};
-
-/**
- * DateTimeFormat class implements locale-aware date and time formatting.
- * Constructor is not part of public API.
- * @param {Object} locale - locale object to pass to formatter.
- * @param {Object} settings - formatting flags:
- * - skeleton
- * - dateStyle
- * - timeStyle
- * @private
- * @constructor
- */
-v8Locale.__DateTimeFormat = function(locale, settings) {
- native function NativeJSDateTimeFormat();
-
- settings = v8Locale.__createSettingsOrDefault(settings, {});
-
- var cleanSettings = {};
- if (settings.hasOwnProperty('skeleton')) {
- cleanSettings['skeleton'] = settings['skeleton'];
- } else {
- cleanSettings = {};
- if (settings.hasOwnProperty('dateStyle')) {
- var ds = settings['dateStyle'];
- if (!/^(short|medium|long|full)$/.test(ds)) ds = 'short';
- cleanSettings['dateStyle'] = ds;
- } else if (settings.hasOwnProperty('dateType')) {
- // Obsolete. New spec requires dateStyle, but we'll keep this around
- // for current users.
- // TODO(cira): Remove when all internal users switch to dateStyle.
- var dt = settings['dateType'];
- if (!/^(short|medium|long|full)$/.test(dt)) dt = 'short';
- cleanSettings['dateStyle'] = dt;
- }
-
- if (settings.hasOwnProperty('timeStyle')) {
- var ts = settings['timeStyle'];
- if (!/^(short|medium|long|full)$/.test(ts)) ts = 'short';
- cleanSettings['timeStyle'] = ts;
- } else if (settings.hasOwnProperty('timeType')) {
- // TODO(cira): Remove when all internal users switch to timeStyle.
- var tt = settings['timeType'];
- if (!/^(short|medium|long|full)$/.test(tt)) tt = 'short';
- cleanSettings['timeStyle'] = tt;
- }
- }
-
- // Default is to show short date and time.
- if (!cleanSettings.hasOwnProperty('skeleton') &&
- !cleanSettings.hasOwnProperty('dateStyle') &&
- !cleanSettings.hasOwnProperty('timeStyle')) {
- cleanSettings = {'dateStyle': 'short',
- 'timeStyle': 'short'};
- }
-
- locale = v8Locale.__createLocaleOrDefault(locale);
- var formatter = NativeJSDateTimeFormat(locale.__icuLocaleID, cleanSettings);
-
-  // NativeJSDateTimeFormat creates formatter.options for us; we just need
-  // to append the actual settings to it.
-  for (var key in cleanSettings) {
- formatter.options[key] = cleanSettings[key];
- }
-
- /**
- * Clones existing date time format with possible overrides for some
- * of the options.
- * @param {!Object} overrideSettings - overrides for current format settings.
- * @returns {Object} - new DateTimeFormat object.
- * @public
- */
- formatter.derive = function(overrideSettings) {
-    // To remove a setting, the user can specify undefined as its value. We'll
-    // remove it from the map in that case.
- for (var prop in overrideSettings) {
- if (settings.hasOwnProperty(prop) && !overrideSettings[prop]) {
- delete settings[prop];
- }
- }
- return new v8Locale.__DateTimeFormat(
- locale, v8Locale.__createSettingsOrDefault(overrideSettings, settings));
- };
-
- return formatter;
-};
-
-/**
- * Creates new DateTimeFormat based on current locale.
- * @param {Object} - formatting flags. See constructor.
- * @returns {Object} - new DateTimeFormat object.
- */
-v8Locale.prototype.createDateTimeFormat = function(settings) {
- return new v8Locale.__DateTimeFormat(this, settings);
-};
-
-/**
- * NumberFormat class implements locale-aware number formatting.
- * Constructor is not part of public API.
- * @param {Object} locale - locale object to pass to formatter.
- * @param {Object} settings - formatting flags:
- * - skeleton
- * - pattern
- * - style - decimal, currency, percent or scientific
- * - currencyCode - ISO 4217 3-letter currency code
- * @private
- * @constructor
- */
-v8Locale.__NumberFormat = function(locale, settings) {
- native function NativeJSNumberFormat();
-
- settings = v8Locale.__createSettingsOrDefault(settings, {});
-
- var cleanSettings = {};
- if (settings.hasOwnProperty('skeleton')) {
-    // Assign skeleton to cleanSettings and fix an invalid currency pattern
-    // if present - a run like '\u00a4\u00a4x\u00a4' collapses to '\u00a4'.
- cleanSettings['skeleton'] =
- settings['skeleton'].replace(/\u00a4+[^\u00a4]+\u00a4+/g, '\u00a4');
- } else if (settings.hasOwnProperty('pattern')) {
- cleanSettings['pattern'] = settings['pattern'];
- } else if (settings.hasOwnProperty('style')) {
- var style = settings['style'];
- if (!/^(decimal|currency|percent|scientific)$/.test(style)) {
- style = 'decimal';
- }
- cleanSettings['style'] = style;
- }
-
- // Default is to show decimal style.
- if (!cleanSettings.hasOwnProperty('skeleton') &&
- !cleanSettings.hasOwnProperty('pattern') &&
- !cleanSettings.hasOwnProperty('style')) {
- cleanSettings = {'style': 'decimal'};
- }
-
- // Add currency code if available and valid (3-letter ASCII code).
- if (settings.hasOwnProperty('currencyCode') &&
- /^[a-zA-Z]{3}$/.test(settings['currencyCode'])) {
- cleanSettings['currencyCode'] = settings['currencyCode'].toUpperCase();
- }
-
- locale = v8Locale.__createLocaleOrDefault(locale);
- // Pass in region ID for proper currency detection. Use ZZ if region is empty.
- var region = locale.options.regionID !== '' ? locale.options.regionID : 'ZZ';
- var formatter = NativeJSNumberFormat(
- locale.__icuLocaleID, 'und_' + region, cleanSettings);
-
- // ICU doesn't always uppercase the currency code.
- if (formatter.options.hasOwnProperty('currencyCode')) {
- formatter.options['currencyCode'] =
- formatter.options['currencyCode'].toUpperCase();
- }
-
-  for (var key in cleanSettings) {
-    // Don't overwrite keys that are already set.
- if (formatter.options.hasOwnProperty(key)) continue;
-
- formatter.options[key] = cleanSettings[key];
- }
-
- /**
- * Clones existing number format with possible overrides for some
- * of the options.
- * @param {!Object} overrideSettings - overrides for current format settings.
- * @returns {Object} - new or cached NumberFormat object.
- * @public
- */
- formatter.derive = function(overrideSettings) {
-    // To remove a setting, the user can specify undefined as its value. We'll
-    // remove it from the map in that case.
- for (var prop in overrideSettings) {
- if (settings.hasOwnProperty(prop) && !overrideSettings[prop]) {
- delete settings[prop];
- }
- }
- return new v8Locale.__NumberFormat(
- locale, v8Locale.__createSettingsOrDefault(overrideSettings, settings));
- };
-
- return formatter;
-};
-
-/**
- * Creates new NumberFormat based on current locale.
- * @param {Object} - formatting flags. See constructor.
- * @returns {Object} - new or cached NumberFormat object.
- */
-v8Locale.prototype.createNumberFormat = function(settings) {
- return new v8Locale.__NumberFormat(this, settings);
-};
-
-/**
- * Merges user settings and defaults.
- * Settings that are not of object type are rejected.
- * Actual property values are not validated, but whitespace is trimmed if they
- * are strings.
- * @param {!Object} settings - user provided settings.
- * @param {!Object} defaults - default values for this type of settings.
- * @returns {Object} - valid settings object.
- * @private
- */
-v8Locale.__createSettingsOrDefault = function(settings, defaults) {
-  if (!settings || typeof(settings) !== 'object') {
- return defaults;
- }
- for (var key in defaults) {
- if (!settings.hasOwnProperty(key)) {
- settings[key] = defaults[key];
- }
- }
- // Clean up settings.
- for (var key in settings) {
- // Trim whitespace.
- if (typeof(settings[key]) === "string") {
- settings[key] = settings[key].trim();
- }
-    // Remove all properties that are set to undefined/null. This allows
-    // the derive method to remove a setting we don't need anymore.
- if (!settings[key]) {
- delete settings[key];
- }
- }
-
- return settings;
-};
-
-/**
- * If the locale is valid (defined and of v8Locale type) we return it;
- * otherwise we create and return a default locale.
- * @param {!Object} locale - user provided locale.
- * @returns {Object} - v8Locale object.
- * @private
- */
-v8Locale.__createLocaleOrDefault = function(locale) {
- if (!locale || !(locale instanceof v8Locale)) {
- return new v8Locale();
- } else {
- return locale;
- }
-};
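
For illustration only: the currency-pattern cleanup in __NumberFormat above is the regex replace skeleton.replace(/\u00a4+[^\u00a4]+\u00a4+/g, '\u00a4'), i.e. any run of U+00A4 currency signs that encloses other text collapses to a single sign. A standalone C++ rendering of the same transformation, for reference while reading the native NumberFormat code below (the helper name and the use of std::u16string are ours, not part of this patch):

#include <string>

// Collapses every "sign run, other text, sign run" span to a single
// U+00A4, mirroring the JS regex replace. A sign run with no closing run
// after the enclosed text is copied unchanged, matching the regex.
std::u16string CollapseCurrencyPattern(const std::u16string& s) {
  const char16_t kCurrency = 0x00A4;
  std::u16string out;
  size_t i = 0;
  const size_t n = s.size();
  while (i < n) {
    if (s[i] != kCurrency) { out += s[i++]; continue; }
    size_t run1 = i;
    while (run1 < n && s[run1] == kCurrency) ++run1;  // opening sign run
    size_t text = run1;
    while (text < n && s[text] != kCurrency) ++text;  // enclosed text
    if (text > run1 && text < n) {                    // closing run exists
      size_t run2 = text;
      while (run2 < n && s[run2] == kCurrency) ++run2;
      out += kCurrency;                               // whole span -> one sign
      i = run2;
    } else {
      out.append(s, i, run1 - i);                     // no match; keep run
      i = run1;
    }
  }
  return out;
}

For example, CollapseCurrencyPattern(u"\u00a4\u00a4x\u00a4") yields u"\u00a4", the '\u00a4\u00a4x\u00a4' case from the comment above.
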
diff --git a/src/extensions/experimental/language-matcher.cc b/src/extensions/experimental/language-matcher.cc
deleted file mode 100644
index 127e571..0000000
--- a/src/extensions/experimental/language-matcher.cc
+++ /dev/null
@@ -1,252 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// TODO(cira): Remove LanguageMatcher from v8 when ICU implements
-// language matching API.
-
-#include "src/extensions/experimental/language-matcher.h"
-
-#include <string.h>
-
-#include "src/extensions/experimental/i18n-utils.h"
-#include "unicode/datefmt.h" // For getAvailableLocales
-#include "unicode/locid.h"
-#include "unicode/uloc.h"
-
-namespace v8 {
-namespace internal {
-
-const unsigned int LanguageMatcher::kLanguageWeight = 75;
-const unsigned int LanguageMatcher::kScriptWeight = 20;
-const unsigned int LanguageMatcher::kRegionWeight = 5;
-const unsigned int LanguageMatcher::kThreshold = 50;
-const unsigned int LanguageMatcher::kPositionBonus = 1;
-const char* const LanguageMatcher::kDefaultLocale = "root";
-
-static const char* GetLanguageException(const char*);
-static bool BCP47ToICUFormat(const char*, char*);
-static int CompareLocaleSubtags(const char*, const char*);
-static bool BuildLocaleName(const char*, const char*, LocaleIDMatch*);
-
-LocaleIDMatch::LocaleIDMatch()
- : score(-1) {
- I18NUtils::StrNCopy(
- bcp47_id, ULOC_FULLNAME_CAPACITY, LanguageMatcher::kDefaultLocale);
-
- I18NUtils::StrNCopy(
- icu_id, ULOC_FULLNAME_CAPACITY, LanguageMatcher::kDefaultLocale);
-}
-
-LocaleIDMatch& LocaleIDMatch::operator=(const LocaleIDMatch& rhs) {
- I18NUtils::StrNCopy(this->bcp47_id, ULOC_FULLNAME_CAPACITY, rhs.bcp47_id);
- I18NUtils::StrNCopy(this->icu_id, ULOC_FULLNAME_CAPACITY, rhs.icu_id);
- this->score = rhs.score;
-
- return *this;
-}
-
-// static
-void LanguageMatcher::GetBestMatchForPriorityList(
- v8::Handle<v8::Array> locales, LocaleIDMatch* result) {
- v8::HandleScope handle_scope;
-
- unsigned int position_bonus = locales->Length() * kPositionBonus;
-
- int max_score = 0;
- LocaleIDMatch match;
- for (unsigned int i = 0; i < locales->Length(); ++i) {
- position_bonus -= kPositionBonus;
-
- v8::TryCatch try_catch;
- v8::Local<v8::Value> locale_id = locales->Get(v8::Integer::New(i));
-
- // Return default if exception is raised when reading parameter.
- if (try_catch.HasCaught()) break;
-
-    // JavaScript arrays can be heterogeneous, so check whether each item
-    // is a string.
- if (!locale_id->IsString()) continue;
-
- if (!CompareToSupportedLocaleIDList(locale_id->ToString(), &match)) {
- continue;
- }
-
- // Skip items under threshold.
- if (match.score < kThreshold) continue;
-
- match.score += position_bonus;
- if (match.score > max_score) {
- *result = match;
-
- max_score = match.score;
- }
- }
-}
-
-// static
-void LanguageMatcher::GetBestMatchForString(
- v8::Handle<v8::String> locale, LocaleIDMatch* result) {
- LocaleIDMatch match;
-
- if (CompareToSupportedLocaleIDList(locale, &match) &&
- match.score >= kThreshold) {
- *result = match;
- }
-}
-
-// static
-bool LanguageMatcher::CompareToSupportedLocaleIDList(
- v8::Handle<v8::String> locale_id, LocaleIDMatch* result) {
- static int32_t available_count = 0;
- // Depending on how ICU data is built, locales returned by
-  // Locale::getAvailableLocales() are not guaranteed to support DateFormat,
-  // Collation and other services. We could call getAvailableLocales() on all
-  // the services we want to support and take the intersection of them, but
- // using DateFormat::getAvailableLocales() should suffice.
- // TODO(cira): Maybe make this thread-safe?
- static const icu::Locale* available_locales =
- icu::DateFormat::getAvailableLocales(available_count);
-
- // Skip this locale_id if it's not in ASCII.
- static LocaleIDMatch default_match;
- v8::String::AsciiValue ascii_value(locale_id);
- if (*ascii_value == NULL) return false;
-
- char locale[ULOC_FULLNAME_CAPACITY];
- if (!BCP47ToICUFormat(*ascii_value, locale)) return false;
-
- icu::Locale input_locale(locale);
-
- // Position of the best match locale in list of available locales.
- int position = -1;
- const char* language = GetLanguageException(input_locale.getLanguage());
- const char* script = input_locale.getScript();
- const char* region = input_locale.getCountry();
- for (int32_t i = 0; i < available_count; ++i) {
- int current_score = 0;
- int sign =
- CompareLocaleSubtags(language, available_locales[i].getLanguage());
- current_score += sign * kLanguageWeight;
-
- sign = CompareLocaleSubtags(script, available_locales[i].getScript());
- current_score += sign * kScriptWeight;
-
- sign = CompareLocaleSubtags(region, available_locales[i].getCountry());
- current_score += sign * kRegionWeight;
-
- if (current_score >= kThreshold && current_score > result->score) {
- result->score = current_score;
- position = i;
- }
- }
-
- // Didn't find any good matches so use defaults.
- if (position == -1) return false;
-
- return BuildLocaleName(available_locales[position].getBaseName(),
- input_locale.getName(), result);
-}
-
-// For some unsupported language subtags it is better to fall back to a
-// related supported language than to the default.
-static const char* GetLanguageException(const char* language) {
-  // Serbo-Croatian to Serbian.
- if (!strcmp(language, "sh")) return "sr";
-
-  // Norwegian to Norwegian Bokmal.
- if (!strcmp(language, "no")) return "nb";
-
- // Moldavian to Romanian.
- if (!strcmp(language, "mo")) return "ro";
-
- // Tagalog to Filipino.
- if (!strcmp(language, "tl")) return "fil";
-
- return language;
-}
-
-// Converts user input from the BCP47 locale id format to an ICU-compatible
-// format. Returns false if the uloc_forLanguageTag call fails or the
-// extension is too long.
-static bool BCP47ToICUFormat(const char* locale_id, char* result) {
- UErrorCode status = U_ZERO_ERROR;
- int32_t locale_size = 0;
-
- char locale[ULOC_FULLNAME_CAPACITY];
- I18NUtils::StrNCopy(locale, ULOC_FULLNAME_CAPACITY, locale_id);
-
-  // uloc_forLanguageTag has a bug where a long extension can crash the code.
-  // We need to check that the extension part of the language id stays within
-  // the length limit.
- // ICU bug: http://bugs.icu-project.org/trac/ticket/8519
- const char* extension = strstr(locale_id, "-u-");
- if (extension != NULL &&
- strlen(extension) > ULOC_KEYWORD_AND_VALUES_CAPACITY) {
-    // Truncate to a non-crashing string, but still preserve the base language.
- int base_length = strlen(locale_id) - strlen(extension);
- locale[base_length] = '\0';
- }
-
- uloc_forLanguageTag(locale, result, ULOC_FULLNAME_CAPACITY,
- &locale_size, &status);
- return !U_FAILURE(status);
-}
-
-// Compares locale id subtags.
-// Returns 1 for match or -1 for mismatch.
-static int CompareLocaleSubtags(const char* lsubtag, const char* rsubtag) {
- return strcmp(lsubtag, rsubtag) == 0 ? 1 : -1;
-}
-
-// Builds a BCP47-compliant locale id from the base name of the matched
-// locale and the full user-specified locale.
-// Returns false if uloc_toLanguageTag failed to convert locale id.
-// Example:
-// base_name of matched locale (ICU ID): de_DE
-// input_locale_name (ICU ID): de_AT@collation=phonebk
-// result (ICU ID): de_DE@collation=phonebk
-// result (BCP47 ID): de-DE-u-co-phonebk
-static bool BuildLocaleName(const char* base_name,
- const char* input_locale_name,
- LocaleIDMatch* result) {
- I18NUtils::StrNCopy(result->icu_id, ULOC_LANG_CAPACITY, base_name);
-
- // Get extensions (if any) from the original locale.
- const char* extension = strchr(input_locale_name, ULOC_KEYWORD_SEPARATOR);
- if (extension != NULL) {
- I18NUtils::StrNCopy(result->icu_id + strlen(base_name),
- ULOC_KEYWORD_AND_VALUES_CAPACITY, extension);
- } else {
- I18NUtils::StrNCopy(result->icu_id, ULOC_LANG_CAPACITY, base_name);
- }
-
- // Convert ICU locale name into BCP47 format.
- UErrorCode status = U_ZERO_ERROR;
- uloc_toLanguageTag(result->icu_id, result->bcp47_id,
- ULOC_FULLNAME_CAPACITY, false, &status);
- return !U_FAILURE(status);
-}
-
-} } // namespace v8::internal
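
The scoring loop in CompareToSupportedLocaleIDList above is easy to trace by hand. A minimal sketch of the per-candidate score with the same weights (the free functions and their names are ours, for illustration only):

#include <string.h>

static const int kLanguageWeight = 75;  // same values as in the file above
static const int kScriptWeight = 20;
static const int kRegionWeight = 5;
static const int kThreshold = 50;       // minimum acceptable score

// A matching subtag adds its weight and a mismatching one subtracts it,
// exactly like CompareLocaleSubtags above.
static int SubtagSign(const char* left, const char* right) {
  return strcmp(left, right) == 0 ? 1 : -1;
}

static int ScoreCandidate(const char* lang, const char* script,
                          const char* region, const char* cand_lang,
                          const char* cand_script, const char* cand_region) {
  return SubtagSign(lang, cand_lang) * kLanguageWeight +
         SubtagSign(script, cand_script) * kScriptWeight +
         SubtagSign(region, cand_region) * kRegionWeight;
}

For input de-AT scored against an available de_DE, this gives +75 (language) +20 (both scripts empty) -5 (region), i.e. 90, which clears kThreshold; against en_US it gives -75 +20 -5 = -60 and is rejected. In GetBestMatchForPriorityList, entries earlier in the list also receive a larger position bonus (kPositionBonus per slot), so ties resolve toward the front.
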
diff --git a/src/extensions/experimental/language-matcher.h b/src/extensions/experimental/language-matcher.h
deleted file mode 100644
index dd29304..0000000
--- a/src/extensions/experimental/language-matcher.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
-
-#include "include/v8.h"
-
-#include "unicode/uloc.h"
-
-namespace v8 {
-namespace internal {
-
-struct LocaleIDMatch {
- LocaleIDMatch();
-
- LocaleIDMatch& operator=(const LocaleIDMatch& rhs);
-
-  // BCP47 locale id - "de-Latn-DE-u-co-phonebk".
- char bcp47_id[ULOC_FULLNAME_CAPACITY];
-
- // ICU locale id - "de_Latn_DE@collation=phonebk".
- char icu_id[ULOC_FULLNAME_CAPACITY];
-
- // Score for this locale.
- int score;
-};
-
-class LanguageMatcher {
- public:
- // Default locale.
- static const char* const kDefaultLocale;
-
-  // Finds the best supported locale for a given list of locale identifiers.
- // It preserves the extension for the locale id.
- static void GetBestMatchForPriorityList(
- v8::Handle<v8::Array> locale_list, LocaleIDMatch* result);
-
-  // Finds the best supported locale for a single locale identifier.
- // It preserves the extension for the locale id.
- static void GetBestMatchForString(
- v8::Handle<v8::String> locale_id, LocaleIDMatch* result);
-
- private:
-  // If language subtags match add this amount to the score.
- static const unsigned int kLanguageWeight;
-
- // If script subtags match add this amount to the score.
- static const unsigned int kScriptWeight;
-
- // If region subtags match add this amount to the score.
- static const unsigned int kRegionWeight;
-
-  // LocaleID match score has to reach this number for the match to be accepted.
- static const unsigned int kThreshold;
-
-  // For breaking ties in the priority list.
- static const unsigned int kPositionBonus;
-
- LanguageMatcher();
-
-  // Compares locale_id to the list of supported locales and returns the
-  // best match.
- // Returns false if it fails to convert locale id from ICU to BCP47 format.
- static bool CompareToSupportedLocaleIDList(v8::Handle<v8::String> locale_id,
- LocaleIDMatch* result);
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
diff --git a/src/extensions/experimental/number-format.cc b/src/extensions/experimental/number-format.cc
deleted file mode 100644
index 2932c52..0000000
--- a/src/extensions/experimental/number-format.cc
+++ /dev/null
@@ -1,374 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/extensions/experimental/number-format.h"
-
-#include <string.h>
-
-#include "src/extensions/experimental/i18n-utils.h"
-#include "unicode/dcfmtsym.h"
-#include "unicode/decimfmt.h"
-#include "unicode/locid.h"
-#include "unicode/numfmt.h"
-#include "unicode/uchar.h"
-#include "unicode/ucurr.h"
-#include "unicode/unum.h"
-#include "unicode/uversion.h"
-
-namespace v8 {
-namespace internal {
-
-const int NumberFormat::kCurrencyCodeLength = 4;
-
-v8::Persistent<v8::FunctionTemplate> NumberFormat::number_format_template_;
-
-static icu::DecimalFormat* CreateNumberFormat(v8::Handle<v8::String>,
- v8::Handle<v8::String>,
- v8::Handle<v8::Object>);
-static icu::DecimalFormat* CreateFormatterFromSkeleton(
- const icu::Locale&, const icu::UnicodeString&, UErrorCode*);
-static icu::DecimalFormatSymbols* GetFormatSymbols(const icu::Locale&);
-static bool GetCurrencyCode(const icu::Locale&,
- const char* const,
- v8::Handle<v8::Object>,
- UChar*);
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
-
-icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
- v8::Handle<v8::Object> obj) {
- if (number_format_template_->HasInstance(obj)) {
- return static_cast<icu::DecimalFormat*>(
- obj->GetPointerFromInternalField(0));
- }
-
- return NULL;
-}
-
-void NumberFormat::DeleteNumberFormat(v8::Persistent<v8::Value> object,
- void* param) {
- v8::Persistent<v8::Object> persistent_object =
- v8::Persistent<v8::Object>::Cast(object);
-
- // First delete the hidden C++ object.
- // Unpacking should never return NULL here. That would only happen if
- // this method is used as the weak callback for persistent handles not
- // pointing to a number formatter.
- delete UnpackNumberFormat(persistent_object);
-
- // Then dispose of the persistent handle to JS object.
- persistent_object.Dispose();
-}
-
-v8::Handle<v8::Value> NumberFormat::Format(const v8::Arguments& args) {
- v8::HandleScope handle_scope;
-
- if (args.Length() != 1 || !args[0]->IsNumber()) {
- // Just return NaN on invalid input.
- return v8::String::New("NaN");
- }
-
- icu::DecimalFormat* number_format = UnpackNumberFormat(args.Holder());
- if (!number_format) {
- return ThrowUnexpectedObjectError();
- }
-
-  // ICU will handle an actual NaN value properly and return the NaN string.
- icu::UnicodeString result;
- number_format->format(args[0]->NumberValue(), result);
-
- return v8::String::New(
- reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
-}
-
-v8::Handle<v8::Value> NumberFormat::JSNumberFormat(const v8::Arguments& args) {
- v8::HandleScope handle_scope;
-
- // Expect locale id, region id and settings.
- if (args.Length() != 3 ||
- !args[0]->IsString() || !args[1]->IsString() || !args[2]->IsObject()) {
- return v8::ThrowException(v8::Exception::SyntaxError(
- v8::String::New("Locale, region and number settings are required.")));
- }
-
- icu::DecimalFormat* number_format = CreateNumberFormat(
- args[0]->ToString(), args[1]->ToString(), args[2]->ToObject());
-
- if (number_format_template_.IsEmpty()) {
- v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
-
- raw_template->SetClassName(v8::String::New("v8Locale.NumberFormat"));
-
- // Define internal field count on instance template.
- v8::Local<v8::ObjectTemplate> object_template =
- raw_template->InstanceTemplate();
-
- // Set aside internal field for icu number formatter.
- object_template->SetInternalFieldCount(1);
-
- // Define all of the prototype methods on prototype template.
- v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
- proto->Set(v8::String::New("format"),
- v8::FunctionTemplate::New(Format));
-
- number_format_template_ =
- v8::Persistent<v8::FunctionTemplate>::New(raw_template);
- }
-
- // Create an empty object wrapper.
- v8::Local<v8::Object> local_object =
- number_format_template_->GetFunction()->NewInstance();
- v8::Persistent<v8::Object> wrapper =
- v8::Persistent<v8::Object>::New(local_object);
-
- // Set number formatter as internal field of the resulting JS object.
- wrapper->SetPointerInInternalField(0, number_format);
-
-  // Create the options object.
- v8::Local<v8::Object> options = v8::Object::New();
-
-  // Show what ICU decided to use, for easier problem tracking.
-  // Keep it as a v8-specific extension.
- icu::UnicodeString pattern;
- number_format->toPattern(pattern);
- options->Set(v8::String::New("v8ResolvedPattern"),
- v8::String::New(reinterpret_cast<const uint16_t*>(
- pattern.getBuffer()), pattern.length()));
-
-  // Set the resolved currency code in options.currencyCode if not empty.
- icu::UnicodeString currency(number_format->getCurrency());
- if (!currency.isEmpty()) {
- options->Set(v8::String::New("currencyCode"),
- v8::String::New(reinterpret_cast<const uint16_t*>(
- currency.getBuffer()), currency.length()));
- }
-
- wrapper->Set(v8::String::New("options"), options);
-
-  // Make the object handle weak so we can delete the formatter once GC
-  // kicks in.
- wrapper.MakeWeak(NULL, DeleteNumberFormat);
-
- return wrapper;
-}
-
-// Returns DecimalFormat.
-static icu::DecimalFormat* CreateNumberFormat(v8::Handle<v8::String> locale,
- v8::Handle<v8::String> region,
- v8::Handle<v8::Object> settings) {
- v8::HandleScope handle_scope;
-
- v8::String::AsciiValue ascii_locale(locale);
- icu::Locale icu_locale(*ascii_locale);
-
- // Make formatter from skeleton.
- icu::DecimalFormat* number_format = NULL;
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString setting;
-
- if (I18NUtils::ExtractStringSetting(settings, "skeleton", &setting)) {
- // TODO(cira): Use ICU skeleton once
- // http://bugs.icu-project.org/trac/ticket/8610 is resolved.
- number_format = CreateFormatterFromSkeleton(icu_locale, setting, &status);
- } else if (I18NUtils::ExtractStringSetting(settings, "pattern", &setting)) {
- number_format =
- new icu::DecimalFormat(setting, GetFormatSymbols(icu_locale), status);
- } else if (I18NUtils::ExtractStringSetting(settings, "style", &setting)) {
- if (setting == UNICODE_STRING_SIMPLE("currency")) {
- number_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createCurrencyInstance(icu_locale, status));
- } else if (setting == UNICODE_STRING_SIMPLE("percent")) {
- number_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createPercentInstance(icu_locale, status));
- } else if (setting == UNICODE_STRING_SIMPLE("scientific")) {
- number_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createScientificInstance(icu_locale, status));
- } else {
- // Make it decimal in any other case.
- number_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createInstance(icu_locale, status));
- }
- }
-
- if (U_FAILURE(status)) {
- delete number_format;
- status = U_ZERO_ERROR;
- number_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createInstance(icu_locale, status));
- }
-
- // Attach appropriate currency code to the formatter.
- // It affects currency formatters only.
-  // Region is a full language identifier of the form 'und_' + region id.
- v8::String::AsciiValue ascii_region(region);
-
- UChar currency_code[NumberFormat::kCurrencyCodeLength];
- if (GetCurrencyCode(icu_locale, *ascii_region, settings, currency_code)) {
- number_format->setCurrency(currency_code, status);
- }
-
- return number_format;
-}
-
-// Generates ICU number format pattern from given skeleton.
-// TODO(cira): Remove once ICU includes equivalent method
-// (see http://bugs.icu-project.org/trac/ticket/8610).
-static icu::DecimalFormat* CreateFormatterFromSkeleton(
- const icu::Locale& icu_locale,
- const icu::UnicodeString& skeleton,
- UErrorCode* status) {
- icu::DecimalFormat skeleton_format(
- skeleton, GetFormatSymbols(icu_locale), *status);
-
-  // Find out whether the skeleton contains a currency or percent symbol and
-  // create the proper instance to tweak.
- icu::DecimalFormat* base_format = NULL;
-
- // UChar representation of U+00A4 currency symbol.
- const UChar currency_symbol = 0xA4u;
-
- int32_t index = skeleton.indexOf(currency_symbol);
- if (index != -1) {
-    // Find out how many U+00A4 signs there are; there is at least one.
-    // The case of non-consecutive U+00A4 signs is handled in i18n.js.
- int32_t end_index = skeleton.lastIndexOf(currency_symbol, index);
-
-#if (U_ICU_VERSION_MAJOR_NUM == 4) && (U_ICU_VERSION_MINOR_NUM <= 6)
- icu::NumberFormat::EStyles style;
- switch (end_index - index) {
- case 0:
- style = icu::NumberFormat::kCurrencyStyle;
- break;
- case 1:
- style = icu::NumberFormat::kIsoCurrencyStyle;
- break;
- default:
- style = icu::NumberFormat::kPluralCurrencyStyle;
- }
-#else // ICU version is 4.8 or above (we ignore versions below 4.0).
- UNumberFormatStyle style;
- switch (end_index - index) {
- case 0:
- style = UNUM_CURRENCY;
- break;
- case 1:
- style = UNUM_CURRENCY_ISO;
- break;
- default:
- style = UNUM_CURRENCY_PLURAL;
- }
-#endif
-
- base_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createInstance(icu_locale, style, *status));
- } else if (skeleton.indexOf('%') != -1) {
- base_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createPercentInstance(icu_locale, *status));
- } else {
- // TODO(cira): Handle scientific skeleton.
- base_format = static_cast<icu::DecimalFormat*>(
- icu::NumberFormat::createInstance(icu_locale, *status));
- }
-
- if (U_FAILURE(*status)) {
- delete base_format;
- return NULL;
- }
-
- // Copy important information from skeleton to the new formatter.
- // TODO(cira): copy rounding information from skeleton?
- base_format->setGroupingUsed(skeleton_format.isGroupingUsed());
-
- base_format->setMinimumIntegerDigits(
- skeleton_format.getMinimumIntegerDigits());
-
- base_format->setMinimumFractionDigits(
- skeleton_format.getMinimumFractionDigits());
-
- base_format->setMaximumFractionDigits(
- skeleton_format.getMaximumFractionDigits());
-
- return base_format;
-}
-
-// Gets decimal symbols for a locale.
-static icu::DecimalFormatSymbols* GetFormatSymbols(
- const icu::Locale& icu_locale) {
- UErrorCode status = U_ZERO_ERROR;
- icu::DecimalFormatSymbols* symbols =
- new icu::DecimalFormatSymbols(icu_locale, status);
-
- if (U_FAILURE(status)) {
- delete symbols;
- // Use symbols from default locale.
- symbols = new icu::DecimalFormatSymbols(status);
- }
-
- return symbols;
-}
-
-// Gets the 3-letter ISO 4217 currency code.
-// Check the currencyCode setting first, then @currency=code, and finally
-// try to infer the currency code from the locale in the form
-// 'und_' + region id.
-// Returns false in case of error.
-static bool GetCurrencyCode(const icu::Locale& icu_locale,
- const char* const und_region_locale,
- v8::Handle<v8::Object> settings,
- UChar* code) {
- UErrorCode status = U_ZERO_ERROR;
-
-  // If there is a user-specified currency code, use it.
- icu::UnicodeString currency;
-  if (I18NUtils::ExtractStringSetting(settings, "currencyCode", &currency)) {
- currency.extract(code, NumberFormat::kCurrencyCodeLength, status);
- return true;
- }
-
-  // If the ICU locale has a -cu- currency code, use it.
- char currency_code[NumberFormat::kCurrencyCodeLength];
- int32_t length = icu_locale.getKeywordValue(
- "currency", currency_code, NumberFormat::kCurrencyCodeLength, status);
- if (length != 0) {
- I18NUtils::AsciiToUChar(currency_code, length + 1,
- code, NumberFormat::kCurrencyCodeLength);
- return true;
- }
-
- // Otherwise infer currency code from the region id.
- ucurr_forLocale(
- und_region_locale, code, NumberFormat::kCurrencyCodeLength, &status);
-
- return !!U_SUCCESS(status);
-}
-
-// Throws a JavaScript exception.
-static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
- // Returns undefined, and schedules an exception to be thrown.
- return v8::ThrowException(v8::Exception::Error(
- v8::String::New("NumberFormat method called on an object "
- "that is not a NumberFormat.")));
-}
-
-} } // namespace v8::internal
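
The version-dependent switch in CreateFormatterFromSkeleton above maps the length of the run of U+00A4 signs in the skeleton onto an ICU currency style. Condensed for the ICU 4.8+ branch (the helper name is ours; the enum constants are the real ones from unicode/unum.h):

#include "unicode/unum.h"

// One sign selects the symbol style, two the ISO-code style, and three
// or more the plural-name style, the same mapping as the switch above
// (where end_index - index is the run length minus one).
static UNumberFormatStyle CurrencyStyleForRun(int32_t run_length) {
  switch (run_length) {
    case 1:  return UNUM_CURRENCY;         // e.g. "$1,234.56" in en-US
    case 2:  return UNUM_CURRENCY_ISO;     // e.g. "USD 1,234.56"
    default: return UNUM_CURRENCY_PLURAL;  // e.g. "1,234.56 US dollars"
  }
}

The ICU <= 4.6 branch picks the matching icu::NumberFormat::EStyles constants instead, as the #if block above shows.
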
diff --git a/src/extensions/experimental/number-format.h b/src/extensions/experimental/number-format.h
deleted file mode 100644
index bcfaed6..0000000
--- a/src/extensions/experimental/number-format.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
-#define V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
-
-#include "include/v8.h"
-
-#include "unicode/uversion.h"
-
-namespace U_ICU_NAMESPACE {
-class DecimalFormat;
-}
-
-namespace v8 {
-namespace internal {
-
-class NumberFormat {
- public:
- // 3-letter ISO 4217 currency code plus \0.
- static const int kCurrencyCodeLength;
-
- static v8::Handle<v8::Value> JSNumberFormat(const v8::Arguments& args);
-
- // Helper methods for various bindings.
-
-  // Unpacks the number format object from the corresponding JavaScript
-  // object.
- static icu::DecimalFormat* UnpackNumberFormat(
- v8::Handle<v8::Object> obj);
-
- // Release memory we allocated for the NumberFormat once the JS object that
- // holds the pointer gets garbage collected.
- static void DeleteNumberFormat(v8::Persistent<v8::Value> object,
- void* param);
-
- // Formats number and returns corresponding string.
- static v8::Handle<v8::Value> Format(const v8::Arguments& args);
-
- private:
- NumberFormat();
-
- static v8::Persistent<v8::FunctionTemplate> number_format_template_;
-};
-
-} } // namespace v8::internal
-
-#endif // V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
diff --git a/src/factory.cc b/src/factory.cc
index f1042a4..0b79635 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -77,11 +77,21 @@
}
-Handle<NumberDictionary> Factory::NewNumberDictionary(int at_least_space_for) {
+Handle<SeededNumberDictionary> Factory::NewSeededNumberDictionary(
+ int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
- NumberDictionary::Allocate(at_least_space_for),
- NumberDictionary);
+ SeededNumberDictionary::Allocate(at_least_space_for),
+ SeededNumberDictionary);
+}
+
+
+Handle<UnseededNumberDictionary> Factory::NewUnseededNumberDictionary(
+ int at_least_space_for) {
+ ASSERT(0 <= at_least_space_for);
+ CALL_HEAP_FUNCTION(isolate(),
+ UnseededNumberDictionary::Allocate(at_least_space_for),
+ UnseededNumberDictionary);
}
@@ -131,6 +141,13 @@
}
+Handle<AccessorPair> Factory::NewAccessorPair() {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateAccessorPair(),
+ AccessorPair);
+}
+
+
// Symbols are created in the old generation (data space).
Handle<String> Factory::LookupSymbol(Vector<const char> string) {
CALL_HEAP_FUNCTION(isolate(),
@@ -698,7 +715,7 @@
// Allocate the function
Handle<JSFunction> function = NewFunction(name, the_hole_value());
- // Setup the code pointer in both the shared function info and in
+ // Set up the code pointer in both the shared function info and in
// the function itself.
function->shared()->set_code(*code);
function->set_code(*code);
@@ -729,7 +746,7 @@
// Allocate the function.
Handle<JSFunction> function = NewFunction(name, prototype);
- // Setup the code pointer in both the shared function info and in
+ // Set up the code pointer in both the shared function info and in
// the function itself.
function->shared()->set_code(*code);
function->set_code(*code);
@@ -751,7 +768,10 @@
// property that refers to the function.
SetPrototypeProperty(function, prototype);
// Currently safe because it is only invoked from Genesis.
- SetLocalPropertyNoThrow(prototype, constructor_symbol(), function, DONT_ENUM);
+ CHECK_NOT_EMPTY_HANDLE(isolate(),
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ prototype, constructor_symbol(),
+ function, DONT_ENUM));
return function;
}
@@ -926,28 +946,48 @@
}
-Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
+Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
PretenureFlag pretenure) {
Handle<JSArray> result =
Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
pretenure));
+ result->set_length(Smi::FromInt(0));
SetContent(result, elements);
return result;
}
+void Factory::SetElementsCapacityAndLength(Handle<JSArray> array,
+ int capacity,
+ int length) {
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ CALL_HEAP_FUNCTION_VOID(
+ isolate(),
+ accessor->SetCapacityAndLength(*array, capacity, length));
+}
+
+
void Factory::SetContent(Handle<JSArray> array,
- Handle<FixedArray> elements) {
+ Handle<FixedArrayBase> elements) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
array->SetContent(*elements));
}
-void Factory::EnsureCanContainNonSmiElements(Handle<JSArray> array) {
+void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
- array->EnsureCanContainNonSmiElements());
+ array->EnsureCanContainHeapObjectElements());
+}
+
+
+void Factory::EnsureCanContainElements(Handle<JSArray> array,
+ Handle<FixedArrayBase> elements,
+ EnsureElementsMode mode) {
+ CALL_HEAP_FUNCTION_VOID(
+ isolate(),
+ array->EnsureCanContainElements(*elements, mode));
}
@@ -1041,13 +1081,23 @@
}
-Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
- Handle<NumberDictionary> dictionary,
+Handle<SeededNumberDictionary> Factory::DictionaryAtNumberPut(
+ Handle<SeededNumberDictionary> dictionary,
uint32_t key,
Handle<Object> value) {
CALL_HEAP_FUNCTION(isolate(),
dictionary->AtNumberPut(key, *value),
- NumberDictionary);
+ SeededNumberDictionary);
+}
+
+
+Handle<UnseededNumberDictionary> Factory::DictionaryAtNumberPut(
+ Handle<UnseededNumberDictionary> dictionary,
+ uint32_t key,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(isolate(),
+ dictionary->AtNumberPut(key, *value),
+ UnseededNumberDictionary);
}
diff --git a/src/factory.h b/src/factory.h
index 0f028e5..e68cc7e 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -54,7 +54,11 @@
int size,
PretenureFlag pretenure = NOT_TENURED);
- Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for);
+ Handle<SeededNumberDictionary> NewSeededNumberDictionary(
+ int at_least_space_for);
+
+ Handle<UnseededNumberDictionary> NewUnseededNumberDictionary(
+ int at_least_space_for);
Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
@@ -69,6 +73,8 @@
Handle<DeoptimizationOutputData> NewDeoptimizationOutputData(
int deopt_entry_count,
PretenureFlag pretenure);
+ // Allocates a pre-tenured empty AccessorPair.
+ Handle<AccessorPair> NewAccessorPair();
Handle<String> LookupSymbol(Vector<const char> str);
Handle<String> LookupSymbol(Handle<String> str);
@@ -227,7 +233,7 @@
Handle<FixedDoubleArray> CopyFixedDoubleArray(
Handle<FixedDoubleArray> array);
- // Numbers (eg, literals) are pretenured by the parser.
+ // Numbers (e.g. literals) are pretenured by the parser.
Handle<Object> NewNumber(double value,
PretenureFlag pretenure = NOT_TENURED);
@@ -259,12 +265,19 @@
PretenureFlag pretenure = NOT_TENURED);
Handle<JSArray> NewJSArrayWithElements(
- Handle<FixedArray> elements,
+ Handle<FixedArrayBase> elements,
PretenureFlag pretenure = NOT_TENURED);
- void SetContent(Handle<JSArray> array, Handle<FixedArray> elements);
+ void SetElementsCapacityAndLength(Handle<JSArray> array,
+ int capacity,
+ int length);
- void EnsureCanContainNonSmiElements(Handle<JSArray> array);
+ void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
+
+ void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
+ void EnsureCanContainElements(Handle<JSArray> array,
+ Handle<FixedArrayBase> elements,
+ EnsureElementsMode mode);
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
@@ -423,8 +436,13 @@
Handle<Object> stack_trace,
Handle<Object> stack_frames);
- Handle<NumberDictionary> DictionaryAtNumberPut(
- Handle<NumberDictionary>,
+ Handle<SeededNumberDictionary> DictionaryAtNumberPut(
+ Handle<SeededNumberDictionary>,
+ uint32_t key,
+ Handle<Object> value);
+
+ Handle<UnseededNumberDictionary> DictionaryAtNumberPut(
+ Handle<UnseededNumberDictionary>,
uint32_t key,
Handle<Object> value);
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index f145df7..9284e13 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -41,6 +41,7 @@
extern ctype FLAG_##nam;
#define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
static ctype const FLAG_##nam = def;
+#define DEFINE_implication(whenflag, thenflag)
// We want to supply the actual storage and value for the flag variable in the
// .cc file. We only do this for writable flags.
@@ -48,6 +49,7 @@
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
ctype FLAG_##nam = def;
#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
+#define DEFINE_implication(whenflag, thenflag)
// We need to define all of our default values so that the Flag structure can
// access them by pointer. These are just used internally inside of one .cc,
@@ -56,7 +58,7 @@
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
static ctype const FLAGDEFAULT_##nam = def;
#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-
+#define DEFINE_implication(whenflag, thenflag)
// We want to write entries into our meta data table, for internal parsing and
// printing / etc in the flag parser code. We only do this for writable flags.
@@ -64,6 +66,14 @@
#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
{ Flag::TYPE_##ftype, #nam, &FLAG_##nam, &FLAGDEFAULT_##nam, cmt, false },
#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
+#define DEFINE_implication(whenflag, thenflag)
+
+// We produce the code to set a flag when it is implied by another flag.
+#elif defined(FLAG_MODE_DEFINE_IMPLICATIONS)
+#define FLAG_FULL(ftype, ctype, nam, def, cmt)
+#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
+#define DEFINE_implication(whenflag, thenflag) \
+ if (FLAG_##whenflag) FLAG_##thenflag = true;
#else
#error No mode supplied when including flags.defs
@@ -103,6 +113,10 @@
DEFINE_bool(harmony_collections, false,
"enable harmony collections (sets, maps, and weak maps)")
DEFINE_bool(harmony, false, "enable all harmony features")
+DEFINE_implication(harmony, harmony_typeof)
+DEFINE_implication(harmony, harmony_scoping)
+DEFINE_implication(harmony, harmony_proxies)
+DEFINE_implication(harmony, harmony_collections)
// Flags for experimental implementation features.
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
@@ -335,6 +349,14 @@
"print stack trace when throwing exceptions")
DEFINE_bool(preallocate_message_memory, false,
"preallocate some memory to build stack traces.")
+DEFINE_bool(randomize_hashes,
+ true,
+ "randomize hashes to avoid predictable hash collisions "
+ "(with snapshots this option cannot override the baked-in seed)")
+DEFINE_int(hash_seed,
+ 0,
+ "Fixed seed to use to hash property keys (0 means random)"
+ "(with snapshots this option cannot override the baked-in seed)")
// v8.cc
DEFINE_bool(preemption, false,
@@ -542,6 +564,20 @@
DEFINE_bool(print_code_verbose, false, "print more information for code")
DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
+#ifdef ENABLE_DISASSEMBLER
+DEFINE_bool(print_all_code, false, "enable all flags related to printing code")
+DEFINE_implication(print_all_code, print_code)
+DEFINE_implication(print_all_code, print_opt_code)
+DEFINE_implication(print_all_code, print_unopt_code)
+DEFINE_implication(print_all_code, print_code_verbose)
+DEFINE_implication(print_all_code, print_builtin_code)
+DEFINE_implication(print_all_code, print_code_stubs)
+DEFINE_implication(print_all_code, code_comments)
+#ifdef DEBUG
+DEFINE_implication(print_all_code, trace_codegen)
+#endif
+#endif
+
// Cleanup...
#undef FLAG_FULL
#undef FLAG_READONLY
@@ -550,8 +586,10 @@
#undef DEFINE_bool
#undef DEFINE_int
#undef DEFINE_string
+#undef DEFINE_implication
#undef FLAG_MODE_DECLARE
#undef FLAG_MODE_DEFINE
#undef FLAG_MODE_DEFINE_DEFAULTS
#undef FLAG_MODE_META
+#undef FLAG_MODE_DEFINE_IMPLICATIONS
diff --git a/src/flags.cc b/src/flags.cc
index ab5b57c..75e66ce 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -548,4 +548,9 @@
}
+void FlagList::EnforceFlagImplications() {
+#define FLAG_MODE_DEFINE_IMPLICATIONS
+#include "flag-definitions.h"
+}
+
} } // namespace v8::internal
diff --git a/src/flags.h b/src/flags.h
index f9cbde0..f0b239b 100644
--- a/src/flags.h
+++ b/src/flags.h
@@ -72,6 +72,9 @@
// Print help to stdout with flags, types, and default values.
static void PrintHelp();
+
+  // Set flags as a consequence of being implied by another flag.
+ static void EnforceFlagImplications();
};
} } // namespace v8::internal
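
DEFINE_implication works through the same multiple-inclusion trick as the rest of flag-definitions.h: the header is included once per FLAG_MODE_*, and each mode expands only the macros it cares about. A self-contained sketch of the pattern, with invented file and macro names:

// my-flags.h: the single definition site, expanded differently per mode.
// Deliberately no include guard; it must be includable repeatedly.
#if defined(MODE_DECLARE)
#define DEF_BOOL(name, def) extern bool FLAG_##name;
#define DEF_IMPLIES(when, then)
#elif defined(MODE_DEFINE)
#define DEF_BOOL(name, def) bool FLAG_##name = def;
#define DEF_IMPLIES(when, then)
#elif defined(MODE_IMPLICATIONS)
#define DEF_BOOL(name, def)
#define DEF_IMPLIES(when, then) if (FLAG_##when) FLAG_##then = true;
#else
#error No mode supplied when including my-flags.h
#endif

DEF_BOOL(harmony, false)
DEF_BOOL(harmony_scoping, false)
DEF_IMPLIES(harmony, harmony_scoping)

#undef DEF_BOOL
#undef DEF_IMPLIES

// my-flags.cc: flag storage, then the implication pass expanded inside a
// function body, the same shape as FlagList::EnforceFlagImplications().
#define MODE_DEFINE
#include "my-flags.h"
#undef MODE_DEFINE

void EnforceImplications() {
#define MODE_IMPLICATIONS
#include "my-flags.h"  // becomes: if (FLAG_harmony) FLAG_harmony_scoping = true;
#undef MODE_IMPLICATIONS
}

Note that this single pass applies implications in definition order, so a chain like a implies b, b implies c only fully propagates if the DEF_IMPLIES lines appear in that order.
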
diff --git a/src/frames.cc b/src/frames.cc
index 9fd0042..4402496 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -485,7 +485,7 @@
void ExitFrame::ComputeCallerState(State* state) const {
- // Setup the caller state.
+ // Set up the caller state.
state->sp = caller_sp();
state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
state->pc_address
@@ -723,12 +723,17 @@
JavaScriptFrame* frame = it.frame();
if (frame->IsConstructor()) PrintF(file, "new ");
// function name
- Object* fun = frame->function();
- if (fun->IsJSFunction()) {
- SharedFunctionInfo* shared = JSFunction::cast(fun)->shared();
- shared->DebugName()->ShortPrint(file);
+ Object* maybe_fun = frame->function();
+ if (maybe_fun->IsJSFunction()) {
+ JSFunction* fun = JSFunction::cast(maybe_fun);
+ fun->PrintName();
+ Code* js_code = frame->unchecked_code();
+ Address pc = frame->pc();
+ int code_offset =
+ static_cast<int>(pc - js_code->instruction_start());
+ PrintF("+%d", code_offset);
+ SharedFunctionInfo* shared = fun->shared();
if (print_line_number) {
- Address pc = frame->pc();
Code* code = Code::cast(
v8::internal::Isolate::Current()->heap()->FindCodeObject(pc));
int source_pos = code->SourcePosition(pc);
@@ -751,7 +756,7 @@
}
}
} else {
- fun->ShortPrint(file);
+ PrintF("<unknown>");
}
if (print_args) {
@@ -808,17 +813,18 @@
data->TranslationIndex(deopt_index)->value());
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::BEGIN);
- int frame_count = it.Next();
+ it.Next(); // Drop frame count.
+ int jsframe_count = it.Next();
// We create the summary in reverse order because the frames
// in the deoptimization translation are ordered bottom-to-top.
- int i = frame_count;
+ int i = jsframe_count;
while (i > 0) {
opcode = static_cast<Translation::Opcode>(it.Next());
- if (opcode == Translation::FRAME) {
+ if (opcode == Translation::JS_FRAME) {
// We don't inline constructor calls, so only the first, outermost
// frame can be a constructor frame in case of inlining.
- bool is_constructor = (i == frame_count) && IsConstructor();
+ bool is_constructor = (i == jsframe_count) && IsConstructor();
i--;
int ast_id = it.Next();
@@ -913,8 +919,9 @@
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::BEGIN);
USE(opcode);
- int frame_count = it.Next();
- return frame_count;
+ it.Next(); // Drop frame count.
+ int jsframe_count = it.Next();
+ return jsframe_count;
}
@@ -929,14 +936,15 @@
data->TranslationIndex(deopt_index)->value());
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::BEGIN);
- int frame_count = it.Next();
+ it.Next(); // Drop frame count.
+ int jsframe_count = it.Next();
// We insert the frames in reverse order because the frames
// in the deoptimization translation are ordered bottom-to-top.
- while (frame_count > 0) {
+ while (jsframe_count > 0) {
opcode = static_cast<Translation::Opcode>(it.Next());
- if (opcode == Translation::FRAME) {
- frame_count--;
+ if (opcode == Translation::JS_FRAME) {
+ jsframe_count--;
it.Next(); // Skip ast id.
int function_id = it.Next();
it.Next(); // Skip height.
@@ -1298,7 +1306,8 @@
isolate_->counters()->pc_to_code()->Increment();
ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize));
uint32_t hash = ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)));
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)),
+ v8::internal::kZeroHashSeed);
uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
InnerPointerToCodeCacheEntry* entry = cache(index);
if (entry->inner_pointer == inner_pointer) {
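
The new seed argument to ComputeIntegerHash is the other half of the randomize_hashes / hash_seed flags and of the SeededNumberDictionary / UnseededNumberDictionary split above: hashes that script can probe mix in a per-heap seed so colliding keys cannot be precomputed, while purely internal tables such as this inner-pointer cache pass kZeroHashSeed. A hedged sketch of the seed-mixing idea, not V8's exact hash function:

#include <stdint.h>

// Illustrative only. XOR-ing the seed in before a 32-bit finalizer makes
// every output bit depend on a value chosen at heap creation time; with
// a zero seed the function degenerates to the plain unseeded hash.
static uint32_t SeededIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // Thomas Wang style mixing steps
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}
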
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 04086d4..4d1707d 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -370,12 +370,14 @@
StateField::encode(state) | PcField::encode(masm_->pc_offset());
BailoutEntry entry = { id, pc_and_state };
#ifdef DEBUG
- // Assert that we don't have multiple bailout entries for the same node.
- for (int i = 0; i < bailout_entries_.length(); i++) {
- if (bailout_entries_.at(i).id == entry.id) {
- AstPrinter printer;
- PrintF("%s", printer.PrintProgram(info_->function()));
- UNREACHABLE();
+ if (FLAG_enable_slow_asserts) {
+ // Assert that we don't have multiple bailout entries for the same node.
+ for (int i = 0; i < bailout_entries_.length(); i++) {
+ if (bailout_entries_.at(i).id == entry.id) {
+ AstPrinter printer;
+ PrintF("%s", printer.PrintProgram(info_->function()));
+ UNREACHABLE();
+ }
}
}
#endif // DEBUG
@@ -1178,7 +1180,7 @@
}
ExitFinallyBlock(); // Return to the calling code.
- // Setup try handler.
+ // Set up try handler.
__ bind(&try_entry);
__ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER, stmt->index());
{ TryFinally try_body(this, &finally_entry);
@@ -1284,7 +1286,7 @@
bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
- Expression *sub_expr;
+ Expression* sub_expr;
Handle<String> check;
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
EmitLiteralCompareTypeof(expr, sub_expr, check);
diff --git a/src/full-codegen.h b/src/full-codegen.h
index fbb6979..0270e13 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -142,11 +142,13 @@
return previous_;
}
- protected:
+ protected:
MacroAssembler* masm() { return codegen_->masm(); }
FullCodeGenerator* codegen_;
NestedStatement* previous_;
+
+ private:
DISALLOW_COPY_AND_ASSIGN(NestedStatement);
};
@@ -618,8 +620,8 @@
Label** if_false,
Label** fall_through) const = 0;
- // Returns true if we are evaluating only for side effects (ie if the result
- // will be discarded).
+ // Returns true if we are evaluating only for side effects (i.e. if the
+ // result will be discarded).
virtual bool IsEffect() const { return false; }
// Returns true if we are evaluating for the value (in accu/on stack).
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index b386bed..4192222 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -1556,23 +1556,23 @@
class UnwindInfoSection : public DebugSection {
public:
- explicit UnwindInfoSection(CodeDescription *desc);
- virtual bool WriteBody(Writer *w);
+ explicit UnwindInfoSection(CodeDescription* desc);
+ virtual bool WriteBody(Writer* w);
- int WriteCIE(Writer *w);
- void WriteFDE(Writer *w, int);
+ int WriteCIE(Writer* w);
+ void WriteFDE(Writer* w, int);
- void WriteFDEStateOnEntry(Writer *w);
- void WriteFDEStateAfterRBPPush(Writer *w);
- void WriteFDEStateAfterRBPSet(Writer *w);
- void WriteFDEStateAfterRBPPop(Writer *w);
+ void WriteFDEStateOnEntry(Writer* w);
+ void WriteFDEStateAfterRBPPush(Writer* w);
+ void WriteFDEStateAfterRBPSet(Writer* w);
+ void WriteFDEStateAfterRBPPop(Writer* w);
- void WriteLength(Writer *w,
+ void WriteLength(Writer* w,
Writer::Slot<uint32_t>* length_slot,
int initial_position);
private:
- CodeDescription *desc_;
+ CodeDescription* desc_;
// DWARF3 Specification, Table 7.23
enum CFIInstructions {
@@ -1623,7 +1623,7 @@
};
-void UnwindInfoSection::WriteLength(Writer *w,
+void UnwindInfoSection::WriteLength(Writer* w,
Writer::Slot<uint32_t>* length_slot,
int initial_position) {
uint32_t align = (w->position() - initial_position) % kPointerSize;
@@ -1639,7 +1639,7 @@
}
-UnwindInfoSection::UnwindInfoSection(CodeDescription *desc)
+UnwindInfoSection::UnwindInfoSection(CodeDescription* desc)
#ifdef __ELF
: ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1),
#else
@@ -1648,7 +1648,7 @@
#endif
desc_(desc) { }
-int UnwindInfoSection::WriteCIE(Writer *w) {
+int UnwindInfoSection::WriteCIE(Writer* w) {
Writer::Slot<uint32_t> cie_length_slot = w->CreateSlotHere<uint32_t>();
uint32_t cie_position = w->position();
@@ -1668,7 +1668,7 @@
}
-void UnwindInfoSection::WriteFDE(Writer *w, int cie_position) {
+void UnwindInfoSection::WriteFDE(Writer* w, int cie_position) {
// The only FDE for this function. The CFA is the current RBP.
Writer::Slot<uint32_t> fde_length_slot = w->CreateSlotHere<uint32_t>();
int fde_position = w->position();
@@ -1686,7 +1686,7 @@
}
-void UnwindInfoSection::WriteFDEStateOnEntry(Writer *w) {
+void UnwindInfoSection::WriteFDEStateOnEntry(Writer* w) {
  // The first state, just after the control has been transferred to the
// function.
@@ -1713,7 +1713,7 @@
}
-void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer *w) {
+void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer* w) {
// The second state, just after RBP has been pushed.
// RBP / CFA for this function is now the current RSP, so just set the
@@ -1734,7 +1734,7 @@
}
-void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer *w) {
+void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer* w) {
// The third state, after the RBP has been set.
// The CFA can now directly be set to RBP.
@@ -1749,7 +1749,7 @@
}
-void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer *w) {
+void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer* w) {
// The fourth (final) state. The RBP has been popped (just before issuing a
// return).
@@ -1769,7 +1769,7 @@
}
-bool UnwindInfoSection::WriteBody(Writer *w) {
+bool UnwindInfoSection::WriteBody(Writer* w) {
uint32_t cie_position = WriteCIE(w);
WriteFDE(w, cie_position);
return true;
@@ -1810,8 +1810,8 @@
struct JITDescriptor {
uint32_t version_;
uint32_t action_flag_;
- JITCodeEntry *relevant_entry_;
- JITCodeEntry *first_entry_;
+ JITCodeEntry* relevant_entry_;
+ JITCodeEntry* first_entry_;
};
// GDB will place breakpoint into this function.
@@ -1998,7 +1998,7 @@
}
}
-static void AddUnwindInfo(CodeDescription *desc) {
+static void AddUnwindInfo(CodeDescription* desc) {
#ifdef V8_TARGET_ARCH_X64
if (desc->tag() == GDBJITInterface::FUNCTION) {
// To avoid propagating unwinding information through
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 87066fa..471f5a3 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -232,7 +232,7 @@
VMState state(isolate, EXTERNAL);
func(object, par);
}
- // Absense of explicit cleanup or revival of weak handle
+ // Absence of explicit cleanup or revival of weak handle
// in most of the cases would lead to memory leak.
ASSERT(state_ != NEAR_DEATH);
return true;
diff --git a/src/handles.cc b/src/handles.cc
index 2ff797d..34eaddb 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -208,42 +208,6 @@
}
-void NormalizeProperties(Handle<JSObject> object,
- PropertyNormalizationMode mode,
- int expected_additional_properties) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
- object->NormalizeProperties(
- mode,
- expected_additional_properties));
-}
-
-
-Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->NormalizeElements(),
- NumberDictionary);
-}
-
-
-void TransformToFastProperties(Handle<JSObject> object,
- int unused_property_fields) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->TransformToFastProperties(unused_property_fields));
-}
-
-
-Handle<NumberDictionary> NumberDictionarySet(
- Handle<NumberDictionary> dictionary,
- uint32_t index,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
- dictionary->Set(index, *value, details),
- NumberDictionary);
-}
-
-
void FlattenString(Handle<String> string) {
CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten());
}
@@ -265,17 +229,6 @@
}
-Handle<Object> SetProperty(Handle<JSReceiver> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetProperty(*key, *value, attributes, strict_mode),
- Object);
-}
-
-
Handle<Object> SetProperty(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
@@ -303,16 +256,6 @@
}
-Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyDetails details) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetNormalizedProperty(*key, *value, details),
- Object);
-}
-
-
Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
Handle<Object> key) {
Isolate* isolate = object->GetIsolate();
@@ -322,30 +265,6 @@
}
-Handle<Object> SetLocalPropertyIgnoreAttributes(
- Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes) {
- CALL_HEAP_FUNCTION(
- object->GetIsolate(),
- object->SetLocalPropertyIgnoreAttributes(*key, *value, attributes),
- Object);
-}
-
-
-void SetLocalPropertyNoThrow(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes) {
- Isolate* isolate = object->GetIsolate();
- ASSERT(!isolate->has_pending_exception());
- CHECK(!SetLocalPropertyIgnoreAttributes(
- object, key, value, attributes).is_null());
- CHECK(!isolate->has_pending_exception());
-}
-
-
Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
@@ -389,12 +308,6 @@
}
-Handle<Object> GetPrototype(Handle<Object> obj) {
- Handle<Object> result(obj->GetPrototype());
- return result;
-}
-
-
Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
const bool skip_hidden_prototypes = false;
CALL_HEAP_FUNCTION(obj->GetIsolate(),
@@ -402,44 +315,6 @@
}
-Handle<Object> PreventExtensions(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object);
-}
-
-
-Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
- Handle<String> key,
- Handle<Object> value) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->SetHiddenProperty(*key, *value),
- Object);
-}
-
-
-int GetIdentityHash(Handle<JSReceiver> obj) {
- CALL_AND_RETRY(obj->GetIsolate(),
- obj->GetIdentityHash(ALLOW_CREATION),
- return Smi::cast(__object__)->value(),
- return 0);
-}
-
-
-Handle<Object> DeleteElement(Handle<JSObject> obj,
- uint32_t index) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->DeleteElement(index, JSObject::NORMAL_DELETION),
- Object);
-}
-
-
-Handle<Object> DeleteProperty(Handle<JSObject> obj,
- Handle<String> prop) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
- Object);
-}
-
-
Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) {
Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(
@@ -457,43 +332,6 @@
}
-Handle<Object> SetElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode) {
- if (object->HasExternalArrayElements()) {
- if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
- bool has_exception;
- Handle<Object> number = Execution::ToNumber(value, &has_exception);
- if (has_exception) return Handle<Object>();
- value = number;
- }
- }
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetElement(index, *value, strict_mode, true),
- Object);
-}
-
-
-Handle<Object> SetOwnElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode) {
- ASSERT(!object->HasExternalArrayElements());
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->SetElement(index, *value, strict_mode, false),
- Object);
-}
-
-
-Handle<Object> TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind) {
- CALL_HEAP_FUNCTION(object->GetIsolate(),
- object->TransitionElementsKind(to_kind),
- Object);
-}
-
-
Handle<JSObject> Copy(Handle<JSObject> obj) {
Isolate* isolate = obj->GetIsolate();
CALL_HEAP_FUNCTION(isolate,
diff --git a/src/handles.h b/src/handles.h
index cfa65b3..4208913 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -167,18 +167,6 @@
// an object of expected type, or the handle is an error if running out
// of space or encountering an internal error.
-void NormalizeProperties(Handle<JSObject> object,
- PropertyNormalizationMode mode,
- int expected_additional_properties);
-Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object);
-void TransformToFastProperties(Handle<JSObject> object,
- int unused_property_fields);
-MUST_USE_RESULT Handle<NumberDictionary> NumberDictionarySet(
- Handle<NumberDictionary> dictionary,
- uint32_t index,
- Handle<Object> value,
- PropertyDetails details);
-
// Flattens a string.
void FlattenString(Handle<String> str);
@@ -186,12 +174,6 @@
// string.
Handle<String> FlattenGetString(Handle<String> str);
-Handle<Object> SetProperty(Handle<JSReceiver> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- StrictModeFlag strict_mode);
-
Handle<Object> SetProperty(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
@@ -203,40 +185,9 @@
Handle<Object> value,
PropertyAttributes attributes);
-Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyDetails details);
-
Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
Handle<Object> key);
-Handle<Object> SetLocalPropertyIgnoreAttributes(
- Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes);
-
-// Used to set local properties on the object we totally control
-// and which therefore has no accessors and alikes.
-void SetLocalPropertyNoThrow(Handle<JSObject> object,
- Handle<String> key,
- Handle<Object> value,
- PropertyAttributes attributes = NONE);
-
-MUST_USE_RESULT Handle<Object> SetElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode);
-
-Handle<Object> SetOwnElement(Handle<JSObject> object,
- uint32_t index,
- Handle<Object> value,
- StrictModeFlag strict_mode);
-
-Handle<Object> TransitionElementsKind(Handle<JSObject> object,
- ElementsKind to_kind);
-
Handle<Object> GetProperty(Handle<JSReceiver> obj,
const char* name);
@@ -248,21 +199,8 @@
Handle<String> name,
PropertyAttributes* attributes);
-Handle<Object> GetPrototype(Handle<Object> obj);
-
Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
-// Sets a hidden property on an object. Returns obj on success, undefined
-// if trying to set the property on a detached proxy.
-Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
- Handle<String> key,
- Handle<Object> value);
-
-int GetIdentityHash(Handle<JSReceiver> obj);
-
-Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
-Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
-
Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index);
Handle<JSObject> Copy(Handle<JSObject> obj);
@@ -316,7 +254,6 @@
int end,
PretenureFlag pretenure = NOT_TENURED);
-
// Sets the expected number of properties for the function's instances.
void SetExpectedNofProperties(Handle<JSFunction> func, int nof);
@@ -335,8 +272,6 @@
Handle<Object> SetPrototype(Handle<JSFunction> function,
Handle<Object> prototype);
-Handle<Object> PreventExtensions(Handle<JSObject> object);
-
Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
Handle<Object> key);
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 6ff350a..ccf0da8 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -120,7 +120,8 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<HeapObject*>(result)->set_map(map);
+ // String maps are all immortal immovable objects.
+ reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
// Set length and hash fields of the allocated string.
String* answer = String::cast(result);
answer->set_length(str.length());
@@ -457,7 +458,7 @@
int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
- ASSERT(HasBeenSetup());
+ ASSERT(HasBeenSetUp());
int amount = amount_of_external_allocated_memory_ + change_in_bytes;
if (change_in_bytes >= 0) {
// Avoid overflow.
@@ -499,7 +500,6 @@
#define GC_GREEDY_CHECK() { }
#endif
-
// Calls the FUNCTION_CALL function and retries it up to three times
// to guarantee that any allocations performed during the call will
// succeed if there's enough memory.
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 46c63c2..8be6f27 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -51,7 +51,7 @@
}
-void HeapProfiler::Setup() {
+void HeapProfiler::SetUp() {
Isolate* isolate = Isolate::Current();
if (isolate->heap_profiler() == NULL) {
isolate->set_heap_profiler(new HeapProfiler());
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index b1bc91c..ef5c4f4 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -48,7 +48,7 @@
// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
class HeapProfiler {
public:
- static void Setup();
+ static void SetUp();
static void TearDown();
static HeapSnapshot* TakeSnapshot(const char* name,
diff --git a/src/heap.cc b/src/heap.cc
index 9bb4e40..ba26c1d 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -80,7 +80,7 @@
#endif
reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)),
+ initial_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * LUMP_OF_MEMORY),
max_executable_size_(128l * LUMP_OF_MEMORY),
@@ -144,6 +144,11 @@
number_idle_notifications_(0),
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
+ idle_notification_will_schedule_next_gc_(false),
+ mark_sweeps_since_idle_round_started_(0),
+ ms_count_at_last_idle_notification_(0),
+ gc_count_at_last_idle_gc_(0),
+ scavenges_since_last_idle_round_(kIdleScavengeThreshold),
promotion_queue_(this),
configured_(false),
chunks_queued_for_free_(NULL) {
@@ -171,7 +176,7 @@
intptr_t Heap::Capacity() {
- if (!HasBeenSetup()) return 0;
+ if (!HasBeenSetUp()) return 0;
return new_space_.Capacity() +
old_pointer_space_->Capacity() +
@@ -183,7 +188,7 @@
intptr_t Heap::CommittedMemory() {
- if (!HasBeenSetup()) return 0;
+ if (!HasBeenSetUp()) return 0;
return new_space_.CommittedMemory() +
old_pointer_space_->CommittedMemory() +
@@ -195,14 +200,14 @@
}
intptr_t Heap::CommittedMemoryExecutable() {
- if (!HasBeenSetup()) return 0;
+ if (!HasBeenSetUp()) return 0;
return isolate()->memory_allocator()->SizeExecutable();
}
intptr_t Heap::Available() {
- if (!HasBeenSetup()) return 0;
+ if (!HasBeenSetUp()) return 0;
return new_space_.Available() +
old_pointer_space_->Available() +
@@ -213,7 +218,7 @@
}
-bool Heap::HasBeenSetup() {
+bool Heap::HasBeenSetUp() {
return old_pointer_space_ != NULL &&
old_data_space_ != NULL &&
code_space_ != NULL &&
@@ -458,6 +463,8 @@
}
mark_compact_collector()->SetFlags(kNoGCFlags);
new_space_.Shrink();
+ UncommitFromSpace();
+ Shrink();
incremental_marking()->UncommitMarkingDeque();
}
@@ -688,12 +695,18 @@
(static_cast<double>(young_survivors_after_last_gc_) * 100) /
start_new_space_size;
- if (survival_rate > kYoungSurvivalRateThreshold) {
+ if (survival_rate > kYoungSurvivalRateHighThreshold) {
high_survival_rate_period_length_++;
} else {
high_survival_rate_period_length_ = 0;
}
+ if (survival_rate < kYoungSurvivalRateLowThreshold) {
+ low_survival_rate_period_length_++;
+ } else {
+ low_survival_rate_period_length_ = 0;
+ }
+
double survival_rate_diff = survival_rate_ - survival_rate;
if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
@@ -753,32 +766,6 @@
UpdateSurvivalRateTrend(start_new_space_size);
- if (!new_space_high_promotion_mode_active_ &&
- new_space_.Capacity() == new_space_.MaximumCapacity() &&
- IsStableOrIncreasingSurvivalTrend() &&
- IsHighSurvivalRate()) {
- // Stable high survival rates even though young generation is at
- // maximum capacity indicates that most objects will be promoted.
- // To decrease scavenger pauses and final mark-sweep pauses, we
- // have to limit maximal capacity of the young generation.
- new_space_high_promotion_mode_active_ = true;
- if (FLAG_trace_gc) {
- PrintF("Limited new space size due to high promotion rate: %d MB\n",
- new_space_.InitialCapacity() / MB);
- }
- } else if (new_space_high_promotion_mode_active_ &&
- IsDecreasingSurvivalTrend() &&
- !IsHighSurvivalRate()) {
- // Decreasing low survival rates might indicate that the above high
- // promotion mode is over and we should allow the young generation
- // to grow again.
- new_space_high_promotion_mode_active_ = false;
- if (FLAG_trace_gc) {
- PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
- new_space_.MaximumCapacity() / MB);
- }
- }
-
size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
if (high_survival_rate_during_scavenges &&
@@ -808,6 +795,32 @@
UpdateSurvivalRateTrend(start_new_space_size);
}
+ if (!new_space_high_promotion_mode_active_ &&
+ new_space_.Capacity() == new_space_.MaximumCapacity() &&
+ IsStableOrIncreasingSurvivalTrend() &&
+ IsHighSurvivalRate()) {
+    // Stable high survival rates even though the young generation is at
+    // maximum capacity indicate that most objects will be promoted.
+ // To decrease scavenger pauses and final mark-sweep pauses, we
+ // have to limit maximal capacity of the young generation.
+ new_space_high_promotion_mode_active_ = true;
+ if (FLAG_trace_gc) {
+ PrintF("Limited new space size due to high promotion rate: %d MB\n",
+ new_space_.InitialCapacity() / MB);
+ }
+ } else if (new_space_high_promotion_mode_active_ &&
+ IsStableOrDecreasingSurvivalTrend() &&
+ IsLowSurvivalRate()) {
+ // Decreasing low survival rates might indicate that the above high
+ // promotion mode is over and we should allow the young generation
+ // to grow again.
+ new_space_high_promotion_mode_active_ = false;
+ if (FLAG_trace_gc) {
+ PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
+ new_space_.MaximumCapacity() / MB);
+ }
+ }
+
if (new_space_high_promotion_mode_active_ &&
new_space_.Capacity() > new_space_.InitialCapacity()) {
new_space_.Shrink();
@@ -872,6 +885,8 @@
isolate_->counters()->objs_since_last_full()->Set(0);
contexts_disposed_ = 0;
+
+ isolate_->set_context_exit_happened(false);
}
@@ -1024,7 +1039,7 @@
// Store Buffer overflowed while scanning promoted objects. These are not
// in any particular page, though they are likely to be clustered by the
// allocation routines.
- store_buffer_->HandleFullness();
+ store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
} else {
// Store Buffer overflowed while scanning a particular old space page for
// pointers to new space.
@@ -1090,7 +1105,7 @@
isolate_->descriptor_lookup_cache()->Clear();
// Used for updating survived_since_last_expansion_ at function end.
- intptr_t survived_watermark = PromotedSpaceSize();
+ intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
CheckNewSpaceExpansionCriteria();
@@ -1098,8 +1113,7 @@
incremental_marking()->PrepareForScavenge();
- old_pointer_space()->AdvanceSweeper(new_space_.Size());
- old_data_space()->AdvanceSweeper(new_space_.Size());
+ AdvanceSweepers(static_cast<int>(new_space_.Size()));
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
@@ -1183,11 +1197,13 @@
// Update how much has survived scavenge.
IncrementYoungSurvivorsCounter(static_cast<int>(
- (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
+ (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
LOG(isolate_, ResourceEvent("scavenge", "end"));
gc_state_ = NOT_IN_GC;
+
+ scavenges_since_last_idle_round_++;
}
@@ -1353,6 +1369,28 @@
}
+void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
+ AssertNoAllocation no_allocation;
+
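+  // Adapt the embedder-facing visitor to the internal ObjectVisitor
+  // interface so it can iterate the external string table below.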
+ class VisitorAdapter : public ObjectVisitor {
+ public:
+ explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
+ : visitor_(visitor) {}
+ virtual void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) {
+ if ((*p)->IsExternalString()) {
+ visitor_->VisitExternalString(Utils::ToLocal(
+ Handle<String>(String::cast(*p))));
+ }
+ }
+ }
+ private:
+ v8::ExternalResourceVisitor* visitor_;
+ } visitor_adapter(visitor);
+ external_string_table_.Iterate(&visitor_adapter);
+}
+
+
class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
public:
static inline void VisitPointer(Heap* heap, Object** p) {
@@ -1529,7 +1567,7 @@
if (marks_handling == TRANSFER_MARKS) {
if (Marking::TransferColor(source, target)) {
- MemoryChunk::IncrementLiveBytes(target->address(), size);
+ MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
}
}
}
@@ -1824,7 +1862,7 @@
}
Map* map = reinterpret_cast<Map*>(result);
- map->set_map_unsafe(meta_map());
+ map->set_map_no_write_barrier(meta_map());
map->set_instance_type(instance_type);
map->set_visitor_id(
StaticVisitorBase::GetVisitorId(instance_type, instance_size));
@@ -1868,6 +1906,19 @@
}
+MaybeObject* Heap::AllocateAccessorPair() {
+ Object* result;
+ { MaybeObject* maybe_result = AllocateStruct(ACCESSOR_PAIR_TYPE);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ AccessorPair* accessors = AccessorPair::cast(result);
+ // Later we will have to distinguish between undefined and the hole...
+ // accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
+ // accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
+ return accessors;
+}
+
+
const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
{type, size, k##camel_name##MapRootIndex},
@@ -2184,7 +2235,7 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map_unsafe(heap_number_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
HeapNumber::cast(result)->set_value(value);
return result;
}
@@ -2202,7 +2253,7 @@
{ MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map_unsafe(heap_number_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
HeapNumber::cast(result)->set_value(value);
return result;
}
@@ -2213,7 +2264,8 @@
{ MaybeObject* maybe_result = AllocateRawCell();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map_unsafe(global_property_cell_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(
+ global_property_cell_map());
JSGlobalPropertyCell::cast(result)->set_value(value);
return result;
}
@@ -2316,6 +2368,10 @@
}
set_infinity_value(HeapNumber::cast(obj));
+ // The hole has not been created yet, but we want to put something
+  // predictable in the gaps in the symbol table, so let's make that Smi zero.
+ set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
+
// Allocate initial symbol table.
{ MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
if (!maybe_obj->ToObject(&obj)) return false;
@@ -2422,17 +2478,18 @@
// Allocate the code_stubs dictionary. The initial size is set to avoid
// expanding the dictionary during bootstrapping.
- { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
+ { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_code_stubs(NumberDictionary::cast(obj));
+ set_code_stubs(UnseededNumberDictionary::cast(obj));
+
// Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
// is set to avoid expanding the dictionary during bootstrapping.
- { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
+ { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_non_monomorphic_cache(NumberDictionary::cast(obj));
+ set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
{ MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
if (!maybe_obj->ToObject(&obj)) return false;
@@ -2554,7 +2611,7 @@
}
}
}
- array->set_map(heap->fixed_cow_array_map());
+ array->set_map_no_write_barrier(heap->fixed_cow_array_map());
}
@@ -2881,8 +2938,8 @@
bool is_ascii_data_in_two_byte_string = false;
if (!is_ascii) {
// At least one of the strings uses two-byte representation so we
- // can't use the fast case code for short ascii strings below, but
- // we can try to save memory if all chars actually fit in ascii.
+ // can't use the fast case code for short ASCII strings below, but
+ // we can try to save memory if all chars actually fit in ASCII.
is_ascii_data_in_two_byte_string =
first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
if (is_ascii_data_in_two_byte_string) {
@@ -2891,9 +2948,9 @@
}
// If the resulting string is small make a flat string.
- if (length < String::kMinNonFlatLength) {
+ if (length < ConsString::kMinLength) {
// Note that neither of the two inputs can be a slice because:
- STATIC_ASSERT(String::kMinNonFlatLength <= SlicedString::kMinLength);
+ STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
ASSERT(first->IsFlat());
ASSERT(second->IsFlat());
if (is_ascii) {
@@ -2969,7 +3026,7 @@
int end,
PretenureFlag pretenure) {
int length = end - start;
- if (length == 0) {
+ if (length <= 0) {
return empty_string();
} else if (length == 1) {
return LookupSingleCharacterStringFromCode(buffer->Get(start));
@@ -3150,7 +3207,8 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<ByteArray*>(result)->set_map_unsafe(byte_array_map());
+ reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
+ byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@@ -3168,7 +3226,8 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<ByteArray*>(result)->set_map_unsafe(byte_array_map());
+ reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
+ byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@@ -3178,11 +3237,11 @@
if (size == 0) return;
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
- filler->set_map_unsafe(one_pointer_filler_map());
+ filler->set_map_no_write_barrier(one_pointer_filler_map());
} else if (size == 2 * kPointerSize) {
- filler->set_map_unsafe(two_pointer_filler_map());
+ filler->set_map_no_write_barrier(two_pointer_filler_map());
} else {
- filler->set_map_unsafe(free_space_map());
+ filler->set_map_no_write_barrier(free_space_map());
FreeSpace::cast(filler)->set_size(size);
}
}
@@ -3200,7 +3259,7 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<ExternalArray*>(result)->set_map_unsafe(
+ reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
MapForExternalArrayType(array_type));
reinterpret_cast<ExternalArray*>(result)->set_length(length);
reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
@@ -3237,7 +3296,7 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
// Initialize the object
- HeapObject::cast(result)->set_map_unsafe(code_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(code_map());
Code* code = Code::cast(result);
ASSERT(!isolate_->code_range()->exists() ||
isolate_->code_range()->contains(code->address()));
@@ -3249,7 +3308,7 @@
}
code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->set_next_code_flushing_candidate(undefined_value());
+ code->set_gc_metadata(Smi::FromInt(0));
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@@ -3366,7 +3425,7 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// No need for write barrier since object is white and map is in old space.
- HeapObject::cast(result)->set_map_unsafe(map);
+ HeapObject::cast(result)->set_map_no_write_barrier(map);
return result;
}
@@ -3591,8 +3650,8 @@
// TODO(1240798): Initialize the object's body using valid initial values
// according to the object's initial map. For example, if the map's
// instance type is JS_ARRAY_TYPE, the length field should be initialized
- // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
- // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
+ // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
+ // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
// verification code has to cope with (temporarily) invalid objects. See
// for example, JSArray::JSArrayVerify).
Object* filler;
@@ -3785,7 +3844,7 @@
}
Map* new_map = Map::cast(obj);
- // Setup the global object as a normalized object.
+ // Set up the global object as a normalized object.
global->set_map(new_map);
global->map()->clear_instance_descriptors();
global->set_properties(dictionary);
@@ -4059,7 +4118,7 @@
ASSERT(chars >= 0);
// Ensure the chars matches the number of characters in the buffer.
ASSERT(static_cast<unsigned>(chars) == buffer->Length());
- // Determine whether the string is ascii.
+ // Determine whether the string is ASCII.
bool is_ascii = true;
while (buffer->has_more()) {
if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
@@ -4095,7 +4154,7 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<HeapObject*>(result)->set_map_unsafe(map);
+ reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
// Set length and hash fields of the allocated string.
String* answer = String::cast(result);
answer->set_length(chars);
@@ -4140,7 +4199,7 @@
}
// Partially initialize the object.
- HeapObject::cast(result)->set_map_unsafe(ascii_string_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -4176,7 +4235,7 @@
}
// Partially initialize the object.
- HeapObject::cast(result)->set_map_unsafe(string_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(string_map());
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -4192,7 +4251,8 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Initialize the object.
- reinterpret_cast<FixedArray*>(result)->set_map_unsafe(fixed_array_map());
+ reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
+ fixed_array_map());
reinterpret_cast<FixedArray*>(result)->set_length(0);
return result;
}
@@ -4221,13 +4281,13 @@
}
if (InNewSpace(obj)) {
HeapObject* dst = HeapObject::cast(obj);
- dst->set_map_unsafe(map);
+ dst->set_map_no_write_barrier(map);
CopyBlock(dst->address() + kPointerSize,
src->address() + kPointerSize,
FixedArray::SizeFor(len) - kPointerSize);
return obj;
}
- HeapObject::cast(obj)->set_map_unsafe(map);
+ HeapObject::cast(obj)->set_map_no_write_barrier(map);
FixedArray* result = FixedArray::cast(obj);
result->set_length(len);
@@ -4247,7 +4307,7 @@
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
HeapObject* dst = HeapObject::cast(obj);
- dst->set_map_unsafe(map);
+ dst->set_map_no_write_barrier(map);
CopyBlock(
dst->address() + FixedDoubleArray::kLengthOffset,
src->address() + FixedDoubleArray::kLengthOffset,
@@ -4265,7 +4325,7 @@
}
// Initialize header.
FixedArray* array = reinterpret_cast<FixedArray*>(result);
- array->set_map_unsafe(fixed_array_map());
+ array->set_map_no_write_barrier(fixed_array_map());
array->set_length(length);
// Initialize body.
ASSERT(!InNewSpace(undefined_value()));
@@ -4313,7 +4373,7 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map_unsafe(heap->fixed_array_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
MemsetPointer(array->data_start(), filler, length);
@@ -4346,7 +4406,8 @@
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- reinterpret_cast<FixedArray*>(obj)->set_map_unsafe(fixed_array_map());
+ reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
+ fixed_array_map());
FixedArray::cast(obj)->set_length(length);
return obj;
}
@@ -4360,7 +4421,7 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Initialize the object.
- reinterpret_cast<FixedDoubleArray*>(result)->set_map_unsafe(
+ reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
fixed_double_array_map());
reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
return result;
@@ -4377,7 +4438,7 @@
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- reinterpret_cast<FixedDoubleArray*>(obj)->set_map_unsafe(
+ reinterpret_cast<FixedDoubleArray*>(obj)->set_map_no_write_barrier(
fixed_double_array_map());
FixedDoubleArray::cast(obj)->set_length(length);
return obj;
@@ -4414,7 +4475,8 @@
{ MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<HeapObject*>(result)->set_map_unsafe(hash_table_map());
+ reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
+ hash_table_map());
ASSERT(result->IsHashTable());
return result;
}
@@ -4427,7 +4489,7 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map_unsafe(global_context_map());
+ context->set_map_no_write_barrier(global_context_map());
ASSERT(context->IsGlobalContext());
ASSERT(result->IsContext());
return result;
@@ -4441,7 +4503,7 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map_unsafe(function_context_map());
+ context->set_map_no_write_barrier(function_context_map());
context->set_closure(function);
context->set_previous(function->context());
context->set_extension(NULL);
@@ -4461,7 +4523,7 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map_unsafe(catch_context_map());
+ context->set_map_no_write_barrier(catch_context_map());
context->set_closure(function);
context->set_previous(previous);
context->set_extension(name);
@@ -4479,7 +4541,7 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map_unsafe(with_context_map());
+ context->set_map_no_write_barrier(with_context_map());
context->set_closure(function);
context->set_previous(previous);
context->set_extension(extension);
@@ -4497,7 +4559,7 @@
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map_unsafe(block_context_map());
+ context->set_map_no_write_barrier(block_context_map());
context->set_closure(function);
context->set_previous(previous);
context->set_extension(scope_info);
@@ -4510,7 +4572,7 @@
FixedArray* scope_info;
MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
- scope_info->set_map_unsafe(scope_info_map());
+ scope_info->set_map_no_write_barrier(scope_info_map());
return scope_info;
}
@@ -4553,7 +4615,82 @@
}
-bool Heap::IdleNotification() {
+bool Heap::IdleNotification(int hint) {
+ if (hint >= 1000) return IdleGlobalGC();
+ if (contexts_disposed_ > 0 || !FLAG_incremental_marking ||
+ FLAG_expose_gc || Serializer::enabled()) {
+ return true;
+ }
+
+  // By doing small chunks of GC work in each IdleNotification, we perform
+  // a round of incremental GCs and after that wait until the mutator
+  // creates enough garbage to justify a new round.
+ // An incremental GC progresses as follows:
+ // 1. many incremental marking steps,
+ // 2. one old space mark-sweep-compact,
+ // 3. many lazy sweep steps.
+ // Use mark-sweep-compact events to count incremental GCs in a round.
+
+ intptr_t size_factor = Min(Max(hint, 30), 1000) / 10;
+ // The size factor is in range [3..100].
+ intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
+
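+  // If no incremental marking is in progress, use the idle time for lazy
+  // sweeping first; report more work pending while sweeping is incomplete.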
+ if (incremental_marking()->IsStopped()) {
+ if (!IsSweepingComplete() &&
+ !AdvanceSweepers(static_cast<int>(step_size))) {
+ return false;
+ }
+ }
+
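+  // The previous idle round is over; only start a new one if the mutator
+  // has created enough garbage (enough scavenges) since it finished.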
+ if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+ if (EnoughGarbageSinceLastIdleRound()) {
+ StartIdleRound();
+ } else {
+ return true;
+ }
+ }
+
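+  // Credit any mark-sweeps since the last notification, idle-triggered or
+  // not, towards the current round.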
+ int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
+ mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
+ ms_count_at_last_idle_notification_ = ms_count_;
+
+ if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+ FinishIdleRound();
+ return true;
+ }
+
+ if (incremental_marking()->IsStopped()) {
+ if (hint < 1000 && !WorthStartingGCWhenIdle()) {
+ FinishIdleRound();
+ return true;
+ }
+ incremental_marking()->Start();
+ }
+
+  // This flag prevents incremental marking from requesting GC via stack guard.
+ idle_notification_will_schedule_next_gc_ = true;
+ incremental_marking()->Step(step_size);
+ idle_notification_will_schedule_next_gc_ = false;
+
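+  // Once marking completes, finish with a full collection; if no other GC
+  // ran since the last idle GC the mutator is probably inactive, so also
+  // clear the compilation cache and return memory to the OS.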
+ if (incremental_marking()->IsComplete()) {
+ bool uncommit = false;
+ if (gc_count_at_last_idle_gc_ == gc_count_) {
+ // No GC since the last full GC, the mutator is probably not active.
+ isolate_->compilation_cache()->Clear();
+ uncommit = true;
+ }
+ CollectAllGarbage(kNoGCFlags);
+ gc_count_at_last_idle_gc_ = gc_count_;
+ if (uncommit) {
+ new_space_.Shrink();
+ UncommitFromSpace();
+ }
+ }
+ return false;
+}
+
+
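+// The pre-existing idle notification strategy, used for hints of 1000 or
+// more: a fixed schedule of scavenges followed by full collections.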
+bool Heap::IdleGlobalGC() {
static const int kIdlesBeforeScavenge = 4;
static const int kIdlesBeforeMarkSweep = 7;
static const int kIdlesBeforeMarkCompact = 8;
@@ -4642,7 +4779,7 @@
#ifdef DEBUG
void Heap::Print() {
- if (!HasBeenSetup()) return;
+ if (!HasBeenSetUp()) return;
isolate()->PrintStack();
AllSpaces spaces;
for (Space* space = spaces.next(); space != NULL; space = spaces.next())
@@ -4707,7 +4844,7 @@
bool Heap::Contains(Address addr) {
if (OS::IsOutsideAllocatedSpace(addr)) return false;
- return HasBeenSetup() &&
+ return HasBeenSetUp() &&
(new_space_.ToSpaceContains(addr) ||
old_pointer_space_->Contains(addr) ||
old_data_space_->Contains(addr) ||
@@ -4725,7 +4862,7 @@
bool Heap::InSpace(Address addr, AllocationSpace space) {
if (OS::IsOutsideAllocatedSpace(addr)) return false;
- if (!HasBeenSetup()) return false;
+ if (!HasBeenSetUp()) return false;
switch (space) {
case NEW_SPACE:
@@ -4750,7 +4887,7 @@
#ifdef DEBUG
void Heap::Verify() {
- ASSERT(HasBeenSetup());
+ ASSERT(HasBeenSetUp());
store_buffer()->Verify();
@@ -5090,29 +5227,29 @@
void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
- v->Synchronize("symbol_table");
+ v->Synchronize(VisitorSynchronization::kSymbolTable);
if (mode != VISIT_ALL_IN_SCAVENGE &&
mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
// Scavenge collections have special processing for this.
external_string_table_.Iterate(v);
}
- v->Synchronize("external_string_table");
+ v->Synchronize(VisitorSynchronization::kExternalStringsTable);
}
void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
- v->Synchronize("strong_root_list");
+ v->Synchronize(VisitorSynchronization::kStrongRootList);
v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
- v->Synchronize("symbol");
+ v->Synchronize(VisitorSynchronization::kSymbol);
isolate_->bootstrapper()->Iterate(v);
- v->Synchronize("bootstrapper");
+ v->Synchronize(VisitorSynchronization::kBootstrapper);
isolate_->Iterate(v);
- v->Synchronize("top");
+ v->Synchronize(VisitorSynchronization::kTop);
Relocatable::Iterate(v);
- v->Synchronize("relocatable");
+ v->Synchronize(VisitorSynchronization::kRelocatable);
#ifdef ENABLE_DEBUGGER_SUPPORT
isolate_->debug()->Iterate(v);
@@ -5120,13 +5257,13 @@
isolate_->deoptimizer_data()->Iterate(v);
}
#endif
- v->Synchronize("debug");
+ v->Synchronize(VisitorSynchronization::kDebug);
isolate_->compilation_cache()->Iterate(v);
- v->Synchronize("compilationcache");
+ v->Synchronize(VisitorSynchronization::kCompilationCache);
// Iterate over local handles in handle scopes.
isolate_->handle_scope_implementer()->Iterate(v);
- v->Synchronize("handlescope");
+ v->Synchronize(VisitorSynchronization::kHandleScope);
// Iterate over the builtin code objects and code stubs in the
// heap. Note that it is not necessary to iterate over code objects
@@ -5134,7 +5271,7 @@
if (mode != VISIT_ALL_IN_SCAVENGE) {
isolate_->builtins()->IterateBuiltins(v);
}
- v->Synchronize("builtins");
+ v->Synchronize(VisitorSynchronization::kBuiltins);
// Iterate over global handles.
switch (mode) {
@@ -5149,11 +5286,11 @@
isolate_->global_handles()->IterateAllRoots(v);
break;
}
- v->Synchronize("globalhandles");
+ v->Synchronize(VisitorSynchronization::kGlobalHandles);
// Iterate over pointers being held by inactive threads.
isolate_->thread_manager()->Iterate(v);
- v->Synchronize("threadmanager");
+ v->Synchronize(VisitorSynchronization::kThreadManager);
// Iterate over the pointers the Serialization/Deserialization code is
// holding.
@@ -5177,7 +5314,7 @@
bool Heap::ConfigureHeap(int max_semispace_size,
intptr_t max_old_gen_size,
intptr_t max_executable_size) {
- if (HasBeenSetup()) return false;
+ if (HasBeenSetUp()) return false;
if (max_semispace_size > 0) {
if (max_semispace_size < Page::kPageSize) {
@@ -5293,6 +5430,16 @@
}
+intptr_t Heap::PromotedSpaceSizeOfObjects() {
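+  // Sum the sizes of live objects, not committed memory, over all spaces
+  // outside the new space.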
+ return old_pointer_space_->SizeOfObjects()
+ + old_data_space_->SizeOfObjects()
+ + code_space_->SizeOfObjects()
+ + map_space_->SizeOfObjects()
+ + cell_space_->SizeOfObjects()
+ + lo_space_->SizeOfObjects();
+}
+
+
int Heap::PromotedExternalMemorySize() {
if (amount_of_external_allocated_memory_
<= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
@@ -5353,7 +5500,7 @@
Address map_addr = map_p->address();
- obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
+ obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
MarkObjectRecursively(&map);
@@ -5400,7 +5547,7 @@
HeapObject* map_p = HeapObject::FromAddress(map_addr);
- obj->set_map(reinterpret_cast<Map*>(map_p));
+ obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
@@ -5466,7 +5613,7 @@
#endif
-bool Heap::Setup(bool create_heap_objects) {
+bool Heap::SetUp(bool create_heap_objects) {
#ifdef DEBUG
allocation_timeout_ = FLAG_gc_interval;
debug_utils_ = new HeapDebugUtils(this);
@@ -5476,7 +5623,7 @@
// goes wrong, just return false. The caller should check the results and
// call Heap::TearDown() to release allocated memory.
//
- // If the heap is not yet configured (eg, through the API), configure it.
+ // If the heap is not yet configured (e.g. through the API), configure it.
// Configuration is based on the flags new-space-size (really the semispace
// size) and old-space-size if set or the initial values of semispace_size_
// and old_generation_size_ otherwise.
@@ -5496,12 +5643,12 @@
MarkMapPointersAsEncoded(false);
- // Setup memory allocator.
- if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
+ // Set up memory allocator.
+ if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
return false;
- // Setup new space.
- if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) {
+ // Set up new space.
+ if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
return false;
}
@@ -5512,7 +5659,7 @@
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
if (old_pointer_space_ == NULL) return false;
- if (!old_pointer_space_->Setup()) return false;
+ if (!old_pointer_space_->SetUp()) return false;
// Initialize old data space.
old_data_space_ =
@@ -5521,14 +5668,14 @@
OLD_DATA_SPACE,
NOT_EXECUTABLE);
if (old_data_space_ == NULL) return false;
- if (!old_data_space_->Setup()) return false;
+ if (!old_data_space_->SetUp()) return false;
// Initialize the code space, set its maximum capacity to the old
// generation size. It needs executable memory.
// On 64-bit platform(s), we put all code objects in a 2 GB range of
// virtual address space, so that they can call each other with near calls.
if (code_range_size_ > 0) {
- if (!isolate_->code_range()->Setup(code_range_size_)) {
+ if (!isolate_->code_range()->SetUp(code_range_size_)) {
return false;
}
}
@@ -5536,7 +5683,7 @@
code_space_ =
new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
- if (!code_space_->Setup()) return false;
+ if (!code_space_->SetUp()) return false;
// Initialize map space.
map_space_ = new MapSpace(this,
@@ -5544,19 +5691,31 @@
FLAG_max_map_space_pages,
MAP_SPACE);
if (map_space_ == NULL) return false;
- if (!map_space_->Setup()) return false;
+ if (!map_space_->SetUp()) return false;
// Initialize global property cell space.
cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
if (cell_space_ == NULL) return false;
- if (!cell_space_->Setup()) return false;
+ if (!cell_space_->SetUp()) return false;
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
// explicitly when allocating large code objects.
lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
if (lo_space_ == NULL) return false;
- if (!lo_space_->Setup()) return false;
+ if (!lo_space_->SetUp()) return false;
+
+ // Set up the seed that is used to randomize the string hash function.
+ ASSERT(hash_seed() == 0);
+ if (FLAG_randomize_hashes) {
+ if (FLAG_hash_seed == 0) {
+ set_hash_seed(
+ Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
+ } else {
+ set_hash_seed(Smi::FromInt(FLAG_hash_seed));
+ }
+ }
+
if (create_heap_objects) {
// Create initial maps.
if (!CreateInitialMaps()) return false;
@@ -5571,7 +5730,7 @@
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
- store_buffer()->Setup();
+ store_buffer()->SetUp();
return true;
}
@@ -6112,7 +6271,7 @@
Address map_addr = map_p->address();
- obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
+ obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
// Scan the object body.
if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
@@ -6154,7 +6313,7 @@
HeapObject* map_p = HeapObject::FromAddress(map_addr);
- obj->set_map(reinterpret_cast<Map*>(map_p));
+ obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
@@ -6381,10 +6540,12 @@
int KeyedLookupCache::Lookup(Map* map, String* name) {
- int index = Hash(map, name);
- Key& key = keys_[index];
- if ((key.map == map) && key.name->Equals(name)) {
- return field_offsets_[index];
+ int index = (Hash(map, name) & kHashMask);
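+  // The cache is set-associative: probe every entry in the bucket.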
+ for (int i = 0; i < kEntriesPerBucket; i++) {
+ Key& key = keys_[index + i];
+ if ((key.map == map) && key.name->Equals(name)) {
+ return field_offsets_[index + i];
+ }
}
return kNotFound;
}
@@ -6393,7 +6554,29 @@
void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
String* symbol;
if (HEAP->LookupSymbolIfExists(name, &symbol)) {
- int index = Hash(map, symbol);
+ int index = (Hash(map, symbol) & kHashMask);
+ // After a GC there will be free slots, so we use them in order (this may
+ // help to get the most frequently used one in position 0).
+  for (int i = 0; i < kEntriesPerBucket; i++) {
+    Key& key = keys_[index + i];
+ Object* free_entry_indicator = NULL;
+ if (key.map == free_entry_indicator) {
+ key.map = map;
+ key.name = symbol;
+ field_offsets_[index + i] = field_offset;
+ return;
+ }
+ }
+ // No free entry found in this bucket, so we move them all down one and
+ // put the new entry at position zero.
+ for (int i = kEntriesPerBucket - 1; i > 0; i--) {
+ Key& key = keys_[index + i];
+ Key& key2 = keys_[index + i - 1];
+ key = key2;
+ field_offsets_[index + i] = field_offsets_[index + i - 1];
+ }
+
+ // Write the new first entry.
Key& key = keys_[index];
key.map = map;
key.name = symbol;
diff --git a/src/heap.h b/src/heap.h
index 1864f7b..d4b3fad 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -96,6 +96,7 @@
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, string_split_cache, StringSplitCache) \
V(Object, termination_exception, TerminationException) \
+ V(Smi, hash_seed, HashSeed) \
V(Map, string_map, StringMap) \
V(Map, symbol_map, SymbolMap) \
V(Map, cons_string_map, ConsStringMap) \
@@ -145,8 +146,8 @@
V(Map, neander_map, NeanderMap) \
V(JSObject, message_listeners, MessageListeners) \
V(Foreign, prototype_accessors, PrototypeAccessors) \
- V(NumberDictionary, code_stubs, CodeStubs) \
- V(NumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
+ V(UnseededNumberDictionary, code_stubs, CodeStubs) \
+ V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
@@ -155,6 +156,7 @@
V(Script, empty_script, EmptyScript) \
V(Smi, real_stack_limit, RealStackLimit) \
V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
+ V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
@@ -245,6 +247,7 @@
V(use_strict, "use strict") \
V(dot_symbol, ".") \
V(anonymous_function_symbol, "(anonymous function)") \
+ V(compare_ic_symbol, ".compare_ic") \
V(infinity_symbol, "Infinity") \
V(minus_infinity_symbol, "-Infinity")
@@ -432,7 +435,7 @@
class Heap {
public:
// Configure heap size before setup. Return false if the heap has been
- // setup already.
+ // set up already.
bool ConfigureHeap(int max_semispace_size,
intptr_t max_old_gen_size,
intptr_t max_executable_size);
@@ -441,7 +444,7 @@
// Initializes the global object heap. If create_heap_objects is true,
// also creates the basic non-mutable objects.
// Returns whether it succeeded.
- bool Setup(bool create_heap_objects);
+ bool SetUp(bool create_heap_objects);
// Destroys all memory allocated by the heap.
void TearDown();
@@ -451,8 +454,8 @@
// jslimit_/real_jslimit_ variable in the StackGuard.
void SetStackLimits();
- // Returns whether Setup has been called.
- bool HasBeenSetup();
+ // Returns whether SetUp has been called.
+ bool HasBeenSetUp();
// Returns the maximum amount of memory reserved for the heap. For
// the young generation, we reserve 4 times the amount needed for a
@@ -610,6 +613,9 @@
// Allocates an empty PolymorphicCodeCache.
MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
+ // Allocates a pre-tenured empty AccessorPair.
+ MUST_USE_RESULT MaybeObject* AllocateAccessorPair();
+
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
@@ -682,7 +688,7 @@
PretenureFlag pretenure = NOT_TENURED);
// Computes a single character string where the character has code.
- // A cache is used for ascii codes.
+ // A cache is used for ASCII codes.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* LookupSingleCharacterStringFromCode(
@@ -950,8 +956,7 @@
// Please note this function does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str);
MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str);
- MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(
- Vector<const uc16> str);
+ MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str);
MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(const char* str) {
return LookupSymbol(CStrVector(str));
}
@@ -1060,7 +1065,7 @@
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
// TODO(1490): Try removing the unchecked accessors, now that GC marking does
- // not corrupt the stack.
+ // not corrupt the map.
#define ROOT_ACCESSOR(type, name, camel_name) \
type* name() { \
return type::cast(roots_[k##camel_name##RootIndex]); \
@@ -1132,7 +1137,7 @@
inline AllocationSpace TargetSpaceId(InstanceType type);
// Sets the stub_cache_ (only used when expanding the dictionary).
- void public_set_code_stubs(NumberDictionary* value) {
+ void public_set_code_stubs(UnseededNumberDictionary* value) {
roots_[kCodeStubsRootIndex] = value;
}
@@ -1144,7 +1149,7 @@
}
// Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
- void public_set_non_monomorphic_cache(NumberDictionary* value) {
+ void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
roots_[kNonMonomorphicCacheRootIndex] = value;
}
@@ -1328,8 +1333,8 @@
return Min(limit, halfway_to_the_max);
}
- // Can be called when the embedding application is idle.
- bool IdleNotification();
+ // Implements the corresponding V8 API function.
+ bool IdleNotification(int hint);
// Declare all the root indices.
enum RootListIndex {
@@ -1374,6 +1379,7 @@
void CheckNewSpaceExpansionCriteria();
inline void IncrementYoungSurvivorsCounter(int survived) {
+ ASSERT(survived >= 0);
young_survivors_after_last_gc_ = survived;
survived_since_last_expansion_ += survived;
}
@@ -1405,6 +1411,8 @@
void ProcessWeakReferences(WeakObjectRetainer* retainer);
+ void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
+
// Helper function that governs the promotion policy from new space to
// old. If the object's old address lies below the new space's age
// mark or if we've already filled the bottom 1/16th of the to space,
@@ -1421,6 +1429,7 @@
// Returns the size of objects residing in non new spaces.
intptr_t PromotedSpaceSize();
+ intptr_t PromotedSpaceSizeOfObjects();
double total_regexp_code_generated() { return total_regexp_code_generated_; }
void IncreaseTotalRegexpCodeGenerated(int size) {
@@ -1452,6 +1461,17 @@
return &incremental_marking_;
}
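+  // True once both lazily swept old spaces have finished sweeping.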
+ bool IsSweepingComplete() {
+ return old_data_space()->IsSweepingComplete() &&
+ old_pointer_space()->IsSweepingComplete();
+ }
+
+ bool AdvanceSweepers(int step_size) {
+ bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
+ sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
+ return sweeping_complete;
+ }
+
ExternalStringTable* external_string_table() {
return &external_string_table_;
}
@@ -1487,6 +1507,21 @@
// The roots that have an index less than this are always in old space.
static const int kOldSpaceRoots = 0x20;
+ bool idle_notification_will_schedule_next_gc() {
+ return idle_notification_will_schedule_next_gc_;
+ }
+
+ uint32_t HashSeed() {
+ uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
+ ASSERT(FLAG_randomize_hashes || seed == 0);
+ return seed;
+ }
+
+ void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
+ ASSERT(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
+ set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
+ }
+
private:
Heap();
@@ -1769,11 +1804,13 @@
enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
- static const int kYoungSurvivalRateThreshold = 90;
+ static const int kYoungSurvivalRateHighThreshold = 90;
+ static const int kYoungSurvivalRateLowThreshold = 10;
static const int kYoungSurvivalRateAllowedDeviation = 15;
int young_survivors_after_last_gc_;
int high_survival_rate_period_length_;
+ int low_survival_rate_period_length_;
double survival_rate_;
SurvivalRateTrend previous_survival_rate_trend_;
SurvivalRateTrend survival_rate_trend_;
@@ -1806,20 +1843,54 @@
}
}
- bool IsIncreasingSurvivalTrend() {
- return survival_rate_trend() == INCREASING;
+ bool IsStableOrDecreasingSurvivalTrend() {
+ switch (survival_rate_trend()) {
+ case STABLE:
+ case DECREASING:
+ return true;
+ default:
+ return false;
+ }
}
- bool IsDecreasingSurvivalTrend() {
- return survival_rate_trend() == DECREASING;
+ bool IsIncreasingSurvivalTrend() {
+ return survival_rate_trend() == INCREASING;
}
bool IsHighSurvivalRate() {
return high_survival_rate_period_length_ > 0;
}
+ bool IsLowSurvivalRate() {
+ return low_survival_rate_period_length_ > 0;
+ }
+
void SelectScavengingVisitorsTable();
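+
+  // Bookkeeping for idle notification rounds: a round performs up to
+  // kMaxMarkSweepsInIdleRound mark-sweeps and then waits for enough new
+  // garbage before starting over.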
+ void StartIdleRound() {
+ mark_sweeps_since_idle_round_started_ = 0;
+ ms_count_at_last_idle_notification_ = ms_count_;
+ }
+
+ void FinishIdleRound() {
+ mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound;
+ scavenges_since_last_idle_round_ = 0;
+ }
+
+ bool EnoughGarbageSinceLastIdleRound() {
+ return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold);
+ }
+
+ bool WorthStartingGCWhenIdle() {
+ if (contexts_disposed_ > 0) {
+ return true;
+ }
+ return incremental_marking()->WorthActivating();
+ }
+
+ // Returns true if no more GC work is left.
+ bool IdleGlobalGC();
+
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
@@ -1849,11 +1920,20 @@
unsigned int last_idle_notification_gc_count_;
bool last_idle_notification_gc_count_init_;
+ bool idle_notification_will_schedule_next_gc_;
+ int mark_sweeps_since_idle_round_started_;
+ int ms_count_at_last_idle_notification_;
+ unsigned int gc_count_at_last_idle_gc_;
+ int scavenges_since_last_idle_round_;
+
+ static const int kMaxMarkSweepsInIdleRound = 7;
+ static const int kIdleScavengeThreshold = 5;
+
// Shared state read by the scavenge collector and set by ScavengeObject.
PromotionQueue promotion_queue_;
// Flag is set when the heap has been configured. The heap can be repeatedly
- // configured through the API until it is setup.
+ // configured through the API until it is set up.
bool configured_;
ExternalStringTable external_string_table_;
@@ -2072,11 +2152,17 @@
// Clear the cache.
void Clear();
- static const int kLength = 64;
+ static const int kLength = 256;
static const int kCapacityMask = kLength - 1;
- static const int kMapHashShift = 2;
+ static const int kMapHashShift = 5;
+ static const int kHashMask = -4; // Zero the last two bits.
+ static const int kEntriesPerBucket = 4;
static const int kNotFound = -1;
+ // kEntriesPerBucket should be a power of 2.
+ STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
+ STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
+
private:
KeyedLookupCache() {
for (int i = 0; i < kLength; ++i) {
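
The cache quadruples from 64 single-entry slots to 256 entries arranged as 64 four-way buckets: masking a hash with kHashMask (-4, i.e. ~3) clears the low two bits so every computed index lands on a bucket boundary, and a lookup then probes the kEntriesPerBucket consecutive entries. The index arithmetic, extracted into a standalone snippet (constants copied from above):

  #include <cassert>
  #include <cstdint>

  const int kLength = 256;
  const int kCapacityMask = kLength - 1;
  const int kHashMask = -4;          // zeroes the last two bits
  const int kEntriesPerBucket = 4;

  int BucketStart(uint32_t hash) {
    int index = static_cast<int>(hash) & kCapacityMask & kHashMask;
    assert(index % kEntriesPerBucket == 0);  // probes: index .. index + 3
    return index;
  }
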
@@ -2313,7 +2399,7 @@
intptr_t start_size_; // Size of objects in heap set in constructor.
GarbageCollector collector_; // Type of collector.
- // A count (including this one, eg, the first collection is 1) of the
+ // A count (including this one, e.g. the first collection is 1) of the
// number of garbage collections.
unsigned int gc_count_;
@@ -2550,6 +2636,7 @@
AssertNoAllocation no_alloc; // i.e. no gc allowed.
+ private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
#endif // DEBUG || LIVE_OBJECT_LIST
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 32c3abf..7ae0b44 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -416,18 +416,18 @@
void HValue::PrintChangesTo(StringStream* stream) {
- int changes_flags = ChangesFlags();
- if (changes_flags == 0) return;
+ GVNFlagSet changes_flags = ChangesFlags();
+ if (changes_flags.IsEmpty()) return;
stream->Add(" changes[");
- if (changes_flags == AllSideEffects()) {
+ if (changes_flags == AllSideEffectsFlagSet()) {
stream->Add("*");
} else {
bool add_comma = false;
-#define PRINT_DO(type) \
- if (changes_flags & (1 << kChanges##type)) { \
- if (add_comma) stream->Add(","); \
- add_comma = true; \
- stream->Add(#type); \
+#define PRINT_DO(type) \
+ if (changes_flags.Contains(kChanges##type)) { \
+ if (add_comma) stream->Add(","); \
+ add_comma = true; \
+ stream->Add(#type); \
}
GVN_FLAG_LIST(PRINT_DO);
#undef PRINT_DO
@@ -788,6 +788,29 @@
}
+HValue* HBitwise::Canonicalize() {
+ if (!representation().IsInteger32()) return this;
+ // If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x.
+ int32_t nop_constant = (op() == Token::BIT_AND) ? -1 : 0;
+ if (left()->IsConstant() &&
+ HConstant::cast(left())->HasInteger32Value() &&
+ HConstant::cast(left())->Integer32Value() == nop_constant) {
+ return right();
+ }
+ if (right()->IsConstant() &&
+ HConstant::cast(right())->HasInteger32Value() &&
+ HConstant::cast(right())->Integer32Value() == nop_constant) {
+ return left();
+ }
+ return this;
+}
+
+
+HValue* HChange::Canonicalize() {
+ return (from().Equals(to())) ? value() : this;
+}
+
+
void HTypeof::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
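
Both new canonicalizations are identity rewrites: on int32 values, x & -1, x | 0 and x ^ 0 leave x unchanged (so the whole instruction collapses to its non-constant operand), and an HChange whose source and target representations agree is a representation-level no-op. The arithmetic identities, as a checkable standalone snippet:

  #include <cassert>
  #include <cstdint>

  int main() {
    const int32_t values[] = {0, 1, -1, 42, INT32_MIN, INT32_MAX};
    for (int32_t x : values) {
      assert((x & -1) == x);  // BIT_AND: nop_constant is -1
      assert((x | 0) == x);   // BIT_OR:  nop_constant is 0
      assert((x ^ 0) == x);   // BIT_XOR: nop_constant is 0
    }
    return 0;
  }
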
@@ -1227,10 +1250,9 @@
bool HArrayLiteral::IsCopyOnWrite() const {
- Handle<FixedArray> constant_elements = this->constant_elements();
- FixedArrayBase* constant_elements_values =
- FixedArrayBase::cast(constant_elements->get(1));
- return constant_elements_values->map() == HEAP->fixed_cow_array_map();
+ if (!boilerplate_object_->IsJSObject()) return false;
+ return Handle<JSObject>::cast(boilerplate_object_)->elements()->map() ==
+ HEAP->fixed_cow_array_map();
}
@@ -1317,6 +1339,23 @@
}
+Range* HLoadKeyedSpecializedArrayElement::InferRange() {
+ switch (elements_kind()) {
+ case EXTERNAL_PIXEL_ELEMENTS:
+ return new Range(0, 255);
+ case EXTERNAL_BYTE_ELEMENTS:
+ return new Range(-128, 127);
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ return new Range(0, 255);
+ case EXTERNAL_SHORT_ELEMENTS:
+ return new Range(-32768, 32767);
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ return new Range(0, 65535);
+ default:
+ return HValue::InferRange();
+ }
+}
+
void HCompareGeneric::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(token()));
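
The inferred ranges are exactly the numeric limits of the external-array element types, which lets later passes drop redundant bounds and overflow checks on the loaded value; kinds not listed (the int, float and double kinds) keep the generic HValue range. As standalone static checks:

  #include <cstdint>
  #include <limits>

  // EXTERNAL_BYTE/SHORT map to int8/int16; the UNSIGNED and PIXEL kinds
  // map to uint8/uint16 (pixel elements are additionally clamped on store).
  static_assert(std::numeric_limits<int8_t>::min() == -128, "");
  static_assert(std::numeric_limits<int8_t>::max() == 127, "");
  static_assert(std::numeric_limits<uint8_t>::max() == 255, "");
  static_assert(std::numeric_limits<int16_t>::min() == -32768, "");
  static_assert(std::numeric_limits<int16_t>::max() == 32767, "");
  static_assert(std::numeric_limits<uint16_t>::max() == 65535, "");
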
@@ -1386,21 +1425,21 @@
SetOperandAt(0, context);
SetOperandAt(1, object);
set_representation(Representation::Tagged());
- SetFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnMaps);
for (int i = 0;
i < types->length() && types_.length() < kMaxLoadPolymorphism;
++i) {
Handle<Map> map = types->at(i);
LookupResult lookup(map->GetIsolate());
map->LookupInDescriptors(NULL, *name, &lookup);
- if (lookup.IsProperty()) {
+ if (lookup.IsFound()) {
switch (lookup.type()) {
case FIELD: {
int index = lookup.GetLocalFieldIndexFromMap(*map);
if (index < 0) {
- SetFlag(kDependsOnInobjectFields);
+ SetGVNFlag(kDependsOnInobjectFields);
} else {
- SetFlag(kDependsOnBackingStoreFields);
+ SetGVNFlag(kDependsOnBackingStoreFields);
}
types_.Add(types->at(i));
break;
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 52fed88..38277e9 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -146,6 +146,7 @@
V(Parameter) \
V(Power) \
V(PushArgument) \
+ V(Random) \
V(RegExpLiteral) \
V(Return) \
V(Sar) \
@@ -491,18 +492,26 @@
};
+// There must be one corresponding kDepends flag for every kChanges flag and
+// the order of the kChanges flags must be exactly the same as that of the
+// kDepends flags.
+enum GVNFlag {
+ // Declare global value numbering flags.
+#define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
+ GVN_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+ kAfterLastFlag,
+ kLastFlag = kAfterLastFlag - 1
+};
+
+typedef EnumSet<GVNFlag> GVNFlagSet;
+
+
class HValue: public ZoneObject {
public:
static const int kNoNumber = -1;
- // There must be one corresponding kDepends flag for every kChanges flag and
- // the order of the kChanges flags must be exactly the same as of the kDepends
- // flags.
enum Flag {
- // Declare global value numbering flags.
- #define DECLARE_DO(type) kChanges##type, kDependsOn##type,
- GVN_FLAG_LIST(DECLARE_DO)
- #undef DECLARE_DO
kFlexibleRepresentation,
// Participate in Global Value Numbering, i.e. elimination of
// unnecessary recomputations. If an instruction sets this flag, it must
@@ -522,8 +531,8 @@
static const int kChangesToDependsFlagsLeftShift = 1;
- static int ConvertChangesToDependsFlags(int flags) {
- return flags << kChangesToDependsFlagsLeftShift;
+ static GVNFlagSet ConvertChangesToDependsFlags(GVNFlagSet flags) {
+ return GVNFlagSet(flags.ToIntegral() << kChangesToDependsFlagsLeftShift);
}
static HValue* cast(HValue* value) { return value; }
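
ConvertChangesToDependsFlags still works after the EnumSet change because DECLARE_FLAG interleaves the enumerators: each kChanges flag occupies bit 2n and its kDependsOn partner bit 2n + 1, so shifting the packed set left by one moves every changes-bit onto its matching depends-bit. A standalone check of the invariant (toy two-entry flag list with the same macro shape):

  #include <cassert>

  #define TOY_FLAG_LIST(V) V(Maps) V(GlobalVars)  // illustration only
  enum GVNFlag {
  #define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
    TOY_FLAG_LIST(DECLARE_FLAG)
  #undef DECLARE_FLAG
  };

  int main() {
    // kChangesMaps == 0, kDependsOnMaps == 1, kChangesGlobalVars == 2, ...
    assert(((1 << kChangesMaps) << 1) == (1 << kDependsOnMaps));
    assert(((1 << kChangesGlobalVars) << 1) == (1 << kDependsOnGlobalVars));
    return 0;
  }
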
@@ -621,16 +630,32 @@
void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
- void SetAllSideEffects() { flags_ |= AllSideEffects(); }
- void ClearAllSideEffects() { flags_ &= ~AllSideEffects(); }
- bool HasSideEffects() const { return (flags_ & AllSideEffects()) != 0; }
+ GVNFlagSet gvn_flags() const { return gvn_flags_; }
+ void SetGVNFlag(GVNFlag f) { gvn_flags_.Add(f); }
+ void ClearGVNFlag(GVNFlag f) { gvn_flags_.Remove(f); }
+ bool CheckGVNFlag(GVNFlag f) const { return gvn_flags_.Contains(f); }
+ void SetAllSideEffects() { gvn_flags_.Add(AllSideEffectsFlagSet()); }
+ void ClearAllSideEffects() {
+ gvn_flags_.Remove(AllSideEffectsFlagSet());
+ }
+ bool HasSideEffects() const {
+ return gvn_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
+ }
bool HasObservableSideEffects() const {
- return (flags_ & ObservableSideEffects()) != 0;
+ return gvn_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
}
- int ChangesFlags() const { return flags_ & ChangesFlagsMask(); }
- int ObservableChangesFlags() const {
- return flags_ & ChangesFlagsMask() & ObservableSideEffects();
+ GVNFlagSet ChangesFlags() const {
+ GVNFlagSet result = gvn_flags_;
+ result.Intersect(AllChangesFlagSet());
+ return result;
+ }
+
+ GVNFlagSet ObservableChangesFlags() const {
+ GVNFlagSet result = gvn_flags_;
+ result.Intersect(AllChangesFlagSet());
+ result.Intersect(AllObservableSideEffectsFlagSet());
+ return result;
}
Range* range() const { return range_; }
@@ -696,25 +721,28 @@
representation_ = r;
}
- private:
- static int ChangesFlagsMask() {
- int result = 0;
+ static GVNFlagSet AllChangesFlagSet() {
+ GVNFlagSet result;
// Create changes mask.
-#define ADD_FLAG(type) result |= (1 << kChanges##type);
+#define ADD_FLAG(type) result.Add(kChanges##type);
GVN_FLAG_LIST(ADD_FLAG)
#undef ADD_FLAG
return result;
}
// A flag mask to mark an instruction as having arbitrary side effects.
- static int AllSideEffects() {
- return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
+ static GVNFlagSet AllSideEffectsFlagSet() {
+ GVNFlagSet result = AllChangesFlagSet();
+ result.Remove(kChangesOsrEntries);
+ return result;
}
// A flag mask of all side effects that can make observable changes in
// an executing program (i.e. are not safe to repeat, move or remove).
- static int ObservableSideEffects() {
- return ChangesFlagsMask() & ~(1 << kChangesElementsKind);
+ static GVNFlagSet AllObservableSideEffectsFlagSet() {
+ GVNFlagSet result = AllChangesFlagSet();
+ result.Remove(kChangesElementsKind);
+ return result;
}
// Remove the matching use from the use list if present. Returns the
@@ -734,7 +762,9 @@
HUseListNode* use_list_;
Range* range_;
int flags_;
+ GVNFlagSet gvn_flags_;
+ private:
DISALLOW_COPY_AND_ASSIGN(HValue);
};
@@ -771,7 +801,7 @@
: next_(NULL),
previous_(NULL),
position_(RelocInfo::kNoPosition) {
- SetFlag(kDependsOnOsrEntries);
+ SetGVNFlag(kDependsOnOsrEntries);
}
virtual void DeleteFromGraph() { Unlink(); }
@@ -1130,12 +1160,16 @@
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
virtual HType CalculateInferredType();
+ virtual HValue* Canonicalize();
Representation from() { return value()->representation(); }
Representation to() { return representation(); }
bool deoptimize_on_undefined() const {
return CheckFlag(kDeoptimizeOnUndefined);
}
+ bool deoptimize_on_minus_zero() const {
+ return CheckFlag(kBailoutOnMinusZero);
+ }
virtual Representation RequiredInputRepresentation(int index) {
return from();
}
@@ -1307,9 +1341,11 @@
class HEnterInlined: public HTemplateInstruction<0> {
public:
HEnterInlined(Handle<JSFunction> closure,
+ int arguments_count,
FunctionLiteral* function,
CallKind call_kind)
: closure_(closure),
+ arguments_count_(arguments_count),
function_(function),
call_kind_(call_kind) {
}
@@ -1317,6 +1353,7 @@
virtual void PrintDataTo(StringStream* stream);
Handle<JSFunction> closure() const { return closure_; }
+ int arguments_count() const { return arguments_count_; }
FunctionLiteral* function() const { return function_; }
CallKind call_kind() const { return call_kind_; }
@@ -1328,6 +1365,7 @@
private:
Handle<JSFunction> closure_;
+ int arguments_count_;
FunctionLiteral* function_;
CallKind call_kind_;
};
@@ -1711,8 +1749,8 @@
SetOperandAt(1, typecheck);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnArrayLengths);
- SetFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnArrayLengths);
+ SetGVNFlag(kDependsOnMaps);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -1736,7 +1774,7 @@
explicit HFixedArrayBaseLength(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnArrayLengths);
+ SetGVNFlag(kDependsOnArrayLengths);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -1755,7 +1793,7 @@
explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
- SetFlag(kDependsOnElementsKind);
+ SetGVNFlag(kDependsOnElementsKind);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -1881,8 +1919,8 @@
explicit HLoadElements(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
- SetFlag(kDependsOnElementsKind);
+ SetGVNFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnElementsKind);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -1921,15 +1959,21 @@
class HCheckMap: public HTemplateInstruction<2> {
public:
- HCheckMap(HValue* value, Handle<Map> map, HValue* typecheck = NULL)
- : map_(map) {
+ HCheckMap(HValue* value, Handle<Map> map,
+ HValue* typecheck = NULL,
+ CompareMapMode mode = REQUIRE_EXACT_MAP)
+ : map_(map),
+ mode_(mode) {
SetOperandAt(0, value);
// If callers don't depend on a typecheck, they can pass in NULL. In that
// case we use a copy of the |value| argument as a dummy value.
SetOperandAt(1, typecheck != NULL ? typecheck : value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnMaps);
+ has_element_transitions_ =
+ map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL) != NULL ||
+ map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL) != NULL;
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -1940,17 +1984,24 @@
HValue* value() { return OperandAt(0); }
Handle<Map> map() const { return map_; }
+ CompareMapMode mode() const { return mode_; }
DECLARE_CONCRETE_INSTRUCTION(CheckMap)
protected:
virtual bool DataEquals(HValue* other) {
HCheckMap* b = HCheckMap::cast(other);
- return map_.is_identical_to(b->map());
+ // Two CheckMap instructions are DataEqual if their maps are identical and
+ // they have the same mode. The mode comparison can be ignored if the map
+ // has no elements transitions.
+ return map_.is_identical_to(b->map()) &&
+ (b->mode() == mode() || !has_element_transitions_);
}
private:
+ bool has_element_transitions_;
Handle<Map> map_;
+ CompareMapMode mode_;
};
@@ -2087,7 +2138,7 @@
HCheckPrototypeMaps(Handle<JSObject> prototype, Handle<JSObject> holder)
: prototype_(prototype), holder_(holder) {
SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnMaps);
}
#ifdef DEBUG
@@ -2985,6 +3036,23 @@
};
+class HRandom: public HTemplateInstruction<1> {
+ public:
+ explicit HRandom(HValue* global_object) {
+ SetOperandAt(0, global_object);
+ set_representation(Representation::Double());
+ }
+
+ HValue* global_object() { return OperandAt(0); }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Random)
+};
+
+
class HAdd: public HArithmeticBinaryOperation {
public:
HAdd(HValue* context, HValue* left, HValue* right)
@@ -3138,6 +3206,8 @@
virtual bool IsCommutative() const { return true; }
+ virtual HValue* Canonicalize();
+
static HInstruction* NewHBitwise(Zone* zone,
Token::Value op,
HValue* context,
@@ -3218,7 +3288,7 @@
class HOsrEntry: public HTemplateInstruction<0> {
public:
explicit HOsrEntry(int ast_id) : ast_id_(ast_id) {
- SetFlag(kChangesOsrEntries);
+ SetGVNFlag(kChangesOsrEntries);
}
int ast_id() const { return ast_id_; }
@@ -3306,7 +3376,7 @@
: cell_(cell), details_(details) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnGlobalVars);
+ SetGVNFlag(kDependsOnGlobalVars);
}
Handle<JSGlobalPropertyCell> cell() const { return cell_; }
@@ -3385,7 +3455,7 @@
: HUnaryOperation(value),
cell_(cell),
details_(details) {
- SetFlag(kChangesGlobalVars);
+ SetGVNFlag(kChangesGlobalVars);
}
Handle<JSGlobalPropertyCell> cell() const { return cell_; }
@@ -3447,14 +3517,48 @@
class HLoadContextSlot: public HUnaryOperation {
public:
- HLoadContextSlot(HValue* context , int slot_index)
- : HUnaryOperation(context), slot_index_(slot_index) {
+ enum Mode {
+ // Perform a normal load of the context slot without checking its value.
+ kNoCheck,
+ // Load and check the value of the context slot. Deoptimize if it's the
+ // hole value. This is used for checking loads of uninitialized harmony
+ // bindings, where we deoptimize into full-codegen generated code that
+ // subsequently throws a reference error.
+ kCheckDeoptimize,
+ // Load and check the value of the context slot. Return undefined if it's
+ // the hole value. This is used for non-harmony const assignments
+ kCheckReturnUndefined
+ };
+
+ HLoadContextSlot(HValue* context, Variable* var)
+ : HUnaryOperation(context), slot_index_(var->index()) {
+ ASSERT(var->IsContextSlot());
+ switch (var->mode()) {
+ case LET:
+ case CONST_HARMONY:
+ mode_ = kCheckDeoptimize;
+ break;
+ case CONST:
+ mode_ = kCheckReturnUndefined;
+ break;
+ default:
+ mode_ = kNoCheck;
+ }
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnContextSlots);
+ SetGVNFlag(kDependsOnContextSlots);
}
int slot_index() const { return slot_index_; }
+ Mode mode() const { return mode_; }
+
+ bool DeoptimizesOnHole() {
+ return mode_ == kCheckDeoptimize;
+ }
+
+ bool RequiresHoleCheck() {
+ return mode_ != kNoCheck;
+ }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
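
The three modes describe what the generated load does when the slot still holds the one-of-a-kind hole sentinel of an uninitialized binding. A standalone behavioral model (plain C++, not V8 types; the hole is modeled as an empty optional and undefined as an integer sentinel):

  #include <cassert>
  #include <optional>
  #include <stdexcept>

  enum Mode { kNoCheck, kCheckDeoptimize, kCheckReturnUndefined };
  const int kUndefined = -1;  // stand-in for JS undefined

  int LoadContextSlot(const std::optional<int>& slot, Mode mode) {
    if (!slot.has_value()) {           // binding is still the hole
      if (mode == kCheckDeoptimize)    // let / harmony const
        throw std::runtime_error("deopt; full-codegen throws ReferenceError");
      if (mode == kCheckReturnUndefined)
        return kUndefined;             // legacy const reads yield undefined
    }
    assert(slot.has_value());  // kNoCheck: used when no hole is possible
    return *slot;
  }
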
@@ -3472,26 +3576,49 @@
private:
int slot_index_;
+ Mode mode_;
};
class HStoreContextSlot: public HTemplateInstruction<2> {
public:
- HStoreContextSlot(HValue* context, int slot_index, HValue* value)
- : slot_index_(slot_index) {
+ enum Mode {
+ // Perform a normal store to the context slot without checking its previous
+ // value.
+ kNoCheck,
+ // Check the previous value of the context slot and deoptimize if it's the
+ // hole value. This is used for checking assignments to uninitialized
+ // harmony bindings, where we deoptimize into full-codegen generated code
+ // that subsequently throws a reference error.
+ kCheckDeoptimize,
+ // Check the previous value and ignore assignment if it isn't a hole value
+ kCheckIgnoreAssignment
+ };
+
+ HStoreContextSlot(HValue* context, int slot_index, Mode mode, HValue* value)
+ : slot_index_(slot_index), mode_(mode) {
SetOperandAt(0, context);
SetOperandAt(1, value);
- SetFlag(kChangesContextSlots);
+ SetGVNFlag(kChangesContextSlots);
}
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
int slot_index() const { return slot_index_; }
+ Mode mode() const { return mode_; }
bool NeedsWriteBarrier() {
return StoringValueNeedsWriteBarrier(value());
}
+ bool DeoptimizesOnHole() {
+ return mode_ == kCheckDeoptimize;
+ }
+
+ bool RequiresHoleCheck() {
+ return mode_ != kNoCheck;
+ }
+
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3502,6 +3629,7 @@
private:
int slot_index_;
+ Mode mode_;
};
@@ -3513,11 +3641,11 @@
offset_(offset) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnMaps);
if (is_in_object) {
- SetFlag(kDependsOnInobjectFields);
+ SetGVNFlag(kDependsOnInobjectFields);
} else {
- SetFlag(kDependsOnBackingStoreFields);
+ SetGVNFlag(kDependsOnBackingStoreFields);
}
}
@@ -3611,7 +3739,7 @@
: HUnaryOperation(function) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnCalls);
+ SetGVNFlag(kDependsOnCalls);
}
HValue* function() { return OperandAt(0); }
@@ -3633,7 +3761,7 @@
SetOperandAt(0, obj);
SetOperandAt(1, key);
set_representation(Representation::Tagged());
- SetFlag(kDependsOnArrayElements);
+ SetGVNFlag(kDependsOnArrayElements);
SetFlag(kUseGVN);
}
@@ -3664,7 +3792,7 @@
SetOperandAt(0, elements);
SetOperandAt(1, key);
set_representation(Representation::Double());
- SetFlag(kDependsOnDoubleArrayElements);
+ SetGVNFlag(kDependsOnDoubleArrayElements);
SetFlag(kUseGVN);
}
@@ -3701,9 +3829,9 @@
} else {
set_representation(Representation::Integer32());
}
- SetFlag(kDependsOnSpecializedArrayElements);
+ SetGVNFlag(kDependsOnSpecializedArrayElements);
// Native code could change the specialized array.
- SetFlag(kDependsOnCalls);
+ SetGVNFlag(kDependsOnCalls);
SetFlag(kUseGVN);
}
@@ -3721,6 +3849,8 @@
HValue* key() { return OperandAt(1); }
ElementsKind elements_kind() const { return elements_kind_; }
+ virtual Range* InferRange();
+
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement)
protected:
@@ -3773,9 +3903,9 @@
SetOperandAt(0, obj);
SetOperandAt(1, val);
if (is_in_object_) {
- SetFlag(kChangesInobjectFields);
+ SetGVNFlag(kChangesInobjectFields);
} else {
- SetFlag(kChangesBackingStoreFields);
+ SetGVNFlag(kChangesBackingStoreFields);
}
}
@@ -3850,7 +3980,7 @@
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
- SetFlag(kChangesArrayElements);
+ SetGVNFlag(kChangesArrayElements);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -3875,10 +4005,6 @@
}
}
- bool ValueNeedsSmiCheck() {
- return value_is_smi();
- }
-
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement)
@@ -3896,7 +4022,7 @@
SetOperandAt(0, elements);
SetOperandAt(1, key);
SetOperandAt(2, val);
- SetFlag(kChangesDoubleArrayElements);
+ SetGVNFlag(kChangesDoubleArrayElements);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -3930,7 +4056,7 @@
HValue* val,
ElementsKind elements_kind)
: elements_kind_(elements_kind) {
- SetFlag(kChangesSpecializedArrayElements);
+ SetGVNFlag(kChangesSpecializedArrayElements);
SetOperandAt(0, external_elements);
SetOperandAt(1, key);
SetOperandAt(2, val);
@@ -4008,7 +4134,7 @@
transitioned_map_(transitioned_map) {
SetOperandAt(0, object);
SetFlag(kUseGVN);
- SetFlag(kChangesElementsKind);
+ SetGVNFlag(kChangesElementsKind);
set_representation(Representation::Tagged());
}
@@ -4043,7 +4169,7 @@
: HBinaryOperation(context, left, right) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnMaps);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -4069,7 +4195,7 @@
SetOperandAt(2, index);
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnMaps);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -4124,7 +4250,7 @@
explicit HStringLength(HValue* string) : HUnaryOperation(string) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
- SetFlag(kDependsOnMaps);
+ SetGVNFlag(kDependsOnMaps);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -4167,18 +4293,24 @@
class HArrayLiteral: public HMaterializedLiteral<1> {
public:
HArrayLiteral(HValue* context,
- Handle<FixedArray> constant_elements,
+ Handle<HeapObject> boilerplate_object,
int length,
int literal_index,
int depth)
: HMaterializedLiteral<1>(literal_index, depth),
length_(length),
- constant_elements_(constant_elements) {
+ boilerplate_object_(boilerplate_object) {
SetOperandAt(0, context);
}
HValue* context() { return OperandAt(0); }
- Handle<FixedArray> constant_elements() const { return constant_elements_; }
+ ElementsKind boilerplate_elements_kind() const {
+ if (!boilerplate_object_->IsJSObject()) {
+ return FAST_ELEMENTS;
+ }
+ return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind();
+ }
+ Handle<HeapObject> boilerplate_object() const { return boilerplate_object_; }
int length() const { return length_; }
bool IsCopyOnWrite() const;
@@ -4192,7 +4324,7 @@
private:
int length_;
- Handle<FixedArray> constant_elements_;
+ Handle<HeapObject> boilerplate_object_;
};
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 5cf6e3d..862afe5 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -167,8 +167,7 @@
void HBasicBlock::Goto(HBasicBlock* block, bool drop_extra) {
if (block->IsInlineReturnTarget()) {
AddInstruction(new(zone()) HLeaveInlined);
- last_environment_ = last_environment()->outer();
- if (drop_extra) last_environment_->Drop(1);
+ last_environment_ = last_environment()->DiscardInlined(drop_extra);
}
AddSimulate(AstNode::kNoNumber);
HGoto* instr = new(zone()) HGoto(block);
@@ -182,8 +181,7 @@
ASSERT(target->IsInlineReturnTarget());
ASSERT(return_value != NULL);
AddInstruction(new(zone()) HLeaveInlined);
- last_environment_ = last_environment()->outer();
- if (drop_extra) last_environment_->Drop(1);
+ last_environment_ = last_environment()->DiscardInlined(drop_extra);
last_environment()->Push(return_value);
AddSimulate(AstNode::kNoNumber);
HGoto* instr = new(zone()) HGoto(target);
@@ -628,7 +626,11 @@
Handle<Code> HGraph::Compile(CompilationInfo* info) {
int values = GetMaximumValueID();
if (values > LAllocator::max_initial_value_ids()) {
- if (FLAG_trace_bailout) PrintF("Function is too big\n");
+ if (FLAG_trace_bailout) {
+ SmartArrayPointer<char> name(
+ info->shared_info()->DebugName()->ToCString());
+ PrintF("Function @\"%s\" is too big.\n", *name);
+ }
return Handle<Code>::null();
}
@@ -1106,10 +1108,10 @@
}
-void HValueMap::Kill(int flags) {
- int depends_flags = HValue::ConvertChangesToDependsFlags(flags);
- if ((present_flags_ & depends_flags) == 0) return;
- present_flags_ = 0;
+void HValueMap::Kill(GVNFlagSet flags) {
+ GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(flags);
+ if (!present_flags_.ContainsAnyOf(depends_flags)) return;
+ present_flags_.RemoveAll();
for (int i = 0; i < array_size_; ++i) {
HValue* value = array_[i].value;
if (value != NULL) {
@@ -1118,7 +1120,8 @@
int next;
for (int current = array_[i].next; current != kNil; current = next) {
next = lists_[current].next;
- if ((lists_[current].value->flags() & depends_flags) != 0) {
+ HValue* value = lists_[current].value;
+ if (value->gvn_flags().ContainsAnyOf(depends_flags)) {
// Drop it.
count_--;
lists_[current].next = free_list_head_;
@@ -1127,13 +1130,14 @@
// Keep it.
lists_[current].next = kept;
kept = current;
- present_flags_ |= lists_[current].value->flags();
+ present_flags_.Add(value->gvn_flags());
}
}
array_[i].next = kept;
// Now possibly drop directly indexed element.
- if ((array_[i].value->flags() & depends_flags) != 0) { // Drop it.
+ value = array_[i].value;
+ if (value->gvn_flags().ContainsAnyOf(depends_flags)) { // Drop it.
count_--;
int head = array_[i].next;
if (head == kNil) {
@@ -1145,7 +1149,7 @@
free_list_head_ = head;
}
} else {
- present_flags_ |= array_[i].value->flags(); // Keep it.
+ present_flags_.Add(value->gvn_flags()); // Keep it.
}
}
}
@@ -1352,8 +1356,8 @@
loop_side_effects_(graph->blocks()->length()),
visited_on_paths_(graph->zone(), graph->blocks()->length()) {
ASSERT(info->isolate()->heap()->allow_allocation(false));
- block_side_effects_.AddBlock(0, graph_->blocks()->length());
- loop_side_effects_.AddBlock(0, graph_->blocks()->length());
+ block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length());
+ loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length());
}
~HGlobalValueNumberer() {
ASSERT(!info_->isolate()->heap()->allow_allocation(true));
@@ -1363,14 +1367,15 @@
bool Analyze();
private:
- int CollectSideEffectsOnPathsToDominatedBlock(HBasicBlock* dominator,
- HBasicBlock* dominated);
+ GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
+ HBasicBlock* dominator,
+ HBasicBlock* dominated);
void AnalyzeBlock(HBasicBlock* block, HValueMap* map);
void ComputeBlockSideEffects();
void LoopInvariantCodeMotion();
void ProcessLoopBlock(HBasicBlock* block,
HBasicBlock* before_loop,
- int loop_kills);
+ GVNFlagSet loop_kills);
bool AllowCodeMotion();
bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
@@ -1383,10 +1388,10 @@
bool removed_side_effects_;
// A map of block IDs to their side effects.
- ZoneList<int> block_side_effects_;
+ ZoneList<GVNFlagSet> block_side_effects_;
// A map of loop header block IDs to their loop's side effects.
- ZoneList<int> loop_side_effects_;
+ ZoneList<GVNFlagSet> loop_side_effects_;
// Used when collecting side effects on paths from dominator to
// dominated.
@@ -1411,23 +1416,24 @@
HBasicBlock* block = graph_->blocks()->at(i);
HInstruction* instr = block->first();
int id = block->block_id();
- int side_effects = 0;
+ GVNFlagSet side_effects;
while (instr != NULL) {
- side_effects |= instr->ChangesFlags();
+ side_effects.Add(instr->ChangesFlags());
instr = instr->next();
}
- block_side_effects_[id] |= side_effects;
+ block_side_effects_[id].Add(side_effects);
// Loop headers are part of their loop.
if (block->IsLoopHeader()) {
- loop_side_effects_[id] |= side_effects;
+ loop_side_effects_[id].Add(side_effects);
}
// Propagate loop side effects upwards.
if (block->HasParentLoopHeader()) {
int header_id = block->parent_loop_header()->block_id();
- loop_side_effects_[header_id] |=
- block->IsLoopHeader() ? loop_side_effects_[id] : side_effects;
+ loop_side_effects_[header_id].Add(block->IsLoopHeader()
+ ? loop_side_effects_[id]
+ : side_effects);
}
}
}
@@ -1437,10 +1443,10 @@
for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
HBasicBlock* block = graph_->blocks()->at(i);
if (block->IsLoopHeader()) {
- int side_effects = loop_side_effects_[block->block_id()];
+ GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
TraceGVN("Try loop invariant motion for block B%d effects=0x%x\n",
block->block_id(),
- side_effects);
+ side_effects.ToIntegral());
HBasicBlock* last = block->loop_information()->GetLastBackEdge();
for (int j = block->block_id(); j <= last->block_id(); ++j) {
@@ -1453,17 +1459,17 @@
void HGlobalValueNumberer::ProcessLoopBlock(HBasicBlock* block,
HBasicBlock* loop_header,
- int loop_kills) {
+ GVNFlagSet loop_kills) {
HBasicBlock* pre_header = loop_header->predecessors()->at(0);
- int depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
+ GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
TraceGVN("Loop invariant motion for B%d depends_flags=0x%x\n",
block->block_id(),
- depends_flags);
+ depends_flags.ToIntegral());
HInstruction* instr = block->first();
while (instr != NULL) {
HInstruction* next = instr->next();
if (instr->CheckFlag(HValue::kUseGVN) &&
- (instr->flags() & depends_flags) == 0) {
+ !instr->gvn_flags().ContainsAnyOf(depends_flags)) {
TraceGVN("Checking instruction %d (%s)\n",
instr->id(),
instr->Mnemonic());
@@ -1499,20 +1505,20 @@
}
-int HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
+GVNFlagSet HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator, HBasicBlock* dominated) {
- int side_effects = 0;
+ GVNFlagSet side_effects;
for (int i = 0; i < dominated->predecessors()->length(); ++i) {
HBasicBlock* block = dominated->predecessors()->at(i);
if (dominator->block_id() < block->block_id() &&
block->block_id() < dominated->block_id() &&
visited_on_paths_.Add(block->block_id())) {
- side_effects |= block_side_effects_[block->block_id()];
+ side_effects.Add(block_side_effects_[block->block_id()]);
if (block->IsLoopHeader()) {
- side_effects |= loop_side_effects_[block->block_id()];
+ side_effects.Add(loop_side_effects_[block->block_id()]);
}
- side_effects |= CollectSideEffectsOnPathsToDominatedBlock(
- dominator, block);
+ side_effects.Add(CollectSideEffectsOnPathsToDominatedBlock(
+ dominator, block));
}
}
return side_effects;
@@ -1533,8 +1539,8 @@
HInstruction* instr = block->first();
while (instr != NULL) {
HInstruction* next = instr->next();
- int flags = instr->ChangesFlags();
- if (flags != 0) {
+ GVNFlagSet flags = instr->ChangesFlags();
+ if (!flags.IsEmpty()) {
// Clear all instructions in the map that are affected by side effects.
map->Kill(flags);
TraceGVN("Instruction %d kills\n", instr->id());
@@ -2068,6 +2074,7 @@
for_typeof_(false) {
owner->set_ast_context(this); // Push.
#ifdef DEBUG
+ ASSERT(!owner->environment()->is_arguments_adaptor());
original_length_ = owner->environment()->length();
#endif
}
@@ -2081,14 +2088,16 @@
EffectContext::~EffectContext() {
ASSERT(owner()->HasStackOverflow() ||
owner()->current_block() == NULL ||
- owner()->environment()->length() == original_length_);
+ (owner()->environment()->length() == original_length_ &&
+ !owner()->environment()->is_arguments_adaptor()));
}
ValueContext::~ValueContext() {
ASSERT(owner()->HasStackOverflow() ||
owner()->current_block() == NULL ||
- owner()->environment()->length() == original_length_ + 1);
+ (owner()->environment()->length() == original_length_ + 1 &&
+ !owner()->environment()->is_arguments_adaptor()));
}
@@ -2301,7 +2310,7 @@
Bailout("function with illegal redeclaration");
return NULL;
}
- SetupScope(scope);
+ SetUpScope(scope);
// Add an edge to the body entry. This is warty: the graph's start
// environment will be used by the Lithium translation as the initial
@@ -2465,7 +2474,7 @@
}
-void HGraphBuilder::SetupScope(Scope* scope) {
+void HGraphBuilder::SetUpScope(Scope* scope) {
HConstant* undefined_constant = new(zone()) HConstant(
isolate()->factory()->undefined_value(), Representation::Tagged());
AddInstruction(undefined_constant);
@@ -3199,7 +3208,7 @@
}
Handle<GlobalObject> global(info()->global_object());
global->Lookup(*var->name(), lookup);
- if (!lookup->IsProperty() ||
+ if (!lookup->IsFound() ||
lookup->type() != NORMAL ||
(is_store && lookup->IsReadOnly()) ||
lookup->holder() != *global) {
@@ -3228,11 +3237,11 @@
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Variable* variable = expr->var();
- if (variable->mode() == LET) {
- return Bailout("reference to let variable");
- }
switch (variable->location()) {
case Variable::UNALLOCATED: {
+ if (variable->mode() == LET || variable->mode() == CONST_HARMONY) {
+ return Bailout("reference to global harmony declared variable");
+ }
// Handle known global constants like 'undefined' specially to avoid a
// load from a global cell for them.
Handle<Object> constant_value =
@@ -3275,20 +3284,18 @@
case Variable::PARAMETER:
case Variable::LOCAL: {
HValue* value = environment()->Lookup(variable);
- if (variable->mode() == CONST &&
- value == graph()->GetConstantHole()) {
- return Bailout("reference to uninitialized const variable");
+ if (value == graph()->GetConstantHole()) {
+ ASSERT(variable->mode() == CONST ||
+ variable->mode() == CONST_HARMONY ||
+ variable->mode() == LET);
+ return Bailout("reference to uninitialized variable");
}
return ast_context()->ReturnValue(value);
}
case Variable::CONTEXT: {
- if (variable->mode() == CONST) {
- return Bailout("reference to const context slot");
- }
HValue* context = BuildContextChainWalk(variable);
- HLoadContextSlot* instr =
- new(zone()) HLoadContextSlot(context, variable->index());
+ HLoadContextSlot* instr = new(zone()) HLoadContextSlot(context, variable);
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -3331,13 +3338,13 @@
int* total_size) {
if (max_depth <= 0) return false;
- FixedArrayBase* elements = boilerplate->elements();
+ Handle<FixedArrayBase> elements(boilerplate->elements());
if (elements->length() > 0 &&
elements->map() != HEAP->fixed_cow_array_map()) {
return false;
}
- FixedArray* properties = boilerplate->properties();
+ Handle<FixedArray> properties(boilerplate->properties());
if (properties->length() > 0) {
return false;
} else {
@@ -3463,11 +3470,33 @@
int length = subexprs->length();
HValue* context = environment()->LookupContext();
- HArrayLiteral* literal = new(zone()) HArrayLiteral(context,
- expr->constant_elements(),
- length,
- expr->literal_index(),
- expr->depth());
+ Handle<FixedArray> literals(environment()->closure()->literals());
+ Handle<Object> raw_boilerplate(literals->get(expr->literal_index()));
+
+ if (raw_boilerplate->IsUndefined()) {
+ raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate(
+ isolate(), literals, expr->constant_elements());
+ if (raw_boilerplate.is_null()) {
+ return Bailout("array boilerplate creation failed");
+ }
+ literals->set(expr->literal_index(), *raw_boilerplate);
+ if (JSObject::cast(*raw_boilerplate)->elements()->map() ==
+ isolate()->heap()->fixed_cow_array_map()) {
+ isolate()->counters()->cow_arrays_created_runtime()->Increment();
+ }
+ }
+
+ Handle<JSObject> boilerplate = Handle<JSObject>::cast(raw_boilerplate);
+ ElementsKind boilerplate_elements_kind =
+ Handle<JSObject>::cast(boilerplate)->GetElementsKind();
+
+ HArrayLiteral* literal = new(zone()) HArrayLiteral(
+ context,
+ boilerplate,
+ length,
+ expr->literal_index(),
+ expr->depth());
+
// The array is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
PushAndAdd(literal);
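
Array literals are now built against a real boilerplate object instead of a raw constant-elements pair: the function's literals array acts as a per-literal memo that holds undefined until the first graph build, at which point the boilerplate is created via Runtime::CreateArrayLiteralBoilerplate and cached for subsequent builds. The memoization shape, as a standalone sketch (std::optional stands in for the undefined slot; MakeBoilerplate is a hypothetical factory):

  #include <optional>
  #include <vector>

  struct Boilerplate { int elements_kind = 0; };  // placeholder payload

  Boilerplate* MakeBoilerplate() {  // may fail; the builder then bails out
    static Boilerplate instance;
    return &instance;
  }

  Boilerplate* GetOrCreate(std::vector<std::optional<Boilerplate*>>& literals,
                           int literal_index) {
    std::optional<Boilerplate*>& slot = literals[literal_index];
    if (!slot.has_value()) {           // slot still "undefined"
      Boilerplate* created = MakeBoilerplate();
      if (created == nullptr) return nullptr;
      slot = created;                  // memoize for later recompilations
    }
    return *slot;
  }
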
@@ -3490,42 +3519,28 @@
HValue* key = AddInstruction(
new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
Representation::Integer32()));
- HInstruction* elements_kind =
- AddInstruction(new(zone()) HElementsKind(literal));
- HBasicBlock* store_fast = graph()->CreateBasicBlock();
- // Two empty blocks to satisfy edge split form.
- HBasicBlock* store_fast_edgesplit1 = graph()->CreateBasicBlock();
- HBasicBlock* store_fast_edgesplit2 = graph()->CreateBasicBlock();
- HBasicBlock* store_generic = graph()->CreateBasicBlock();
- HBasicBlock* check_smi_only_elements = graph()->CreateBasicBlock();
- HBasicBlock* join = graph()->CreateBasicBlock();
- HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(value);
- smicheck->SetSuccessorAt(0, store_fast_edgesplit1);
- smicheck->SetSuccessorAt(1, check_smi_only_elements);
- current_block()->Finish(smicheck);
- store_fast_edgesplit1->Finish(new(zone()) HGoto(store_fast));
-
- set_current_block(check_smi_only_elements);
- HCompareConstantEqAndBranch* smi_elements_check =
- new(zone()) HCompareConstantEqAndBranch(elements_kind,
- FAST_ELEMENTS,
- Token::EQ_STRICT);
- smi_elements_check->SetSuccessorAt(0, store_fast_edgesplit2);
- smi_elements_check->SetSuccessorAt(1, store_generic);
- current_block()->Finish(smi_elements_check);
- store_fast_edgesplit2->Finish(new(zone()) HGoto(store_fast));
-
- set_current_block(store_fast);
- AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
- store_fast->Goto(join);
-
- set_current_block(store_generic);
- AddInstruction(BuildStoreKeyedGeneric(literal, key, value));
- store_generic->Goto(join);
-
- join->SetJoinId(expr->id());
- set_current_block(join);
+ switch (boilerplate_elements_kind) {
+ case FAST_SMI_ONLY_ELEMENTS:
+ // Smi-only arrays need a smi check.
+ AddInstruction(new(zone()) HCheckSmi(value));
+ // Fall through.
+ case FAST_ELEMENTS:
+ AddInstruction(new(zone()) HStoreKeyedFastElement(
+ elements,
+ key,
+ value,
+ boilerplate_elements_kind));
+ break;
+ case FAST_DOUBLE_ELEMENTS:
+ AddInstruction(new(zone()) HStoreKeyedFastDoubleElement(elements,
+ key,
+ value));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
AddSimulate(expr->GetIdForElement(i));
}
@@ -3538,7 +3553,7 @@
Handle<String> name,
LookupResult* lookup) {
type->LookupInDescriptors(NULL, *name, lookup);
- if (!lookup->IsPropertyOrTransition()) return false;
+ if (!lookup->IsFound()) return false;
if (lookup->type() == FIELD) return true;
return (lookup->type() == MAP_TRANSITION) &&
(type->unused_property_fields() > 0);
@@ -3566,7 +3581,8 @@
bool smi_and_map_check) {
if (smi_and_map_check) {
AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(new(zone()) HCheckMap(object, type));
+ AddInstruction(new(zone()) HCheckMap(object, type, NULL,
+ ALLOW_ELEMENT_TRANSITION_MAPS));
}
int index = ComputeStoredFieldIndex(type, name, lookup);
@@ -3586,7 +3602,7 @@
instr->set_transition(transition);
// TODO(fschneider): Record the new map type of the object in the IR to
// enable elimination of redundant checks after the transition store.
- instr->SetFlag(HValue::kChangesMaps);
+ instr->SetGVNFlag(kChangesMaps);
}
return instr;
}
@@ -3807,8 +3823,8 @@
if (proxy != NULL) {
Variable* var = proxy->var();
- if (var->mode() == CONST || var->mode() == LET) {
- return Bailout("unsupported let or const compound assignment");
+ if (var->mode() == LET) {
+ return Bailout("unsupported let compound assignment");
}
CHECK_ALIVE(VisitForValue(operation));
@@ -3823,6 +3839,9 @@
case Variable::PARAMETER:
case Variable::LOCAL:
+ if (var->mode() == CONST) {
+ return Bailout("unsupported const compound assignment");
+ }
Bind(var, Top());
break;
@@ -3843,9 +3862,25 @@
}
}
+ HStoreContextSlot::Mode mode;
+
+ switch (var->mode()) {
+ case LET:
+ mode = HStoreContextSlot::kCheckDeoptimize;
+ break;
+ case CONST:
+ return ast_context()->ReturnValue(Pop());
+ case CONST_HARMONY:
+ // This case is checked statically, so there is no need to perform
+ // checks here.
+ UNREACHABLE();
+ default:
+ mode = HStoreContextSlot::kNoCheck;
+ }
+
HValue* context = BuildContextChainWalk(var);
HStoreContextSlot* instr =
- new(zone()) HStoreContextSlot(context, var->index(), Top());
+ new(zone()) HStoreContextSlot(context, var->index(), mode, Top());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
AddSimulate(expr->AssignmentId());
@@ -3954,19 +3989,23 @@
HandlePropertyAssignment(expr);
} else if (proxy != NULL) {
Variable* var = proxy->var();
+
if (var->mode() == CONST) {
if (expr->op() != Token::INIT_CONST) {
+ CHECK_ALIVE(VisitForValue(expr->value()));
+ return ast_context()->ReturnValue(Pop());
+ }
+
+ if (var->IsStackAllocated()) {
+ // We insert a use of the old value to detect unsupported uses of const
+ // variables (e.g. initialization inside a loop).
+ HValue* old_value = environment()->Lookup(var);
+ AddInstruction(new HUseConst(old_value));
+ }
+ } else if (var->mode() == CONST_HARMONY) {
+ if (expr->op() != Token::INIT_CONST_HARMONY) {
return Bailout("non-initializer assignment to const");
}
- if (!var->IsStackAllocated()) {
- return Bailout("assignment to const context slot");
- }
- // We insert a use of the old value to detect unsupported uses of const
- // variables (e.g. initialization inside a loop).
- HValue* old_value = environment()->Lookup(var);
- AddInstruction(new HUseConst(old_value));
- } else if (var->mode() == LET) {
- return Bailout("unsupported assignment to let");
}
if (proxy->IsArguments()) return Bailout("assignment to arguments");
@@ -3983,6 +4022,14 @@
case Variable::PARAMETER:
case Variable::LOCAL: {
+ // Perform an initialization check for let-declared variables
+ // or parameters.
+ if (var->mode() == LET && expr->op() == Token::ASSIGN) {
+ HValue* env_value = environment()->Lookup(var);
+ if (env_value == graph()->GetConstantHole()) {
+ return Bailout("assignment to let variable before initialization");
+ }
+ }
// We do not allow the arguments object to occur in a context where it
// may escape, but assignments to stack-allocated locals are
// permitted.
@@ -3993,7 +4040,6 @@
}
case Variable::CONTEXT: {
- ASSERT(var->mode() != CONST);
// Bail out if we try to mutate a parameter value in a function using
// the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
@@ -4009,9 +4055,34 @@
}
CHECK_ALIVE(VisitForValue(expr->value()));
+ HStoreContextSlot::Mode mode;
+ if (expr->op() == Token::ASSIGN) {
+ switch (var->mode()) {
+ case LET:
+ mode = HStoreContextSlot::kCheckDeoptimize;
+ break;
+ case CONST:
+ return ast_context()->ReturnValue(Pop());
+ case CONST_HARMONY:
+ // This case is checked statically, so there is no need to perform
+ // checks here.
+ UNREACHABLE();
+ default:
+ mode = HStoreContextSlot::kNoCheck;
+ }
+ } else if (expr->op() == Token::INIT_VAR ||
+ expr->op() == Token::INIT_LET ||
+ expr->op() == Token::INIT_CONST_HARMONY) {
+ mode = HStoreContextSlot::kNoCheck;
+ } else {
+ ASSERT(expr->op() == Token::INIT_CONST);
+
+ mode = HStoreContextSlot::kCheckIgnoreAssignment;
+ }
+
HValue* context = BuildContextChainWalk(var);
- HStoreContextSlot* instr =
- new(zone()) HStoreContextSlot(context, var->index(), Top());
+ HStoreContextSlot* instr = new(zone()) HStoreContextSlot(
+ context, var->index(), mode, Top());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
AddSimulate(expr->AssignmentId());
@@ -4056,7 +4127,8 @@
bool smi_and_map_check) {
if (smi_and_map_check) {
AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(new(zone()) HCheckMap(object, type));
+ AddInstruction(new(zone()) HCheckMap(object, type, NULL,
+ ALLOW_ELEMENT_TRANSITION_MAPS));
}
int index = lookup->GetLocalFieldIndexFromMap(*type);
@@ -4088,15 +4160,16 @@
Handle<String> name) {
LookupResult lookup(isolate());
map->LookupInDescriptors(NULL, *name, &lookup);
- if (lookup.IsProperty() && lookup.type() == FIELD) {
+ if (lookup.IsFound() && lookup.type() == FIELD) {
return BuildLoadNamedField(obj,
expr,
map,
&lookup,
true);
- } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
+ } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
AddInstruction(new(zone()) HCheckNonSmi(obj));
- AddInstruction(new(zone()) HCheckMap(obj, map));
+ AddInstruction(new(zone()) HCheckMap(obj, map, NULL,
+ ALLOW_ELEMENT_TRANSITION_MAPS));
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
return new(zone()) HConstant(function, Representation::Tagged());
} else {
@@ -4165,12 +4238,20 @@
bool is_store) {
if (is_store) {
ASSERT(val != NULL);
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
- return new(zone()) HStoreKeyedFastDoubleElement(
- elements, checked_key, val);
- } else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
- return new(zone()) HStoreKeyedFastElement(
- elements, checked_key, val, elements_kind);
+ switch (elements_kind) {
+ case FAST_DOUBLE_ELEMENTS:
+ return new(zone()) HStoreKeyedFastDoubleElement(
+ elements, checked_key, val);
+ case FAST_SMI_ONLY_ELEMENTS:
+ // Smi-only arrays need a smi check.
+ AddInstruction(new(zone()) HCheckSmi(val));
+ // Fall through.
+ case FAST_ELEMENTS:
+ return new(zone()) HStoreKeyedFastElement(
+ elements, checked_key, val, elements_kind);
+ default:
+ UNREACHABLE();
+ return NULL;
}
}
// It's an element load (!is_store).
@@ -4190,14 +4271,6 @@
HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
bool fast_smi_only_elements = map->has_fast_smi_only_elements();
bool fast_elements = map->has_fast_elements();
- bool fast_double_elements = map->has_fast_double_elements();
- if (!fast_smi_only_elements &&
- !fast_elements &&
- !fast_double_elements &&
- !map->has_external_array_elements()) {
- return is_store ? BuildStoreKeyedGeneric(object, key, val)
- : BuildLoadKeyedGeneric(object, key);
- }
HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
if (is_store && (fast_elements || fast_smi_only_elements)) {
AddInstruction(new(zone()) HCheckMap(
@@ -4214,7 +4287,9 @@
return BuildExternalArrayElementAccess(external_elements, checked_key,
val, map->elements_kind(), is_store);
}
- ASSERT(fast_smi_only_elements || fast_elements || fast_double_elements);
+ ASSERT(fast_smi_only_elements ||
+ fast_elements ||
+ map->has_fast_double_elements());
if (map->instance_type() == JS_ARRAY_TYPE) {
length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck));
} else {
@@ -4286,8 +4361,14 @@
// If only one map is left after transitioning, handle this case
// monomorphically.
if (num_untransitionable_maps == 1) {
- HInstruction* instr = AddInstruction(BuildMonomorphicElementAccess(
- object, key, val, untransitionable_map, is_store));
+ HInstruction* instr = NULL;
+ if (untransitionable_map->has_slow_elements_kind()) {
+ instr = AddInstruction(is_store ? BuildStoreKeyedGeneric(object, key, val)
+ : BuildLoadKeyedGeneric(object, key));
+ } else {
+ instr = AddInstruction(BuildMonomorphicElementAccess(
+ object, key, val, untransitionable_map, is_store));
+ }
*has_side_effects |= instr->HasObservableSideEffects();
instr->set_position(position);
return is_store ? NULL : instr;
@@ -4341,9 +4422,6 @@
if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
elements_kind == FAST_ELEMENTS ||
elements_kind == FAST_DOUBLE_ELEMENTS) {
- if (is_store && elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- AddInstruction(new(zone()) HCheckSmi(val));
- }
if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) {
AddInstruction(new(zone()) HCheckMap(
elements, isolate()->factory()->fixed_array_map(),
@@ -4426,8 +4504,13 @@
HInstruction* instr = NULL;
if (expr->IsMonomorphic()) {
Handle<Map> map = expr->GetMonomorphicReceiverType();
- AddInstruction(new(zone()) HCheckNonSmi(obj));
- instr = BuildMonomorphicElementAccess(obj, key, val, map, is_store);
+ if (map->has_slow_elements_kind()) {
+ instr = is_store ? BuildStoreKeyedGeneric(obj, key, val)
+ : BuildLoadKeyedGeneric(obj, key);
+ } else {
+ AddInstruction(new(zone()) HCheckNonSmi(obj));
+ instr = BuildMonomorphicElementAccess(obj, key, val, map, is_store);
+ }
} else if (expr->GetReceiverTypes() != NULL &&
!expr->GetReceiverTypes()->is_empty()) {
return HandlePolymorphicElementAccess(
@@ -4586,7 +4669,8 @@
// its prototypes.
if (smi_and_map_check) {
AddInstruction(new(zone()) HCheckNonSmi(receiver));
- AddInstruction(new(zone()) HCheckMap(receiver, receiver_map));
+ AddInstruction(new(zone()) HCheckMap(receiver, receiver_map, NULL,
+ ALLOW_ELEMENT_TRANSITION_MAPS));
}
if (!expr->holder().is_null()) {
AddInstruction(new(zone()) HCheckPrototypeMaps(
@@ -4745,7 +4829,9 @@
TraceInline(target, caller, "inline depth limit reached");
return false;
}
- current_level++;
+ if (!env->outer()->is_arguments_adaptor()) {
+ current_level++;
+ }
env = env->outer();
}
@@ -4793,11 +4879,8 @@
return false;
}
- // Don't inline functions that uses the arguments object or that
- // have a mismatching number of parameters.
- int arity = expr->arguments()->length();
- if (function->scope()->arguments() != NULL ||
- arity != target_shared->formal_parameter_count()) {
+ // Don't inline functions that use the arguments object.
+ if (function->scope()->arguments() != NULL) {
TraceInline(target, caller, "target requires special argument handling");
return false;
}
@@ -4861,6 +4944,7 @@
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner_env =
environment()->CopyForInlining(target,
+ expr->arguments()->length(),
function,
undefined,
call_kind);
@@ -4880,6 +4964,7 @@
body_entry->SetJoinId(expr->ReturnId());
set_current_block(body_entry);
AddInstruction(new(zone()) HEnterInlined(target,
+ expr->arguments()->length(),
function,
call_kind));
VisitDeclarations(target_info.scope()->declarations());
@@ -5058,6 +5143,69 @@
return true;
}
break;
+ case kMathRandom:
+ if (argument_count == 1 && check_type == RECEIVER_MAP_CHECK) {
+ AddCheckConstantFunction(expr, receiver, receiver_map, true);
+ Drop(1);
+ HValue* context = environment()->LookupContext();
+ HGlobalObject* global_object = new(zone()) HGlobalObject(context);
+ AddInstruction(global_object);
+ HRandom* result = new(zone()) HRandom(global_object);
+ ast_context()->ReturnInstruction(result, expr->id());
+ return true;
+ }
+ break;
+ case kMathMax:
+ case kMathMin:
+ if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
+ AddCheckConstantFunction(expr, receiver, receiver_map, true);
+ HValue* right = Pop();
+ HValue* left = Pop();
+ // Do not inline if the return representation is not certain.
+ if (!left->representation().Equals(right->representation())) {
+ Push(left);
+ Push(right);
+ return false;
+ }
+
+ Pop(); // Pop receiver.
+ Token::Value op = (id == kMathMin) ? Token::LT : Token::GT;
+ HCompareIDAndBranch* compare = NULL;
+
+ if (left->representation().IsTagged()) {
+ HChange* left_cvt =
+ new(zone()) HChange(left, Representation::Double(), false, true);
+ left_cvt->SetFlag(HValue::kBailoutOnMinusZero);
+ AddInstruction(left_cvt);
+ HChange* right_cvt =
+ new(zone()) HChange(right, Representation::Double(), false, true);
+ right_cvt->SetFlag(HValue::kBailoutOnMinusZero);
+ AddInstruction(right_cvt);
+ compare = new(zone()) HCompareIDAndBranch(left_cvt, right_cvt, op);
+ compare->SetInputRepresentation(Representation::Double());
+ } else {
+ compare = new(zone()) HCompareIDAndBranch(left, right, op);
+ compare->SetInputRepresentation(left->representation());
+ }
+
+ HBasicBlock* return_left = graph()->CreateBasicBlock();
+ HBasicBlock* return_right = graph()->CreateBasicBlock();
+
+ compare->SetSuccessorAt(0, return_left);
+ compare->SetSuccessorAt(1, return_right);
+ current_block()->Finish(compare);
+
+ set_current_block(return_left);
+ Push(left);
+ set_current_block(return_right);
+ Push(right);
+
+ HBasicBlock* join = CreateJoin(return_left, return_right, expr->id());
+ set_current_block(join);
+ ast_context()->ReturnValue(Pop());
+ return true;
+ }
+ break;
default:
// Not yet supported for inlining.
break;
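
Inlined Math.min/Math.max compiles to a single compare-and-branch diamond: the true successor pushes the left operand, the false successor pushes the right one, and the join pops whichever survived; min compares with Token::LT, max with Token::GT, and tagged inputs are first converted to doubles with a minus-zero bailout so the compare runs in a known representation. The control shape, written as ordinary C++ (a model of the generated graph, not the builder code):

  double InlinedMinMax(double left, double right, bool is_min) {
    // compare->SetSuccessorAt(0, return_left) / (1, return_right)
    if (is_min ? (left < right) : (left > right)) {
      return left;   // return_left arm: Push(left)
    }
    return right;    // return_right arm: Push(right); ties and NaN land here
  }
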
@@ -5620,8 +5768,11 @@
}
HValue* context = BuildContextChainWalk(var);
+ HStoreContextSlot::Mode mode =
+ (var->mode() == LET || var->mode() == CONST_HARMONY)
+ ? HStoreContextSlot::kCheckDeoptimize : HStoreContextSlot::kNoCheck;
HStoreContextSlot* instr =
- new(zone()) HStoreContextSlot(context, var->index(), after);
+ new(zone()) HStoreContextSlot(context, var->index(), mode, after);
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
AddSimulate(expr->AssignmentId());
@@ -6027,6 +6178,15 @@
}
+static bool IsLiteralCompareBool(HValue* left,
+ Token::Value op,
+ HValue* right) {
+ return op == Token::EQ_STRICT &&
+ ((left->IsConstant() && HConstant::cast(left)->handle()->IsBoolean()) ||
+ (right->IsConstant() && HConstant::cast(right)->handle()->IsBoolean()));
+}
+
+
void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
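
The new IsLiteralCompareBool fast path relies on booleans being heap singletons: V8 allocates exactly one oddball object for true and one for false, so strict equality against a boolean literal reduces to reference equality (HCompareObjectEqAndBranch) with no type feedback needed. The identity argument, as a standalone illustration:

  #include <cassert>

  struct Oddball {};                  // stands in for V8's singleton booleans
  static Oddball the_true, the_false; // exactly one object per value

  bool StrictEqualsBooleanLiteral(const Oddball* value,
                                  const Oddball* literal) {
    return value == literal;          // pointer identity == strict equality
  }

  int main() {
    assert(StrictEqualsBooleanLiteral(&the_true, &the_true));
    assert(!StrictEqualsBooleanLiteral(&the_false, &the_true));
    return 0;
  }
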
@@ -6074,6 +6234,12 @@
if (IsLiteralCompareNil(left, op, right, f->null_value(), &sub_expr)) {
return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
}
+ if (IsLiteralCompareBool(left, op, right)) {
+ HCompareObjectEqAndBranch* result =
+ new(zone()) HCompareObjectEqAndBranch(left, right);
+ result->set_position(expr->position());
+ return ast_context()->ReturnControl(result, expr->id());
+ }
if (op == Token::INSTANCEOF) {
// Check to see if the rhs of the instanceof is a global function not
@@ -6089,7 +6255,7 @@
Handle<GlobalObject> global(info()->global_object());
LookupResult lookup(isolate());
global->Lookup(*name, &lookup);
- if (lookup.IsProperty() &&
+ if (lookup.IsFound() &&
lookup.type() == NORMAL &&
lookup.GetValue()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue()));
@@ -6122,14 +6288,29 @@
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: {
- AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
- AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
- HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
- result->set_position(expr->position());
- return ast_context()->ReturnControl(result, expr->id());
+ // Can we get away with map check and not instance type check?
+ Handle<Map> map = oracle()->GetCompareMap(expr);
+ if (!map.is_null()) {
+ AddInstruction(new(zone()) HCheckNonSmi(left));
+ AddInstruction(new(zone()) HCheckMap(left, map, NULL,
+ ALLOW_ELEMENT_TRANSITION_MAPS));
+ AddInstruction(new(zone()) HCheckNonSmi(right));
+ AddInstruction(new(zone()) HCheckMap(right, map, NULL,
+ ALLOW_ELEMENT_TRANSITION_MAPS));
+ HCompareObjectEqAndBranch* result =
+ new(zone()) HCompareObjectEqAndBranch(left, right);
+ result->set_position(expr->position());
+ return ast_context()->ReturnControl(result, expr->id());
+ } else {
+ AddInstruction(new(zone()) HCheckNonSmi(left));
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
+ AddInstruction(new(zone()) HCheckNonSmi(right));
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
+ HCompareObjectEqAndBranch* result =
+ new(zone()) HCompareObjectEqAndBranch(left, right);
+ result->set_position(expr->position());
+ return ast_context()->ReturnControl(result, expr->id());
+ }
}
default:
return Bailout("Unsupported non-primitive compare");
@@ -6194,28 +6375,27 @@
void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* function) {
- if (mode == LET || mode == CONST_HARMONY) {
- return Bailout("unsupported harmony declaration");
- }
Variable* var = proxy->var();
+ bool binding_needs_init =
+ (mode == CONST || mode == CONST_HARMONY || mode == LET);
switch (var->location()) {
case Variable::UNALLOCATED:
return Bailout("unsupported global declaration");
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT:
- if (mode == CONST || function != NULL) {
+ if (binding_needs_init || function != NULL) {
HValue* value = NULL;
- if (mode == CONST) {
- value = graph()->GetConstantHole();
- } else {
+ if (function != NULL) {
VisitForValue(function);
value = Pop();
+ } else {
+ value = graph()->GetConstantHole();
}
if (var->IsContextSlot()) {
HValue* context = environment()->LookupContext();
- HStoreContextSlot* store =
- new HStoreContextSlot(context, var->index(), value);
+ HStoreContextSlot* store = new HStoreContextSlot(
+ context, var->index(), HStoreContextSlot::kNoCheck, value);
AddInstruction(store);
if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
} else {
@@ -6488,7 +6668,11 @@
// Fast support for Math.random().
void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
- return Bailout("inlined runtime function: RandomHeapNumber");
+ HValue* context = environment()->LookupContext();
+ HGlobalObject* global_object = new(zone()) HGlobalObject(context);
+ AddInstruction(global_object);
+ HRandom* result = new(zone()) HRandom(global_object);
+ return ast_context()->ReturnInstruction(result, call->id());
}
@@ -6720,7 +6904,8 @@
outer_(outer),
pop_count_(0),
push_count_(0),
- ast_id_(AstNode::kNoNumber) {
+ ast_id_(AstNode::kNoNumber),
+ arguments_adaptor_(false) {
Initialize(scope->num_parameters() + 1, scope->num_stack_slots(), 0);
}
@@ -6734,11 +6919,28 @@
outer_(NULL),
pop_count_(0),
push_count_(0),
- ast_id_(other->ast_id()) {
+ ast_id_(other->ast_id()),
+ arguments_adaptor_(false) {
Initialize(other);
}
+HEnvironment::HEnvironment(HEnvironment* outer,
+ Handle<JSFunction> closure,
+ int arguments)
+ : closure_(closure),
+ values_(arguments),
+ assigned_variables_(0),
+ parameter_count_(arguments),
+ local_count_(0),
+ outer_(outer),
+ pop_count_(0),
+ push_count_(0),
+ ast_id_(AstNode::kNoNumber),
+ arguments_adaptor_(true) {
+}
+
+
void HEnvironment::Initialize(int parameter_count,
int local_count,
int stack_height) {
@@ -6762,6 +6964,7 @@
pop_count_ = other->pop_count_;
push_count_ = other->push_count_;
ast_id_ = other->ast_id_;
+ arguments_adaptor_ = other->arguments_adaptor_;
}
@@ -6865,20 +7068,36 @@
HEnvironment* HEnvironment::CopyForInlining(
Handle<JSFunction> target,
+ int arguments,
FunctionLiteral* function,
HConstant* undefined,
CallKind call_kind) const {
+ ASSERT(!is_arguments_adaptor());
+
+ Zone* zone = closure()->GetIsolate()->zone();
+
// Outer environment is a copy of this one without the arguments.
int arity = function->scope()->num_parameters();
+
HEnvironment* outer = Copy();
- outer->Drop(arity + 1); // Including receiver.
+ outer->Drop(arguments + 1); // Including receiver.
outer->ClearHistory();
- Zone* zone = closure()->GetIsolate()->zone();
+
+ if (arity != arguments) {
+ // Create artificial arguments adaptation environment.
+ outer = new(zone) HEnvironment(outer, target, arguments + 1);
+ for (int i = 0; i <= arguments; ++i) { // Include receiver.
+ outer->Push(ExpressionStackAt(arguments - i));
+ }
+ outer->ClearHistory();
+ }
+
HEnvironment* inner =
new(zone) HEnvironment(outer, function->scope(), target);
// Get the argument values from the original environment.
for (int i = 0; i <= arity; ++i) { // Include receiver.
- HValue* push = ExpressionStackAt(arity - i);
+ HValue* push = (i <= arguments) ?
+ ExpressionStackAt(arguments - i) : undefined;
inner->SetValueAt(i, push);
}
// If the function we are inlining is a strict mode function or a
@@ -6888,7 +7107,7 @@
call_kind == CALL_AS_FUNCTION) {
inner->SetValueAt(0, undefined);
}
- inner->SetValueAt(arity + 1, outer->LookupContext());
+ inner->SetValueAt(arity + 1, LookupContext());
for (int i = arity + 2; i < inner->length(); ++i) {
inner->SetValueAt(i, undefined);
}
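[Note: CopyForInlining now supports call sites whose actual argument count differs from the callee's formal parameter count. An artificial arguments-adaptor environment keeps every actual (so a deopt can rebuild the adaptor frame), while the inner environment binds missing formals to undefined. A rough C++ sketch of the padding rule, using plain strings as hypothetical stand-ins for HValues:

    #include <string>
    #include <vector>

    // Illustrative only: formals beyond the actual count are bound to
    // undefined; surplus actuals stay reachable via the adaptor environment.
    std::vector<std::string> BindFormals(const std::vector<std::string>& actuals,
                                         size_t arity,
                                         const std::string& undefined) {
      std::vector<std::string> formals(arity, undefined);
      for (size_t i = 0; i < arity && i < actuals.size(); ++i) {
        formals[i] = actuals[i];
      }
      return formals;
    }
]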
@@ -6904,7 +7123,7 @@
if (i == parameter_count()) stream->Add("specials\n");
if (i == parameter_count() + specials_count()) stream->Add("locals\n");
if (i == parameter_count() + specials_count() + local_count()) {
- stream->Add("expressions");
+ stream->Add("expressions\n");
}
HValue* val = values_.at(i);
stream->Add("%d: ", i);
@@ -6915,6 +7134,7 @@
}
stream->Add("\n");
}
+ PrintF("\n");
}
diff --git a/src/hydrogen.h b/src/hydrogen.h
index ded1356..c8cfc91 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -343,6 +343,17 @@
Scope* scope,
Handle<JSFunction> closure);
+ bool is_arguments_adaptor() const {
+ return arguments_adaptor_;
+ }
+
+ HEnvironment* DiscardInlined(bool drop_extra) {
+ HEnvironment* outer = outer_->is_arguments_adaptor() ?
+ outer_->outer_ : outer_;
+ if (drop_extra) outer->Drop(1);
+ return outer;
+ }
+
// Simple accessors.
Handle<JSFunction> closure() const { return closure_; }
const ZoneList<HValue*>* values() const { return &values_; }
@@ -427,6 +438,7 @@
// environment is the outer environment but the top expression stack
// elements are moved to an inner environment as parameters.
HEnvironment* CopyForInlining(Handle<JSFunction> target,
+ int arguments,
FunctionLiteral* function,
HConstant* undefined,
CallKind call_kind) const;
@@ -450,6 +462,10 @@
private:
explicit HEnvironment(const HEnvironment* other);
+ // Create an arguments adaptor environment.
+ HEnvironment(HEnvironment* outer, Handle<JSFunction> closure, int arguments);
+
+
// True if index is included in the expression stack part of the environment.
bool HasExpressionAt(int index) const;
@@ -478,6 +494,7 @@
int pop_count_;
int push_count_;
int ast_id_;
+ bool arguments_adaptor_;
};
@@ -870,7 +887,7 @@
Representation rep);
static Representation ToRepresentation(TypeInfo info);
- void SetupScope(Scope* scope);
+ void SetUpScope(Scope* scope);
virtual void VisitStatements(ZoneList<Statement*>* statements);
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
@@ -1056,10 +1073,10 @@
Resize(kInitialSize);
}
- void Kill(int flags);
+ void Kill(GVNFlagSet flags);
void Add(HValue* value) {
- present_flags_ |= value->flags();
+ present_flags_.Add(value->gvn_flags());
Insert(value);
}
@@ -1092,7 +1109,8 @@
int array_size_;
int lists_size_;
int count_; // The number of values stored in the HValueMap.
- int present_flags_; // All flags that are in any value in the HValueMap.
+ GVNFlagSet present_flags_; // All flags that are in any value in the
+ // HValueMap.
HValueMapListElement* array_; // Primary store - contains the first value
// with a given hash. Colliding elements are stored in linked lists.
HValueMapListElement* lists_; // The linked lists containing hash collisions.
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 5f67077..ef10922 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -30,13 +30,15 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// A light-weight IA32 Assembler.
#ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
#define V8_IA32_ASSEMBLER_IA32_INL_H_
+#include "ia32/assembler-ia32.h"
+
#include "cpu.h"
#include "debug.h"
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 322ba44..a42f632 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -350,7 +350,7 @@
}
#endif
- // Setup buffer pointers.
+ // Set up buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -377,7 +377,7 @@
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
- // Setup code descriptor.
+ // Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
@@ -388,8 +388,91 @@
void Assembler::Align(int m) {
ASSERT(IsPowerOf2(m));
- while ((pc_offset() & (m - 1)) != 0) {
- nop();
+ int mask = m - 1;
+ int addr = pc_offset();
+ Nop((m - (addr & mask)) & mask);
+}
+
+
+bool Assembler::IsNop(Address addr) {
+ Address a = addr;
+ while (*a == 0x66) a++;
+ if (*a == 0x90) return true;
+ if (a[0] == 0xf && a[1] == 0x1f) return true;
+ return false;
+}
+
+
+void Assembler::Nop(int bytes) {
+ EnsureSpace ensure_space(this);
+
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ // Older CPUs that do not support SSE2 may not support multibyte NOP
+ // instructions.
+ for (; bytes > 0; bytes--) {
+ EMIT(0x90);
+ }
+ return;
+ }
+
+ // Multi-byte NOPs from http://support.amd.com/us/Processor_TechDocs/40546.pdf
+ while (bytes > 0) {
+ switch (bytes) {
+ case 2:
+ EMIT(0x66);
+ case 1:
+ EMIT(0x90);
+ return;
+ case 3:
+ EMIT(0xf);
+ EMIT(0x1f);
+ EMIT(0);
+ return;
+ case 4:
+ EMIT(0xf);
+ EMIT(0x1f);
+ EMIT(0x40);
+ EMIT(0);
+ return;
+ case 6:
+ EMIT(0x66);
+ case 5:
+ EMIT(0xf);
+ EMIT(0x1f);
+ EMIT(0x44);
+ EMIT(0);
+ EMIT(0);
+ return;
+ case 7:
+ EMIT(0xf);
+ EMIT(0x1f);
+ EMIT(0x80);
+ EMIT(0);
+ EMIT(0);
+ EMIT(0);
+ EMIT(0);
+ return;
+ default:
+ case 11:
+ EMIT(0x66);
+ bytes--;
+ case 10:
+ EMIT(0x66);
+ bytes--;
+ case 9:
+ EMIT(0x66);
+ bytes--;
+ case 8:
+ EMIT(0xf);
+ EMIT(0x1f);
+ EMIT(0x84);
+ EMIT(0);
+ EMIT(0);
+ EMIT(0);
+ EMIT(0);
+ EMIT(0);
+ bytes -= 8;
+ }
}
}
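[Note: Align now emits one multi-byte NOP sequence instead of a run of single-byte 0x90s (cheaper to decode), and IsNop accordingly accepts both 0x66-prefixed 0x90 and the 0x0F 0x1F long-NOP forms. The padding size is the distance to the next m-byte boundary; a standalone sketch of that computation:

    #include <cassert>

    // Bytes of NOP needed to align 'offset' to a multiple of m (a power of
    // two); 0 when already aligned. Mirrors the arithmetic in Align above.
    int AlignmentPadding(int offset, int m) {
      assert((m & (m - 1)) == 0);
      int mask = m - 1;
      return (m - (offset & mask)) & mask;
    }

    // Example: AlignmentPadding(13, 8) == 3, AlignmentPadding(16, 8) == 0.
]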
@@ -463,13 +546,6 @@
}
-void Assembler::push(Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- EMIT(0x68);
- emit(handle);
-}
-
-
void Assembler::pop(Register dst) {
ASSERT(reloc_info_writer.last_pc() != NULL);
EnsureSpace ensure_space(this);
@@ -1644,6 +1720,27 @@
}
+void Assembler::f2xm1() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF0);
+}
+
+
+void Assembler::fscale() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xFD);
+}
+
+
+void Assembler::fninit() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ EMIT(0xE3);
+}
+
+
void Assembler::fadd(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC0, i);
@@ -1957,6 +2054,16 @@
}
+void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x2E);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -2162,6 +2269,19 @@
}
+void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
+ ASSERT(CpuFeatures::IsSupported(SSE4_1));
+ ASSERT(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x17);
+ emit_sse_operand(dst, src);
+ EMIT(imm8);
+}
+
+
void Assembler::pand(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2341,7 +2461,7 @@
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
- // Setup new buffer.
+ // Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index d798f81..9ed46fc 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -659,6 +659,7 @@
// possible to align the pc offset to a multiple
// of m. m must be a power of 2.
void Align(int m);
+ void Nop(int bytes = 1);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@@ -673,7 +674,6 @@
void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
- void push(Handle<Object> handle);
void pop(Register dst);
void pop(const Operand& dst);
@@ -926,6 +926,9 @@
void fsin();
void fptan();
void fyl2x();
+ void f2xm1();
+ void fscale();
+ void fninit();
void fadd(int i);
void fsub(int i);
@@ -983,6 +986,7 @@
void andpd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
+ void ucomisd(XMMRegister dst, const Operand& src);
enum RoundingMode {
kRoundToNearest = 0x0,
@@ -1017,6 +1021,7 @@
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
void movss(XMMRegister dst, XMMRegister src);
+ void extractps(Register dst, XMMRegister src, byte imm8);
void pand(XMMRegister dst, XMMRegister src);
void pxor(XMMRegister dst, XMMRegister src);
@@ -1080,7 +1085,7 @@
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
- static bool IsNop(Address addr) { return *addr == 0x90; }
+ static bool IsNop(Address addr);
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index e12e79a..28c97f0 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -333,7 +333,7 @@
__ push(ebx);
__ push(ebx);
- // Setup pointer to last argument.
+ // Set up pointer to last argument.
__ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
@@ -537,7 +537,7 @@
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Pass the function and deoptimization type to the runtime system.
+ // Pass deoptimization type to the runtime system.
__ push(Immediate(Smi::FromInt(static_cast<int>(type))));
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
@@ -1238,37 +1238,42 @@
false,
&prepare_generic_code_call);
__ IncrementCounter(counters->array_function_native(), 1);
- __ mov(eax, ebx);
- __ pop(ebx);
- if (construct_call) {
- __ pop(edi);
- }
- __ push(eax);
- // eax: JSArray
+ __ push(ebx);
+ __ mov(ebx, Operand(esp, kPointerSize));
// ebx: argc
// edx: elements_array_end (untagged)
// esp[0]: JSArray
- // esp[4]: return address
- // esp[8]: last argument
+ // esp[4]: argc
+ // esp[8]: constructor (only if construct_call)
+ // esp[12]: return address
+ // esp[16]: last argument
// Location of the last argument
- __ lea(edi, Operand(esp, 2 * kPointerSize));
+ int last_arg_offset = (construct_call ? 4 : 3) * kPointerSize;
+ __ lea(edi, Operand(esp, last_arg_offset));
// Location of the first array element (Parameter fill_with_holes to
- // AllocateJSArrayis false, so the FixedArray is returned in ecx).
+ // AllocateJSArray is false, so the FixedArray is returned in ecx).
__ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
+ Label has_non_smi_element;
+
// ebx: argc
// edx: location of the first array element
// edi: location of the last argument
// esp[0]: JSArray
- // esp[4]: return address
- // esp[8]: last argument
+ // esp[4]: argc
+ // esp[8]: constructor (only if construct_call)
+ // esp[12]: return address
+ // esp[16]: last argument
Label loop, entry;
__ mov(ecx, ebx);
__ jmp(&entry);
__ bind(&loop);
__ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
+ if (FLAG_smi_only_arrays) {
+ __ JumpIfNotSmi(eax, &has_non_smi_element);
+ }
__ mov(Operand(edx, 0), eax);
__ add(edx, Immediate(kPointerSize));
__ bind(&entry);
@@ -1278,13 +1283,21 @@
// Remove caller arguments from the stack and return.
// ebx: argc
// esp[0]: JSArray
- // esp[4]: return address
- // esp[8]: last argument
+ // esp[4]: argc
+ // esp[8]: constructor (only if construct_call)
+ // esp[12]: return address
+ // esp[16]: last argument
+ __ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
__ pop(eax);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
- __ push(ecx);
- __ ret(0);
+ __ pop(ebx);
+ __ lea(esp, Operand(esp, ebx, times_pointer_size,
+ last_arg_offset - kPointerSize));
+ __ jmp(ecx);
+
+ __ bind(&has_non_smi_element);
+ // Throw away the array that's only been partially constructed.
+ __ pop(eax);
+ __ UndoAllocationInNewSpace(eax);
// Restore argc and constructor before running the generic code.
__ bind(&prepare_generic_code_call);
@@ -1296,6 +1309,40 @@
}
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the InternalArray function.
+ __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin InternalArray function should be a map.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, "Unexpected initial map for InternalArray function");
+ __ CmpObjectType(ebx, MAP_TYPE, ecx);
+ __ Assert(equal, "Unexpected initial map for InternalArray function");
+ }
+
+ // Run the native code for the InternalArray function called as a normal
+ // function.
+ ArrayNativeCode(masm, false, &generic_array_code);
+
+ // Jump to the generic array code in case the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->InternalArrayCodeGeneric();
+ __ jmp(array_code, RelocInfo::CODE_TARGET);
+}
+
+
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1597,6 +1644,7 @@
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ call(edx);
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
// Leave frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ ret(0);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 3a286f0..eded335 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -128,14 +128,14 @@
// Get the function from the stack.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
- // Setup the object header.
+ // Set up the object header.
Factory* factory = masm->isolate()->factory();
__ mov(FieldOperand(eax, HeapObject::kMapOffset),
factory->function_context_map());
__ mov(FieldOperand(eax, Context::kLengthOffset),
Immediate(Smi::FromInt(length)));
- // Setup the fixed slots.
+ // Set up the fixed slots.
__ Set(ebx, Immediate(0)); // Set to NULL.
__ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
__ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
@@ -179,7 +179,7 @@
// Get the serialized scope info from the stack.
__ mov(ebx, Operand(esp, 2 * kPointerSize));
- // Setup the object header.
+ // Set up the object header.
Factory* factory = masm->isolate()->factory();
__ mov(FieldOperand(eax, HeapObject::kMapOffset),
factory->block_context_map());
@@ -202,7 +202,7 @@
__ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
- // Setup the fixed slots.
+ // Set up the fixed slots.
__ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
__ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
__ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
@@ -749,7 +749,7 @@
// Exponent word in scratch, exponent part of exponent word in scratch2.
// Zero in ecx.
// We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
// it rounds to zero.
const uint32_t zero_exponent =
(HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
@@ -2938,157 +2938,263 @@
void MathPowStub::Generate(MacroAssembler* masm) {
- // Registers are used as follows:
- // edx = base
- // eax = exponent
- // ecx = temporary, result
-
CpuFeatures::Scope use_sse2(SSE2);
- Label allocate_return, call_runtime;
-
- // Load input parameters.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // Save 1 in xmm3 - we need this several times later on.
- __ mov(ecx, Immediate(1));
- __ cvtsi2sd(xmm3, ecx);
-
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ JumpIfNotSmi(eax, &exponent_nonsmi);
- __ JumpIfNotSmi(edx, &base_nonsmi);
-
- // Optimized version when both exponent and base are smis.
- Label powi;
- __ SmiUntag(edx);
- __ cvtsi2sd(xmm0, edx);
- __ jmp(&powi);
- // exponent is smi and base is a heapnumber.
- __ bind(&base_nonsmi);
Factory* factory = masm->isolate()->factory();
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
+ const Register exponent = eax;
+ const Register base = edx;
+ const Register scratch = ecx;
+ const XMMRegister double_result = xmm3;
+ const XMMRegister double_base = xmm2;
+ const XMMRegister double_exponent = xmm1;
+ const XMMRegister double_scratch = xmm4;
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ Label call_runtime, done, exponent_not_smi, int_exponent;
- // Optimized version of pow if exponent is a smi.
- // xmm0 contains the base.
- __ bind(&powi);
- __ SmiUntag(eax);
+ // Save 1 in double_result - we need this several times later on.
+ __ mov(scratch, Immediate(1));
+ __ cvtsi2sd(double_result, scratch);
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ mov(edx, eax);
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi, unpack_exponent;
+ // The exponent and base are supplied as arguments on the stack.
+ // This can only happen if the stub is called from non-optimized code.
+ // Load input parameters from stack.
+ __ mov(base, Operand(esp, 2 * kPointerSize));
+ __ mov(exponent, Operand(esp, 1 * kPointerSize));
+
+ __ JumpIfSmi(base, &base_is_smi, Label::kNear);
+ __ cmp(FieldOperand(base, HeapObject::kMapOffset),
+ factory->heap_number_map());
+ __ j(not_equal, &call_runtime);
+
+ __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+ __ jmp(&unpack_exponent, Label::kNear);
+
+ __ bind(&base_is_smi);
+ __ SmiUntag(base);
+ __ cvtsi2sd(double_base, base);
+
+ __ bind(&unpack_exponent);
+ __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+ __ SmiUntag(exponent);
+ __ jmp(&int_exponent);
+
+ __ bind(&exponent_not_smi);
+ __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
+ factory->heap_number_map());
+ __ j(not_equal, &call_runtime);
+ __ movdbl(double_exponent,
+ FieldOperand(exponent, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+ __ SmiUntag(exponent);
+ __ jmp(&int_exponent);
+
+ __ bind(&exponent_not_smi);
+ __ movdbl(double_exponent,
+ FieldOperand(exponent, HeapNumber::kValueOffset));
+ }
+
+ if (exponent_type_ != INTEGER) {
+ Label fast_power;
+ // Detect integer exponents stored as double.
+ __ cvttsd2si(exponent, Operand(double_exponent));
+ // Skip to runtime if possibly NaN (indicated by the indefinite integer).
+ __ cmp(exponent, Immediate(0x80000000u));
+ __ j(equal, &call_runtime);
+ __ cvtsi2sd(double_scratch, exponent);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_exponent, double_scratch);
+ __ j(equal, &int_exponent);
+
+ if (exponent_type_ == ON_STACK) {
+ // Detect square root case. Crankshaft detects constant +/-0.5 at
+ // compile time and uses DoMathPowHalf instead. We then skip this check
+ // for non-constant cases of +/-0.5 as these hardly occur.
+ Label continue_sqrt, continue_rsqrt, not_plus_half;
+ // Test for 0.5.
+ // Load double_scratch with 0.5.
+ __ mov(scratch, Immediate(0x3F000000u));
+ __ movd(double_scratch, scratch);
+ __ cvtss2sd(double_scratch, double_scratch);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_scratch, double_exponent);
+ __ j(not_equal, &not_plus_half, Label::kNear);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+ // According to IEEE-754, single-precision -Infinity has the highest
+ // 9 bits set and the lowest 23 bits cleared.
+ __ mov(scratch, 0xFF800000u);
+ __ movd(double_scratch, scratch);
+ __ cvtss2sd(double_scratch, double_scratch);
+ __ ucomisd(double_base, double_scratch);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &continue_sqrt, Label::kNear);
+ __ j(carry, &continue_sqrt, Label::kNear);
+
+ // Set result to Infinity in the special case.
+ __ xorps(double_result, double_result);
+ __ subsd(double_result, double_scratch);
+ __ jmp(&done);
+
+ __ bind(&continue_sqrt);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorps(double_scratch, double_scratch);
+ __ addsd(double_scratch, double_base); // Convert -0 to +0.
+ __ sqrtsd(double_result, double_scratch);
+ __ jmp(&done);
+
+ // Test for -0.5.
+ __ bind(&not_plus_half);
+ // Load double_scratch with -0.5 by subtracting 1.
+ __ subsd(double_scratch, double_result);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_scratch, double_exponent);
+ __ j(not_equal, &fast_power, Label::kNear);
+
+ // Calculates reciprocal of square root of base. Check for the special
+ // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+ // According to IEEE-754, single-precision -Infinity has the highest
+ // 9 bits set and the lowest 23 bits cleared.
+ __ mov(scratch, 0xFF800000u);
+ __ movd(double_scratch, scratch);
+ __ cvtss2sd(double_scratch, double_scratch);
+ __ ucomisd(double_base, double_scratch);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &continue_rsqrt, Label::kNear);
+ __ j(carry, &continue_rsqrt, Label::kNear);
+
+ // Set result to 0 in the special case.
+ __ xorps(double_result, double_result);
+ __ jmp(&done);
+
+ __ bind(&continue_rsqrt);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorps(double_exponent, double_exponent);
+ __ addsd(double_exponent, double_base); // Convert -0 to +0.
+ __ sqrtsd(double_exponent, double_exponent);
+ __ divsd(double_result, double_exponent);
+ __ jmp(&done);
+ }
+
+ // Using FPU instructions to calculate power.
+ Label fast_power_failed;
+ __ bind(&fast_power);
+ __ fnclex(); // Clear flags to catch exceptions later.
+ // Transfer (B)ase and (E)xponent onto the FPU register stack.
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movdbl(Operand(esp, 0), double_exponent);
+ __ fld_d(Operand(esp, 0)); // E
+ __ movdbl(Operand(esp, 0), double_base);
+ __ fld_d(Operand(esp, 0)); // B, E
+
+ // Exponent is in st(1) and base is in st(0)
+ // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
+ // FYL2X calculates st(1) * log2(st(0))
+ __ fyl2x(); // X
+ __ fld(0); // X, X
+ __ frndint(); // rnd(X), X
+ __ fsub(1); // rnd(X), X-rnd(X)
+ __ fxch(1); // X - rnd(X), rnd(X)
+ // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
+ __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
+ __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
+ __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
+ // FSCALE calculates st(0) * 2^st(1)
+ __ fscale(); // 2^X, rnd(X)
+ __ fstp(1);
+ // Bail out to runtime in case of exceptions in the status word.
+ __ fnstsw_ax();
+ __ test_b(eax, 0x5F); // We check for all but the precision exception.
+ __ j(not_zero, &fast_power_failed, Label::kNear);
+ __ fstp_d(Operand(esp, 0));
+ __ movdbl(double_result, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ __ jmp(&done);
+
+ __ bind(&fast_power_failed);
+ __ fninit();
+ __ add(esp, Immediate(kDoubleSize));
+ __ jmp(&call_runtime);
+ }
+
+ // Calculate power with integer exponent.
+ __ bind(&int_exponent);
+ const XMMRegister double_scratch2 = double_exponent;
+ __ mov(scratch, exponent); // Back up exponent.
+ __ movsd(double_scratch, double_base); // Back up base.
+ __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
- Label no_neg;
- __ cmp(eax, 0);
- __ j(greater_equal, &no_neg, Label::kNear);
- __ neg(eax);
+ Label no_neg, while_true, no_multiply;
+ __ test(scratch, scratch);
+ __ j(positive, &no_neg, Label::kNear);
+ __ neg(scratch);
__ bind(&no_neg);
- // Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- Label while_true;
- Label no_multiply;
-
__ bind(&while_true);
- __ shr(eax, 1);
+ __ shr(scratch, 1);
__ j(not_carry, &no_multiply, Label::kNear);
- __ mulsd(xmm1, xmm0);
+ __ mulsd(double_result, double_scratch);
__ bind(&no_multiply);
- __ mulsd(xmm0, xmm0);
+
+ __ mulsd(double_scratch, double_scratch);
__ j(not_zero, &while_true);
- // base has the original value of the exponent - if the exponent is
- // negative return 1/result.
- __ test(edx, edx);
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ mov(ecx, Immediate(0x7FB00000));
- __ movd(xmm0, ecx);
- __ cvtss2sd(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- __ j(equal, &call_runtime);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
+ // The exponent register still holds the original exponent value - if it is
+ // negative, return 1/result.
+ __ test(exponent, exponent);
+ __ j(positive, &done);
+ __ divsd(double_scratch2, double_result);
+ __ movsd(double_result, double_scratch2);
+ // Test whether result is zero. Bail out to check for subnormal result.
+ // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+ __ xorps(double_scratch2, double_scratch2);
+ __ ucomisd(double_scratch2, double_result); // Result cannot be NaN.
+ // double_exponent aliased as double_scratch2 has already been overwritten
+ // and may not have contained the exponent value in the first place when the
+ // exponent is a smi. We reset it with exponent value before bailing out.
+ __ j(not_equal, &done);
+ __ cvtsi2sd(double_exponent, exponent);
- // exponent (or both) is a heapnumber - no matter what we should now work
- // on doubles.
- __ bind(&exponent_nonsmi);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- // Test if exponent is nan.
- __ ucomisd(xmm1, xmm1);
- __ j(parity_even, &call_runtime);
+ // Returning or bailing out.
+ Counters* counters = masm->isolate()->counters();
+ if (exponent_type_ == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
- Label base_not_smi;
- Label handle_special_cases;
- __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
- __ SmiUntag(edx);
- __ cvtsi2sd(xmm0, edx);
- __ jmp(&handle_special_cases, Label::kNear);
+ // The stub is called from non-optimized code, which expects the result
+ // as a heap number in eax (the exponent register).
+ __ bind(&done);
+ __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
+ __ IncrementCounter(counters->math_pow(), 1);
+ __ ret(2 * kPointerSize);
+ } else {
+ __ bind(&call_runtime);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(4, scratch);
+ __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
+ __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()), 4);
+ }
+ // Return value is in st(0) on ia32.
+ // Store it into the (fixed) result register.
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ movdbl(double_result, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
- __ bind(&base_not_smi);
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ and_(ecx, HeapNumber::kExponentMask);
- __ cmp(ecx, Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- __ j(greater_equal, &call_runtime);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
-
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- Label not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ mov(ecx, Immediate(0xBF000000));
- __ movd(xmm2, ecx);
- __ cvtss2sd(xmm2, xmm2);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half, Label::kNear);
-
- // Calculates reciprocal of square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // Test for 0.5.
- __ bind(&not_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &call_runtime);
- // Calculates square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
-
- __ bind(&allocate_return);
- __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
- __ mov(eax, ecx);
- __ ret(2 * kPointerSize);
-
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ bind(&done);
+ __ IncrementCounter(counters->math_pow(), 1);
+ __ ret(0);
+ }
}
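[Note: the rewritten stub handles integer exponents by binary exponentiation: each bit shifted out of the exponent decides whether the running square of the base is multiplied into the result. Transcribed to plain C++ for reference (the stub additionally bails out to the runtime for NaN exponents, FPU exceptions in the fast path, and subnormal results of the negative-exponent reciprocal):

    #include <cstdlib>

    double PowIntExponent(double base, int exponent) {
      double result = 1.0;         // double_result starts at 1.
      double b = base;             // double_scratch backs up the base.
      int e = std::abs(exponent);  // neg(scratch) for negative exponents.
      while (e != 0) {
        if (e & 1) result *= b;    // Taken when shr sets the carry flag.
        b *= b;                    // mulsd(double_scratch, double_scratch)
        e >>= 1;                   // shr(scratch, 1)
      }
      // Negative exponent: 1/result. Subnormals can make this differ from
      // pow(), which is why the stub re-checks a zero result and bails out.
      return exponent < 0 ? 1.0 / result : result;
    }
]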
@@ -3273,7 +3379,7 @@
__ mov(FieldOperand(eax, i), edx);
}
- // Setup the callee in-object property.
+ // Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
__ mov(edx, Operand(esp, 4 * kPointerSize));
__ mov(FieldOperand(eax, JSObject::kHeaderSize +
@@ -3286,7 +3392,7 @@
Heap::kArgumentsLengthIndex * kPointerSize),
ecx);
- // Setup the elements pointer in the allocated arguments object.
+ // Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, edi will point there, otherwise to the
// backing store.
__ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
@@ -3465,7 +3571,7 @@
// Get the parameters pointer from the stack.
__ mov(edx, Operand(esp, 2 * kPointerSize));
- // Setup the elements pointer in the allocated arguments object and
+ // Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
@@ -3617,7 +3723,7 @@
kShortExternalStringMask);
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be a flat ascii string. None of the following
+ // Any other flat string must be a flat ASCII string. None of the following
// string type tests will succeed if subject is not a string or a short
// external string.
__ and_(ebx, Immediate(kIsNotStringMask |
@@ -3666,16 +3772,16 @@
kStringRepresentationMask | kStringEncodingMask);
STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be sequential ascii or external.
+ // Any other flat string must be sequential ASCII or external.
__ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
kStringRepresentationMask);
__ j(not_zero, &external_string);
__ bind(&seq_ascii_string);
- // eax: subject string (flat ascii)
+ // eax: subject string (flat ASCII)
// ecx: RegExp data (FixedArray)
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(ecx, Immediate(1)); // Type is ascii.
+ __ Set(ecx, Immediate(1)); // Type is ASCII.
__ jmp(&check_code, Label::kNear);
__ bind(&seq_two_byte_string);
@@ -3692,7 +3798,7 @@
// eax: subject string
// edx: code
- // ecx: encoding of subject string (1 if ascii, 0 if two_byte);
+ // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
__ mov(ebx, Operand(esp, kPreviousIndexOffset));
@@ -3701,7 +3807,7 @@
// eax: subject string
// ebx: previous index
// edx: code
- // ecx: encoding of subject string (1 if ascii 0 if two_byte);
+ // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
// All checks done. Now push arguments for native regexp code.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->regexp_entry_native(), 1);
@@ -3741,7 +3847,7 @@
// esi: original subject string
// eax: underlying subject string
// ebx: previous index
- // ecx: encoding of subject string (1 if ascii 0 if two_byte);
+ // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
// edx: code
// Argument 4: End of string data
// Argument 3: Start of string data
@@ -4369,7 +4475,7 @@
__ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
&check_unequal_objects);
- // Inline comparison of ascii strings.
+ // Inline comparison of ASCII strings.
if (cc_ == equal) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
edx,
@@ -4844,7 +4950,7 @@
Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
- // Setup frame.
+ // Set up frame.
__ push(ebp);
__ mov(ebp, esp);
@@ -5322,7 +5428,7 @@
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiShiftSize == 0);
- // At this point code register contains smi tagged ascii char code.
+ // At this point code register contains smi tagged ASCII char code.
__ mov(result_, FieldOperand(result_,
code_, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -5369,7 +5475,7 @@
void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime, call_builtin;
+ Label call_runtime, call_builtin;
Builtins::JavaScript builtin_id = Builtins::ADD;
// Load the two arguments.
@@ -5378,14 +5484,14 @@
// Make sure that both arguments are strings if not known in advance.
if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfSmi(eax, &string_add_runtime);
+ __ JumpIfSmi(eax, &call_runtime);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &string_add_runtime);
+ __ j(above_equal, &call_runtime);
// First argument is a string, test second.
- __ JumpIfSmi(edx, &string_add_runtime);
+ __ JumpIfSmi(edx, &call_runtime);
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
- __ j(above_equal, &string_add_runtime);
+ __ j(above_equal, &call_runtime);
} else {
// Here at least one of the arguments is definitely a string.
// We convert the one that is not known to be a string.
@@ -5436,15 +5542,14 @@
__ add(ebx, ecx);
STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
// Handle exceptionally long strings in the runtime system.
- __ j(overflow, &string_add_runtime);
+ __ j(overflow, &call_runtime);
// Use the symbol table when adding two one character strings, as it
// helps later optimizations to return a symbol here.
__ cmp(ebx, Immediate(Smi::FromInt(2)));
__ j(not_equal, &longer_than_two);
- // Check that both strings are non-external ascii strings.
- __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
- &string_add_runtime);
+ // Check that both strings are non-external ASCII strings.
+ __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
// Get the two characters forming the new string.
__ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
@@ -5469,11 +5574,7 @@
__ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
__ bind(&make_two_character_string_no_reload);
__ IncrementCounter(counters->string_add_make_two_char(), 1);
- __ AllocateAsciiString(eax, // Result.
- 2, // Length.
- edi, // Scratch 1.
- edx, // Scratch 2.
- &string_add_runtime);
+ __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
// Pack both characters in ebx.
__ shl(ecx, kBitsPerByte);
__ or_(ebx, ecx);
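[Note: two one-character strings are concatenated by packing both bytes into a halfword so the pair can be probed in the symbol table and, failing that, stored with a single 16-bit move. The packing in isolation (little-endian, so the first character lands at the lower address):

    #include <cstdint>

    // Equivalent of 'shl(ecx, kBitsPerByte); or_(ebx, ecx)' above.
    uint16_t PackTwoAsciiChars(uint8_t first, uint8_t second) {
      return static_cast<uint16_t>(first | (second << 8));
    }
]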
@@ -5484,11 +5585,11 @@
__ bind(&longer_than_two);
// Check if resulting string will be flat.
- __ cmp(ebx, Immediate(Smi::FromInt(String::kMinNonFlatLength)));
+ __ cmp(ebx, Immediate(Smi::FromInt(ConsString::kMinLength)));
__ j(below, &string_add_flat_result);
// If result is not supposed to be flat, allocate a cons string object. If both
- // strings are ascii the result is an ascii cons string.
+ // strings are ASCII the result is an ASCII cons string.
Label non_ascii, allocated, ascii_data;
__ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
@@ -5500,8 +5601,8 @@
__ test(ecx, Immediate(kStringEncodingMask));
__ j(zero, &non_ascii);
__ bind(&ascii_data);
- // Allocate an acsii cons string.
- __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
+ // Allocate an ASCII cons string.
+ __ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
@@ -5515,7 +5616,7 @@
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
- // to contain only ascii characters.
+ // to contain only ASCII characters.
// ecx: first instance type AND second instance type.
// edi: second instance type.
__ test(ecx, Immediate(kAsciiDataHintMask));
@@ -5528,64 +5629,93 @@
__ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
__ j(equal, &ascii_data);
// Allocate a two byte cons string.
- __ AllocateTwoByteConsString(ecx, edi, no_reg, &string_add_runtime);
+ __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
__ jmp(&allocated);
- // Handle creating a flat result. First check that both strings are not
- // external strings.
+ // We cannot encounter sliced strings or cons strings here since:
+ STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
+ // Handle creating a flat result from either external or sequential strings.
+ // Locate the first characters' locations.
// eax: first string
// ebx: length of resulting flat string as a smi
// edx: second string
+ Label first_prepared, second_prepared;
+ Label first_is_sequential, second_is_sequential;
__ bind(&string_add_flat_result);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ and_(ecx, kStringRepresentationMask);
- __ cmp(ecx, kExternalStringTag);
- __ j(equal, &string_add_runtime);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ and_(ecx, kStringRepresentationMask);
- __ cmp(ecx, kExternalStringTag);
- __ j(equal, &string_add_runtime);
- // We cannot encounter sliced strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
- // Now check if both strings are ascii strings.
- // eax: first string
- // ebx: length of resulting flat string as a smi
- // edx: second string
- Label non_ascii_string_add_flat_result;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
- __ j(zero, &non_ascii_string_add_flat_result);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
- __ j(zero, &string_add_runtime);
+ // ecx: instance type of first string
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test_b(ecx, kStringRepresentationMask);
+ __ j(zero, &first_is_sequential, Label::kNear);
+ // Rule out short external string and load string resource.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ test_b(ecx, kShortExternalStringMask);
+ __ j(not_zero, &call_runtime);
+ __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ jmp(&first_prepared, Label::kNear);
+ __ bind(&first_is_sequential);
+ __ add(eax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ bind(&first_prepared);
- // Both strings are ascii strings. As they are short they are both flat.
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
+ // Check whether both strings have same encoding.
+ // edi: instance type of second string
+ __ xor_(ecx, edi);
+ __ test_b(ecx, kStringEncodingMask);
+ __ j(not_zero, &call_runtime);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test_b(edi, kStringRepresentationMask);
+ __ j(zero, &second_is_sequential, Label::kNear);
+ // Rule out short external string and load string resource.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ test_b(edi, kShortExternalStringMask);
+ __ j(not_zero, &call_runtime);
+ __ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ jmp(&second_prepared, Label::kNear);
+ __ bind(&second_is_sequential);
+ __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ bind(&second_prepared);
+
+ // Push the addresses of both strings' first characters onto the stack.
+ __ push(edx);
+ __ push(eax);
+
+ Label non_ascii_string_add_flat_result, call_runtime_drop_two;
+ // edi: instance type of second string
+ // First string and second string have the same encoding.
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ test_b(edi, kStringEncodingMask);
+ __ j(zero, &non_ascii_string_add_flat_result);
+
+ // Both strings are ASCII strings.
// ebx: length of resulting flat string as a smi
__ SmiUntag(ebx);
- __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+ __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
// eax: result string
__ mov(ecx, eax);
// Locate first character of result.
__ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Load first argument and locate first character.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
+ // Load first argument's length and first character location. Account for
+ // values currently on the stack when fetching arguments from it.
+ __ mov(edx, Operand(esp, 4 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ pop(edx);
// eax: result string
// ecx: first character of result
// edx: first char of first argument
// edi: length of first argument
StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
- // Load second argument and locate first character.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
+ // Load second argument's length and first character location. Account for
+ // values currently on the stack when fetching arguments from it.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ pop(edx);
// eax: result string
// ecx: next character of result
// edx: first char of second argument
@@ -5599,34 +5729,30 @@
// ebx: length of resulting flat string as a smi
// edx: second string
__ bind(&non_ascii_string_add_flat_result);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
- __ j(not_zero, &string_add_runtime);
- // Both strings are two byte strings. As they are short they are both
- // flat.
+ // Both strings are two byte strings.
__ SmiUntag(ebx);
- __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+ __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
// eax: result string
__ mov(ecx, eax);
// Locate first character of result.
- __ add(ecx,
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Load first argument and locate first character.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Load first argument's length and first character location. Account for
+ // values currently on the stack when fetching arguments from it.
+ __ mov(edx, Operand(esp, 4 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(edx,
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ pop(edx);
// eax: result string
// ecx: first character of result
// edx: first char of first argument
// edi: length of first argument
StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
- // Load second argument and locate first character.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
+ // Load second argument's length and first character location. Account for
+ // values currently on the stack when fetching arguments from it.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ pop(edx);
// eax: result string
// ecx: next character of result
// edx: first char of second argument
@@ -5635,8 +5761,11 @@
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
+ // Recover stack pointer before jumping to runtime.
+ __ bind(&call_runtime_drop_two);
+ __ Drop(2);
// Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
+ __ bind(&call_runtime);
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
@@ -5872,7 +6001,7 @@
__ push(mask);
Register temp = mask;
- // Check that the candidate is a non-external ascii string.
+ // Check that the candidate is a non-external ASCII string.
__ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
__ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(
@@ -5905,10 +6034,25 @@
Register hash,
Register character,
Register scratch) {
- // hash = character + (character << 10);
- __ mov(hash, character);
- __ shl(hash, 10);
- __ add(hash, character);
+ // hash = (seed + character) + ((seed + character) << 10);
+ if (Serializer::enabled()) {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(masm->isolate());
+ __ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
+ __ mov(scratch, Operand::StaticArray(scratch,
+ times_pointer_size,
+ roots_array_start));
+ __ SmiUntag(scratch);
+ __ add(scratch, character);
+ __ mov(hash, scratch);
+ __ shl(scratch, 10);
+ __ add(hash, scratch);
+ } else {
+ int32_t seed = masm->isolate()->heap()->HashSeed();
+ __ lea(scratch, Operand(character, seed));
+ __ shl(scratch, 10);
+ __ lea(hash, Operand(scratch, character, times_1, seed));
+ }
// hash ^= hash >> 6;
__ mov(scratch, hash);
__ shr(scratch, 6);
@@ -5949,14 +6093,12 @@
__ shl(scratch, 15);
__ add(hash, scratch);
- uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
- __ and_(hash, kHashShiftCutOffMask);
+ __ and_(hash, String::kHashBitMask);
// if (hash == 0) hash = 27;
Label hash_not_zero;
- __ test(hash, hash);
__ j(not_zero, &hash_not_zero, Label::kNear);
- __ mov(hash, Immediate(27));
+ __ mov(hash, Immediate(StringHasher::kZeroHash));
__ bind(&hash_not_zero);
}
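[Note: both hash helpers now fold the isolate's hash seed into the classic one-at-a-time string hash; under the serializer the seed is loaded from the roots array, otherwise it is baked into the code via lea. Assembled in plain C++ (assuming the standard one-at-a-time middle steps, which this hunk only partially shows):

    #include <cstdint>

    // Seeded one-at-a-time hash as generated by GenerateHashInit and
    // GenerateHashGetHash; kZeroHash replaces the former literal 27.
    uint32_t SeededStringHash(uint32_t seed, const uint8_t* chars, int length,
                              uint32_t hash_bit_mask, uint32_t zero_hash) {
      uint32_t hash = seed;
      for (int i = 0; i < length; ++i) {
        hash += chars[i];    // hash = seed + character
        hash += hash << 10;  // + ((seed + character) << 10)
        hash ^= hash >> 6;
      }
      hash += hash << 3;     // Finalization; the hunk shows the final
      hash ^= hash >> 11;    // shift-by-15 add and the mask with
      hash += hash << 15;    // String::kHashBitMask.
      hash &= hash_bit_mask;
      return hash == 0 ? zero_hash : hash;
    }
]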
@@ -5988,20 +6130,23 @@
__ JumpIfNotSmi(edx, &runtime);
__ sub(ecx, edx);
__ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
- Label return_eax;
- __ j(equal, &return_eax);
+ Label not_original_string;
+ __ j(not_equal, &not_original_string, Label::kNear);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
+ __ bind(&not_original_string);
// Special handling of sub-strings of length 1 and 2. One character strings
// are handled in the runtime system (looked up in the single character
// cache). Two character strings are looked for in the symbol cache.
- __ SmiUntag(ecx); // Result length is no longer smi.
- __ cmp(ecx, 2);
+ __ cmp(ecx, Immediate(Smi::FromInt(2)));
__ j(greater, &result_longer_than_two);
__ j(less, &runtime);
// Sub string of length 2 requested.
// eax: string
// ebx: instance type
- // ecx: sub string length (value is 2)
+ // ecx: sub string length (smi, value is 2)
// edx: from index (smi)
__ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
@@ -6012,66 +6157,73 @@
FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
// Try to lookup two character string in symbol table.
- Label make_two_character_string;
+ Label combine_two_char, save_two_char;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, ebx, ecx, eax, edx, edi,
- &make_two_character_string, &make_two_character_string);
+ masm, ebx, ecx, eax, edx, edi, &combine_two_char, &save_two_char);
+ __ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
- __ bind(&make_two_character_string);
- // Setup registers for allocating the two character string.
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ bind(&combine_two_char);
+ __ shl(ecx, kBitsPerByte);
+ __ or_(ebx, ecx);
+ __ bind(&save_two_char);
+ __ AllocateAsciiString(eax, 2, ecx, edx, &runtime);
+ __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&result_longer_than_two);
+ // eax: string
+ // ebx: instance type
+ // ecx: sub string length (smi)
+ // edx: from index (smi)
+ // Deal with different string types: update the index if necessary
+ // and put the underlying string into edi.
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+ __ test(ebx, Immediate(kIsIndirectStringMask));
+ __ j(zero, &seq_or_external_string, Label::kNear);
+
+ Factory* factory = masm->isolate()->factory();
+ __ test(ebx, Immediate(kSlicedNotConsMask));
+ __ j(not_zero, &sliced_string, Label::kNear);
+ // Cons string. Check whether it is flat, then fetch first part.
+ // Flat cons strings have an empty second part.
+ __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
+ factory->empty_string());
+ __ j(not_equal, &runtime);
+ __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
+ // Update instance type.
+ __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ Set(ecx, Immediate(2));
+ __ jmp(&underlying_unpacked, Label::kNear);
+
+ __ bind(&sliced_string);
+ // Sliced string. Fetch parent and adjust start index by offset.
+ __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
+ __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
+ // Update instance type.
+ __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked, Label::kNear);
+
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the expected register.
+ __ mov(edi, eax);
+
+ __ bind(&underlying_unpacked);
if (FLAG_string_slices) {
Label copy_routine;
- // If coming from the make_two_character_string path, the string
- // is too short to be sliced anyways.
- STATIC_ASSERT(2 < SlicedString::kMinLength);
- __ jmp(&copy_routine);
- __ bind(&result_longer_than_two);
-
- // eax: string
- // ebx: instance type
- // ecx: sub string length
- // edx: from index (smi)
- Label allocate_slice, sliced_string, seq_or_external_string;
- __ cmp(ecx, SlicedString::kMinLength);
+ // edi: underlying subject string
+ // ebx: instance type of underlying subject string
+ // edx: adjusted start index (smi)
+ // ecx: length (smi)
+ __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
// Short slice. Copy instead of slicing.
__ j(less, &copy_routine);
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ test(ebx, Immediate(kIsIndirectStringMask));
- __ j(zero, &seq_or_external_string, Label::kNear);
-
- Factory* factory = masm->isolate()->factory();
- __ test(ebx, Immediate(kSlicedNotConsMask));
- __ j(not_zero, &sliced_string, Label::kNear);
- // Cons string. Check whether it is flat, then fetch first part.
- __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
- factory->empty_string());
- __ j(not_equal, &runtime);
- __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
- __ jmp(&allocate_slice, Label::kNear);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
- __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
- __ jmp(&allocate_slice, Label::kNear);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the correct register.
- __ mov(edi, eax);
-
- __ bind(&allocate_slice);
- // edi: underlying subject string
- // ebx: instance type of original subject string
- // edx: offset
- // ecx: length
// Allocate new sliced string. At this point we do not reload the instance
// type including the string encoding because we simply rely on the info
// provided by the original string. It does not matter if the original
@@ -6088,27 +6240,49 @@
__ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
__ bind(&set_slice_header);
__ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
- __ SmiTag(ecx);
__ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
__ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
__ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
- __ jmp(&return_eax);
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
__ bind(©_routine);
- } else {
- __ bind(&result_longer_than_two);
}
- // eax: string
- // ebx: instance type
- // ecx: result string length
- // Check for flat ascii string
- Label non_ascii_flat;
- __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
+ // edi: underlying subject string
+ // ebx: instance type of underlying subject string
+ // edx: adjusted start index (smi)
+ // ecx: length (smi)
+ // The subject string can only be an external or a sequential string of
+ // either encoding at this point.
+ Label two_byte_sequential, runtime_drop_two, sequential_string;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test_b(ebx, kExternalStringTag);
+ __ j(zero, &sequential_string);
- // Allocate the result.
- __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
+ // Handle external string.
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ test_b(ebx, kShortExternalStringMask);
+ __ j(not_zero, &runtime);
+ __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&sequential_string);
+ // Stash away (adjusted) index and (underlying) string.
+ __ push(edx);
+ __ push(edi);
+ __ SmiUntag(ecx);
+ STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ __ test_b(ebx, kStringEncodingMask);
+ __ j(zero, &two_byte_sequential);
+
+ // Sequential ASCII string. Allocate the result.
+ __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
// eax: result string
// ecx: result string length
@@ -6117,11 +6291,10 @@
__ mov(edi, eax);
__ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
- __ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(esi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ __ pop(esi);
+ __ pop(ebx);
__ SmiUntag(ebx);
- __ add(esi, ebx);
+ __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
// eax: result string
// ecx: result length
@@ -6130,20 +6303,12 @@
// esi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
__ mov(esi, edx); // Restore esi.
- Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
- __ bind(&non_ascii_flat);
- // eax: string
- // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
- // ecx: result string length
- // Check for flat two byte string
- __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
- __ j(not_equal, &runtime);
-
- // Allocate the result.
- __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
+ __ bind(&two_byte_sequential);
+ // Sequential two-byte string. Allocate the result.
+ __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
// eax: result string
// ecx: result string length
@@ -6153,14 +6318,13 @@
__ add(edi,
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
- __ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(esi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ __ pop(esi);
+ __ pop(ebx);
// As from is a smi, it is 2 times the value, which matches the size of a
// two-byte character.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(esi, ebx);
+ __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));
// eax: result string
// ecx: result length
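The times_1 scale in the lea works because smi tagging already doubled the index; a worked check of the arithmetic the STATIC_ASSERTs pin down:

    #include <cassert>

    int main() {
      int from = 7;          // character index into the two-byte string
      int smi = from << 1;   // ia32 smi encoding: value shifted left by one
      // Each two-byte character occupies 2 bytes, so the tagged smi value
      // is already the byte offset; no untag or extra scaling is needed.
      assert(smi == from * 2);
      return 0;
    }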
@@ -6169,11 +6333,13 @@
// esi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
__ mov(esi, edx); // Restore esi.
-
- __ bind(&return_eax);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
+ // Drop pushed values on the stack before tail call.
+ __ bind(&runtime_drop_two);
+ __ Drop(2);
+
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
@@ -6328,10 +6494,10 @@
__ bind(¬_same);
- // Check that both objects are sequential ascii strings.
+ // Check that both objects are sequential ASCII strings.
__ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
- // Compare flat ascii strings.
+ // Compare flat ASCII strings.
// Drop arguments from the stack.
__ pop(ecx);
__ add(esp, Immediate(2 * kPointerSize));
@@ -6565,33 +6731,45 @@
}
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- // Save the registers.
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
- __ push(ecx);
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ Label miss;
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
+ __ JumpIfSmi(ecx, &miss, Label::kNear);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(ecx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+ __ cmp(ebx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+
+ __ sub(eax, edx);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
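The bare subtraction is enough here: under the CompareIC convention zero means equal, and two objects that both have the known map can only be equal by identity. A scalar model of the fast path:

    #include <cstdint>

    // Assumption: the IC encodes "equal" as 0, anything nonzero as not equal.
    int32_t CompareSameMapObjects(uintptr_t eax, uintptr_t edx) {
      return static_cast<int32_t>(eax - edx);  // 0 iff the same reference
    }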
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
masm->isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
+ __ push(edx); // Preserve edx and eax.
+ __ push(eax);
+ __ push(edx); // And also use them as the arguments.
__ push(eax);
__ push(Immediate(Smi::FromInt(op_)));
__ CallExternalReference(miss, 3);
+ // Compute the entry point of the rewritten stub.
+ __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
+ __ pop(eax);
+ __ pop(edx);
}
- // Compute the entry point of the rewritten stub.
- __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
-
- // Restore registers.
- __ pop(ecx);
- __ pop(eax);
- __ pop(edx);
- __ push(ecx);
-
// Do a tail call to the rewritten stub.
__ jmp(edi);
}
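The double push above sets up the following stack (top of stack last). The lower pair survives the runtime call as saved copies while the upper pair plus the op are consumed as arguments, which is why the entry point must be computed from eax (the returned Code object) before the pops restore it:

    //   edx         saved copy, restored by the final pop
    //   eax         saved copy
    //   edx         argument 1 of kCompareIC_Miss
    //   eax         argument 2
    //   Smi(op_)    argument 3, removed along with the others by the call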
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index 57e66df..9eabb2a 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -41,7 +41,7 @@
namespace v8 {
namespace internal {
-void CPU::Setup() {
+void CPU::SetUp() {
CpuFeatures::Probe();
}
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 2649560..b37b54b 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -258,9 +258,7 @@
Label check_codesize;
__ bind(&check_codesize);
__ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
- __ nop();
- }
+ __ Nop(Assembler::kDebugBreakSlotLength);
ASSERT_EQ(Assembler::kDebugBreakSlotLength,
masm->SizeOfCodeGeneratedSince(&check_codesize));
}
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index eeee4f2..14f2675 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -231,8 +231,8 @@
ASSERT(*(call_target_address - 3) == 0x73 && // jae
*(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x90; // nop
- *(call_target_address - 2) = 0x90; // nop
+ *(call_target_address - 3) = 0x66; // 2 byte nop part 1
+ *(call_target_address - 2) = 0x90; // 2 byte nop part 2
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
@@ -250,8 +250,8 @@
Assembler::target_address_at(call_target_address));
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
- ASSERT(*(call_target_address - 3) == 0x90 && // nop
- *(call_target_address - 2) == 0x90 && // nop
+ ASSERT(*(call_target_address - 3) == 0x66 && // 2 byte nop part 1
+ *(call_target_address - 2) == 0x90 && // 2 byte nop part 2
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x73; // jae
*(call_target_address - 2) = 0x07; // offset
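A byte-level sketch of the patch site covered by the two hunks above; the 0x66 0x90 form is presumably preferred over two 0x90s so the patched bytes always decode as a single instruction:

    #include <cstdint>

    // call_target_address points at the call's 32-bit operand, so:
    //   unpatched: 73 07 e8 <rel32>   ; jae +7, call <stack check>
    //   patched:   66 90 e8 <rel32>   ; 2-byte nop, call <deopt entry>
    void PatchStackCheck(uint8_t* call_target_address) {
      call_target_address[-3] = 0x66;  // was 0x73 (jae)
      call_target_address[-2] = 0x90;  // was 0x07 (branch offset)
      // call_target_address[-1] stays 0xe8; only the rel32 is retargeted
      // via Assembler::set_target_address_at.
    }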
@@ -299,12 +299,13 @@
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
+ iterator.Next(); // Drop JS frames count.
ASSERT(count == 1);
USE(count);
opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
- ASSERT(Translation::FRAME == opcode);
+ ASSERT(Translation::JS_FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
@@ -340,9 +341,7 @@
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
-#ifdef DEBUG
- output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
-#endif
+ output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -406,7 +405,7 @@
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
- // Setup the frame pointer and the context pointer.
+ // Set up the frame pointer and the context pointer.
// All OSR stack frames are dynamically aligned to an 8-byte boundary.
int frame_pointer = input_->GetRegister(ebp.code());
if ((frame_pointer & 0x4) == 0) {
@@ -437,13 +436,112 @@
}
-void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
- int frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- USE(opcode);
- ASSERT(Translation::FRAME == opcode);
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
+ int frame_index) {
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
+ }
+
+ unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
+
+ // An arguments adaptor frame cannot be topmost or bottommost.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous
+ // frame's top and this frame's size.
+ uint32_t top_address;
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = height;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ input_offset -= (parameter_count * kPointerSize);
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // A marker value is used in place of the context.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t context = reinterpret_cast<intptr_t>(
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ output_frame->SetFrameSlot(output_offset, context);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
+ top_address + output_offset, output_offset, context);
+ }
+
+ // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Number of incoming arguments.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
+ top_address + output_offset, output_offset, value, height - 1);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Builtins* builtins = isolate_->builtins();
+ Code* adaptor_trampoline =
+ builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
+ uint32_t pc = reinterpret_cast<uint32_t>(
+ adaptor_trampoline->instruction_start() +
+ isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
+ output_frame->SetPc(pc);
+}
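Read against the writes above, the finished adaptor frame looks like this (higher lines are higher addresses; height counts the receiver, hence argc == height - 1):

    // top_address + output_frame_size
    //   arg[height-1] ... arg[0]    height translated parameter slots
    //   caller's pc                 copied from output_[frame_index - 1]
    //   caller's fp                 fp_value points at this slot
    //   Smi(ARGUMENTS_ADAPTOR)      sentinel in the context slot
    //   function                    the adapted JSFunction
    //   Smi(height - 1)             argc, excluding the receiver
    // top_address                   output_offset reaches 0 here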
+
+
+void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
+ int frame_index) {
int node_id = iterator->Next();
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
@@ -463,9 +561,7 @@
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
-#ifdef DEBUG
- output_frame->SetKind(Code::FUNCTION);
-#endif
+ output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index da22390..b5ddcca 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -763,10 +763,13 @@
case 0xEB: mnem = "fldpi"; break;
case 0xED: mnem = "fldln2"; break;
case 0xEE: mnem = "fldz"; break;
+ case 0xF0: mnem = "f2xm1"; break;
case 0xF1: mnem = "fyl2x"; break;
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
case 0xF8: mnem = "fprem"; break;
+ case 0xFC: mnem = "frndint"; break;
+ case 0xFD: mnem = "fscale"; break;
case 0xFE: mnem = "fsin"; break;
case 0xFF: mnem = "fcos"; break;
default: UnimplementedInstruction();
@@ -788,6 +791,8 @@
has_register = true;
} else if (modrm_byte == 0xE2) {
mnem = "fclex";
+ } else if (modrm_byte == 0xE3) {
+ mnem = "fninit";
} else {
UnimplementedInstruction();
}
@@ -987,7 +992,7 @@
break;
case 0x0F:
- { byte f0byte = *(data+1);
+ { byte f0byte = data[1];
const char* f0mnem = F0Mnem(f0byte);
if (f0byte == 0x18) {
int mod, regop, rm;
@@ -995,6 +1000,25 @@
const char* suffix[] = {"nta", "1", "2", "3"};
AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
data += PrintRightOperand(data);
+ } else if (f0byte == 0x1F && data[2] == 0) {
+ AppendToBuffer("nop"); // 3 byte nop.
+ data += 3;
+ } else if (f0byte == 0x1F && data[2] == 0x40 && data[3] == 0) {
+ AppendToBuffer("nop"); // 4 byte nop.
+ data += 4;
+ } else if (f0byte == 0x1F && data[2] == 0x44 && data[3] == 0 &&
+ data[4] == 0) {
+ AppendToBuffer("nop"); // 5 byte nop.
+ data += 5;
+ } else if (f0byte == 0x1F && data[2] == 0x80 && data[3] == 0 &&
+ data[4] == 0 && data[5] == 0 && data[6] == 0) {
+ AppendToBuffer("nop"); // 7 byte nop.
+ data += 7;
+ } else if (f0byte == 0x1F && data[2] == 0x84 && data[3] == 0 &&
+ data[4] == 0 && data[5] == 0 && data[6] == 0 &&
+ data[7] == 0) {
+ AppendToBuffer("nop"); // 8 byte nop.
+ data += 8;
} else if (f0byte == 0xA2 || f0byte == 0x31) {
AppendToBuffer("%s", f0mnem);
data += 2;
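For reference, the sequences decoded above are the recommended multi-byte nop encodings from the Intel manuals; the 0x66-prefixed forms are handled separately under the 0x66 prefix case further down:

    // 0f 1f 00                    3-byte nop
    // 0f 1f 40 00                 4-byte nop
    // 0f 1f 44 00 00              5-byte nop
    // 0f 1f 80 00 00 00 00        7-byte nop
    // 0f 1f 84 00 00 00 00 00     8-byte nop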
@@ -1130,8 +1154,12 @@
break;
case 0x66: // prefix
- data++;
- if (*data == 0x8B) {
+ while (*data == 0x66) data++;
+ if (*data == 0xf && data[1] == 0x1f) {
+ AppendToBuffer("nop"); // 0x66 prefix
+ } else if (*data == 0x90) {
+ AppendToBuffer("nop"); // 0x66 prefix
+ } else if (*data == 0x8B) {
data++;
data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
} else if (*data == 0x89) {
@@ -1185,6 +1213,16 @@
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x17) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("extractps %s,%s,%d",
+ NameOfCPURegister(regop),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
} else if (*data == 0x22) {
data++;
int mod, regop, rm;
@@ -1258,6 +1296,9 @@
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x90) {
+ data++;
+ AppendToBuffer("nop"); // 2 byte nop.
} else if (*data == 0xF3) {
data++;
int mod, regop, rm;
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 45b847a..9e51857 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -95,9 +95,11 @@
class StandardFrameConstants : public AllStatic {
public:
+ // Fixed part of the frame consists of return address, caller fp,
+ // context and function.
// StandardFrame::IterateExpressions assumes that kContextOffset is the last
// object pointer.
- static const int kFixedFrameSize = 4; // Currently unused.
+ static const int kFixedFrameSize = 4 * kPointerSize;
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
@@ -123,6 +125,8 @@
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index ef4f0c5..ede810c 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -106,7 +106,7 @@
// formal parameter count expected by the function.
//
// The live registers are:
-// o edi: the JS function object being called (ie, ourselves)
+// o edi: the JS function object being called (i.e. ourselves)
// o esi: our context
// o ebp: our caller's frame pointer
// o esp: stack pointer (pointing to return address)
@@ -227,7 +227,7 @@
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
__ SafePush(Immediate(Smi::FromInt(num_parameters)));
- // Arguments to ArgumentsAccessStub and/or New...:
+ // Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
@@ -967,7 +967,7 @@
__ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
__ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- // Setup the four remaining stack slots.
+ // Set up the four remaining stack slots.
__ push(eax); // Map.
__ push(edx); // Enumeration cache.
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
@@ -2883,7 +2883,7 @@
VisitForStackValue(args->at(1));
if (CpuFeatures::IsSupported(SSE2)) {
- MathPowStub stub;
+ MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kMath_pow, 2);
@@ -3571,7 +3571,7 @@
// One-character separator case
__ bind(&one_char_separator);
- // Replace separator with its ascii character value.
+ // Replace separator with its ASCII character value.
__ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
__ mov_b(separator_operand, scratch);
@@ -3787,7 +3787,7 @@
if (context()->IsAccumulatorValue()) {
__ mov(eax, isolate()->factory()->true_value());
} else {
- __ push(isolate()->factory()->true_value());
+ __ Push(isolate()->factory()->true_value());
}
__ jmp(&done, Label::kNear);
__ bind(&materialize_false);
@@ -3795,7 +3795,7 @@
if (context()->IsAccumulatorValue()) {
__ mov(eax, isolate()->factory()->false_value());
} else {
- __ push(isolate()->factory()->false_value());
+ __ Push(isolate()->factory()->false_value());
}
__ bind(&done);
}
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index e93353e..3a93790 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -473,7 +473,6 @@
Counters* counters = isolate->counters();
__ IncrementCounter(counters->keyed_load_generic_smi(), 1);
__ ret(0);
-
__ bind(&check_number_dictionary);
__ mov(ebx, eax);
__ SmiUntag(ebx);
@@ -535,14 +534,34 @@
__ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
__ shr(edi, String::kHashShift);
__ xor_(ecx, edi);
- __ and_(ecx, KeyedLookupCache::kCapacityMask);
+ __ and_(ecx, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
// Load the key (consisting of map and symbol) from the cache and
// check for match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
ExternalReference cache_keys =
ExternalReference::keyed_lookup_cache_keys(masm->isolate());
- __ mov(edi, ecx);
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+ __ mov(edi, ecx);
+ __ shl(edi, kPointerSizeLog2 + 1);
+ if (i != 0) {
+ __ add(edi, Immediate(kPointerSize * i * 2));
+ }
+ __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(not_equal, &try_next_entry);
+ __ add(edi, Immediate(kPointerSize));
+ __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(equal, &hit_on_nth_entry[i]);
+ __ bind(&try_next_entry);
+ }
+
+ __ lea(edi, Operand(ecx, 1));
__ shl(edi, kPointerSizeLog2 + 1);
+ __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
__ add(edi, Immediate(kPointerSize));
@@ -556,13 +575,25 @@
// ecx : lookup cache index
ExternalReference cache_field_offsets =
ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
- __ mov(edi,
- Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
- __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ sub(edi, ecx);
- __ j(above_equal, &property_array_property);
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ bind(&hit_on_nth_entry[i]);
+ if (i != 0) {
+ __ add(ecx, Immediate(i));
+ }
+ __ mov(edi,
+ Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
+ __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+ __ sub(edi, ecx);
+ __ j(above_equal, &property_array_property);
+ if (i != 0) {
+ __ jmp(&load_in_object_property);
+ }
+ }
// Load in-object property.
+ __ bind(&load_in_object_property);
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ add(ecx, edi);
__ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
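A sketch of the addressing the probe loop relies on (hypothetical helpers; the extra kHashMask presumably keeps ecx aligned to a bucket's first entry):

    // cache_keys holds (map, symbol) pairs, i.e. two words per entry,
    // hence the shift by kPointerSizeLog2 + 1 when forming byte offsets.
    int KeyByteOffset(int entry, int pointer_size_log2 = 2) {
      return entry << (pointer_size_log2 + 1);
    }

    // cache_field_offsets is indexed per entry, so a hit on the bucket's
    // i-th entry uses first_entry + i (the add(ecx, Immediate(i)) above).
    int FieldOffsetIndex(int first_entry, int i) {
      return first_entry + i;
    }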
@@ -1374,10 +1405,10 @@
// -- esp[0] : return address
// -----------------------------------
//
- // This accepts as a receiver anything JSObject::SetElementsLength accepts
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
// (currently anything except for external arrays which means anything with
- // elements of FixedArray type.), but currently is restricted to JSArray.
- // Value must be a number, but only smis are accepted as the most common case.
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
Label miss;
@@ -1399,6 +1430,13 @@
__ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
__ j(not_equal, &miss);
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
+ __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(equal, &miss);
+
// Check that value is a smi.
__ JumpIfNotSmi(value, &miss);
@@ -1625,6 +1663,9 @@
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
rewritten = stub.GetCode();
}
set_target(*rewritten);
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 33adc21..46a35b6 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -341,24 +341,21 @@
}
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ Handle<Object> literal = chunk_->LookupLiteral(op);
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
+ return literal;
+}
+
+
double LCodeGen::ToDouble(LConstantOperand* op) const {
Handle<Object> value = chunk_->LookupLiteral(op);
return value->Number();
}
-Immediate LCodeGen::ToImmediate(LOperand* op) {
- LConstantOperand* const_op = LConstantOperand::cast(op);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- return Immediate(static_cast<int32_t>(literal->Number()));
- } else if (r.IsDouble()) {
- Abort("unsupported double immediate");
- }
- ASSERT(r.IsTagged());
- return Immediate(literal);
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsInteger32();
}
@@ -397,7 +394,11 @@
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->BeginFrame(environment->ast_id(), closure_id, height);
+ if (environment->is_arguments_adaptor()) {
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ } else {
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ }
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
@@ -518,7 +519,7 @@
} else if (context->IsConstantOperand()) {
Handle<Object> literal =
chunk_->LookupLiteral(LConstantOperand::cast(context));
- LoadHeapObject(esi, Handle<Context>::cast(literal));
+ __ LoadHeapObject(esi, Handle<Context>::cast(literal));
} else {
UNREACHABLE();
}
@@ -546,10 +547,14 @@
// |>------------ translation_size ------------<|
int frame_count = 0;
+ int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
+ if (!e->is_arguments_adaptor()) {
+ ++jsframe_count;
+ }
}
- Translation translation(&translations_, frame_count);
+ Translation translation(&translations_, frame_count, jsframe_count);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
@@ -1160,7 +1165,7 @@
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
- __ sub(ToOperand(left), ToImmediate(right));
+ __ sub(ToOperand(left), ToInteger32Immediate(right));
} else {
__ sub(ToRegister(left), ToOperand(right));
}
@@ -1219,7 +1224,7 @@
Register reg = ToRegister(instr->result());
Handle<Object> handle = instr->value();
if (handle->IsHeapObject()) {
- LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
+ __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
} else {
__ Set(reg, Immediate(handle));
}
@@ -1299,7 +1304,7 @@
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
- __ add(ToOperand(left), ToImmediate(right));
+ __ add(ToOperand(left), ToInteger32Immediate(right));
} else {
__ add(ToRegister(left), ToOperand(right));
}
@@ -1571,9 +1576,9 @@
__ j(parity_even, chunk_->GetAssemblyLabel(false_block));
} else {
if (right->IsConstantOperand()) {
- __ cmp(ToRegister(left), ToImmediate(right));
+ __ cmp(ToRegister(left), ToInteger32Immediate(right));
} else if (left->IsConstantOperand()) {
- __ cmp(ToOperand(right), ToImmediate(left));
+ __ cmp(ToOperand(right), ToInteger32Immediate(left));
// We transposed the operands. Reverse the condition.
cc = ReverseCondition(cc);
} else {
@@ -1834,7 +1839,7 @@
// Branches to a label or falls through with the answer in the z flag. Trashes
-// the temp registers, but not the input. Only input and temp2 may alias.
+// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
Handle<String>class_name,
@@ -1842,7 +1847,8 @@
Register temp,
Register temp2) {
ASSERT(!input.is(temp));
- ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
+ ASSERT(!input.is(temp2));
+ ASSERT(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
if (class_name->IsEqualTo(CStrVector("Function"))) {
@@ -1901,12 +1907,7 @@
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));
- if (input.is(temp)) {
- // Swap.
- Register swapper = temp;
- temp = temp2;
- temp2 = swapper;
- }
+
Handle<String> class_name = instr->hydrogen()->class_name();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -2031,7 +2032,7 @@
// the stub.
Register temp = ToRegister(instr->TempAt(0));
ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
- __ mov(InstanceofStub::right(), Immediate(instr->function()));
+ __ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 13;
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
__ mov(temp, Immediate(delta));
@@ -2118,26 +2119,20 @@
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register object = ToRegister(instr->TempAt(0));
- Register address = ToRegister(instr->TempAt(1));
- Register value = ToRegister(instr->InputAt(0));
- ASSERT(!value.is(object));
- Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell());
-
- int offset = JSGlobalPropertyCell::kValueOffset;
- __ mov(object, Immediate(cell_handle));
+ Register value = ToRegister(instr->value());
+ Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(FieldOperand(object, offset), factory()->the_hole_value());
+ __ cmp(Operand::Cell(cell_handle), factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
// Store the value.
- __ mov(FieldOperand(object, offset), value);
+ __ mov(Operand::Cell(cell_handle), value);
// Cells are always rescanned, so no write barrier here.
}
@@ -2159,13 +2154,38 @@
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ mov(result, ContextOperand(context, instr->slot_index()));
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(result, factory()->the_hole_value());
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ Label is_not_hole;
+ __ j(not_equal, &is_not_hole, Label::kNear);
+ __ mov(result, factory()->undefined_value());
+ __ bind(&is_not_hole);
+ }
+ }
}
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
- __ mov(ContextOperand(context, instr->slot_index()), value);
+
+ Label skip_assignment;
+
+ Operand target = ContextOperand(context, instr->slot_index());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(target, factory()->the_hole_value());
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &skip_assignment, Label::kNear);
+ }
+ }
+
+ __ mov(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
@@ -2180,6 +2200,8 @@
EMIT_REMEMBERED_SET,
check_needed);
}
+
+ __ bind(&skip_assignment);
}
@@ -2201,7 +2223,7 @@
Handle<String> name) {
LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsProperty() &&
+ ASSERT(lookup.IsFound() &&
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
if (lookup.type() == FIELD) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
@@ -2217,7 +2239,24 @@
}
} else {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ __ LoadHeapObject(result, function);
+ }
+}
+
+
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+ ASSERT(!operand->IsDoubleRegister());
+ if (operand->IsConstantOperand()) {
+ Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+ if (object->IsSmi()) {
+ __ Push(Handle<Smi>::cast(object));
+ } else {
+ __ PushHeapObject(Handle<HeapObject>::cast(object));
+ }
+ } else if (operand->IsRegister()) {
+ __ push(ToRegister(operand));
+ } else {
+ __ push(ToOperand(operand));
}
}
@@ -2627,17 +2666,13 @@
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->InputAt(0);
- if (argument->IsConstantOperand()) {
- __ push(ToImmediate(argument));
- } else {
- __ push(ToOperand(argument));
- }
+ EmitPushTaggedOperand(argument);
}
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- LoadHeapObject(result, instr->hydrogen()->closure());
+ __ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -2673,41 +2708,53 @@
int arity,
LInstruction* instr,
CallKind call_kind) {
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- } else {
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
-
- // Set eax to arguments count if adaption is not needed. Assumes that eax
- // is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ mov(eax, arity);
- }
+ bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
+ function->shared()->formal_parameter_count() == arity;
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- // Invoke function.
- __ SetCallKind(ecx, call_kind);
- if (*function == *info()->closure()) {
- __ CallSelf();
- } else {
- __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
- }
+ if (can_invoke_directly) {
+ __ LoadHeapObject(edi, function);
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ // Change context if needed.
+ bool change_context =
+ (info()->closure()->context() != function->context()) ||
+ scope()->contains_with() ||
+ (scope()->num_heap_slots() > 0);
+
+ if (change_context) {
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ } else {
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ }
+
+ // Set eax to arguments count if adaption is not needed. Assumes that eax
+ // is available to write to at this point.
+ if (!function->NeedsArgumentsAdaption()) {
+ __ mov(eax, arity);
+ }
+
+ // Invoke function directly.
+ __ SetCallKind(ecx, call_kind);
+ if (*function == *info()->closure()) {
+ __ CallSelf();
+ } else {
+ __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ }
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ } else {
+ // We need to adapt arguments.
+ SafepointGenerator generator(
+ this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(arity);
+ __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+ }
}
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
- __ mov(edi, instr->function());
CallKnownFunction(instr->function(),
instr->arity(),
instr,
@@ -2922,72 +2969,90 @@
}
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+
+ // Note that according to ECMA-262 15.8.2.13:
+ // Math.pow(-Infinity, 0.5) == Infinity
+ // Math.sqrt(-Infinity) == NaN
+ Label done, sqrt;
+ // Check base for -Infinity. According to IEEE-754, single-precision
+ // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
+ __ mov(scratch, 0xFF800000);
+ __ movd(xmm_scratch, scratch);
+ __ cvtss2sd(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &sqrt, Label::kNear);
+ __ j(carry, &sqrt, Label::kNear);
+ // If input is -Infinity, return Infinity.
+ __ xorps(input_reg, input_reg);
+ __ subsd(input_reg, xmm_scratch);
+ __ jmp(&done, Label::kNear);
+
+ // Square root.
+ __ bind(&sqrt);
__ xorps(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
__ sqrtsd(input_reg, input_reg);
+ __ bind(&done);
}
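A standalone illustration of the corner case guarded above, plus the -0 normalization (adding +0.0 turns -0.0 into +0.0 before sqrtsd):

    #include <cmath>
    #include <cstdio>

    int main() {
      double minus_inf = -INFINITY;
      std::printf("%f\n", std::pow(minus_inf, 0.5));  // inf: required by ES5
      std::printf("%f\n", std::sqrt(minus_inf));      // nan: raw sqrt result
      std::printf("%f\n", std::sqrt(-0.0 + 0.0));     // 0: -0 + +0 == +0
      return 0;
    }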
void LCodeGen::DoPower(LPower* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+ ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+ ToDoubleRegister(instr->InputAt(1)).is(xmm1));
+ ASSERT(!instr->InputAt(1)->IsRegister() ||
+ ToRegister(instr->InputAt(1)).is(eax));
+ ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
- if (exponent_type.IsDouble()) {
- // It is safe to use ebx directly since the instruction is marked
- // as a call.
- __ PrepareCallCFunction(4, ebx);
- __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
- __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
- __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
- 4);
- } else if (exponent_type.IsInteger32()) {
- // It is safe to use ebx directly since the instruction is marked
- // as a call.
- ASSERT(!ToRegister(right).is(ebx));
- __ PrepareCallCFunction(4, ebx);
- __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
- __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
- __ CallCFunction(ExternalReference::power_double_int_function(isolate()),
- 4);
- } else {
- ASSERT(exponent_type.IsTagged());
- CpuFeatures::Scope scope(SSE2);
- Register right_reg = ToRegister(right);
-
- Label non_smi, call;
- __ JumpIfNotSmi(right_reg, &non_smi);
- __ SmiUntag(right_reg);
- __ cvtsi2sd(result_reg, Operand(right_reg));
- __ jmp(&call);
-
- __ bind(&non_smi);
- // It is safe to use ebx directly since the instruction is marked
- // as a call.
- ASSERT(!right_reg.is(ebx));
- __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , ebx);
+ if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(eax, &no_deopt);
+ __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
DeoptimizeIf(not_equal, instr->environment());
- __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
-
- __ bind(&call);
- __ PrepareCallCFunction(4, ebx);
- __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
- __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
- __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
- 4);
+ __ bind(&no_deopt);
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ MathPowStub stub(MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(MathPowStub::DOUBLE);
+ __ CallStub(&stub);
}
+}
- // Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movdbl(result_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
+
+void LCodeGen::DoRandom(LRandom* instr) {
+ // Having marked this instruction as a call, we can use any
+ // registers.
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ ASSERT(ToRegister(instr->InputAt(0)).is(eax));
+
+ __ PrepareCallCFunction(1, ebx);
+ __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
+ __ mov(Operand(esp, 0), eax);
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+
+ // Convert 32 random bits in eax to 0.(32 random bits) in a double
+ // by computing:
+ // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
+ __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
+ __ movd(xmm2, ebx);
+ __ movd(xmm1, eax);
+ __ cvtss2sd(xmm2, xmm2);
+ __ xorps(xmm1, xmm2);
+ __ subsd(xmm1, xmm2);
}
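A worked check of the bit trick: 0x49800000 is 2^20 as a single; widened to a double its pattern is 0x4130000000000000 with the low 32 mantissa bits clear, so XORing the random bits in gives 2^20 * (1 + r/2^52) = 2^20 + r/2^32, and the subtraction leaves r/2^32 in [0, 1):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      uint32_t r = 0xDEADBEEF;                    // stand-in random bits
      uint64_t bits = 0x4130000000000000ULL | r;  // xor == or: bits disjoint
      double d;
      std::memcpy(&d, &bits, sizeof d);
      std::printf("%.10f\n", d - 1048576.0);      // prints r / 2^32
      return 0;
    }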
@@ -3060,9 +3125,6 @@
case kMathSqrt:
DoMathSqrt(instr);
break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
case kMathCos:
DoMathCos(instr);
break;
@@ -3147,7 +3209,6 @@
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
- __ mov(edi, instr->target());
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -3229,7 +3290,7 @@
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
if (instr->index()->IsConstantOperand()) {
__ cmp(ToOperand(instr->length()),
- ToImmediate(LConstantOperand::cast(instr->index())));
+ Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
DeoptimizeIf(below_equal, instr->environment());
} else {
__ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
@@ -3283,13 +3344,6 @@
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
- // conversion, so it deopts in that case.
- if (instr->hydrogen()->ValueNeedsSmiCheck()) {
- __ test(value, Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr->environment());
- }
-
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3512,16 +3566,8 @@
void LCodeGen::DoStringAdd(LStringAdd* instr) {
- if (instr->left()->IsConstantOperand()) {
- __ push(ToImmediate(instr->left()));
- } else {
- __ push(ToOperand(instr->left()));
- }
- if (instr->right()->IsConstantOperand()) {
- __ push(ToImmediate(instr->right()));
- } else {
- __ push(ToOperand(instr->right()));
- }
+ EmitPushTaggedOperand(instr->left());
+ EmitPushTaggedOperand(instr->right());
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -3671,8 +3717,10 @@
void LCodeGen::EmitNumberUntagD(Register input_reg,
+ Register temp_reg,
XMMRegister result_reg,
bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
LEnvironment* env) {
Label load_smi, done;
@@ -3701,6 +3749,15 @@
}
// Heap number to XMM conversion.
__ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ XMMRegister xmm_scratch = xmm0;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(result_reg, xmm_scratch);
+ __ j(not_zero, &done, Label::kNear);
+ __ movmskpd(temp_reg, result_reg);
+ __ test_b(temp_reg, 1);
+ DeoptimizeIf(not_zero, env);
+ }
__ jmp(&done, Label::kNear);
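The movmskpd step exists because ucomisd sees -0.0 and +0.0 as equal, so after the compare only the sign bit distinguishes them. A scalar equivalent of the check:

    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      return x == 0.0 && (bits >> 63) != 0;  // equals zero, sign bit set
    }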
// Smi to XMM conversion
@@ -3823,14 +3880,23 @@
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
+ LOperand* temp = instr->TempAt(0);
+ ASSERT(temp == NULL || temp->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
XMMRegister result_reg = ToDoubleRegister(result);
- EmitNumberUntagD(input_reg, result_reg,
+ bool deoptimize_on_minus_zero =
+ instr->hydrogen()->deoptimize_on_minus_zero();
+ Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
+
+ EmitNumberUntagD(input_reg,
+ temp_reg,
+ result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
+ deoptimize_on_minus_zero,
instr->environment());
}
@@ -4020,19 +4086,29 @@
__ cmp(reg, Operand::Cell(cell));
} else {
Operand operand = ToOperand(instr->value());
- __ cmp(operand, instr->hydrogen()->target());
+ __ cmp(operand, target);
}
DeoptimizeIf(not_equal, instr->environment());
}
+void LCodeGen::DoCheckMapCommon(Register reg,
+ Handle<Map> map,
+ CompareMapMode mode,
+ LEnvironment* env) {
+ Label success;
+ __ CompareMap(reg, map, &success, mode);
+ DeoptimizeIf(not_equal, env);
+ __ bind(&success);
+}
+
+
void LCodeGen::DoCheckMap(LCheckMap* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- instr->hydrogen()->map());
- DeoptimizeIf(not_equal, instr->environment());
+ Handle<Map> map = instr->hydrogen()->map();
+ DoCheckMapCommon(reg, map, instr->hydrogen()->mode(), instr->environment());
}
@@ -4084,17 +4160,6 @@
}
-void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- __ mov(result, Operand::Cell(cell));
- } else {
- __ mov(result, object);
- }
-}
-
-
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register reg = ToRegister(instr->TempAt(0));
@@ -4102,39 +4167,53 @@
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
- LoadHeapObject(reg, current_prototype);
+ __ LoadHeapObject(reg, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current_prototype->map()));
- DeoptimizeIf(not_equal, instr->environment());
+ DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
- LoadHeapObject(reg, current_prototype);
+ __ LoadHeapObject(reg, current_prototype);
}
// Check the holder map.
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current_prototype->map()));
- DeoptimizeIf(not_equal, instr->environment());
+ DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
+ Heap* heap = isolate()->heap();
+ ElementsKind boilerplate_elements_kind =
+ instr->hydrogen()->boilerplate_elements_kind();
- Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ // Deopt if the array literal boilerplate ElementsKind is of a type different
+ // than the expected one. The check isn't necessary if the boilerplate has
+ // already been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ // Load the map's "bit field 2". We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ mov(ebx, FieldOperand(ebx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(ebx, Map::kElementsKindMask);
+ __ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
- // Setup the parameters to the stub/runtime call.
+ // Set up the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(constant_elements));
+ // Boilerplate already exists, constant elements are never accessed.
+ // Pass an empty fixed array.
+ __ push(Immediate(Handle<FixedArray>(heap->empty_fixed_array())));
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@@ -4150,9 +4229,9 @@
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
- constant_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4167,7 +4246,7 @@
ASSERT(!result.is(ecx));
if (FLAG_debug_code) {
- LoadHeapObject(ecx, object);
+ __ LoadHeapObject(ecx, object);
__ cmp(source, ecx);
__ Assert(equal, "Unexpected object literal boilerplate");
}
@@ -4197,10 +4276,10 @@
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ lea(ecx, Operand(result, *offset));
__ mov(FieldOperand(result, total_offset), ecx);
- LoadHeapObject(source, value_object);
+ __ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
- LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
+ __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
__ mov(FieldOperand(result, total_offset), ecx);
} else {
__ mov(FieldOperand(result, total_offset), Immediate(value));
@@ -4225,7 +4304,7 @@
__ bind(&allocated);
int offset = 0;
- LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
+ __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
ASSERT_EQ(size, offset);
}
@@ -4236,7 +4315,7 @@
Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties();
- // Setup the parameters to the stub/runtime call.
+ // Set up the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
@@ -4347,11 +4426,7 @@
void LCodeGen::DoTypeof(LTypeof* instr) {
LOperand* input = instr->InputAt(1);
- if (input->IsConstantOperand()) {
- __ push(ToImmediate(input));
- } else {
- __ push(ToOperand(input));
- }
+ EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}
@@ -4475,9 +4550,7 @@
int patch_size = Deoptimizer::patch_size();
if (current_pc < last_lazy_deopt_pc_ + patch_size) {
int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
- while (padding_size-- > 0) {
- __ nop();
- }
+ __ Nop(padding_size);
}
last_lazy_deopt_pc_ = masm()->pc_offset();
}
@@ -4501,11 +4574,7 @@
LOperand* obj = instr->object();
LOperand* key = instr->key();
__ push(ToOperand(obj));
- if (key->IsConstantOperand()) {
- __ push(ToImmediate(key));
- } else {
- __ push(ToOperand(key));
- }
+ EmitPushTaggedOperand(key);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
@@ -4602,16 +4671,8 @@
void LCodeGen::DoIn(LIn* instr) {
LOperand* obj = instr->object();
LOperand* key = instr->key();
- if (key->IsConstantOperand()) {
- __ push(ToImmediate(key));
- } else {
- __ push(ToOperand(key));
- }
- if (obj->IsConstantOperand()) {
- __ push(ToImmediate(obj));
- } else {
- __ push(ToOperand(obj));
- }
+ EmitPushTaggedOperand(key);
+ EmitPushTaggedOperand(obj);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 9d1a4f7..d86d48c 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -78,7 +78,13 @@
Operand ToOperand(LOperand* op) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
- Immediate ToImmediate(LOperand* op);
+
+ bool IsInteger32(LConstantOperand* op) const;
+ Immediate ToInteger32Immediate(LOperand* op) const {
+ return Immediate(ToInteger32(LConstantOperand::cast(op)));
+ }
+
+ Handle<Object> ToHandle(LConstantOperand* op) const;
// The operand denoting the second word (the one with a higher address) of
// a double stack slot.
@@ -104,6 +110,9 @@
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
+ void DoCheckMapCommon(Register reg, Handle<Map> map,
+ CompareMapMode mode, LEnvironment* env);
+
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
@@ -207,8 +216,6 @@
LInstruction* instr,
CallKind call_kind);
- void LoadHeapObject(Register result, Handle<HeapObject> object);
-
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -227,6 +234,7 @@
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
int ToInteger32(LConstantOperand* op) const;
+
double ToDouble(LConstantOperand* op) const;
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
@@ -239,7 +247,6 @@
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
void DoMathLog(LUnaryMathOperation* instr);
void DoMathTan(LUnaryMathOperation* instr);
void DoMathCos(LUnaryMathOperation* instr);
@@ -261,8 +268,10 @@
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitNumberUntagD(Register input,
+ Register temp,
XMMRegister result,
bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
LEnvironment* env);
// Emits optimized code for typeof x == "y". Modifies input register.
@@ -306,6 +315,10 @@
void EnsureSpaceForLazyDeopt();
+ // Emits code for pushing either a tagged constant, a (non-double)
+ // register, or a stack slot operand.
+ void EmitPushTaggedOperand(LOperand* operand);
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@@ -372,7 +385,7 @@
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
- void SetExit(Label *exit) { external_exit_ = exit; }
+ void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index fcf1f91..510d9f1 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -303,14 +303,24 @@
}
} else if (source->IsConstantOperand()) {
- ASSERT(destination->IsRegister() || destination->IsStackSlot());
- Immediate src = cgen_->ToImmediate(source);
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
- __ Set(dst, src);
+ if (cgen_->IsInteger32(constant_source)) {
+ __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
+ } else {
+ __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ }
} else {
+ ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ Set(dst, src);
+ if (cgen_->IsInteger32(constant_source)) {
+ __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
+ } else {
+ Register tmp = EnsureTempRegister();
+ __ LoadObject(tmp, cgen_->ToHandle(constant_source));
+ __ mov(dst, tmp);
+ }
}
} else if (source->IsDoubleRegister()) {
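The gap resolver now distinguishes integer-32 constants, which can still be emitted as immediates, from tagged constants, which are materialized with LoadObject so that new-space heap objects are handled safely. IsInteger32 and ToHandle are declared in the lithium-codegen-ia32.h hunk above; plausible bodies for them, assuming LChunk's literal-lookup interface (a sketch, not taken from this patch):

bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  // A constant can be emitted as an integer-32 immediate iff it was
  // recorded with an Integer32 representation.
  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
}

Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  Handle<Object> literal = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
  return literal;
}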
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 4e5f278..c81aca8 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -298,6 +298,12 @@
}
+void LMathPowHalf::PrintDataTo(StringStream* stream) {
+ stream->Add("/pow_half ");
+ InputAt(0)->PrintTo(stream);
+}
+
+
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
InputAt(0)->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -1007,15 +1013,17 @@
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber);
+ ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor());
int value_count = hydrogen_env->length();
LEnvironment* result =
new(zone()) LEnvironment(hydrogen_env->closure(),
+ hydrogen_env->is_arguments_adaptor(),
ast_id,
hydrogen_env->parameter_count(),
argument_count_,
value_count,
outer);
+ int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1024,13 +1032,17 @@
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new(zone()) LArgument((*argument_index_accumulator)++);
+ op = new(zone()) LArgument(argument_index++);
} else {
op = UseAny(value);
}
result->AddValue(op, value->representation());
}
+ if (!hydrogen_env->is_arguments_adaptor()) {
+ *argument_index_accumulator = argument_index;
+ }
+
return result;
}
@@ -1041,22 +1053,31 @@
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* v = instr->value();
- if (v->EmitAtUses()) {
- ASSERT(v->IsConstant());
- ASSERT(!v->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+ HValue* value = instr->value();
+ if (value->EmitAtUses()) {
+ ASSERT(value->IsConstant());
+ ASSERT(!value->representation().IsDouble());
+ HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new(zone()) LGoto(successor->block_id());
}
+
+ // Untagged integers or doubles, smis and booleans don't require a
+ // deoptimization environment nor a temp register.
+ Representation rep = value->representation();
+ HType type = value->type();
+ if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
+ return new(zone()) LBranch(UseRegister(value), NULL);
+ }
+
ToBooleanStub::Types expected = instr->expected_input_types();
// We need a temporary register when we have to access the map *or* we have
// no type info yet, in which case we handle all cases (including the ones
// involving maps).
bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(new(zone()) LBranch(UseRegister(v), temp));
+ return AssignEnvironment(new(zone()) LBranch(UseRegister(value), temp));
}
@@ -1184,6 +1205,11 @@
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* context = UseAny(instr->context()); // Deferred use by MathAbs.
+ if (op == kMathPowHalf) {
+ LOperand* temp = TempRegister();
+ LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
+ return DefineSameAsFirst(result);
+ }
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
input);
switch (op) {
@@ -1195,8 +1221,6 @@
return AssignEnvironment(DefineAsRegister(result));
case kMathSqrt:
return DefineSameAsFirst(result);
- case kMathPowHalf:
- return DefineSameAsFirst(result);
default:
UNREACHABLE();
return NULL;
@@ -1379,7 +1403,11 @@
temp = TempRegister();
}
LMulI* mul = new(zone()) LMulI(left, right, temp);
- return AssignEnvironment(DefineSameAsFirst(mul));
+ if (instr->CheckFlag(HValue::kCanOverflow) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ AssignEnvironment(mul);
+ }
+ return DefineSameAsFirst(mul);
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
@@ -1437,9 +1465,9 @@
// We need to use fixed result register for the call.
Representation exponent_type = instr->right()->representation();
ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), xmm1);
+ LOperand* left = UseFixedDouble(instr->left(), xmm2);
LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), xmm2) :
+ UseFixedDouble(instr->right(), xmm1) :
UseFixed(instr->right(), eax);
LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
@@ -1447,6 +1475,15 @@
}
+LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->global_object()->representation().IsTagged());
+ LOperand* global_object = UseFixed(instr->global_object(), eax);
+ LRandom* result = new(zone()) LRandom(global_object);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
@@ -1579,9 +1616,9 @@
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new(zone()) LClassOfTestAndBranch(UseTempRegister(instr->value()),
- TempRegister(),
- TempRegister());
+ return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
+ TempRegister(),
+ TempRegister());
}
@@ -1607,7 +1644,7 @@
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new(zone()) LValueOf(object, TempRegister());
- return AssignEnvironment(DefineSameAsFirst(result));
+ return DefineSameAsFirst(result);
}
@@ -1651,7 +1688,11 @@
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
+ // Temp register only necessary for minus zero check.
+ LOperand* temp = instr->deoptimize_on_minus_zero()
+ ? TempRegister()
+ : NULL;
+ LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
@@ -1847,9 +1888,7 @@
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LStoreGlobalCell* result =
- new(zone()) LStoreGlobalCell(UseTempRegister(instr->value()),
- TempRegister(),
- TempRegister());
+ new(zone()) LStoreGlobalCell(UseRegister(instr->value()));
return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1866,7 +1905,9 @@
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadContextSlot(context));
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LLoadContextSlot(context));
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1881,7 +1922,8 @@
value = UseRegister(instr->value());
temp = NULL;
}
- return new(zone()) LStoreContextSlot(context, value, temp);
+ LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1946,7 +1988,8 @@
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
- return AssignEnvironment(DefineAsRegister(result));
+ if (instr->RequiresHoleCheck()) AssignEnvironment(result);
+ return DefineAsRegister(result);
}
@@ -1965,12 +2008,11 @@
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Representation representation(instr->representation());
ASSERT(
- (representation.IsInteger32() &&
+ (instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (representation.IsDouble() &&
+ (instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
@@ -2013,8 +2055,7 @@
LOperand* key = needs_write_barrier
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
-
- return AssignEnvironment(new(zone()) LStoreKeyedFastElement(obj, key, val));
+ return new(zone()) LStoreKeyedFastElement(obj, key, val);
}
@@ -2034,13 +2075,12 @@
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
- Representation representation(instr->value()->representation());
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
- (representation.IsInteger32() &&
+ (instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (representation.IsDouble() &&
+ (instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
@@ -2344,6 +2384,7 @@
HEnvironment* outer = current_block_->last_environment();
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->arguments_count(),
instr->function(),
undefined,
instr->call_kind());
@@ -2354,7 +2395,8 @@
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- HEnvironment* outer = current_block_->last_environment()->outer();
+ HEnvironment* outer = current_block_->last_environment()->
+ DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
return NULL;
}
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 5170647..67bf937 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -123,6 +123,7 @@
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MathPowHalf) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -134,6 +135,7 @@
V(OuterContext) \
V(Parameter) \
V(Power) \
+ V(Random) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -582,6 +584,24 @@
};
+class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[1] = context;
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -1024,6 +1044,17 @@
};
+class LRandom: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LRandom(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Random, "random")
+ DECLARE_HYDROGEN_ACCESSOR(Random)
+};
+
+
class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1250,16 +1281,16 @@
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> {
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ explicit LStoreGlobalCell(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+
+ LOperand* value() { return inputs_[0]; }
};
@@ -1593,10 +1624,11 @@
};
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LNumberUntagD(LOperand* value) {
+ explicit LNumberUntagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index fcae7a2..d0d9e19 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -357,6 +357,14 @@
}
+void MacroAssembler::CompareRoot(const Operand& with,
+ Heap::RootListIndex index) {
+ // See the ROOT_ACCESSOR macro in factory.h.
+ Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+ cmp(with, value);
+}
+
+
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
@@ -479,15 +487,48 @@
}
+void MacroAssembler::CompareMap(Register obj,
+ Handle<Map> map,
+ Label* early_success,
+ CompareMapMode mode) {
+ cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+ if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
+ Map* transitioned_fast_element_map(
+ map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+ ASSERT(transitioned_fast_element_map == NULL ||
+ map->elements_kind() != FAST_ELEMENTS);
+ if (transitioned_fast_element_map != NULL) {
+ j(equal, early_success, Label::kNear);
+ cmp(FieldOperand(obj, HeapObject::kMapOffset),
+ Handle<Map>(transitioned_fast_element_map));
+ }
+
+ Map* transitioned_double_map(
+ map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+ ASSERT(transitioned_double_map == NULL ||
+ map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+ if (transitioned_double_map != NULL) {
+ j(equal, early_success, Label::kNear);
+ cmp(FieldOperand(obj, HeapObject::kMapOffset),
+ Handle<Map>(transitioned_double_map));
+ }
+ }
+}
+
+
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type) {
+ SmiCheckType smi_check_type,
+ CompareMapMode mode) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
- cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
+
+ Label success;
+ CompareMap(obj, map, &success, mode);
j(not_equal, fail);
+ bind(&success);
}
@@ -608,7 +649,7 @@
void MacroAssembler::EnterExitFramePrologue() {
- // Setup the frame structure on the stack.
+ // Set up the frame structure on the stack.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
@@ -660,7 +701,7 @@
void MacroAssembler::EnterExitFrame(bool save_doubles) {
EnterExitFramePrologue();
- // Setup argc and argv in callee-saved registers.
+ // Set up argc and argv in callee-saved registers.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
mov(edi, eax);
lea(esi, Operand(ebp, eax, times_4, offset));
@@ -755,7 +796,7 @@
// Push the state and the code object.
push(Immediate(state));
- push(CodeObject());
+ Push(CodeObject());
// Link the current handler as the next handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
@@ -951,6 +992,50 @@
}
+// Compute the hash code from the untagged key. This must be kept in sync
+// with ComputeIntegerHash in utils.h.
+//
+// Note: r0 will contain the hash code.
+void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
+ // Xor original key with a seed.
+ if (Serializer::enabled()) {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
+ mov(scratch,
+ Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
+ SmiUntag(scratch);
+ xor_(r0, scratch);
+ } else {
+ int32_t seed = isolate()->heap()->HashSeed();
+ xor_(r0, Immediate(seed));
+ }
+
+ // hash = ~hash + (hash << 15);
+ mov(scratch, r0);
+ not_(r0);
+ shl(scratch, 15);
+ add(r0, scratch);
+ // hash = hash ^ (hash >> 12);
+ mov(scratch, r0);
+ shr(scratch, 12);
+ xor_(r0, scratch);
+ // hash = hash + (hash << 2);
+ lea(r0, Operand(r0, r0, times_4, 0));
+ // hash = hash ^ (hash >> 4);
+ mov(scratch, r0);
+ shr(scratch, 4);
+ xor_(r0, scratch);
+ // hash = hash * 2057;
+ imul(r0, r0, 2057);
+ // hash = hash ^ (hash >> 16);
+ mov(scratch, r0);
+ shr(scratch, 16);
+ xor_(r0, scratch);
+}
+
+
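The comments above require GetNumberHash to stay in sync with ComputeIntegerHash in utils.h. For reference, the same computation in plain C++, assembled step for step from the comments in the assembly (assuming the utils.h version takes the key and the heap's seed):

#include <stdint.h>

// Reference sketch of the seeded integer hash mirrored by GetNumberHash.
static inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;   // Xor original key with a seed.
  hash = ~hash + (hash << 15);  // hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);   // hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);    // hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);    // hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash = hash * 2057;
  hash = hash ^ (hash >> 16);   // hash = hash ^ (hash >> 16);
  return hash;
}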
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
@@ -976,33 +1061,10 @@
Label done;
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- mov(r1, r0);
- not_(r0);
- shl(r1, 15);
- add(r0, r1);
- // hash = hash ^ (hash >> 12);
- mov(r1, r0);
- shr(r1, 12);
- xor_(r0, r1);
- // hash = hash + (hash << 2);
- lea(r0, Operand(r0, r0, times_4, 0));
- // hash = hash ^ (hash >> 4);
- mov(r1, r0);
- shr(r1, 4);
- xor_(r0, r1);
- // hash = hash * 2057;
- imul(r0, r0, 2057);
- // hash = hash ^ (hash >> 16);
- mov(r1, r0);
- shr(r1, 16);
- xor_(r0, r1);
+ GetNumberHash(r0, r1);
// Compute capacity mask.
- mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
+ mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
shr(r1, kSmiTagSize); // convert smi to int
dec(r1);
@@ -1013,19 +1075,19 @@
mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
- add(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
+ add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
}
and_(r2, r1);
// Scale the index by multiplying by the entry size.
- ASSERT(NumberDictionary::kEntrySize == 3);
+ ASSERT(SeededNumberDictionary::kEntrySize == 3);
lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
cmp(key, FieldOperand(elements,
r2,
times_pointer_size,
- NumberDictionary::kElementsStartOffset));
+ SeededNumberDictionary::kElementsStartOffset));
if (i != (kProbes - 1)) {
j(equal, &done);
} else {
@@ -1036,7 +1098,7 @@
bind(&done);
// Check that the value is a normal property.
const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ASSERT_EQ(NORMAL, 0);
test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
@@ -1044,7 +1106,7 @@
// Get the value at the masked, scaled index.
const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
+ SeededNumberDictionary::kElementsStartOffset + kPointerSize;
mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
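The probe loop above is easier to follow in isolation. A standalone sketch of the lookup, using the offset formula from the comment ((hash + i + i * i) & mask) and the three-word entry layout implied by kEntrySize == 3; the names and the flat layout are illustrative, not V8's actual object model:

#include <stddef.h>
#include <stdint.h>

// Hypothetical flat model of a seeded number dictionary: capacity is a
// power of two and each entry is a (key, value, details) triple.
struct Entry { uint32_t key; void* value; uint32_t details; };

static void* Probe(const Entry* entries, uint32_t capacity,
                   uint32_t key, uint32_t hash, int max_probes) {
  uint32_t mask = capacity - 1;
  for (int i = 0; i < max_probes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    uint32_t index = (hash + i + i * i) & mask;
    if (entries[index].key == key) return entries[index].value;
  }
  return NULL;  // Out of probes: treat as a miss.
}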
@@ -1325,7 +1387,7 @@
add(scratch1, Immediate(kObjectAlignmentMask));
and_(scratch1, Immediate(~kObjectAlignmentMask));
- // Allocate ascii string in new space.
+ // Allocate ASCII string in new space.
AllocateInNewSpace(SeqAsciiString::kHeaderSize,
times_1,
scratch1,
@@ -1353,7 +1415,7 @@
Label* gc_required) {
ASSERT(length > 0);
- // Allocate ascii string in new space.
+ // Allocate ASCII string in new space.
AllocateInNewSpace(SeqAsciiString::SizeFor(length),
result,
scratch1,
@@ -1871,11 +1933,13 @@
Handle<Code> code_constant,
const Operand& code_operand,
Label* done,
+ bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance done_near,
const CallWrapper& call_wrapper,
CallKind call_kind) {
bool definitely_matches = false;
+ *definitely_mismatches = false;
Label invoke;
if (expected.is_immediate()) {
ASSERT(actual.is_immediate());
@@ -1891,6 +1955,7 @@
// arguments.
definitely_matches = true;
} else {
+ *definitely_mismatches = true;
mov(ebx, expected.immediate());
}
}
@@ -1928,7 +1993,9 @@
SetCallKind(ecx, call_kind);
call(adaptor, RelocInfo::CODE_TARGET);
call_wrapper.AfterCall();
- jmp(done, done_near);
+ if (!*definitely_mismatches) {
+ jmp(done, done_near);
+ }
} else {
SetCallKind(ecx, call_kind);
jmp(adaptor, RelocInfo::CODE_TARGET);
@@ -1948,20 +2015,23 @@
ASSERT(flag == JUMP_FUNCTION || has_frame());
Label done;
+ bool definitely_mismatches = false;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, flag, Label::kNear, call_wrapper,
- call_kind);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(ecx, call_kind);
- call(code);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(ecx, call_kind);
- jmp(code);
+ &done, &definitely_mismatches, flag, Label::kNear,
+ call_wrapper, call_kind);
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ SetCallKind(ecx, call_kind);
+ call(code);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(ecx, call_kind);
+ jmp(code);
+ }
+ bind(&done);
}
- bind(&done);
}
@@ -1977,19 +2047,22 @@
Label done;
Operand dummy(eax, 0);
- InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear,
- call_wrapper, call_kind);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code, rmode));
- SetCallKind(ecx, call_kind);
- call(code, rmode);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(ecx, call_kind);
- jmp(code, rmode);
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, code, dummy, &done, &definitely_mismatches,
+ flag, Label::kNear, call_wrapper, call_kind);
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code, rmode));
+ SetCallKind(ecx, call_kind);
+ call(code, rmode);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(ecx, call_kind);
+ jmp(code, rmode);
+ }
+ bind(&done);
}
- bind(&done);
}
@@ -2022,7 +2095,7 @@
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and set up the context.
- mov(edi, Immediate(function));
+ LoadHeapObject(edi, function);
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -2151,6 +2224,29 @@
}
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ mov(result, Operand::Cell(cell));
+ } else {
+ mov(result, object);
+ }
+}
+
+
+void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ push(Operand::Cell(cell));
+ } else {
+ Push(object);
+ }
+}
+
+
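LoadHeapObject and PushHeapObject exist so optimized code never embeds a raw pointer to a new-space object: new-space objects move during scavenges, so the reference is routed through a JSGlobalPropertyCell that the garbage collector keeps current, while old-space objects can still be referenced directly. The typical call-site pattern, excerpted from the stub-cache changes later in this patch:

// Materialize a known function; the cell indirection is chosen
// automatically when the object lives in new space.
__ LoadHeapObject(edi, function);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));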
void MacroAssembler::Ret() {
ret(0);
}
@@ -2182,11 +2278,6 @@
}
-void MacroAssembler::Move(Register dst, Handle<Object> value) {
- mov(dst, value);
-}
-
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
@@ -2384,7 +2475,7 @@
movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
- // Check that both are flat ascii strings.
+ // Check that both are flat ASCII strings.
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 03ec28a..0fcb94f 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -237,10 +237,21 @@
void StoreToSafepointRegisterSlot(Register dst, Immediate src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
+ void PushHeapObject(Handle<HeapObject> object);
+
+ void LoadObject(Register result, Handle<Object> object) {
+ if (object->IsHeapObject()) {
+ LoadHeapObject(result, Handle<HeapObject>::cast(object));
+ } else {
+ Set(result, Immediate(object));
+ }
+ }
+
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Setup call kind marking in ecx. The method takes ecx as an
+ // Set up call kind marking in ecx. The method takes ecx as an
// explicit first parameter to make the code more readable at the
// call sites.
void SetCallKind(Register dst, CallKind kind);
@@ -305,8 +316,9 @@
void SafeSet(Register dst, const Immediate& x);
void SafePush(const Immediate& x);
- // Compare a register against a known root, e.g. undefined, null, true, ...
+ // Compare against a known root, e.g. undefined, null, true, ...
void CompareRoot(Register with, Heap::RootListIndex index);
+ void CompareRoot(const Operand& with, Heap::RootListIndex index);
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
@@ -344,13 +356,24 @@
Label* fail,
bool specialize_for_processor);
+ // Compare an object's map with the specified map and its transitioned
+ // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. The FLAGS register
+ // is set with the result of the map compare. If multiple map compares are
+ // required, the compare sequence branches to early_success.
+ void CompareMap(Register obj,
+ Handle<Map> map,
+ Label* early_success,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
+
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
- // heap object)
+ // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+ // against maps that are ElementsKind transition maps of the specified map.
void CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type);
+ SmiCheckType smi_check_type,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
// Check if the map of an object is equal to a specified map and branch to a
// specified target if equal. Skip the smi check if not required (object is
@@ -474,6 +497,7 @@
Register scratch,
Label* miss);
+ void GetNumberHash(Register r0, Register scratch);
void LoadFromNumberDictionary(Label* miss,
Register elements,
@@ -718,10 +742,8 @@
// Move if the registers are not identical.
void Move(Register target, Register source);
- void Move(Register target, Handle<Object> value);
-
// Push a handle value.
- void Push(Handle<Object> handle) { push(handle); }
+ void Push(Handle<Object> handle) { push(Immediate(handle)); }
Handle<Object> CodeObject() {
ASSERT(!code_object_.is_null());
@@ -769,7 +791,7 @@
// ---------------------------------------------------------------------------
// String utilities.
- // Check whether the instance type represents a flat ascii string. Jump to the
+ // Check whether the instance type represents a flat ASCII string. Jump to the
// label if not. If the instance type can be scratched specify same register
// for both instance type and scratch.
void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
@@ -805,6 +827,7 @@
Handle<Code> code_constant,
const Operand& code_operand,
Label* done,
+ bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance done_distance,
const CallWrapper& call_wrapper = NullCallWrapper(),
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index dbf01ab..e613a06 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -210,7 +210,7 @@
bool check_end_of_string) {
#ifdef DEBUG
// If input is ASCII, don't even bother calling here if the string to
- // match contains a non-ascii character.
+ // match contains a non-ASCII character.
if (mode_ == ASCII) {
ASSERT(String::IsAscii(str.start(), str.length()));
}
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 722d718..f6f4241 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -429,7 +429,7 @@
// -----------------------------------
// Get the function and set up the context.
Handle<JSFunction> function = optimization.constant_function();
- __ mov(edi, Immediate(function));
+ __ LoadHeapObject(edi, function);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Pass the additional arguments.
@@ -695,13 +695,9 @@
Register name_reg,
Register scratch,
Label* miss_label) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver_reg, miss_label);
-
// Check that the map of the object hasn't changed.
- __ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(object->map())));
- __ j(not_equal, miss_label);
+ __ CheckMap(receiver_reg, Handle<Map>(object->map()),
+ miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -878,13 +874,10 @@
if (in_new_space) {
// Save the map in scratch1 for later.
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- __ cmp(scratch1, Immediate(current_map));
- } else {
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Immediate(current_map));
}
- // Branch on the result of the map check.
- __ j(not_equal, miss);
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
+ ALLOW_ELEMENT_TRANSITION_MAPS);
+
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
@@ -916,9 +909,8 @@
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
// Check the holder map.
- __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Immediate(Handle<Map>(holder->map())));
- __ j(not_equal, miss);
+ __ CheckMap(reg, Handle<Map>(holder->map()),
+ miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -1000,7 +992,7 @@
__ push(scratch3); // Restore return address.
- // 3 elements array for v8::Agruments::values_, handler for name and pointer
+ // 3 elements array for v8::Arguments::values_, handler for name and pointer
// to the values (it is considered a smi by the GC).
const int kStackSpace = 5;
const int kApiArgc = 2;
@@ -1025,7 +1017,7 @@
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1036,7 +1028,7 @@
object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ mov(eax, value);
+ __ LoadHeapObject(eax, value);
__ ret(0);
}
@@ -1061,7 +1053,7 @@
// and CALLBACKS, so inline only them, other cases may be added
// later.
bool compile_followup_inline = false;
- if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->IsFound() && lookup->IsCacheable()) {
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
@@ -2338,7 +2330,7 @@
__ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
}
- // Setup the context (function already in edi).
+ // Set up the context (function already in edi).
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
@@ -2403,13 +2395,9 @@
// -----------------------------------
Label miss;
- // Check that the object isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
// Check that the map of the object hasn't changed.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Handle<Map>(object->map())));
- __ j(not_equal, &miss);
+ __ CheckMap(edx, Handle<Map>(object->map()),
+ &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -2453,13 +2441,9 @@
// -----------------------------------
Label miss;
- // Check that the object isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
// Check that the map of the object hasn't changed.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Handle<Map>(receiver->map())));
- __ j(not_equal, &miss);
+ __ CheckMap(edx, Handle<Map>(receiver->map()),
+ &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {
@@ -2715,7 +2699,7 @@
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : receiver
@@ -2877,7 +2861,7 @@
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value) {
+ Handle<JSFunction> value) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
diff --git a/src/ic-inl.h b/src/ic-inl.h
index 498cf3a..56cea81 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -36,7 +36,7 @@
namespace internal {
-Address IC::address() {
+Address IC::address() const {
// Get the address of the call.
Address result = pc() - Assembler::kCallTargetAddressOffset;
diff --git a/src/ic.cc b/src/ic.cc
index 2c6d55b..b084109 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -40,13 +40,13 @@
namespace internal {
#ifdef DEBUG
-static char TransitionMarkFromState(IC::State state) {
+char IC::TransitionMarkFromState(IC::State state) {
switch (state) {
case UNINITIALIZED: return '0';
case PREMONOMORPHIC: return 'P';
case MONOMORPHIC: return '1';
case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
- case MEGAMORPHIC: return 'N';
+ case MEGAMORPHIC: return IsGeneric() ? 'G' : 'N';
// We never see the debugger states here, because the state is
// computed from the original code - not the patched code. Let
@@ -80,19 +80,7 @@
raw_frame = it.frame();
}
}
- if (raw_frame->is_java_script()) {
- JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
- Code* js_code = frame->unchecked_code();
- // Find the function on the stack and both the active code for the
- // function and the original code.
- JSFunction* function = JSFunction::cast(frame->function());
- function->PrintName();
- int code_offset =
- static_cast<int>(address() - js_code->instruction_start());
- PrintF("+%d", code_offset);
- } else {
- PrintF("<unknown>");
- }
+ JavaScriptFrame::PrintTop(stdout, false, true);
PrintF(" (%c->%c)",
TransitionMarkFromState(old_state),
TransitionMarkFromState(new_state));
@@ -100,13 +88,23 @@
PrintF("]\n");
}
}
-#endif // DEBUG
+#define TRACE_GENERIC_IC(type, reason) \
+ do { \
+ if (FLAG_trace_ic) { \
+ PrintF("[%s patching generic stub in ", type); \
+ JavaScriptFrame::PrintTop(stdout, false, true); \
+ PrintF(" (%s)]\n", reason); \
+ } \
+ } while (false)
+
+#else
+#define TRACE_GENERIC_IC(type, reason)
+#endif // DEBUG
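Given the format strings, a trace emitted by this macro looks roughly like the line below; the middle portion is whatever JavaScriptFrame::PrintTop prints for the topmost JavaScript frame and is shown here only as a placeholder:

[KeyedStoreIC patching generic stub in <top JS frame> (max polymorph exceeded)]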
#define TRACE_IC(type, name, old_state, new_target) \
ASSERT((TraceIC(type, name, old_state, new_target), true))
-
IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
ASSERT(isolate == Isolate::Current());
// To improve the performance of the (much used) IC code, we unfold
@@ -137,7 +135,7 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
-Address IC::OriginalCodeAddress() {
+Address IC::OriginalCodeAddress() const {
HandleScope scope;
// Compute the JavaScript frame for the frame pointer of this IC
// structure. We need this to be able to find the function
@@ -862,7 +860,7 @@
}
PropertyAttributes attr;
- if (lookup.IsProperty() &&
+ if (lookup.IsFound() &&
(lookup.type() == INTERCEPTOR || lookup.type() == HANDLER)) {
// Get the property.
Handle<Object> result =
@@ -914,7 +912,7 @@
name, receiver, holder, lookup->GetFieldIndex());
break;
case CONSTANT_FUNCTION: {
- Handle<Object> constant(lookup->GetConstantFunction());
+ Handle<JSFunction> constant(lookup->GetConstantFunction());
code = isolate()->stub_cache()->ComputeLoadConstant(
name, receiver, holder, constant);
break;
@@ -1085,7 +1083,7 @@
}
PropertyAttributes attr;
- if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
+ if (lookup.IsFound() && lookup.type() == INTERCEPTOR) {
// Get the property.
Handle<Object> result =
Object::GetProperty(object, object, &lookup, name, &attr);
@@ -1123,6 +1121,8 @@
stub = ComputeStub(receiver, LOAD, kNonStrictMode, stub);
}
}
+ } else {
+ TRACE_GENERIC_IC("KeyedLoadIC", "force generic");
}
if (!stub.is_null()) set_target(*stub);
}
@@ -1163,7 +1163,7 @@
name, receiver, holder, lookup->GetFieldIndex());
break;
case CONSTANT_FUNCTION: {
- Handle<Object> constant(lookup->GetConstantFunction());
+ Handle<JSFunction> constant(lookup->GetConstantFunction());
code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
name, receiver, holder, constant);
break;
@@ -1206,10 +1206,12 @@
static bool StoreICableLookup(LookupResult* lookup) {
// Bail out if we didn't find a result.
- if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return false;
+ if (!lookup->IsFound() || lookup->type() == NULL_DESCRIPTOR) return false;
- // If the property is read-only, we leave the IC in its current
- // state.
+ // Bail out if inline caching is not allowed.
+ if (!lookup->IsCacheable()) return false;
+
+ // If the property is read-only, we leave the IC in its current state.
if (lookup->IsReadOnly()) return false;
return true;
@@ -1267,15 +1269,19 @@
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- Handle<Object> result = SetElement(receiver, index, value, strict_mode);
+ Handle<Object> result =
+ JSObject::SetElement(receiver, index, value, strict_mode);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
return *value;
}
- // Use specialized code for setting the length of arrays.
- if (receiver->IsJSArray()
- && name->Equals(isolate()->heap()->length_symbol())
- && Handle<JSArray>::cast(receiver)->AllowsSetElementsLength()) {
+ // Use specialized code for setting the length of arrays with fast
+ // properties. Slow properties might indicate redefinition of the
+ // length property.
+ if (receiver->IsJSArray() &&
+ name->Equals(isolate()->heap()->length_symbol()) &&
+ Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() &&
+ receiver->HasFastProperties()) {
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
#endif
@@ -1473,6 +1479,7 @@
// via megamorphic stubs, since they don't have a map in their relocation info
// and so the stubs can't be harvested for the object needed for a map check.
if (target()->type() != NORMAL) {
+ TRACE_GENERIC_IC("KeyedIC", "non-NORMAL target type");
return generic_stub;
}
@@ -1494,12 +1501,14 @@
if (!map_added) {
// If the miss wasn't due to an unseen map, a polymorphic stub
// won't help, use the generic stub.
+ TRACE_GENERIC_IC("KeyedIC", "same map added twice");
return generic_stub;
}
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
+ TRACE_GENERIC_IC("KeyedIC", "max polymorph exceeded");
return generic_stub;
}
@@ -1638,7 +1647,8 @@
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
- Handle<Object> result = SetElement(receiver, index, value, strict_mode);
+ Handle<Object> result =
+ JSObject::SetElement(receiver, index, value, strict_mode);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
return *value;
}
@@ -1685,6 +1695,8 @@
}
stub = ComputeStub(receiver, stub_kind, strict_mode, stub);
}
+ } else {
+ TRACE_GENERIC_IC("KeyedStoreIC", "force generic");
}
}
if (!stub.is_null()) set_target(*stub);
@@ -1874,12 +1886,19 @@
NoHandleAllocation nha;
ASSERT(args.length() == 2);
- JSObject* receiver = JSObject::cast(args[0]);
+ JSArray* receiver = JSArray::cast(args[0]);
Object* len = args[1];
// The generated code should filter out non-Smis before we get here.
ASSERT(len->IsSmi());
+#ifdef DEBUG
+ // The length property has to be a writable callback property.
+ LookupResult debug_lookup(isolate);
+ receiver->LocalLookup(isolate->heap()->length_symbol(), &debug_lookup);
+ ASSERT(debug_lookup.type() == CALLBACKS && !debug_lookup.IsReadOnly());
+#endif
+
Object* result;
{ MaybeObject* maybe_result = receiver->SetElementsLength(len);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -2315,6 +2334,7 @@
case SMIS: return "SMIS";
case HEAP_NUMBERS: return "HEAP_NUMBERS";
case OBJECTS: return "OBJECTS";
+ case KNOWN_OBJECTS: return "KNOWN_OBJECTS";
case SYMBOLS: return "SYMBOLS";
case STRINGS: return "STRINGS";
case GENERIC: return "GENERIC";
@@ -2329,19 +2349,38 @@
bool has_inlined_smi_code,
Handle<Object> x,
Handle<Object> y) {
- if (!has_inlined_smi_code && state != UNINITIALIZED && state != SYMBOLS) {
- return GENERIC;
+ switch (state) {
+ case UNINITIALIZED:
+ if (x->IsSmi() && y->IsSmi()) return SMIS;
+ if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
+ if (!Token::IsEqualityOp(op_)) return GENERIC;
+ if (x->IsSymbol() && y->IsSymbol()) return SYMBOLS;
+ if (x->IsString() && y->IsString()) return STRINGS;
+ if (x->IsJSObject() && y->IsJSObject()) {
+ if (Handle<JSObject>::cast(x)->map() ==
+ Handle<JSObject>::cast(y)->map() &&
+ Token::IsEqualityOp(op_)) {
+ return KNOWN_OBJECTS;
+ } else {
+ return OBJECTS;
+ }
+ }
+ return GENERIC;
+ case SMIS:
+ return has_inlined_smi_code && x->IsNumber() && y->IsNumber()
+ ? HEAP_NUMBERS
+ : GENERIC;
+ case SYMBOLS:
+ ASSERT(Token::IsEqualityOp(op_));
+ return x->IsString() && y->IsString() ? STRINGS : GENERIC;
+ case HEAP_NUMBERS:
+ case STRINGS:
+ case OBJECTS:
+ case KNOWN_OBJECTS:
+ case GENERIC:
+ return GENERIC;
}
- if (state == UNINITIALIZED && x->IsSmi() && y->IsSmi()) return SMIS;
- if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
- x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
- if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
- if (state == UNINITIALIZED &&
- x->IsSymbol() && y->IsSymbol()) return SYMBOLS;
- if ((state == UNINITIALIZED || state == SYMBOLS) &&
- x->IsString() && y->IsString()) return STRINGS;
- if (state == UNINITIALIZED &&
- x->IsJSObject() && y->IsJSObject()) return OBJECTS;
+ UNREACHABLE();
return GENERIC;
}
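The rewritten TargetState makes the compare-IC transition lattice explicit. A few worked transitions, with hypothetical operand sequences:

// UNINITIALIZED -> SMIS            e.g. 1 == 2 (both smis)
// SMIS -> HEAP_NUMBERS             e.g. 1.5 == 2, and smi code was inlined
// SMIS -> GENERIC                  e.g. 1.5 == 2, no inlined smi code
// UNINITIALIZED -> KNOWN_OBJECTS   e.g. a == b, same map, equality op
// HEAP_NUMBERS, STRINGS, OBJECTS, KNOWN_OBJECTS -> GENERIC (terminal)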
diff --git a/src/ic.h b/src/ic.h
index 81aa6b7..94e83dc 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -91,10 +91,13 @@
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
IC(FrameDepth depth, Isolate* isolate);
+ virtual ~IC() {}
// Get the call-site target; used for determining the state.
- Code* target() { return GetTargetAtAddress(address()); }
- inline Address address();
+ Code* target() const { return GetTargetAtAddress(address()); }
+ inline Address address() const;
+
+ virtual bool IsGeneric() const { return false; }
// Compute the current IC state based on the target stub, receiver and name.
static State StateFrom(Code* target, Object* receiver, Object* name);
@@ -139,13 +142,15 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
// Computes the address in the original code when the code running is
// containing break points (calls to DebugBreakXXX builtins).
- Address OriginalCodeAddress();
+ Address OriginalCodeAddress() const;
#endif
// Set the call-site target.
void set_target(Code* code) { SetTargetAtAddress(address(), code); }
#ifdef DEBUG
+ char TransitionMarkFromState(IC::State state);
+
void TraceIC(const char* type,
Handle<Object> name,
State old_state,
@@ -452,6 +457,10 @@
bool is_js_array,
ElementsKind elements_kind);
+ virtual bool IsGeneric() const {
+ return target() == *generic_stub();
+ }
+
protected:
virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
@@ -477,7 +486,7 @@
Handle<Code> megamorphic_stub() {
return isolate()->builtins()->KeyedLoadIC_Generic();
}
- Handle<Code> generic_stub() {
+ Handle<Code> generic_stub() const {
return isolate()->builtins()->KeyedLoadIC_Generic();
}
Handle<Code> pre_monomorphic_stub() {
@@ -595,6 +604,11 @@
bool is_js_array,
ElementsKind elements_kind);
+ virtual bool IsGeneric() const {
+ return target() == *generic_stub() ||
+ target() == *generic_stub_strict();
+ }
+
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
@@ -632,10 +646,10 @@
Handle<Code> megamorphic_stub_strict() {
return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
}
- Handle<Code> generic_stub() {
+ Handle<Code> generic_stub() const {
return isolate()->builtins()->KeyedStoreIC_Generic();
}
- Handle<Code> generic_stub_strict() {
+ Handle<Code> generic_stub_strict() const {
return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
}
Handle<Code> non_strict_arguments_stub() {
@@ -710,6 +724,7 @@
SYMBOLS,
STRINGS,
OBJECTS,
+ KNOWN_OBJECTS,
GENERIC
};
diff --git a/src/incremental-marking-inl.h b/src/incremental-marking-inl.h
index 7ae2c99..3e3d6c4 100644
--- a/src/incremental-marking-inl.h
+++ b/src/incremental-marking-inl.h
@@ -95,7 +95,7 @@
ASSERT(IsMarking());
Marking::BlackToGrey(mark_bit);
int obj_size = obj->Size();
- MemoryChunk::IncrementLiveBytes(obj->address(), -obj_size);
+ MemoryChunk::IncrementLiveBytesFromGC(obj->address(), -obj_size);
bytes_scanned_ -= obj_size;
int64_t old_bytes_rescanned = bytes_rescanned_;
bytes_rescanned_ = old_bytes_rescanned + obj_size;
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index c866346..6248524 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -225,8 +225,8 @@
MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
if (mark_bit.data_only()) {
if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
- MemoryChunk::IncrementLiveBytes(heap_object->address(),
- heap_object->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
+ heap_object->Size());
}
} else if (Marking::IsWhite(mark_bit)) {
incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
@@ -263,8 +263,8 @@
MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
if (mark_bit.data_only()) {
if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
- MemoryChunk::IncrementLiveBytes(heap_object->address(),
- heap_object->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
+ heap_object->Size());
}
} else {
if (Marking::IsWhite(mark_bit)) {
@@ -418,7 +418,7 @@
static void PatchIncrementalMarkingRecordWriteStubs(
Heap* heap, RecordWriteStub::Mode mode) {
- NumberDictionary* stubs = heap->code_stubs();
+ UnseededNumberDictionary* stubs = heap->code_stubs();
int capacity = stubs->Capacity();
for (int i = 0; i < capacity; i++) {
@@ -491,8 +491,8 @@
HeapObject* heap_obj = HeapObject::cast(obj);
MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
if (Marking::IsBlack(mark_bit)) {
- MemoryChunk::IncrementLiveBytes(heap_obj->address(),
- -heap_obj->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
+ -heap_obj->Size());
}
Marking::AnyToGrey(mark_bit);
}
@@ -658,7 +658,7 @@
MarkBit mark_bit = Marking::MarkBitFrom(obj);
ASSERT(!Marking::IsBlack(mark_bit));
Marking::MarkBlack(mark_bit);
- MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
}
state_ = COMPLETE;
if (FLAG_trace_incremental_marking) {
@@ -671,8 +671,8 @@
if (FLAG_cleanup_code_caches_at_gc) {
PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
- MemoryChunk::IncrementLiveBytes(poly_cache->address(),
- PolymorphicCodeCache::kSize);
+ MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
+ PolymorphicCodeCache::kSize);
}
Object* context = heap_->global_contexts_list();
@@ -685,7 +685,7 @@
MarkBit mark_bit = Marking::MarkBitFrom(cache);
if (Marking::IsGrey(mark_bit)) {
Marking::GreyToBlack(mark_bit);
- MemoryChunk::IncrementLiveBytes(cache->address(), cache->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
}
}
context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
@@ -748,7 +748,9 @@
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Complete (normal).\n");
}
- heap_->isolate()->stack_guard()->RequestGC();
+ if (!heap_->idle_notification_will_schedule_next_gc()) {
+ heap_->isolate()->stack_guard()->RequestGC();
+ }
}
@@ -776,8 +778,7 @@
}
if (state_ == SWEEPING) {
- if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) &&
- heap_->old_data_space()->AdvanceSweeper(bytes_to_process)) {
+ if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
bytes_scanned_ = 0;
StartMarking(PREVENT_COMPACTION);
}
@@ -818,7 +819,7 @@
SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
(obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
Marking::MarkBlack(obj_mark_bit);
- MemoryChunk::IncrementLiveBytes(obj->address(), size);
+ MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
}
if (marking_deque_.IsEmpty()) MarkingComplete();
}
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
index b9d83ee..4f8fa6b 100644
--- a/src/incremental-marking.h
+++ b/src/incremental-marking.h
@@ -64,6 +64,8 @@
inline bool IsMarkingIncomplete() { return state() == MARKING; }
+ inline bool IsComplete() { return state() == COMPLETE; }
+
bool WorthActivating();
void Start();
@@ -102,6 +104,7 @@
void OldSpaceStep(intptr_t allocated) {
Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
}
+
void Step(intptr_t allocated);
inline void RestartIfNotMarking() {
diff --git a/src/inspector.cc b/src/inspector.cc
index 8fb80f1..833d338 100644
--- a/src/inspector.cc
+++ b/src/inspector.cc
@@ -38,11 +38,11 @@
//============================================================================
// The Inspector.
-void Inspector::DumpObjectType(FILE* out, Object *obj, bool print_more) {
+void Inspector::DumpObjectType(FILE* out, Object* obj, bool print_more) {
// Dump the object pointer.
OS::FPrint(out, "%p:", reinterpret_cast<void*>(obj));
if (obj->IsHeapObject()) {
- HeapObject *hobj = HeapObject::cast(obj);
+ HeapObject* hobj = HeapObject::cast(obj);
OS::FPrint(out, " size %d :", hobj->Size());
}
diff --git a/src/inspector.h b/src/inspector.h
index e328bcd..6962e21 100644
--- a/src/inspector.h
+++ b/src/inspector.h
@@ -41,14 +41,14 @@
class Inspector {
public:
- static void DumpObjectType(FILE* out, Object *obj, bool print_more);
- static void DumpObjectType(FILE* out, Object *obj) {
+ static void DumpObjectType(FILE* out, Object* obj, bool print_more);
+ static void DumpObjectType(FILE* out, Object* obj) {
DumpObjectType(out, obj, false);
}
- static void DumpObjectType(Object *obj, bool print_more) {
+ static void DumpObjectType(Object* obj, bool print_more) {
DumpObjectType(stdout, obj, print_more);
}
- static void DumpObjectType(Object *obj) {
+ static void DumpObjectType(Object* obj) {
DumpObjectType(stdout, obj, false);
}
};
diff --git a/src/isolate.cc b/src/isolate.cc
index c235a23..82af337 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -570,7 +570,7 @@
frame->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
// Create a JSObject to hold the information for the StackFrame.
- Handle<JSObject> stackFrame = factory()->NewJSObject(object_function());
+ Handle<JSObject> stack_frame = factory()->NewJSObject(object_function());
Handle<JSFunction> fun = frames[i].function();
Handle<Script> script(Script::cast(fun->shared()->script()));
@@ -591,16 +591,24 @@
// tag.
column_offset += script->column_offset()->value();
}
- SetLocalPropertyNoThrow(stackFrame, column_key,
- Handle<Smi>(Smi::FromInt(column_offset + 1)));
+ CHECK_NOT_EMPTY_HANDLE(
+ this,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ stack_frame, column_key,
+ Handle<Smi>(Smi::FromInt(column_offset + 1)), NONE));
}
- SetLocalPropertyNoThrow(stackFrame, line_key,
- Handle<Smi>(Smi::FromInt(line_number + 1)));
+ CHECK_NOT_EMPTY_HANDLE(
+ this,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ stack_frame, line_key,
+ Handle<Smi>(Smi::FromInt(line_number + 1)), NONE));
}
if (options & StackTrace::kScriptName) {
Handle<Object> script_name(script->name(), this);
- SetLocalPropertyNoThrow(stackFrame, script_key, script_name);
+ CHECK_NOT_EMPTY_HANDLE(this,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ stack_frame, script_key, script_name, NONE));
}
if (options & StackTrace::kScriptNameOrSourceURL) {
@@ -616,8 +624,10 @@
if (caught_exception) {
result = factory()->undefined_value();
}
- SetLocalPropertyNoThrow(stackFrame, script_name_or_source_url_key,
- result);
+ CHECK_NOT_EMPTY_HANDLE(this,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ stack_frame, script_name_or_source_url_key,
+ result, NONE));
}
if (options & StackTrace::kFunctionName) {
@@ -625,23 +635,30 @@
if (fun_name->ToBoolean()->IsFalse()) {
fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
}
- SetLocalPropertyNoThrow(stackFrame, function_key, fun_name);
+ CHECK_NOT_EMPTY_HANDLE(this,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ stack_frame, function_key, fun_name, NONE));
}
if (options & StackTrace::kIsEval) {
int type = Smi::cast(script->compilation_type())->value();
Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
factory()->true_value() : factory()->false_value();
- SetLocalPropertyNoThrow(stackFrame, eval_key, is_eval);
+ CHECK_NOT_EMPTY_HANDLE(this,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ stack_frame, eval_key, is_eval, NONE));
}
if (options & StackTrace::kIsConstructor) {
Handle<Object> is_constructor = (frames[i].is_constructor()) ?
factory()->true_value() : factory()->false_value();
- SetLocalPropertyNoThrow(stackFrame, constructor_key, is_constructor);
+ CHECK_NOT_EMPTY_HANDLE(this,
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ stack_frame, constructor_key,
+ is_constructor, NONE));
}
- FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
+ FixedArray::cast(stack_trace->elements())->set(frames_seen, *stack_frame);
frames_seen++;
}
it.Advance();
@@ -1202,7 +1219,7 @@
ASSERT(has_pending_exception());
PropagatePendingExceptionToExternalTryCatch();
- // Allways reschedule out of memory exceptions.
+ // Always reschedule out of memory exceptions.
if (!is_out_of_memory()) {
bool is_termination_exception =
pending_exception() == heap_.termination_exception();
@@ -1437,7 +1454,8 @@
has_installed_extensions_(false),
string_tracker_(NULL),
regexp_stack_(NULL),
- embedder_data_(NULL) {
+ embedder_data_(NULL),
+ context_exit_happened_(false) {
TRACE_ISOLATE(constructor);
memset(isolate_addresses_, 0,
@@ -1734,10 +1752,10 @@
regexp_stack_->isolate_ = this;
// Enable logging before setting up the heap
- logger_->Setup();
+ logger_->SetUp();
- CpuProfiler::Setup();
- HeapProfiler::Setup();
+ CpuProfiler::SetUp();
+ HeapProfiler::SetUp();
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
@@ -1754,10 +1772,10 @@
stack_guard_.InitThread(lock);
}
- // Setup the object heap.
+ // SetUp the object heap.
const bool create_heap_objects = (des == NULL);
- ASSERT(!heap_.HasBeenSetup());
- if (!heap_.Setup(create_heap_objects)) {
+ ASSERT(!heap_.HasBeenSetUp());
+ if (!heap_.SetUp(create_heap_objects)) {
V8::SetFatalError();
return false;
}
@@ -1765,7 +1783,7 @@
InitializeThreadLocal();
bootstrapper_->Initialize(create_heap_objects);
- builtins_.Setup(create_heap_objects);
+ builtins_.SetUp(create_heap_objects);
// Only preallocate on the first initialization.
if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
@@ -1784,7 +1802,7 @@
}
#ifdef ENABLE_DEBUGGER_SUPPORT
- debug_->Setup(create_heap_objects);
+ debug_->SetUp(create_heap_objects);
#endif
stub_cache_->Initialize(create_heap_objects);
@@ -1805,7 +1823,7 @@
deoptimizer_data_ = new DeoptimizerData;
runtime_profiler_ = new RuntimeProfiler(this);
- runtime_profiler_->Setup();
+ runtime_profiler_->SetUp();
// If we are deserializing, log non-function code objects and compiled
// functions found in the snapshot.
diff --git a/src/isolate.h b/src/isolate.h
index 2ea9b80..7e6807b 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -106,15 +106,28 @@
// of handles to the actual constants.
typedef ZoneList<Handle<Object> > ZoneObjectList;
-#define RETURN_IF_SCHEDULED_EXCEPTION(isolate) \
- if (isolate->has_scheduled_exception()) \
- return isolate->PromoteScheduledException()
+#define RETURN_IF_SCHEDULED_EXCEPTION(isolate) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ if (__isolate__->has_scheduled_exception()) { \
+ return __isolate__->PromoteScheduledException(); \
+ } \
+ } while (false)
#define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value) \
- if (call.is_null()) { \
- ASSERT(isolate->has_pending_exception()); \
- return value; \
- }
+ do { \
+ if ((call).is_null()) { \
+ ASSERT((isolate)->has_pending_exception()); \
+ return (value); \
+ } \
+ } while (false)
+
+#define CHECK_NOT_EMPTY_HANDLE(isolate, call) \
+ do { \
+ ASSERT(!(isolate)->has_pending_exception()); \
+ CHECK(!(call).is_null()); \
+ CHECK(!(isolate)->has_pending_exception()); \
+ } while (false)
#define RETURN_IF_EMPTY_HANDLE(isolate, call) \
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception())
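
The do { ... } while (false) rewrite above is the standard fix for two macro hazards: a multi-statement body that silently splits apart when the macro is used as an unbraced if body, and an argument expression that gets evaluated more than once (hence the local __isolate__ copy). CHECK_NOT_EMPTY_HANDLE additionally asserts that no exception is pending on either side of the call, which is what the reworked stack-trace code in isolate.cc above relies on. A minimal sketch of the first hazard, with hypothetical names:

    #include <cstdio>

    // Unhygienic: expands to two statements.
    #define BAD_RETURN_IF_NEGATIVE(x) \
      printf("checking\n");           \
      if ((x) < 0) return

    // Hygienic: expands to exactly one statement.
    #define RETURN_IF_NEGATIVE(x) \
      do {                        \
        printf("checking\n");     \
        if ((x) < 0) return;      \
      } while (false)

    void Demo(bool enabled, int v) {
      if (enabled)
        RETURN_IF_NEGATIVE(v);  // With the BAD_ variant, only the printf is
      else                      // guarded and the else binds to the macro's
        printf("disabled\n");   // internal if, which is not what was written.
    }

    int main() {
      Demo(true, -1);
      Demo(false, 0);
      return 0;
    }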
@@ -245,7 +258,7 @@
#endif
#endif // USE_SIMULATOR
- Address js_entry_sp_; // the stack pointer of the bottom js entry frame
+ Address js_entry_sp_; // the stack pointer of the bottom JS entry frame
Address external_callback_; // the external callback we're currently in
StateTag current_vm_state_;
@@ -472,7 +485,7 @@
bool IsDefaultIsolate() const { return this == default_isolate_; }
// Ensures that process-wide resources and the default isolate have been
- // allocated. It is only necessary to call this method in rare casses, for
+ // allocated. It is only necessary to call this method in rare cases, for
// example if you are using V8 from within the body of a static initializer.
// Safe to call multiple times.
static void EnsureDefaultIsolate();
@@ -622,7 +635,7 @@
void* formal_count_address() { return &thread_local_top_.formal_count_; }
// Returns the global object of the current context. It could be
- // a builtin object, or a js global object.
+ // a builtin object, or a JS global object.
Handle<GlobalObject> global() {
return Handle<GlobalObject>(context()->global());
}
@@ -1010,6 +1023,13 @@
thread_local_top_.top_lookup_result_ = top;
}
+ bool context_exit_happened() {
+ return context_exit_happened_;
+ }
+ void set_context_exit_happened(bool context_exit_happened) {
+ context_exit_happened_ = context_exit_happened;
+ }
+
private:
Isolate();
@@ -1051,6 +1071,7 @@
Isolate* previous_isolate;
EntryStackItem* previous_item;
+ private:
DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
};
@@ -1175,6 +1196,10 @@
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
void* embedder_data_;
+ // The garbage collector should be a little more aggressive when it knows
+ // that a context was recently exited.
+ bool context_exit_happened_;
+
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
bool simulator_initialized_;
diff --git a/src/json-parser.h b/src/json-parser.h
index ca796a6..d22cd0d 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -130,7 +130,7 @@
// An object literal is a squiggly-braced and comma separated sequence
// (possibly empty) of key/value pairs, where the key is a JSON string
// literal, the value is a JSON value, and the two are separated by a colon.
- // A JSON array dosn't allow numbers and identifiers as keys, like a
+ // A JSON array doesn't allow numbers and identifiers as keys, like a
// JavaScript array.
Handle<Object> ParseJsonObject();
@@ -177,7 +177,7 @@
// Set initial position right before the string.
position_ = -1;
- // Advance to the first character (posibly EOS)
+ // Advance to the first character (possibly EOS)
AdvanceSkipWhitespace();
Handle<Object> result = ParseJsonValue();
if (result.is_null() || c0_ != kEndOfString) {
@@ -303,11 +303,12 @@
uint32_t index;
if (key->AsArrayIndex(&index)) {
- SetOwnElement(json_object, index, value, kNonStrictMode);
+ JSObject::SetOwnElement(json_object, index, value, kNonStrictMode);
} else if (key->Equals(isolate()->heap()->Proto_symbol())) {
SetPrototype(json_object, value);
} else {
- SetLocalPropertyIgnoreAttributes(json_object, key, value, NONE);
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ json_object, key, value, NONE);
}
} while (MatchSkipWhiteSpace(','));
if (c0_ != '}') {
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 18ff257..18b86ba 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -704,7 +704,7 @@
// the virtualized backtrack stack and some register changes. When a node is
// to be emitted it can flush the Trace or update it. Flushing the Trace
// will emit code to bring the actual state into line with the virtual state.
-// Avoiding flushing the state can postpone some work (eg updates of capture
+// Avoiding flushing the state can postpone some work (e.g. updates of capture
// registers). Postponing work can save time when executing the regular
// expression since it may be found that the work never has to be done as a
// failure to match can occur. In addition it is much faster to jump to a
@@ -2636,7 +2636,7 @@
TextElement elm = elms_->at(i);
if (elm.type == TextElement::CHAR_CLASS) {
RegExpCharacterClass* cc = elm.data.u_char_class;
- // None of the standard character classses is different in the case
+ // None of the standard character classes is different in the case
// independent case and it slows us down if we don't know that.
if (cc->is_standard()) continue;
ZoneList<CharacterRange>* ranges = cc->ranges();
diff --git a/src/jsregexp.h b/src/jsregexp.h
index df110d1..42c76fb 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -35,9 +35,11 @@
namespace v8 {
namespace internal {
-
+class NodeVisitor;
+class RegExpCompiler;
class RegExpMacroAssembler;
-
+class RegExpNode;
+class RegExpTree;
class RegExpImpl {
public:
@@ -634,7 +636,7 @@
static const int kNodeIsTooComplexForGreedyLoops = -1;
virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
Label* label() { return &label_; }
- // If non-generic code is generated for a node (ie the node is not at the
+ // If non-generic code is generated for a node (i.e. the node is not at the
// start of the trace) then it cannot be reused. This variable sets a limit
// on how often we allow that to happen before we insist on starting a new
// trace and generating generic code for a node that can be reused by flushing
diff --git a/src/list.h b/src/list.h
index 57504e0..adddea4 100644
--- a/src/list.h
+++ b/src/list.h
@@ -67,7 +67,7 @@
// Returns a reference to the element at index i. This reference is
// not safe to use after operations that can change the list's
- // backing store (eg, Add).
+ // backing store (e.g. Add).
inline T& operator[](int i) const {
ASSERT(0 <= i);
ASSERT(i < length_);
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index c4d8b1e..0e5c992 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -49,13 +49,13 @@
#define DEFINE_OPERAND_CACHE(name, type) \
name name::cache[name::kNumCachedOperands]; \
- void name::SetupCache() { \
+ void name::SetUpCache() { \
for (int i = 0; i < kNumCachedOperands; i++) { \
cache[i].ConvertTo(type, i); \
} \
} \
static bool name##_initialize() { \
- name::SetupCache(); \
+ name::SetUpCache(); \
return true; \
} \
static bool name##_cache_initialized = name##_initialize();
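
The `name##_cache_initialized` line is the classic static-initializer trick: a namespace-scope variable whose initializer has side effects forces SetUpCache() to run during static initialization, before main(). A minimal sketch of the idiom, with hypothetical names:

    #include <cstdio>

    static int cache[4];

    static bool InitializeCache() {
      for (int i = 0; i < 4; i++) cache[i] = i * i;
      return true;
    }

    // The initializer runs during static initialization, so the cache is
    // already populated by the time main() starts.
    static bool cache_initialized = InitializeCache();

    int main() {
      printf("%d\n", cache[3]);  // Prints 9.
      return 0;
    }

Within one translation unit this is reliable; across translation units the initialization order is unspecified, which is why the cache and its initializer live together in the macro expansion.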
@@ -161,9 +161,8 @@
next_(NULL),
current_interval_(NULL),
last_processed_use_(NULL),
- spill_start_index_(kMaxInt) {
- spill_operand_ = new LUnallocated(LUnallocated::IGNORE);
-}
+ spill_operand_(new LOperand()),
+ spill_start_index_(kMaxInt) { }
void LiveRange::set_assigned_register(int reg, RegisterKind register_kind) {
@@ -184,14 +183,15 @@
bool LiveRange::HasAllocatedSpillOperand() const {
- return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
+ ASSERT(spill_operand_ != NULL);
+ return !spill_operand_->IsIgnored();
}
void LiveRange::SetSpillOperand(LOperand* operand) {
ASSERT(!operand->IsUnallocated());
ASSERT(spill_operand_ != NULL);
- ASSERT(spill_operand_->IsUnallocated());
+ ASSERT(spill_operand_->IsIgnored());
spill_operand_->ConvertTo(operand->kind(), operand->index());
}
@@ -1643,7 +1643,7 @@
int LAllocator::max_initial_value_ids() {
- return LUnallocated::kMaxVirtualRegisters / 32;
+ return LUnallocated::kMaxVirtualRegisters / 16;
}
diff --git a/src/lithium.cc b/src/lithium.cc
index 31b1698..5beca33 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -36,6 +36,7 @@
LUnallocated* unalloc = NULL;
switch (kind()) {
case INVALID:
+ stream->Add("(0)");
break;
case UNALLOCATED:
unalloc = LUnallocated::cast(this);
@@ -70,9 +71,6 @@
case LUnallocated::ANY:
stream->Add("(-)");
break;
- case LUnallocated::IGNORE:
- stream->Add("(0)");
- break;
}
break;
case CONSTANT_OPERAND:
diff --git a/src/lithium.h b/src/lithium.h
index b605eb9..c0d7d07 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -59,6 +59,7 @@
bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
bool IsArgument() const { return kind() == ARGUMENT; }
bool IsUnallocated() const { return kind() == UNALLOCATED; }
+ bool IsIgnored() const { return kind() == INVALID; }
bool Equals(LOperand* other) const { return value_ == other->value_; }
int VirtualRegister();
@@ -89,8 +90,7 @@
FIXED_SLOT,
MUST_HAVE_REGISTER,
WRITABLE_REGISTER,
- SAME_AS_FIRST_INPUT,
- IGNORE
+ SAME_AS_FIRST_INPUT
};
// Lifetime of operand inside the instruction.
@@ -121,9 +121,9 @@
// The superclass has a KindField. Some policies have a signed fixed
// index in the upper bits.
- static const int kPolicyWidth = 4;
+ static const int kPolicyWidth = 3;
static const int kLifetimeWidth = 1;
- static const int kVirtualRegisterWidth = 17;
+ static const int kVirtualRegisterWidth = 18;
static const int kPolicyShift = kKindFieldWidth;
static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
@@ -143,12 +143,10 @@
kVirtualRegisterWidth> {
};
- static const int kMaxVirtualRegisters = 1 << (kVirtualRegisterWidth + 1);
+ static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth;
static const int kMaxFixedIndex = 63;
static const int kMinFixedIndex = -64;
- bool HasIgnorePolicy() const { return policy() == IGNORE; }
- bool HasNoPolicy() const { return policy() == NONE; }
bool HasAnyPolicy() const {
return policy() == ANY;
}
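
The width changes above are a straight bit-for-bit trade: dropping IGNORE leaves eight policies (counting NONE, ANY, and the FIXED_* members not shown in this hunk), exactly a 3-bit code space, and the freed bit widens the virtual-register field from 17 to 18 bits. Note that kMaxVirtualRegisters is numerically unchanged: the old 1 << (kVirtualRegisterWidth + 1) expression claimed a bit the 17-bit field did not actually have, whereas the new field genuinely holds 18 bits. That is also why the companion change in lithium-allocator.cc from kMaxVirtualRegisters / 32 to / 16 really doubles max_initial_value_ids, from 8192 to 16384. A sketch of the budget (kKindFieldWidth assumed to be 3, as elsewhere in lithium.h):

    // Bit budget for LUnallocated's 32-bit value word after this change.
    const int kKindFieldWidth       = 3;   // Operand kind (assumed, as in lithium.h).
    const int kPolicyWidth          = 3;   // 8 policies now fit exactly in 3 bits.
    const int kLifetimeWidth        = 1;
    const int kVirtualRegisterWidth = 18;  // Gains the bit freed by IGNORE.

    // 3 + 3 + 1 + 18 = 25 bits; the signed fixed index (-64..63, 7 bits)
    // fills the remaining upper bits: 25 + 7 = 32.
    const int kMaxVirtualRegisters  = 1 << kVirtualRegisterWidth;  // 262144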
@@ -234,9 +232,7 @@
}
bool IsIgnored() const {
- return destination_ != NULL &&
- destination_->IsUnallocated() &&
- LUnallocated::cast(destination_)->HasIgnorePolicy();
+ return destination_ != NULL && destination_->IsIgnored();
}
// We clear both operands to indicate move that's been eliminated.
@@ -265,7 +261,7 @@
return reinterpret_cast<LConstantOperand*>(op);
}
- static void SetupCache();
+ static void SetUpCache();
private:
static const int kNumCachedOperands = 128;
@@ -300,7 +296,7 @@
return reinterpret_cast<LStackSlot*>(op);
}
- static void SetupCache();
+ static void SetUpCache();
private:
static const int kNumCachedOperands = 128;
@@ -324,7 +320,7 @@
return reinterpret_cast<LDoubleStackSlot*>(op);
}
- static void SetupCache();
+ static void SetUpCache();
private:
static const int kNumCachedOperands = 128;
@@ -348,7 +344,7 @@
return reinterpret_cast<LRegister*>(op);
}
- static void SetupCache();
+ static void SetUpCache();
private:
static const int kNumCachedOperands = 16;
@@ -372,7 +368,7 @@
return reinterpret_cast<LDoubleRegister*>(op);
}
- static void SetupCache();
+ static void SetUpCache();
private:
static const int kNumCachedOperands = 16;
@@ -443,12 +439,14 @@
class LEnvironment: public ZoneObject {
public:
LEnvironment(Handle<JSFunction> closure,
+ bool is_arguments_adaptor,
int ast_id,
int parameter_count,
int argument_count,
int value_count,
LEnvironment* outer)
: closure_(closure),
+ is_arguments_adaptor_(is_arguments_adaptor),
arguments_stack_height_(argument_count),
deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
translation_index_(-1),
@@ -505,8 +503,11 @@
void PrintTo(StringStream* stream);
+ bool is_arguments_adaptor() const { return is_arguments_adaptor_; }
+
private:
Handle<JSFunction> closure_;
+ bool is_arguments_adaptor_;
int arguments_stack_height_;
int deoptimization_index_;
int translation_index_;
@@ -523,8 +524,6 @@
LOperand** spilled_double_registers_;
LEnvironment* outer_;
-
- friend class LCodegen;
};
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index c94a3ee..abfb0f6 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -581,7 +581,7 @@
// children of unchanged functions are ignored.
function MarkChangedFunctions(code_info_tree, chunks) {
- // A convenient interator over diff chunks that also translates
+ // A convenient iterator over diff chunks that also translates
// positions from old to new in a current non-changed part of script.
var chunk_it = new function() {
var chunk_index = 0;
diff --git a/src/liveedit.cc b/src/liveedit.cc
index eb183da..5ff8ff9 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -54,7 +54,7 @@
// are element setters causing exceptions and the debugger context has none
// of these.
Handle<Object> no_failure;
- no_failure = SetElement(object, index, value, kNonStrictMode);
+ no_failure = JSObject::SetElement(object, index, value, kNonStrictMode);
ASSERT(!no_failure.is_null());
USE(no_failure);
}
@@ -1228,7 +1228,7 @@
V8::FatalProcessOutOfMemory("RelocInfoBuffer::GrowBuffer");
}
- // Setup new buffer.
+ // Set up new buffer.
byte* new_buffer = NewArray<byte>(new_buffer_size);
// Copy the data.
diff --git a/src/liveobjectlist-inl.h b/src/liveobjectlist-inl.h
index f742de3..2bc2296 100644
--- a/src/liveobjectlist-inl.h
+++ b/src/liveobjectlist-inl.h
@@ -59,7 +59,7 @@
}
-void LiveObjectList::ProcessNonLive(HeapObject *obj) {
+void LiveObjectList::ProcessNonLive(HeapObject* obj) {
// Only do work if we have at least one list to process.
if (last()) DoProcessNonLive(obj);
}
@@ -93,7 +93,7 @@
template <typename T>
inline LiveObjectList::Element*
LiveObjectList::FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key) {
- LiveObjectList *lol = last();
+ LiveObjectList* lol = last();
while (lol != NULL) {
Element* elements = lol->elements_;
for (int i = 0; i < lol->obj_count_; i++) {
diff --git a/src/liveobjectlist.cc b/src/liveobjectlist.cc
index 408e2a3..1aabc59 100644
--- a/src/liveobjectlist.cc
+++ b/src/liveobjectlist.cc
@@ -165,7 +165,7 @@
}
-bool IsOfType(LiveObjectType type, HeapObject *obj) {
+bool IsOfType(LiveObjectType type, HeapObject* obj) {
// Note: there are types that are more general (e.g. JSObject) that would
// have passed the Is##type_() test for more specialized types (e.g.
// JSFunction). If we find a more specialized match but we're looking for
@@ -211,7 +211,7 @@
}
-static bool InSpace(AllocationSpace space, HeapObject *heap_obj) {
+static bool InSpace(AllocationSpace space, HeapObject* heap_obj) {
Heap* heap = ISOLATE->heap();
if (space != LO_SPACE) {
return heap->InSpace(heap_obj, space);
@@ -462,7 +462,7 @@
char prev_ch = 0;
while (*dst != '\0') {
char ch = *src++;
- // We will treat non-ascii chars as '?'.
+ // We will treat non-ASCII chars as '?'.
if ((ch & 0x80) != 0) {
ch = '?';
}
@@ -498,7 +498,7 @@
length);
} else if (obj->IsString()) {
- String *str = String::cast(obj);
+ String* str = String::cast(obj);
// Only grab up to 160 chars in case they are double byte.
// We'll only dump 80 of them after we compact them.
const int kMaxCharToDump = 80;
@@ -842,7 +842,7 @@
bool found_root_;
bool found_weak_root_;
- LolFilter *filter_;
+ LolFilter* filter_;
};
@@ -857,8 +857,8 @@
// A summary writer for filling in a summary of lol lists and diffs.
class LolSummaryWriter: public SummaryWriter {
public:
- LolSummaryWriter(LiveObjectList *older_lol,
- LiveObjectList *newer_lol)
+ LolSummaryWriter(LiveObjectList* older_lol,
+ LiveObjectList* newer_lol)
: older_(older_lol), newer_(newer_lol) {
}
@@ -944,7 +944,7 @@
int LiveObjectList::GetTotalObjCountAndSize(int* size_p) {
int size = 0;
int count = 0;
- LiveObjectList *lol = this;
+ LiveObjectList* lol = this;
do {
// Only compute total size if requested, i.e. when size_p is not null.
if (size_p != NULL) {
@@ -1183,7 +1183,7 @@
// only time we'll actually delete the lol is when we Reset() or if the lol is
// invisible, and its element count reaches 0.
bool LiveObjectList::Delete(int id) {
- LiveObjectList *lol = last();
+ LiveObjectList* lol = last();
while (lol != NULL) {
if (lol->id() == id) {
break;
@@ -1246,8 +1246,8 @@
newer_id = temp;
}
- LiveObjectList *newer_lol = FindLolForId(newer_id, last());
- LiveObjectList *older_lol = FindLolForId(older_id, newer_lol);
+ LiveObjectList* newer_lol = FindLolForId(newer_id, last());
+ LiveObjectList* older_lol = FindLolForId(older_id, newer_lol);
// If the id is defined, and we can't find a LOL for it, then we have an
// invalid id.
@@ -1365,8 +1365,8 @@
newer_id = temp;
}
- LiveObjectList *newer_lol = FindLolForId(newer_id, last());
- LiveObjectList *older_lol = FindLolForId(older_id, newer_lol);
+ LiveObjectList* newer_lol = FindLolForId(newer_id, last());
+ LiveObjectList* older_lol = FindLolForId(older_id, newer_lol);
// If the id is defined, and we can't find a LOL for it, then we have an
// invalid id.
@@ -1626,7 +1626,7 @@
// Deletes all captured lols.
void LiveObjectList::Reset() {
- LiveObjectList *lol = last();
+ LiveObjectList* lol = last();
// Just delete the last. Each lol will delete its prev automatically.
delete lol;
@@ -1715,8 +1715,8 @@
inline bool AddRootRetainerIfFound(const LolVisitor& visitor,
LolFilter* filter,
- LiveObjectSummary *summary,
- void (*SetRootFound)(LiveObjectSummary *s),
+ LiveObjectSummary* summary,
+ void (*SetRootFound)(LiveObjectSummary* s),
int start,
int dump_limit,
int* total_count,
@@ -1762,12 +1762,12 @@
}
-inline void SetFoundRoot(LiveObjectSummary *summary) {
+inline void SetFoundRoot(LiveObjectSummary* summary) {
summary->set_found_root();
}
-inline void SetFoundWeakRoot(LiveObjectSummary *summary) {
+inline void SetFoundWeakRoot(LiveObjectSummary* summary) {
summary->set_found_weak_root();
}
@@ -1779,7 +1779,7 @@
int dump_limit,
int* total_count,
LolFilter* filter,
- LiveObjectSummary *summary,
+ LiveObjectSummary* summary,
JSFunction* arguments_function,
Handle<Object> error) {
HandleScope scope;
@@ -2267,7 +2267,7 @@
}
-void LiveObjectList::DoProcessNonLive(HeapObject *obj) {
+void LiveObjectList::DoProcessNonLive(HeapObject* obj) {
// We should only be called if we have at least one lol to search.
ASSERT(last() != NULL);
Element* element = last()->Find(obj);
@@ -2284,7 +2284,7 @@
int count = lol->obj_count_;
for (int i = 0; i < count; i++) {
HeapObject** p = &elements[i].obj_;
- v->VisitPointer(reinterpret_cast<Object **>(p));
+ v->VisitPointer(reinterpret_cast<Object**>(p));
}
lol = lol->prev_;
}
@@ -2389,11 +2389,11 @@
PurgeDuplicates();
// After the GC, sweep away all free'd Elements and compact.
- LiveObjectList *prev = NULL;
- LiveObjectList *next = NULL;
+ LiveObjectList* prev = NULL;
+ LiveObjectList* next = NULL;
// Iterating from the youngest lol to the oldest lol.
- for (LiveObjectList *lol = last(); lol; lol = prev) {
+ for (LiveObjectList* lol = last(); lol; lol = prev) {
Element* elements = lol->elements_;
prev = lol->prev(); // Save the prev.
@@ -2446,7 +2446,7 @@
const int kMaxUnusedSpace = 64;
if (diff > kMaxUnusedSpace) { // Threshold for shrinking.
// Shrink the list.
- Element *new_elements = NewArray<Element>(new_count);
+ Element* new_elements = NewArray<Element>(new_count);
memcpy(new_elements, elements, new_count * sizeof(Element));
DeleteArray<Element>(elements);
diff --git a/src/liveobjectlist.h b/src/liveobjectlist.h
index 65470d7..1aa9196 100644
--- a/src/liveobjectlist.h
+++ b/src/liveobjectlist.h
@@ -77,7 +77,7 @@
inline static void GCEpilogue();
inline static void GCPrologue();
inline static void IterateElements(ObjectVisitor* v);
- inline static void ProcessNonLive(HeapObject *obj);
+ inline static void ProcessNonLive(HeapObject* obj);
inline static void UpdateReferencesForScavengeGC();
// Note: LOLs can be listed by calling Dump(0, <lol id>), and 2 LOLs can be
@@ -125,7 +125,7 @@
static void GCEpiloguePrivate();
static void IterateElementsPrivate(ObjectVisitor* v);
- static void DoProcessNonLive(HeapObject *obj);
+ static void DoProcessNonLive(HeapObject* obj);
static int CompareElement(const Element* a, const Element* b);
@@ -138,7 +138,7 @@
int dump_limit,
int* total_count,
LolFilter* filter,
- LiveObjectSummary *summary,
+ LiveObjectSummary* summary,
JSFunction* arguments_function,
Handle<Object> error);
@@ -151,7 +151,7 @@
bool is_tracking_roots);
static bool NeedLOLProcessing() { return (last() != NULL); }
- static void NullifyNonLivePointer(HeapObject **p) {
+ static void NullifyNonLivePointer(HeapObject** p) {
// Mask out the low bit that marks this as a heap object. We'll use this
// cleared bit as an indicator that this pointer needs to be collected.
//
@@ -202,7 +202,7 @@
int id_;
int capacity_;
int obj_count_;
- Element *elements_;
+ Element* elements_;
// Statics for managing all the lists.
static uint32_t next_element_id_;
diff --git a/src/log.cc b/src/log.cc
index eab2639..5e82872 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1615,7 +1615,7 @@
}
-bool Logger::Setup() {
+bool Logger::SetUp() {
// Tests and EnsureInitialize() can call this twice in a row. It's harmless.
if (is_initialized_) return true;
is_initialized_ = true;
@@ -1708,9 +1708,9 @@
void Logger::EnableSlidingStateWindow() {
- // If the ticker is NULL, Logger::Setup has not been called yet. In
+ // If the ticker is NULL, Logger::SetUp has not been called yet. In
// that case, we set the sliding_state_window flag so that the
- // sliding window computation will be started when Logger::Setup is
+ // sliding window computation will be started when Logger::SetUp is
// called.
if (ticker_ == NULL) {
FLAG_sliding_state_window = true;
diff --git a/src/log.h b/src/log.h
index 677dada..86bcad6 100644
--- a/src/log.h
+++ b/src/log.h
@@ -150,14 +150,14 @@
#undef DECLARE_ENUM
// Acquires resources for logging if the right flags are set.
- bool Setup();
+ bool SetUp();
void EnsureTickerStarted();
void EnsureTickerStopped();
Sampler* sampler();
- // Frees resources acquired in Setup.
+ // Frees resources acquired in SetUp.
// When a temporary file is used for the log, returns its stream descriptor,
// leaving the file open.
FILE* TearDown();
@@ -411,7 +411,7 @@
NameMap* address_to_name_map_;
// Guards against multiple calls to TearDown() that can happen in some tests.
- // 'true' between Setup() and TearDown().
+ // 'true' between SetUp() and TearDown().
bool is_initialized_;
// Support for 'incremental addresses' in compressed logs:
diff --git a/src/macros.py b/src/macros.py
index bf7119f..8e9c62d 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -26,7 +26,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Dictionary that is passed as defines for js2c.py.
-# Used for defines that must be defined for all native js files.
+# Used for defines that must be defined for all native JS files.
const NONE = 0;
const READ_ONLY = 1;
@@ -101,6 +101,9 @@
macro IS_ARRAY(arg) = (%_IsArray(arg));
macro IS_FUNCTION(arg) = (%_IsFunction(arg));
macro IS_REGEXP(arg) = (%_IsRegExp(arg));
+macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
+macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map');
+macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date');
macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
diff --git a/src/mark-compact-inl.h b/src/mark-compact-inl.h
index 573715e..a9107bd 100644
--- a/src/mark-compact-inl.h
+++ b/src/mark-compact-inl.h
@@ -49,21 +49,44 @@
}
+void MarkCompactCollector::ClearCacheOnMap(Map* map) {
+ if (FLAG_cleanup_code_caches_at_gc) {
+ map->ClearCodeCache(heap());
+ }
+}
+
+
void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
if (!mark_bit.Get()) {
mark_bit.Set();
- MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
ProcessNewlyMarkedObject(obj);
}
}
+bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* object) {
+ MarkBit mark = Marking::MarkBitFrom(object);
+ bool old_mark = mark.Get();
+ if (!old_mark) SetMark(object, mark);
+ return old_mark;
+}
+
+
+void MarkCompactCollector::MarkObjectAndPush(HeapObject* object) {
+ if (!MarkObjectWithoutPush(object)) marking_deque_.PushBlack(object);
+}
+
+
void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
ASSERT(!mark_bit.Get());
ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
mark_bit.Set();
- MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+ if (obj->IsMap()) {
+ ClearCacheOnMap(Map::cast(obj));
+ }
}
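
MarkObjectWithoutPush returns the object's previous mark state rather than void, which turns marking into a test-and-set: callers such as MarkObjectAndPush do the expensive part (pushing onto the marking deque) only on the transition from unmarked to marked, and a repeat call is a cheap no-op. A minimal sketch of the contract, with hypothetical types:

    // Hypothetical stand-ins for MarkBit and the marking deque.
    struct MarkBit {
      bool* cell;
      bool Get() const { return *cell; }
      void Set() { *cell = true; }
    };

    // Returns the *old* mark, so "false" means this call did the marking.
    bool MarkObjectWithoutPushSketch(MarkBit mark) {
      bool old_mark = mark.Get();
      if (!old_mark) mark.Set();
      return old_mark;
    }

    template <typename Deque, typename Object>
    void MarkObjectAndPushSketch(Deque* deque, Object* object, MarkBit mark) {
      // Push only on the first visit; later calls for the same object
      // fall through without touching the deque.
      if (!MarkObjectWithoutPushSketch(mark)) deque->push_back(object);
    }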
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index cc5fda7..ebfce20 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -63,6 +63,7 @@
compacting_(false),
was_marked_incrementally_(false),
collect_maps_(FLAG_collect_maps),
+ flush_monomorphic_ics_(false),
tracer_(NULL),
migration_slots_buffer_(NULL),
heap_(NULL),
@@ -515,6 +516,12 @@
// order which is not implemented for incremental marking.
collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
+ // Monomorphic ICs are preserved when possible, but need to be flushed
+ // when they might be keeping a Context alive, or when the heap is about
+ // to be serialized.
+ flush_monomorphic_ics_ =
+ heap()->isolate()->context_exit_happened() || Serializer::enabled();
+
// Rather than passing the tracer around we stash it in a static member
// variable.
tracer_ = tracer;
@@ -703,16 +710,17 @@
SharedFunctionInfo* candidate) {
Code* code = candidate->code();
return reinterpret_cast<SharedFunctionInfo**>(
- code->address() + Code::kNextCodeFlushingCandidateOffset);
+ code->address() + Code::kGCMetadataOffset);
}
static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
- return *GetNextCandidateField(candidate);
+ return reinterpret_cast<SharedFunctionInfo*>(
+ candidate->code()->gc_metadata());
}
static void SetNextCandidate(SharedFunctionInfo* candidate,
SharedFunctionInfo* next_candidate) {
- *GetNextCandidateField(candidate) = next_candidate;
+ candidate->code()->set_gc_metadata(next_candidate);
}
Isolate* isolate_;
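
Threading the flushing-candidate list through code->gc_metadata() replaces the old trick of writing raw pointers at a fixed offset inside the code object. Either way it is an intrusive singly-linked list: the "next" pointer lives in a spare slot of the element itself, so building the list allocates nothing during GC. A minimal sketch of the idea, with hypothetical types:

    // Hypothetical stand-ins for Code and SharedFunctionInfo.
    struct Code { void* gc_metadata = nullptr; };
    struct SharedFunctionInfo { Code* code; };

    struct CandidateList {
      SharedFunctionInfo* head = nullptr;

      void Add(SharedFunctionInfo* candidate) {
        // Link through the spare slot in the candidate's code object.
        candidate->code->gc_metadata = head;
        head = candidate;
      }

      static SharedFunctionInfo* Next(SharedFunctionInfo* candidate) {
        return static_cast<SharedFunctionInfo*>(candidate->code->gc_metadata);
      }
    };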
@@ -737,7 +745,7 @@
// it in place to its left substring. Return the updated value.
//
// Here we assume that if we change *p, we replace it with a heap object
- // (ie, the left substring of a cons string is always a heap object).
+ // (i.e., the left substring of a cons string is always a heap object).
//
// The check performed is:
// object->IsConsString() && !object->IsSymbol() &&
@@ -881,10 +889,10 @@
static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()) {
+ if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
+ && (target->ic_state() == MEGAMORPHIC ||
+ heap->mark_compact_collector()->flush_monomorphic_ics_)) {
IC::Clear(rinfo->pc());
- // Please note targets for cleared inline cached do not have to be
- // marked since they are contained in HEAP->non_monomorphic_cache().
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
} else {
if (FLAG_cleanup_code_caches_at_gc &&
@@ -893,9 +901,10 @@
target->has_function_cache()) {
CallFunctionStub::Clear(heap, rinfo->pc());
}
- MarkBit code_mark = Marking::MarkBitFrom(target);
- heap->mark_compact_collector()->MarkObject(target, code_mark);
}
+ MarkBit code_mark = Marking::MarkBitFrom(target);
+ heap->mark_compact_collector()->MarkObject(target, code_mark);
+
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
}
@@ -1197,7 +1206,7 @@
return;
}
JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
- // Flush code or set age on both ascii and two byte code.
+ // Flush code or set age on both ASCII and two byte code.
UpdateRegExpCodeAgeAndFlush(heap, re, true);
UpdateRegExpCodeAgeAndFlush(heap, re, false);
// Visit the fields of the RegExp, including the updated FixedArray.
@@ -1615,9 +1624,7 @@
ASSERT(HEAP->Contains(object));
if (object->IsMap()) {
Map* map = Map::cast(object);
- if (FLAG_cleanup_code_caches_at_gc) {
- map->ClearCodeCache(heap());
- }
+ ClearCacheOnMap(map);
// When map collection is enabled we have to mark through map's transitions
// in a special way to make transition links weak.
@@ -1642,8 +1649,8 @@
MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
if (!mark.Get()) {
mark.Set();
- MemoryChunk::IncrementLiveBytes(prototype_transitions->address(),
- prototype_transitions->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
+ prototype_transitions->Size());
}
Object** raw_descriptor_array_slot =
@@ -1666,6 +1673,16 @@
}
+void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors,
+ int offset) {
+ Object** slot = HeapObject::RawField(accessors, offset);
+ HeapObject* accessor = HeapObject::cast(*slot);
+ if (accessor->IsMap()) return;
+ RecordSlot(slot, slot, accessor);
+ MarkObjectAndPush(accessor);
+}
+
+
void MarkCompactCollector::MarkDescriptorArray(
DescriptorArray* descriptors) {
MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
@@ -1693,27 +1710,37 @@
PropertyDetails details(Smi::cast(contents->get(i + 1)));
Object** slot = contents->data_start() + i;
- Object* value = *slot;
- if (!value->IsHeapObject()) continue;
+ if (!(*slot)->IsHeapObject()) continue;
+ HeapObject* value = HeapObject::cast(*slot);
RecordSlot(slot, slot, *slot);
- if (details.IsProperty()) {
- HeapObject* object = HeapObject::cast(value);
- MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object));
- if (!mark.Get()) {
- SetMark(HeapObject::cast(object), mark);
- marking_deque_.PushBlack(object);
- }
- } else if (details.type() == ELEMENTS_TRANSITION && value->IsFixedArray()) {
- // For maps with multiple elements transitions, the transition maps are
- // stored in a FixedArray. Keep the fixed array alive but not the maps
- // that it refers to.
- HeapObject* object = HeapObject::cast(value);
- MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object));
- if (!mark.Get()) {
- SetMark(HeapObject::cast(object), mark);
- }
+ switch (details.type()) {
+ case NORMAL:
+ case FIELD:
+ case CONSTANT_FUNCTION:
+ case HANDLER:
+ case INTERCEPTOR:
+ MarkObjectAndPush(value);
+ break;
+ case CALLBACKS:
+ if (!value->IsAccessorPair()) {
+ MarkObjectAndPush(value);
+ } else if (!MarkObjectWithoutPush(value)) {
+ MarkAccessorPairSlot(value, AccessorPair::kGetterOffset);
+ MarkAccessorPairSlot(value, AccessorPair::kSetterOffset);
+ }
+ break;
+ case ELEMENTS_TRANSITION:
+ // For maps with multiple elements transitions, the transition maps are
+ // stored in a FixedArray. Keep the fixed array alive but not the maps
+ // that it refers to.
+ if (value->IsFixedArray()) MarkObjectWithoutPush(value);
+ break;
+ case MAP_TRANSITION:
+ case CONSTANT_TRANSITION:
+ case NULL_DESCRIPTOR:
+ break;
}
}
// The DescriptorArray descriptors contains a pointer to its contents array,
@@ -1757,7 +1784,7 @@
MarkBit markbit = Marking::MarkBitFrom(object);
if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
Marking::GreyToBlack(markbit);
- MemoryChunk::IncrementLiveBytes(object->address(), object->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
marking_deque->PushBlack(object);
if (marking_deque->IsFull()) return;
}
@@ -1812,7 +1839,7 @@
Marking::GreyToBlack(markbit);
Address addr = cell_base + offset * kPointerSize;
HeapObject* object = HeapObject::FromAddress(addr);
- MemoryChunk::IncrementLiveBytes(object->address(), object->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
marking_deque->PushBlack(object);
if (marking_deque->IsFull()) return;
offset += 2;
@@ -2287,89 +2314,92 @@
map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
}
- // Clear dead prototype transitions.
- int number_of_transitions = map->NumberOfProtoTransitions();
- FixedArray* prototype_transitions = map->prototype_transitions();
+ ClearNonLivePrototypeTransitions(map);
+ ClearNonLiveMapTransitions(map, map_mark);
+ }
+}
- int new_number_of_transitions = 0;
- const int header = Map::kProtoTransitionHeaderSize;
- const int proto_offset =
- header + Map::kProtoTransitionPrototypeOffset;
- const int map_offset = header + Map::kProtoTransitionMapOffset;
- const int step = Map::kProtoTransitionElementsPerEntry;
- for (int i = 0; i < number_of_transitions; i++) {
- Object* prototype = prototype_transitions->get(proto_offset + i * step);
- Object* cached_map = prototype_transitions->get(map_offset + i * step);
- if (IsMarked(prototype) && IsMarked(cached_map)) {
- if (new_number_of_transitions != i) {
- prototype_transitions->set_unchecked(
- heap_,
- proto_offset + new_number_of_transitions * step,
- prototype,
- UPDATE_WRITE_BARRIER);
- prototype_transitions->set_unchecked(
- heap_,
- map_offset + new_number_of_transitions * step,
- cached_map,
- SKIP_WRITE_BARRIER);
- }
+
+void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
+ int number_of_transitions = map->NumberOfProtoTransitions();
+ FixedArray* prototype_transitions = map->prototype_transitions();
+
+ int new_number_of_transitions = 0;
+ const int header = Map::kProtoTransitionHeaderSize;
+ const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
+ const int map_offset = header + Map::kProtoTransitionMapOffset;
+ const int step = Map::kProtoTransitionElementsPerEntry;
+ for (int i = 0; i < number_of_transitions; i++) {
+ Object* prototype = prototype_transitions->get(proto_offset + i * step);
+ Object* cached_map = prototype_transitions->get(map_offset + i * step);
+ if (IsMarked(prototype) && IsMarked(cached_map)) {
+ int proto_index = proto_offset + new_number_of_transitions * step;
+ int map_index = map_offset + new_number_of_transitions * step;
+ if (new_number_of_transitions != i) {
+ prototype_transitions->set_unchecked(
+ heap_,
+ proto_index,
+ prototype,
+ UPDATE_WRITE_BARRIER);
+ prototype_transitions->set_unchecked(
+ heap_,
+ map_index,
+ cached_map,
+ SKIP_WRITE_BARRIER);
}
+ Object** slot =
+ HeapObject::RawField(prototype_transitions,
+ FixedArray::OffsetOfElementAt(proto_index));
+ RecordSlot(slot, slot, prototype);
+ new_number_of_transitions++;
+ }
+ }
- // Fill slots that became free with undefined value.
- Object* undefined = heap()->undefined_value();
- for (int i = new_number_of_transitions * step;
- i < number_of_transitions * step;
- i++) {
- // The undefined object is on a page that is never compacted and never
- // in new space so it is OK to skip the write barrier. Also it's a
- // root.
- prototype_transitions->set_unchecked(heap_,
- header + i,
- undefined,
- SKIP_WRITE_BARRIER);
+ if (new_number_of_transitions != number_of_transitions) {
+ map->SetNumberOfProtoTransitions(new_number_of_transitions);
+ }
- Object** undefined_slot =
- prototype_transitions->data_start() + i;
- RecordSlot(undefined_slot, undefined_slot, undefined);
- }
- map->SetNumberOfProtoTransitions(new_number_of_transitions);
+ // Fill slots that became free with undefined value.
+ for (int i = new_number_of_transitions * step;
+ i < number_of_transitions * step;
+ i++) {
+ prototype_transitions->set_undefined(heap_, header + i);
+ }
+}
+
+
+void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
+ MarkBit map_mark) {
+ // Follow the chain of back pointers to find the prototype.
+ Map* real_prototype = map;
+ while (real_prototype->IsMap()) {
+ real_prototype = reinterpret_cast<Map*>(real_prototype->prototype());
+ ASSERT(real_prototype->IsHeapObject());
+ }
+
+ // Follow back pointers, setting them to prototype, clearing map transitions
+ // when necessary.
+ Map* current = map;
+ bool current_is_alive = map_mark.Get();
+ bool on_dead_path = !current_is_alive;
+ while (current->IsMap()) {
+ Object* next = current->prototype();
+ // There should never be a dead map above a live map.
+ ASSERT(on_dead_path || current_is_alive);
+
+ // A live map above a dead map indicates a dead transition. This test will
+ // always be false on the first iteration.
+ if (on_dead_path && current_is_alive) {
+ on_dead_path = false;
+ current->ClearNonLiveTransitions(heap(), real_prototype);
}
- // Follow the chain of back pointers to find the prototype.
- Map* current = map;
- while (current->IsMap()) {
- current = reinterpret_cast<Map*>(current->prototype());
- ASSERT(current->IsHeapObject());
- }
- Object* real_prototype = current;
+ Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
+ *slot = real_prototype;
+ if (current_is_alive) RecordSlot(slot, slot, real_prototype);
- // Follow back pointers, setting them to prototype,
- // clearing map transitions when necessary.
- current = map;
- bool on_dead_path = !map_mark.Get();
- Object* next;
- while (current->IsMap()) {
- next = current->prototype();
- // There should never be a dead map above a live map.
- MarkBit current_mark = Marking::MarkBitFrom(current);
- bool is_alive = current_mark.Get();
- ASSERT(on_dead_path || is_alive);
-
- // A live map above a dead map indicates a dead transition.
- // This test will always be false on the first iteration.
- if (on_dead_path && is_alive) {
- on_dead_path = false;
- current->ClearNonLiveTransitions(heap(), real_prototype);
- }
- *HeapObject::RawField(current, Map::kPrototypeOffset) =
- real_prototype;
-
- if (is_alive) {
- Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
- RecordSlot(slot, slot, real_prototype);
- }
- current = reinterpret_cast<Map*>(next);
- }
+ current = reinterpret_cast<Map*>(next);
+ current_is_alive = Marking::MarkBitFrom(current).Get();
}
}
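
During marking, a map's prototype field temporarily holds a back pointer up the transition tree. The extracted ClearNonLiveMapTransitions walks that chain in two passes: first up to the real prototype object above the topmost map, then down again rewriting every back pointer to it, calling ClearNonLiveTransitions exactly at the first step from a dead map onto a live one. A structural sketch with a hypothetical node type:

    // Hypothetical node: 'up' is the back pointer; at the top of the chain
    // it is the real prototype, which is not itself a map.
    struct Node {
      Node* up;
      bool is_map;
      bool is_alive;
    };

    void ClearNonLiveMapTransitionsSketch(Node* map) {
      // Pass 1: find the real prototype above the chain of maps.
      Node* real_prototype = map;
      while (real_prototype->is_map) real_prototype = real_prototype->up;

      // Pass 2: rewrite back pointers, clearing dead transitions once.
      Node* current = map;
      bool current_is_alive = map->is_alive;
      bool on_dead_path = !current_is_alive;
      while (current->is_map) {
        Node* next = current->up;
        // A live map above a dead map marks a dead transition; this fires
        // at most once, and never on the first iteration.
        if (on_dead_path && current_is_alive) {
          on_dead_path = false;
          // ...clear the dead transitions out of 'current' here...
        }
        current->up = real_prototype;  // Restore the prototype pointer.
        current = next;
        current_is_alive = current->is_alive;
      }
    }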
@@ -2622,6 +2652,10 @@
void MarkCompactCollector::EvacuateNewSpace() {
+ // There are soft limits in the allocation code, designed to trigger a
+ // mark-sweep collection by failing allocations. But since we are already in
+ // a mark-sweep allocation, there is no sense in trying to trigger one.
+ AlwaysAllocateScope scope;
heap()->CheckNewSpaceExpansionCriteria();
NewSpace* new_space = heap()->new_space();
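
AlwaysAllocateScope is an RAII guard: while one is alive, the soft allocation limits mentioned in the comment stop failing allocations, since failing them exists only to provoke a mark-sweep and we are already inside one. A sketch of the mechanism under that assumption, with hypothetical names:

    // Hypothetical global GC state with a scope counter.
    struct GcStateSketch { int always_allocate_depth = 0; };
    static GcStateSketch gc_state;

    class AlwaysAllocateScopeSketch {
     public:
      AlwaysAllocateScopeSketch()  { gc_state.always_allocate_depth++; }
      ~AlwaysAllocateScopeSketch() { gc_state.always_allocate_depth--; }
    };

    bool SoftLimitMayFailAllocation() {
      // Soft failures are suppressed while any scope is alive, so code that
      // runs inside the collector never re-triggers a collection.
      return gc_state.always_allocate_depth == 0;
    }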
@@ -3641,6 +3675,9 @@
PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
reinterpret_cast<intptr_t>(p));
}
+ // Adjust unswept free bytes because releasing a page expects said
+ // counter to be accurate for unswept pages.
+ space->IncreaseUnsweptFreeBytes(p);
space->ReleasePage(p);
continue;
}
@@ -3652,6 +3689,7 @@
PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
reinterpret_cast<intptr_t>(p));
}
+ space->IncreaseUnsweptFreeBytes(p);
continue;
}
@@ -3720,6 +3758,7 @@
#endif
SweeperType how_to_sweep =
FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
+ if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
if (sweep_precisely_) how_to_sweep = PRECISE;
// Noncompacting collections simply sweep the spaces to clear the mark
// bits and free the nonlive blocks (for old and map spaces). We sweep
diff --git a/src/mark-compact.h b/src/mark-compact.h
index e0a7d94..a911b49 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -198,7 +198,7 @@
ASSERT(object->IsHeapObject());
if (IsFull()) {
Marking::BlackToGrey(object);
- MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
SetOverflowed();
} else {
array_[top_] = object;
@@ -407,7 +407,7 @@
// object from the forwarding address of the previous live object in the
// page as input, and is updated to contain the offset to be used for the
// next live object in the same page. For spaces using a different
- // encoding (ie, contiguous spaces), the offset parameter is ignored.
+ // encoding (i.e., contiguous spaces), the offset parameter is ignored.
typedef void (*EncodingFunction)(Heap* heap,
HeapObject* old_object,
int object_size,
@@ -580,6 +580,8 @@
bool collect_maps_;
+ bool flush_monomorphic_ics_;
+
// A pointer to the current stack-allocated GC tracer object during a full
// collection (NULL before and after).
GCTracer* tracer_;
@@ -622,10 +624,19 @@
void AfterMarking();
+ // Marks the object black and pushes it on the marking stack.
+ // This is for non-incremental marking.
INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
+ INLINE(bool MarkObjectWithoutPush(HeapObject* object));
+ INLINE(void MarkObjectAndPush(HeapObject* value));
+
+ // Marks the object black. This is for non-incremental marking.
INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
+ // Clears the cache of ICs related to this map.
+ INLINE(void ClearCacheOnMap(Map* map));
+
void ProcessNewlyMarkedObject(HeapObject* obj);
// Creates back pointers for all map transitions, stores them in
@@ -637,6 +648,7 @@
// Mark a Map and its DescriptorArray together, skipping transitions.
void MarkMapContents(Map* map);
+ void MarkAccessorPairSlot(HeapObject* accessors, int offset);
void MarkDescriptorArray(DescriptorArray* descriptors);
// Mark the heap roots and all objects reachable from them.
@@ -684,6 +696,8 @@
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
void ClearNonLiveTransitions();
+ void ClearNonLivePrototypeTransitions(Map* map);
+ void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
// Marking detaches initial maps from SharedFunctionInfo objects
// to make this reference weak. We need to reattach initial maps
diff --git a/src/math.js b/src/math.js
index 18492aa..f4426f4 100644
--- a/src/math.js
+++ b/src/math.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -119,6 +119,19 @@
// ECMA 262 - 15.8.2.11
function MathMax(arg1, arg2) { // length == 2
var length = %_ArgumentsLength();
+ if (length == 2) {
+ if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1);
+ if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2);
+ if (arg2 > arg1) return arg2;
+ if (arg1 > arg2) return arg1;
+ if (arg1 == arg2) {
+ // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be
+ // a Smi or a heap number.
+ return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg2 : arg1;
+ }
+ // All comparisons failed, one of the arguments must be NaN.
+ return 0/0; // Compiler constant-folds this to NaN.
+ }
if (length == 0) {
return -1/0; // Compiler constant-folds this to -Infinity.
}
@@ -131,7 +144,7 @@
if (NUMBER_IS_NAN(n)) return n;
// Make sure +0 is considered greater than -0. -0 is never a Smi, +0 can be
// a Smi or heap number.
- if (n > r || (r === 0 && n === 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
+ if (n > r || (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
}
return r;
}
@@ -139,6 +152,19 @@
// ECMA 262 - 15.8.2.12
function MathMin(arg1, arg2) { // length == 2
var length = %_ArgumentsLength();
+ if (length == 2) {
+ if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1);
+ if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2);
+ if (arg2 > arg1) return arg1;
+ if (arg1 > arg2) return arg2;
+ if (arg1 == arg2) {
+ // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be
+ // a Smi or a heap number.
+ return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg1 : arg2;
+ }
+ // All comparisons failed, one of the arguments must be NaN.
+ return 0/0; // Compiler constant-folds this to NaN.
+ }
if (length == 0) {
return 1/0; // Compiler constant-folds this to Infinity.
}
@@ -149,9 +175,9 @@
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
if (NUMBER_IS_NAN(n)) return n;
- // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can b a
+ // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be a
// Smi or a heap number.
- if (n < r || (r === 0 && n === 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
+ if (n < r || (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
}
return r;
}
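
Both fast paths above lean on the one cheap way to tell -0 from +0: the two compare equal under ==, but their reciprocals have opposite signs (1/+0 is +Infinity, 1/-0 is -Infinity). The %_IsSmi guard exists because a Smi zero is always +0, so the division can be skipped for it. The same trick, shown as a standalone C++ illustration:

    #include <cstdio>

    // max() that prefers +0 over -0, mirroring the MathMax fast path.
    double MaxPreferPositiveZero(double a, double b) {
      if (a > b) return a;
      if (b > a) return b;
      if (a == b) {
        // Equal values: if a is -0 (reciprocal negative), answer b instead.
        return (a == 0.0 && 1.0 / a < 0.0) ? b : a;
      }
      return 0.0 / 0.0;  // One argument was NaN.
    }

    int main() {
      // Prints "inf": the result is +0, whose reciprocal is +Infinity.
      printf("%g\n", 1.0 / MaxPreferPositiveZero(-0.0, 0.0));
      return 0;
    }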
diff --git a/src/messages.js b/src/messages.js
index 5a3f12e..5310938 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -246,6 +246,7 @@
"unprotected_const", ["Illegal const declaration in unprotected statement context."],
"cant_prevent_ext_external_array_elements", ["Cannot prevent extension of an object with external array elements"],
"redef_external_array_element", ["Cannot redefine a property of an object with external array elements"],
+ "harmony_const_assign", ["Assignment to constant variable."],
];
var messages = { __proto__ : null };
for (var i = 0; i < messagesDictionary.length; i += 2) {
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index 2ba9760..f9e75fa 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -30,13 +30,14 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
#include "mips/assembler-mips.h"
+
#include "cpu.h"
#include "debug.h"
@@ -78,6 +79,16 @@
}
+int FPURegister::ToAllocationIndex(FPURegister reg) {
+ ASSERT(reg.code() % 2 == 0);
+ ASSERT(reg.code() / 2 < kNumAllocatableRegisters);
+ ASSERT(reg.is_valid());
+ ASSERT(!reg.is(kDoubleRegZero));
+ ASSERT(!reg.is(kLithiumScratchDouble));
+ return (reg.code() / 2);
+}
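
On MIPS, a double occupies an even/odd FPU register pair, so only even codes reach the allocator and the allocation index is simply code / 2. The two new asserts keep kDoubleRegZero and kLithiumScratchDouble out of the allocatable range; assuming those reserved registers are f28 and f30 (as the aliases in assembler-mips.h suggest), the indices stay dense:

    // Worked example of the even-code mapping (codes asserted to be even):
    //   ToAllocationIndex(f0)  == 0  / 2 == 0
    //   ToAllocationIndex(f2)  == 2  / 2 == 1
    //   ToAllocationIndex(f26) == 26 / 2 == 13  (last allocatable pair)
    int ToAllocationIndexSketch(int even_code) { return even_code / 2; }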
+
+
// -----------------------------------------------------------------------------
// RelocInfo.
@@ -133,7 +144,7 @@
}
-Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
Assembler::target_address_at(pc_)));
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index e933181..85b6ed8 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -301,7 +301,7 @@
own_buffer_ = false;
}
- // Setup buffer pointers.
+ // Set up buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -337,7 +337,7 @@
void Assembler::GetCode(CodeDesc* desc) {
ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
- // Setup code descriptor.
+ // Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
@@ -1970,7 +1970,7 @@
}
CHECK_GT(desc.buffer_size, 0); // No overflow.
- // Setup new buffer.
+ // Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index b66ea0d..b1ffc45 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
@@ -182,12 +182,7 @@
kNumReservedRegisters;
- static int ToAllocationIndex(FPURegister reg) {
- ASSERT(reg.code() % 2 == 0);
- ASSERT(reg.code() / 2 < kNumAllocatableRegisters);
- ASSERT(reg.is_valid());
- return (reg.code() / 2);
- }
+ inline static int ToAllocationIndex(FPURegister reg);
static FPURegister FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
@@ -302,6 +297,14 @@
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };
+// Register aliases.
+// cp is assumed to be a callee saved register.
+static const Register& kLithiumScratchReg = s3; // Scratch register.
+static const Register& kLithiumScratchReg2 = s4; // Scratch register.
+static const Register& kRootRegister = s6; // Roots array pointer.
+static const Register& cp = s7; // JavaScript context pointer.
+static const Register& fp = s8_fp; // Alias for fp.
+static const DoubleRegister& kLithiumScratchDouble = f30;
static const FPURegister& kDoubleRegZero = f28;
// FPU (coprocessor 1) control registers.
@@ -667,7 +670,7 @@
// Never use the int16_t b(l)cond version with a branch offset
// instead of using the Label* version.
- // Jump targets must be in the current 256 MB-aligned region. ie 28 bits.
+ // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits.
void j(int32_t target);
void jal(int32_t target);
void jalr(Register rs, Register rd = ra);
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 98fd57d..3f4aab3 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -74,17 +74,33 @@
}
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+ Register result) {
+ // Load the global context.
+
+ __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(result,
+ FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+ // Load the InternalArray function from the global context.
+ __ lw(result,
+ MemOperand(result,
+ Context::SlotOffset(
+ Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+}
+
+
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
// Load the global context.
__ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ lw(result,
- FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+ FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
// Load the Array function from the global context.
__ lw(result,
- MemOperand(result,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ MemOperand(result,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
}
@@ -308,7 +324,8 @@
static void ArrayNativeCode(MacroAssembler* masm,
Label* call_generic_code) {
Counters* counters = masm->isolate()->counters();
- Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array;
+ Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
+ has_non_smi_element;
// Check for array construction with zero arguments or one.
__ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
@@ -322,7 +339,7 @@
t1,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, a3, t0);
- // Setup return value, remove receiver from stack and return.
+ // Set up return value, remove receiver from stack and return.
__ mov(v0, a2);
__ Addu(sp, sp, Operand(kPointerSize));
__ Ret();
@@ -365,7 +382,7 @@
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, a2, t0);
- // Setup return value, remove receiver and argument from stack and return.
+ // Set up return value, remove receiver and argument from stack and return.
__ mov(v0, a3);
__ Addu(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -400,13 +417,19 @@
// sp[0]: last argument
Label loop, entry;
- __ Branch(&entry);
+ __ Branch(USE_DELAY_SLOT, &entry);
+ __ mov(t3, sp);
__ bind(&loop);
- __ pop(a2);
+ __ lw(a2, MemOperand(t3));
+ __ Addu(t3, t3, kPointerSize);
+ if (FLAG_smi_only_arrays) {
+ __ JumpIfNotSmi(a2, &has_non_smi_element);
+ }
__ Addu(t1, t1, -kPointerSize);
__ sw(a2, MemOperand(t1));
__ bind(&entry);
__ Branch(&loop, lt, t0, Operand(t1));
+ __ mov(sp, t3);
// Remove caller arguments and receiver from the stack, set up return value and
// return.
@@ -416,6 +439,46 @@
__ Addu(sp, sp, Operand(kPointerSize));
__ mov(v0, a3);
__ Ret();
+
+ __ bind(&has_non_smi_element);
+ __ UndoAllocationInNewSpace(a3, t0);
+ __ b(call_generic_code);
+}
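
The reworked loop above walks the caller's arguments through a cursor register (t3) instead of popping them, so on the first non-smi element it can undo the half-finished allocation and fall back to generic code with sp still unchanged. A plain sketch of the control flow, with illustrative types:

    // Copy caller arguments into the new backing store, bailing out on the
    // first non-smi when FLAG_smi_only_arrays is set. sp is only advanced
    // (mov(sp, t3)) after the whole copy has succeeded.
    bool CopySmiArguments(Object** cursor /* t3 */, Object** dest /* t1 */,
                          Object** dest_end /* t0 */) {
      while (dest_end < dest) {            // Branch(&loop, lt, t0, t1)
        Object* value = *cursor++;         // lw(a2, MemOperand(t3)); Addu(t3)
        if (!value->IsSmi()) return false; // has_non_smi_element: undo, bail
        *--dest = value;                   // Addu(t1, -4); sw(a2, t1)
      }
      return true;
    }
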
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the InternalArray function.
+ GenerateLoadInternalArrayFunction(masm, a1);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin InternalArray functions should be maps.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a2, Operand(kSmiTagMask));
+ __ Assert(ne, "Unexpected initial map for InternalArray function",
+ t0, Operand(zero_reg));
+ __ GetObjectType(a2, a3, t0);
+ __ Assert(eq, "Unexpected initial map for InternalArray function",
+ t0, Operand(MAP_TYPE));
+ }
+
+ // Run the native code for the InternalArray function called as a normal
+ // function.
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code if the specialized code cannot handle the
+ // construction.
+ __ bind(&generic_array_code);
+
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->InternalArrayCodeGeneric();
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
}
@@ -832,7 +895,7 @@
// Initialize the FixedArray.
// a1: constructor
- // a3: number of elements in properties array (un-tagged)
+ // a3: number of elements in properties array (untagged)
// t4: JSObject
// t5: start of next object
__ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
@@ -918,10 +981,10 @@
// sp[4]: number of arguments (smi-tagged)
__ lw(a3, MemOperand(sp, 4 * kPointerSize));
- // Setup pointer to last argument.
+ // Set up pointer to last argument.
__ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- // Setup number of arguments for function call below.
+ // Set up number of arguments for function call below.
__ srl(a0, a3, kSmiTagSize);
// Copy arguments and receiver to the expression stack.
@@ -1036,7 +1099,7 @@
// ----------- S t a t e -------------
// -- a0: code entry
// -- a1: function
- // -- a2: reveiver_pointer
+ // -- a2: receiver_pointer
// -- a3: argc
// -- s0: argv
// -----------------------------------
@@ -1051,17 +1114,14 @@
// Set up the context from the function argument.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // Set up the roots register.
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(masm->isolate());
- __ li(s6, Operand(roots_array_start));
+ __ InitializeRootRegister();
// Push the function and the receiver onto the stack.
__ Push(a1, a2);
// Copy arguments to the stack in a loop.
// a3: argc
- // s0: argv, ie points to first arg
+ // s0: argv, i.e. points to first arg
Label loop, entry;
__ sll(t0, a3, kPointerSizeLog2);
__ addu(t2, s0, t0);
@@ -1740,6 +1800,7 @@
__ Call(a3);
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
// Exit frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ Ret();
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 92abf6d..289e6b8 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -157,13 +157,13 @@
// Load the function from the stack.
__ lw(a3, MemOperand(sp, 0));
- // Setup the object header.
+ // Set up the object header.
__ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
__ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
__ li(a2, Operand(Smi::FromInt(length)));
__ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
- // Setup the fixed slots.
+ // Set up the fixed slots.
__ li(a1, Operand(Smi::FromInt(0)));
__ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
@@ -208,7 +208,7 @@
// Load the serialized scope info from the stack.
__ lw(a1, MemOperand(sp, 1 * kPointerSize));
- // Setup the object header.
+ // Set up the object header.
__ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
__ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
__ li(a2, Operand(Smi::FromInt(length)));
@@ -229,7 +229,7 @@
__ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
- // Setup the fixed slots.
+ // Set up the fixed slots.
__ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
__ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
__ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
@@ -726,7 +726,7 @@
__ Subu(int_scratch, zero_reg, int_scratch);
__ bind(&skip_sub);
- // Get mantisssa[51:20].
+ // Get mantissa[51:20].
// Get the position of the first set bit.
__ clz(dst1, int_scratch);
@@ -971,7 +971,7 @@
// non zero bits left. So we need the (30 - exponent) last bits of the
// 31 higher bits of the mantissa to be null.
// Because bits [21:0] are null, we can check instead that the
- // (32 - exponent) last bits of the 32 higher bits of the mantisssa are null.
+ // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
// Get the 32 higher bits of the mantissa in dst.
__ Ext(dst,
@@ -3592,113 +3592,218 @@
void MathPowStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
+ CpuFeatures::Scope fpu_scope(FPU);
+ const Register base = a1;
+ const Register exponent = a2;
+ const Register heapnumbermap = t1;
+ const Register heapnumber = v0;
+ const DoubleRegister double_base = f2;
+ const DoubleRegister double_exponent = f4;
+ const DoubleRegister double_result = f0;
+ const DoubleRegister double_scratch = f6;
+ const FPURegister single_scratch = f8;
+ const Register scratch = t5;
+ const Register scratch2 = t3;
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
-
- Label base_not_smi;
- Label exponent_not_smi;
- Label convert_exponent;
-
- const Register base = a0;
- const Register exponent = a2;
- const Register heapnumbermap = t1;
- const Register heapnumber = s0; // Callee-saved register.
- const Register scratch = t2;
- const Register scratch2 = t3;
-
- // Alocate FP values in the ABI-parameter-passing regs.
- const DoubleRegister double_base = f12;
- const DoubleRegister double_exponent = f14;
- const DoubleRegister double_result = f0;
- const DoubleRegister double_scratch = f2;
-
- __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+ Label call_runtime, done, exponent_not_smi, int_exponent;
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi, unpack_exponent;
+ // The exponent and base are supplied as arguments on the stack.
+ // This can only happen if the stub is called from non-optimized code.
+ // Load input parameters from stack to double registers.
__ lw(base, MemOperand(sp, 1 * kPointerSize));
__ lw(exponent, MemOperand(sp, 0 * kPointerSize));
- // Convert base to double value and store it in f0.
- __ JumpIfNotSmi(base, &base_not_smi);
- // Base is a Smi. Untag and convert it.
- __ SmiUntag(base);
- __ mtc1(base, double_scratch);
- __ cvt_d_w(double_base, double_scratch);
- __ Branch(&convert_exponent);
+ __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
- __ bind(&base_not_smi);
+ __ JumpIfSmi(base, &base_is_smi);
__ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
__ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
- // Base is a heapnumber. Load it into double register.
- __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
- __ bind(&convert_exponent);
+ __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+ __ jmp(&unpack_exponent);
+
+ __ bind(&base_is_smi);
+ __ SmiUntag(base);
+ __ mtc1(base, single_scratch);
+ __ cvt_d_w(double_base, single_scratch);
+ __ bind(&unpack_exponent);
+
__ JumpIfNotSmi(exponent, &exponent_not_smi);
__ SmiUntag(exponent);
-
- // The base is in a double register and the exponent is
- // an untagged smi. Allocate a heap number and call a
- // C function for integer exponents. The register containing
- // the heap number is callee-saved.
- __ AllocateHeapNumber(heapnumber,
- scratch,
- scratch2,
- heapnumbermap,
- &call_runtime);
- __ push(ra);
- __ PrepareCallCFunction(1, 1, scratch);
- __ SetCallCDoubleArguments(double_base, exponent);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::power_double_int_function(masm->isolate()), 1, 1);
- __ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
- }
- __ sdc1(double_result,
- FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- __ mov(v0, heapnumber);
- __ DropAndRet(2 * kPointerSize);
+ __ jmp(&int_exponent);
__ bind(&exponent_not_smi);
__ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
__ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
- // Exponent is a heapnumber. Load it into double register.
__ ldc1(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ // Base is already in double_base.
+ __ JumpIfNotSmi(exponent, &exponent_not_smi);
+ __ SmiUntag(exponent);
+ __ jmp(&int_exponent);
- // The base and the exponent are in double registers.
- // Allocate a heap number and call a C function for
- // double exponents. The register containing
- // the heap number is callee-saved.
- __ AllocateHeapNumber(heapnumber,
- scratch,
- scratch2,
- heapnumbermap,
- &call_runtime);
- __ push(ra);
- __ PrepareCallCFunction(0, 2, scratch);
- // ABI (o32) for func(double a, double b): a in f12, b in f14.
- ASSERT(double_base.is(f12));
- ASSERT(double_exponent.is(f14));
- __ SetCallCDoubleArguments(double_base, double_exponent);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::power_double_double_function(masm->isolate()),
- 0,
- 2);
- __ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
- }
- __ sdc1(double_result,
- FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- __ mov(v0, heapnumber);
- __ DropAndRet(2 * kPointerSize);
+ __ bind(&exponent_not_smi);
+ __ ldc1(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ if (exponent_type_ != INTEGER) {
+ Label int_exponent_convert;
+ // Detect integer exponents stored as double.
+ __ EmitFPUTruncate(kRoundToMinusInf,
+ single_scratch,
+ double_exponent,
+ scratch,
+ scratch2,
+ kCheckForInexactConversion);
+ // scratch2 == 0 means there was no conversion error.
+ __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
+
+ if (exponent_type_ == ON_STACK) {
+ // Detect square root case. Crankshaft detects constant +/-0.5 at
+ // compile time and uses DoMathPowHalf instead. We then skip this check
+ // for non-constant cases of +/-0.5 as these hardly occur.
+ Label not_plus_half;
+
+ // Test for 0.5.
+ __ Move(double_scratch, 0.5);
+ __ BranchF(USE_DELAY_SLOT,
+ &not_plus_half,
+ NULL,
+ ne,
+ double_exponent,
+ double_scratch);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+ __ Move(double_scratch, -V8_INFINITY);
+ __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
+ __ neg_d(double_result, double_scratch);
+
+ // Add +0 to convert -0 to +0.
+ __ add_d(double_scratch, double_base, kDoubleRegZero);
+ __ sqrt_d(double_result, double_scratch);
+ __ jmp(&done);
+
+ __ bind(&not_plus_half);
+ __ Move(double_scratch, -0.5);
+ __ BranchF(USE_DELAY_SLOT,
+ &call_runtime,
+ NULL,
+ ne,
+ double_exponent,
+ double_scratch);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+ __ Move(double_scratch, -V8_INFINITY);
+ __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
+ __ Move(double_result, kDoubleRegZero);
+
+ // Add +0 to convert -0 to +0.
+ __ add_d(double_scratch, double_base, kDoubleRegZero);
+ __ Move(double_result, 1);
+ __ sqrt_d(double_scratch, double_scratch);
+ __ div_d(double_result, double_result, double_scratch);
+ __ jmp(&done);
+ }
+
+ __ push(ra);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ }
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
+ __ jmp(&done);
+
+ __ bind(&int_exponent_convert);
+ __ mfc1(exponent, single_scratch);
+ }
+
+ // Calculate power with integer exponent.
+ __ bind(&int_exponent);
+
+ __ mov(scratch, exponent); // Back up exponent.
+ __ mov_d(double_scratch, double_base); // Back up base.
+ __ Move(double_result, 1.0);
+
+ // Get absolute value of exponent.
+ Label positive_exponent;
+ __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
+ __ Subu(scratch, zero_reg, scratch);
+ __ bind(&positive_exponent);
+
+ Label while_true, no_carry, loop_end;
+ __ bind(&while_true);
+
+ __ And(scratch2, scratch, 1);
+
+ __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
+ __ mul_d(double_result, double_result, double_scratch);
+ __ bind(&no_carry);
+
+ __ sra(scratch, scratch, 1);
+
+ __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
+ __ mul_d(double_scratch, double_scratch, double_scratch);
+
+ __ Branch(&while_true);
+
+ __ bind(&loop_end);
+
+ __ Branch(&done, ge, exponent, Operand(zero_reg));
+ __ Move(double_scratch, 1.0);
+ __ div_d(double_result, double_scratch, double_result);
+ // Test whether result is zero. Bail out to check for subnormal result.
+ // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+ __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
+
+ // double_exponent may not contain the exponent value if the input was a
+ // smi. We set it with the exponent value before bailing out.
+ __ mtc1(exponent, single_scratch);
+ __ cvt_d_w(double_exponent, single_scratch);
+
+ // Returning or bailing out.
+ Counters* counters = masm->isolate()->counters();
+ if (exponent_type_ == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
+ // The stub is called from non-optimized code, which expects the result
+ // as heap number in exponent.
+ __ bind(&done);
+ __ AllocateHeapNumber(
+ heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
+ __ sdc1(double_result,
+ FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+ ASSERT(heapnumber.is(v0));
+ __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+ __ DropAndRet(2);
+ } else {
+ __ push(ra);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ }
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
+
+ __ bind(&done);
+ __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+ __ Ret();
+ }
}
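
The int_exponent path above is classic square-and-multiply on the absolute value of the exponent, with a reciprocal at the end for negative exponents. The final compare against kDoubleRegZero is the subnormal guard: computing 1/x first can flush the result to zero where pow() would not, so those cases fall through to the C routine. A scalar sketch of the same loop:

    #include <cmath>
    #include <cstdlib>

    // Mirrors the integer-exponent loop above; double_result, double_scratch
    // and scratch correspond to result, base and e.
    double PowInt(double base, int exponent) {
      double result = 1.0;
      int e = std::abs(exponent);
      while (true) {
        if (e & 1) result *= base;  // low bit set: multiply into the result.
        e >>= 1;
        if (e == 0) break;
        base *= base;               // square the base for the next bit.
      }
      if (exponent < 0) {
        result = 1.0 / result;
        // The stub bails out to power_double_double here when result == 0,
        // since x^-y == (1/x)^y fails once subnormals flush to zero.
      }
      return result;
    }
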
@@ -3900,7 +4005,7 @@
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
- // Setup argc and the builtin function in callee-saved registers.
+ // Set up argc and the builtin function in callee-saved registers.
__ mov(s0, a0);
__ mov(s2, a1);
@@ -3956,7 +4061,7 @@
// Registers:
// a0: entry address
// a1: function
- // a2: reveiver
+ // a2: receiver
// a3: argc
//
// Stack:
@@ -3992,13 +4097,13 @@
isolate)));
__ lw(t0, MemOperand(t0));
__ Push(t3, t2, t1, t0);
- // Setup frame pointer for the frame to be pushed.
+ // Set up frame pointer for the frame to be pushed.
__ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
// Registers:
// a0: entry_address
// a1: function
- // a2: reveiver_pointer
+ // a2: receiver_pointer
// a3: argc
// s0: argv
//
@@ -4065,7 +4170,7 @@
// Registers:
// a0: entry_address
// a1: function
- // a2: reveiver_pointer
+ // a2: receiver_pointer
// a3: argc
// s0: argv
//
@@ -4480,7 +4585,7 @@
__ sw(a3, FieldMemOperand(v0, i));
}
- // Setup the callee in-object property.
+ // Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
__ lw(a3, MemOperand(sp, 2 * kPointerSize));
const int kCalleeOffset = JSObject::kHeaderSize +
@@ -4493,7 +4598,7 @@
Heap::kArgumentsLengthIndex * kPointerSize;
__ sw(a2, FieldMemOperand(v0, kLengthOffset));
- // Setup the elements pointer in the allocated arguments object.
+ // Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, t0 will point there, otherwise
// it will point to the backing store.
__ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
@@ -4595,7 +4700,7 @@
__ Ret();
// Do the runtime call to allocate the arguments object.
- // a2 = argument count (taggged)
+ // a2 = argument count (tagged)
__ bind(&runtime);
__ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@@ -4670,7 +4775,7 @@
// Get the parameters pointer from the stack.
__ lw(a2, MemOperand(sp, 1 * kPointerSize));
- // Setup the elements pointer in the allocated arguments object and
+ // Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
@@ -4682,7 +4787,7 @@
// Copy the fixed array slots.
Label loop;
- // Setup t0 to point to the first array slot.
+ // Set up t0 to point to the first array slot.
__ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
// Pre-decrement a2 with kPointerSize on each iteration.
@@ -4902,9 +5007,9 @@
STATIC_ASSERT(kAsciiStringTag == 4);
STATIC_ASSERT(kTwoByteStringTag == 0);
// Find the code object based on the assumptions above.
- __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ascii.
+ __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
__ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
- __ sra(a3, a0, 2); // a3 is 1 for ascii, 0 for UC16 (usyed below).
+ __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
__ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
__ movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
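
The movz is a branchless select: t9 already holds the ASCII code object and is overwritten with the two-byte one exactly when the encoding bits in a0 are zero. In C terms (types are illustrative):

    // t9 = (a0 == 0) ? t1 : t9;  -- no branch, so no delay slot to fill.
    Code* regexp_code = (encoding_bits == 0) ? uc16_code : ascii_code;
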
@@ -5321,7 +5426,7 @@
// of the original receiver from the call site).
__ bind(&non_function);
__ sw(a1, MemOperand(sp, argc_ * kPointerSize));
- __ li(a0, Operand(argc_)); // Setup the number of arguments.
+ __ li(a0, Operand(argc_)); // Set up the number of arguments.
__ mov(a2, zero_reg);
__ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
__ SetCallKind(t1, CALL_AS_METHOD);
@@ -5820,11 +5925,15 @@
void StringHelper::GenerateHashInit(MacroAssembler* masm,
- Register hash,
- Register character) {
- // hash = character + (character << 10);
- __ sll(hash, character, 10);
+ Register hash,
+ Register character) {
+ // hash = seed + character + ((seed + character) << 10);
+ __ LoadRoot(hash, Heap::kHashSeedRootIndex);
+ // Untag smi seed and add the character.
+ __ SmiUntag(hash);
__ addu(hash, hash, character);
+ __ sll(at, hash, 10);
+ __ addu(hash, hash, at);
// hash ^= hash >> 6;
__ srl(at, hash, 6);
__ xor_(hash, hash, at);
@@ -5832,8 +5941,8 @@
void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
- Register hash,
- Register character) {
+ Register hash,
+ Register character) {
// hash += character;
__ addu(hash, hash, character);
// hash += hash << 10;
@@ -5846,7 +5955,7 @@
void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
- Register hash) {
+ Register hash) {
// hash += hash << 3;
__ sll(at, hash, 3);
__ addu(hash, hash, at);
@@ -5857,18 +5966,17 @@
__ sll(at, hash, 15);
__ addu(hash, hash, at);
- uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
- __ li(at, Operand(kHashShiftCutOffMask));
+ __ li(at, Operand(String::kHashBitMask));
__ and_(hash, hash, at);
// if (hash == 0) hash = 27;
- __ ori(at, zero_reg, 27);
+ __ ori(at, zero_reg, StringHasher::kZeroHash);
__ movz(hash, at, hash);
}
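
Taken together, the three helpers implement a seeded Jenkins one-at-a-time hash over the string's characters, truncated to the hash field and remapped away from zero. A sketch in C++ (the constants mirror String::kHashBitMask and StringHasher::kZeroHash; the 30-bit field width is an assumption about this V8 version):

    #include <stdint.h>

    static const uint32_t kHashBitMask = (1u << 30) - 1;  // String::kHashBitMask
    static const uint32_t kZeroHash = 27;                 // StringHasher::kZeroHash

    uint32_t StringHash(const uint16_t* chars, int length, uint32_t seed) {
      uint32_t hash = seed;                 // GenerateHashInit: untagged seed
      for (int i = 0; i < length; i++) {    // GenerateHashAddCharacter
        hash += chars[i];
        hash += hash << 10;
        hash ^= hash >> 6;
      }
      hash += hash << 3;                    // GenerateHashGetHash
      hash ^= hash >> 11;
      hash += hash << 15;
      hash &= kHashBitMask;
      return hash == 0 ? kZeroHash : hash;  // never hand out hash 0
    }
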
void SubStringStub::Generate(MacroAssembler* masm) {
- Label sub_string_runtime;
+ Label runtime;
// Stack frame on entry.
// ra: return address
// sp[0]: to
@@ -5886,53 +5994,35 @@
static const int kFromOffset = 1 * kPointerSize;
static const int kStringOffset = 2 * kPointerSize;
- Register to = t2;
- Register from = t3;
-
- // Check bounds and smi-ness.
- __ lw(to, MemOperand(sp, kToOffset));
- __ lw(from, MemOperand(sp, kFromOffset));
+ __ lw(a2, MemOperand(sp, kToOffset));
+ __ lw(a3, MemOperand(sp, kFromOffset));
STATIC_ASSERT(kFromOffset == kToOffset + 4);
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ JumpIfNotSmi(from, &sub_string_runtime);
- __ JumpIfNotSmi(to, &sub_string_runtime);
+ // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
+ // safe in this case.
+ __ JumpIfSmi(a2, &runtime, at, USE_DELAY_SLOT);
+ __ SmiUntag(a2);
+ __ JumpIfSmi(a3, &runtime, at, USE_DELAY_SLOT);
+ __ SmiUntag(a3);
- __ sra(a3, from, kSmiTagSize); // Remove smi tag.
- __ sra(t5, to, kSmiTagSize); // Remove smi tag.
+ // Both a2 and a3 are untagged integers.
- // a3: from index (untagged smi)
- // t5: to index (untagged smi)
-
- __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg)); // From < 0.
+ __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
__ subu(a2, t5, a3);
- __ Branch(&sub_string_runtime, gt, a3, Operand(t5)); // Fail if from > to.
+ __ Branch(&runtime, gt, a3, Operand(t5)); // Fail if from > to.
- // Special handling of sub-strings of length 1 and 2. One character strings
- // are handled in the runtime system (looked up in the single character
- // cache). Two character strings are looked for in the symbol cache in
- // generated code.
- __ Branch(&sub_string_runtime, lt, a2, Operand(2));
-
- // Both to and from are smis.
-
- // a2: result string length
- // a3: from index (untagged smi)
- // t2: (a.k.a. to): to (smi)
- // t3: (a.k.a. from): from offset (smi)
- // t5: to index (untagged smi)
-
- // Make sure first argument is a sequential (or flat) string.
+ // Make sure first argument is a string.
__ lw(v0, MemOperand(sp, kStringOffset));
- __ Branch(&sub_string_runtime, eq, v0, Operand(kSmiTagMask));
+ __ Branch(&runtime, eq, v0, Operand(kSmiTagMask));
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
__ And(t4, v0, Operand(kIsNotStringMask));
- __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg));
+ __ Branch(&runtime, ne, t4, Operand(zero_reg));
// Short-cut for the case of trivial substring.
Label return_v0;
@@ -5942,74 +6032,16 @@
__ sra(t0, t0, 1);
__ Branch(&return_v0, eq, a2, Operand(t0));
- Label create_slice;
- if (FLAG_string_slices) {
- __ Branch(&create_slice, ge, a2, Operand(SlicedString::kMinLength));
- }
-
- // v0: original string
- // a1: instance type
- // a2: result string length
- // a3: from index (untagged smi)
- // t2: (a.k.a. to): to (smi)
- // t3: (a.k.a. from): from offset (smi)
- // t5: to index (untagged smi)
-
- Label seq_string;
- __ And(t0, a1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(kSeqStringTag < kConsStringTag);
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kConsStringTag < kSlicedStringTag);
-
- // Slices and external strings go to runtime.
- __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag));
-
- // Sequential strings are handled directly.
- __ Branch(&seq_string, lt, t0, Operand(kConsStringTag));
-
- // Cons string. Try to recurse (once) on the first substring.
- // (This adds a little more generality than necessary to handle flattened
- // cons strings, but not much).
- __ lw(v0, FieldMemOperand(v0, ConsString::kFirstOffset));
- __ lw(t0, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSeqStringTag == 0);
- // Cons, slices and external strings go to runtime.
- __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask));
-
- // Definitly a sequential string.
- __ bind(&seq_string);
-
- // v0: original string
- // a1: instance type
- // a2: result string length
- // a3: from index (untagged smi)
- // t2: (a.k.a. to): to (smi)
- // t3: (a.k.a. from): from offset (smi)
- // t5: to index (untagged smi)
-
- __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
- __ Branch(&sub_string_runtime, lt, t0, Operand(to)); // Fail if to > length.
- to = no_reg;
-
- // v0: original string or left hand side of the original cons string.
- // a1: instance type
- // a2: result string length
- // a3: from index (untagged smi)
- // t3: (a.k.a. from): from offset (smi)
- // t5: to index (untagged smi)
-
- // Check for flat ASCII string.
- Label non_ascii_flat;
- STATIC_ASSERT(kTwoByteStringTag == 0);
-
- __ And(t4, a1, Operand(kStringEncodingMask));
- __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg));
Label result_longer_than_two;
- __ Branch(&result_longer_than_two, gt, a2, Operand(2));
+ // Check for the special case of a two-character ASCII string, in which case
+ // we do a lookup in the symbol table first.
+ __ li(t0, 2);
+ __ Branch(&result_longer_than_two, gt, a2, Operand(t0));
+ __ Branch(&runtime, lt, a2, Operand(t0));
- // Sub string of length 2 requested.
+ __ JumpIfInstanceTypeIsNotSequentialAscii(a1, a1, &runtime);
+
// Get the two characters forming the sub string.
__ Addu(v0, v0, Operand(a3));
__ lbu(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
@@ -6019,31 +6051,126 @@
Label make_two_character_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
- Counters* counters = masm->isolate()->counters();
__ jmp(&return_v0);
// a2: result string length.
// a3: two characters combined into halfword in little endian byte order.
__ bind(&make_two_character_string);
- __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime);
+ __ AllocateAsciiString(v0, a2, t0, t1, t4, &runtime);
__ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
__ jmp(&return_v0);
__ bind(&result_longer_than_two);
- // Locate 'from' character of string.
- __ Addu(t1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ sra(t4, from, 1);
- __ Addu(t1, t1, t4);
+ // Deal with different string types: update the index if necessary
+ // and put the underlying string into t1.
+ // v0: original string
+ // a1: instance type
+ // a2: length
+ // a3: from index (untagged)
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+ __ And(t0, a1, Operand(kIsIndirectStringMask));
+ __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
- // Allocate the result.
- __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime);
+ __ And(t0, a1, Operand(kSlicedNotConsMask));
+ __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
+ // Cons string. Check whether it is flat, then fetch first part.
+ __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
+ __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
+ __ Branch(&runtime, ne, t1, Operand(t0));
+ __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
+ // Update instance type.
+ __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked);
- // v0: result string
- // a2: result string length
- // a3: from index (untagged smi)
- // t1: first character of substring to copy
- // t3: (a.k.a. from): from offset (smi)
+ __ bind(&sliced_string);
+ // Sliced string. Fetch parent and correct start index by offset.
+ __ lw(t1, FieldMemOperand(v0, SlicedString::kOffsetOffset));
+ __ sra(t1, t1, 1);
+ __ Addu(a3, a3, t1);
+ __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
+ // Update instance type.
+ __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked);
+
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the expected register.
+ __ mov(t1, v0);
+
+ __ bind(&underlying_unpacked);
+
+ if (FLAG_string_slices) {
+ Label copy_routine;
+ // t1: underlying subject string
+ // a1: instance type of underlying subject string
+ // a2: length
+ // a3: adjusted start index (untagged)
+ // Short slice. Copy instead of slicing.
+ __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
+ // Allocate new sliced string. At this point we do not reload the instance
+ // type including the string encoding because we simply rely on the info
+ // provided by the original string. It does not matter if the original
+ // string's encoding is wrong because we always have to recheck encoding of
+ // the newly created string's parent anyway due to externalized strings.
+ Label two_byte_slice, set_slice_header;
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ And(t0, a1, Operand(kStringEncodingMask));
+ __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
+ __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
+ __ jmp(&set_slice_header);
+ __ bind(&two_byte_slice);
+ __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
+ __ bind(&set_slice_header);
+ __ sll(a3, a3, 1);
+ __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
+ __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
+ __ jmp(&return_v0);
+
+ __ bind(&copy_routine);
+ }
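
With string slices enabled, a long enough substring is never copied: the stub allocates a SlicedString that merely records (parent, offset, length), while short results take the copy_routine path below. Schematically (the allocator names are hypothetical):

    // Slice-vs-copy decision. 'unpacked' is the underlying sequential or
    // external string, with the start index already adjusted for slices.
    Object* MakeSubString(String* unpacked, int start, int length) {
      if (length >= SlicedString::kMinLength) {
        return AllocateSlicedString(unpacked, start, length);   // share chars
      }
      return CopySubStringCharacters(unpacked, start, length);  // copy_routine
    }
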
+
+ // t1: underlying subject string
+ // a1: instance type of underlying subject string
+ // a2: length
+ // a3: adjusted start index (untagged)
+ Label two_byte_sequential, sequential_string, allocate_result;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(t0, a1, Operand(kExternalStringTag));
+ __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
+
+ // Handle external string.
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ And(t0, a1, Operand(kShortExternalStringTag));
+ __ Branch(&runtime, ne, t0, Operand(zero_reg));
+ __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
+ // t1 already points to the first character of underlying string.
+ __ jmp(&allocate_result);
+
+ __ bind(&sequential_string);
+ // Locate first character of underlying subject string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&allocate_result);
+ // Sequential ASCII string. Allocate the result.
+ STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ __ And(t0, a1, Operand(kStringEncodingMask));
+ __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
+
+ // Allocate and copy the resulting ASCII string.
+ __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
+
+ // Locate first character of substring to copy.
+ __ Addu(t1, t1, a3);
+
// Locate first character of result.
__ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
@@ -6056,30 +6183,17 @@
masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_v0);
- __ bind(&non_ascii_flat);
- // a2: result string length
- // t1: string
- // t3: (a.k.a. from): from offset (smi)
- // Check for flat two byte string.
+ // Allocate and copy the resulting two-byte string.
+ __ bind(&two_byte_sequential);
+ __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
- // Locate 'from' character of string.
- __ Addu(t1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // As "from" is a smi it is 2 times the value which matches the size of a two
- // byte character.
+ // Locate first character of substring to copy.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ Addu(t1, t1, Operand(from));
-
- // Allocate the result.
- __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime);
-
- // v0: result string
- // a2: result string length
- // t1: first character of substring to copy
+ __ sll(t0, a3, 1);
+ __ Addu(t1, t1, t0);
// Locate first character of result.
__ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- from = no_reg;
-
// v0: result string.
// a1: first character of result.
// a2: result length.
@@ -6087,75 +6201,14 @@
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
- __ jmp(&return_v0);
-
- if (FLAG_string_slices) {
- __ bind(&create_slice);
- // v0: original string
- // a1: instance type
- // a2: length
- // a3: from index (untagged smi)
- // t2 (a.k.a. to): to (smi)
- // t3 (a.k.a. from): from offset (smi)
- Label allocate_slice, sliced_string, seq_or_external_string;
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ And(t4, a1, Operand(kIsIndirectStringMask));
- // External string. Jump to runtime.
- __ Branch(&seq_or_external_string, eq, t4, Operand(zero_reg));
-
- __ And(t4, a1, Operand(kSlicedNotConsMask));
- __ Branch(&sliced_string, ne, t4, Operand(zero_reg));
- // Cons string. Check whether it is flat, then fetch first part.
- __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
- __ LoadRoot(t5, Heap::kEmptyStringRootIndex);
- __ Branch(&sub_string_runtime, ne, t1, Operand(t5));
- __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
- __ jmp(&allocate_slice);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ lw(t1, FieldMemOperand(v0, SlicedString::kOffsetOffset));
- __ addu(t3, t3, t1);
- __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
- __ jmp(&allocate_slice);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the correct register.
- __ mov(t1, v0);
-
- __ bind(&allocate_slice);
- // a1: instance type of original string
- // a2: length
- // t1: underlying subject string
- // t3 (a.k.a. from): from offset (smi)
- // Allocate new sliced string. At this point we do not reload the instance
- // type including the string encoding because we simply rely on the info
- // provided by the original string. It does not matter if the original
- // string's encoding is wrong because we always have to recheck encoding of
- // the newly created string's parent anyways due to externalized strings.
- Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ And(t4, a1, Operand(kStringEncodingMask));
- __ Branch(&two_byte_slice, eq, t4, Operand(zero_reg));
- __ AllocateAsciiSlicedString(v0, a2, a3, t0, &sub_string_runtime);
- __ jmp(&set_slice_header);
- __ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(v0, a2, a3, t0, &sub_string_runtime);
- __ bind(&set_slice_header);
- __ sw(t3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
- __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
- }
__ bind(&return_v0);
+ Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
- __ Addu(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ __ DropAndRet(3);
// Just jump to runtime to create the sub string.
- __ bind(&sub_string_runtime);
+ __ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
}
@@ -6313,7 +6366,7 @@
void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime, call_builtin;
+ Label call_runtime, call_builtin;
Builtins::JavaScript builtin_id = Builtins::ADD;
Counters* counters = masm->isolate()->counters();
@@ -6328,7 +6381,7 @@
// Make sure that both arguments are strings if not known in advance.
if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfEitherSmi(a0, a1, &string_add_runtime);
+ __ JumpIfEitherSmi(a0, a1, &call_runtime);
// Load instance types.
__ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
__ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
@@ -6338,7 +6391,7 @@
// If either is not a string, go to runtime.
__ Or(t4, t0, Operand(t1));
__ And(t4, t4, Operand(kIsNotStringMask));
- __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+ __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
} else {
// Here at least one of the arguments is definitely a string.
// We convert the one that is not known to be a string.
@@ -6377,8 +6430,7 @@
__ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ Addu(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+ __ DropAndRet(2);
__ bind(&strings_not_empty);
}
@@ -6411,7 +6463,7 @@
__ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
}
__ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
- &string_add_runtime);
+ &call_runtime);
// Get the two characters forming the sub string.
__ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
@@ -6421,10 +6473,9 @@
// just allocate a new one.
Label make_two_character_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string);
+ masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ Addu(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+ __ DropAndRet(2);
__ bind(&make_two_character_string);
// Resulting string has length 2 and first chars of two strings
@@ -6433,21 +6484,20 @@
// halfword store instruction (which assumes that processor is
// in a little endian mode).
__ li(t2, Operand(2));
- __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime);
+ __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
__ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ Addu(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+ __ DropAndRet(2);
__ bind(&longer_than_two);
// Check if resulting string will be flat.
__ Branch(&string_add_flat_result, lt, t2,
- Operand(String::kMinNonFlatLength));
+ Operand(ConsString::kMinLength));
// Handle exceptionally long strings in the runtime system.
STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
ASSERT(IsPowerOf2(String::kMaxLength + 1));
// kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
- __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1));
+ __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
// If result is not supposed to be flat, allocate a cons string object.
// If both strings are ASCII the result is an ASCII cons string.
@@ -6459,22 +6509,20 @@
}
Label non_ascii, allocated, ascii_data;
STATIC_ASSERT(kTwoByteStringTag == 0);
- // Branch to non_ascii if either string-encoding field is zero (non-ascii).
+ // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
__ And(t4, t0, Operand(t1));
__ And(t4, t4, Operand(kStringEncodingMask));
__ Branch(&non_ascii, eq, t4, Operand(zero_reg));
// Allocate an ASCII cons string.
__ bind(&ascii_data);
- __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime);
+ __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
- __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
- __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
- __ mov(v0, t3);
+ __ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
+ __ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ Addu(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+ __ DropAndRet(2);
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
@@ -6492,11 +6540,13 @@
__ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
// Allocate a two byte cons string.
- __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime);
+ __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
__ Branch(&allocated);
- // Handle creating a flat result. First check that both strings are
- // sequential and that they have the same encoding.
+ // We cannot encounter sliced strings or cons strings here since:
+ STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
+ // Handle creating a flat result from either external or sequential strings.
+ // Locate the first characters' locations.
// a0: first string
// a1: second string
// a2: length of first string
@@ -6504,6 +6554,7 @@
// t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// t2: sum of lengths.
+ Label first_prepared, second_prepared;
__ bind(&string_add_flat_result);
if (flags_ != NO_STRING_ADD_FLAGS) {
__ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
@@ -6511,101 +6562,86 @@
__ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
__ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
}
- // Check that both strings are sequential, meaning that we
- // branch to runtime if either string tag is non-zero.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ Or(t4, t0, Operand(t1));
- __ And(t4, t4, Operand(kStringRepresentationMask));
- __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
+ // Check whether both strings have the same encoding.
+ __ Xor(t3, t0, Operand(t1));
+ __ And(t3, t3, Operand(kStringEncodingMask));
+ __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
- // Now check if both strings have the same encoding (ASCII/Two-byte).
- // a0: first string
- // a1: second string
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(t4, t0, Operand(kStringRepresentationMask));
+
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ Label skip_first_add;
+ __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
+ __ Branch(USE_DELAY_SLOT, &first_prepared);
+ __ addiu(t3, a0, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ bind(&skip_first_add);
+ // External string: rule out short external string and load string resource.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ And(t4, t0, Operand(kShortExternalStringMask));
+ __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
+ __ lw(t3, FieldMemOperand(a0, ExternalString::kResourceDataOffset));
+ __ bind(&first_prepared);
+
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(t4, t1, Operand(kStringRepresentationMask));
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ Label skip_second_add;
+ __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
+ __ Branch(USE_DELAY_SLOT, &second_prepared);
+ __ addiu(a1, a1, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ bind(&skip_second_add);
+ // External string: rule out short external string and load string resource.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ And(t4, t1, Operand(kShortExternalStringMask));
+ __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
+ __ lw(a1, FieldMemOperand(a1, ExternalString::kResourceDataOffset));
+ __ bind(&second_prepared);
+
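+
+ Both "prepared" sequences reduce a string to a raw pointer to its characters: sequential strings point just past the header, external strings load their resource data, and short external strings (which do not cache a data pointer) are punted to the runtime. A sketch with illustrative accessors:
+
+     // What first_prepared/second_prepared compute for one operand.
+     const uint8_t* FirstCharacter(String* str, int instance_type, bool* miss) {
+       if ((instance_type & kStringRepresentationMask) == kSeqStringTag) {
+         return reinterpret_cast<const uint8_t*>(str) +
+                SeqAsciiString::kHeaderSize - kHeapObjectTag;
+       }
+       if ((instance_type & kShortExternalStringMask) != 0) {
+         *miss = true;  // short external string: no cached data pointer.
+         return NULL;
+       }
+       return ExternalString::cast(str)->resource_data();  // hypothetical name
+     }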
+ Label non_ascii_string_add_flat_result;
+ // t3: first character of first string
+ // a1: first character of second string
// a2: length of first string
// a3: length of second string
- // t0: first string instance type
- // t1: second string instance type
// t2: sum of lengths.
- Label non_ascii_string_add_flat_result;
- ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test.
- __ xor_(t3, t1, t0);
- __ And(t3, t3, Operand(kStringEncodingMask));
- __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg));
- // And see if it's ASCII (0) or two-byte (1).
- __ And(t3, t0, Operand(kStringEncodingMask));
- __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg));
+ // Both strings have the same encoding.
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ And(t4, t1, Operand(kStringEncodingMask));
+ __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
- // Both strings are sequential ASCII strings. We also know that they are
- // short (since the sum of the lengths is less than kMinNonFlatLength).
- // t2: length of resulting flat string
- __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime);
- // Locate first character of result.
- __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // a0: first character of first string.
- // a1: second string.
+ __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
+ __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // v0: result string.
+ // t3: first character of first string.
+ // a1: first character of second string
// a2: length of first string.
// a3: length of second string.
// t2: first character of result.
- // t3: result string.
- StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true);
- // Load second argument and locate first character.
- __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // a1: first character of second string.
- // a3: length of second string.
+ StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
// t2: next character of result.
- // t3: result string.
StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
- __ mov(v0, t3);
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ Addu(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+ __ DropAndRet(2);
__ bind(&non_ascii_string_add_flat_result);
- // Both strings are sequential two byte strings.
- // a0: first string.
- // a1: second string.
- // a2: length of first string.
- // a3: length of second string.
- // t2: sum of length of strings.
- __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime);
- // a0: first string.
- // a1: second string.
- // a2: length of first string.
- // a3: length of second string.
- // t3: result string.
-
- // Locate first character of result.
- __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // a0: first character of first string.
- // a1: second string.
+ __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
+ __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // v0: result string.
+ // t3: first character of first string.
+ // a1: first character of second string.
// a2: length of first string.
// a3: length of second string.
// t2: first character of result.
- // t3: result string.
- StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false);
-
- // Locate first character of second argument.
- __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
- // a1: first character of second string.
- // a3: length of second string.
- // t2: next character of result (after copy of first string).
- // t3: result string.
+ StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
+ // t2: next character of result.
StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
- __ mov(v0, t3);
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
- __ Addu(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+ __ DropAndRet(2);
// Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
+ __ bind(&call_runtime);
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
@@ -6870,26 +6906,39 @@
}
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- __ Push(a1, a0);
- __ push(ra);
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ Label miss;
+ __ And(a2, a1, a0);
+ __ JumpIfSmi(a2, &miss);
+ __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Branch(&miss, ne, a2, Operand(known_map_));
+ __ Branch(&miss, ne, a3, Operand(known_map_));
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
- masm->isolate());
+ __ Ret(USE_DELAY_SLOT);
+ __ subu(v0, a0, a1);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
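
Because both operands are known (via known_map_) to be objects that compare by identity, the fast path can produce the result as a plain pointer difference: zero iff the two references are the same object, computed in the delay slot of the return. Roughly (Miss() stands in for the GenerateMiss tail call):

    intptr_t Miss();  // declaration only; hypothetical stand-in

    intptr_t CompareKnownObjects(Object* lhs /* a1 */, Object* rhs /* a0 */,
                                 Map* known_map) {
      if (lhs->IsSmi() || rhs->IsSmi()) return Miss();
      if (HeapObject::cast(rhs)->map() != known_map ||
          HeapObject::cast(lhs)->map() != known_map) return Miss();
      return reinterpret_cast<intptr_t>(rhs) -
             reinterpret_cast<intptr_t>(lhs);  // subu(v0, a0, a1)
    }
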
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a0);
+ __ push(ra);
+ __ Push(a1, a0);
__ li(t0, Operand(Smi::FromInt(op_)));
__ push(t0);
__ CallExternalReference(miss, 3);
+ // Compute the entry point of the rewritten stub.
+ __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore registers.
+ __ Pop(a1, a0, ra);
}
- // Compute the entry point of the rewritten stub.
- __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore registers.
- __ pop(ra);
- __ pop(a0);
- __ pop(a1);
__ Jump(a2);
}
diff --git a/src/mips/constants-mips.cc b/src/mips/constants-mips.cc
index d0a7af5..7d654f6 100644
--- a/src/mips/constants-mips.cc
+++ b/src/mips/constants-mips.cc
@@ -302,7 +302,7 @@
return kRegisterType;
};
break;
- // 16 bits Immediate type instructions. eg: addi dest, src, imm16.
+ // 16 bits Immediate type instructions. e.g.: addi dest, src, imm16.
case REGIMM:
case BEQ:
case BNE:
@@ -337,7 +337,7 @@
case SWC1:
case SDC1:
return kImmediateType;
- // 26 bits immediate type instructions. eg: j imm26.
+ // 26 bits immediate type instructions. e.g.: j imm26.
case J:
case JAL:
return kJumpType;
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index 4f486c1..d62a890 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -125,7 +125,7 @@
struct RegisterAlias {
int reg;
- const char *name;
+ const char* name;
};
static const int32_t kMaxValue = 0x7fffffff;
@@ -147,7 +147,7 @@
struct RegisterAlias {
int creg;
- const char *name;
+ const char* name;
};
private:
@@ -747,7 +747,7 @@
// Say if the instruction should not be used in a branch delay slot.
bool IsForbiddenInBranchDelay() const;
- // Say if the instruction 'links'. eg: jal, bal.
+ // Say if the instruction 'links', e.g. jal, bal.
bool IsLinkingInstruction() const;
// Say if the instruction is a break or a trap.
bool IsTrap() const;
diff --git a/src/mips/cpu-mips.cc b/src/mips/cpu-mips.cc
index 5c3912e..93ebeda 100644
--- a/src/mips/cpu-mips.cc
+++ b/src/mips/cpu-mips.cc
@@ -47,7 +47,7 @@
namespace internal {
-void CPU::Setup() {
+void CPU::SetUp() {
CpuFeatures::Probe();
}
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index a27c61c..26a4063 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -218,12 +218,13 @@
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
+ iterator.Skip(1); // Drop JS frame count.
ASSERT(count == 1);
USE(count);
opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
- ASSERT(Translation::FRAME == opcode);
+ ASSERT(Translation::JS_FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
@@ -259,9 +260,7 @@
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
-#ifdef DEBUG
- output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
-#endif
+ output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -326,7 +325,7 @@
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
- // Setup the frame pointer and the context pointer.
+ // Set up the frame pointer and the context pointer.
output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
@@ -349,15 +348,115 @@
}
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
+ int frame_index) {
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
+ }
+
+ unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
+
+ // Arguments adaptor cannot be topmost or bottommost.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous
+ // frame's top and this frame's size.
+ uint32_t top_address;
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = height;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ input_offset -= (parameter_count * kPointerSize);
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // A marker value is used in place of the context.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t context = reinterpret_cast<intptr_t>(
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ output_frame->SetFrameSlot(output_offset, context);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
+ top_address + output_offset, output_offset, context);
+ }
+
+ // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Number of incoming arguments.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
+ top_address + output_offset, output_offset, value, height - 1);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Builtins* builtins = isolate_->builtins();
+ Code* adaptor_trampoline =
+ builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
+ uint32_t pc = reinterpret_cast<uint32_t>(
+ adaptor_trampoline->instruction_start() +
+ isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
+ output_frame->SetPc(pc);
+}
+
+
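
The slots written above recreate, from high to low addresses, exactly the frame ArgumentsAdaptorTrampoline would have built, with top_address ending at the argc slot. As a layout sketch (height translated values include the receiver, so argc == height - 1):

    [ translated args, incl. receiver ]  \  height * kPointerSize
    [ caller's pc                     ]  \
    [ caller's fp                     ]   } kFrameSize =
    [ ARGUMENTS_ADAPTOR sentinel smi  ]   }   kFixedFrameSize + kPointerSize
    [ function                        ]   }
    [ argc as smi (height - 1)        ]  /  <- top_address; the frame's pc is
                                            rebuilt from the recorded
                                            arguments_adaptor_deopt_pc_offset.
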
// This code is very similar to ia32/arm code, but relies on register names
// (fp, sp) and how the frame is laid out.
-void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
- int frame_index) {
+void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
+ int frame_index) {
// Read the ast node id, function, and frame height for this output frame.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- USE(opcode);
- ASSERT(Translation::FRAME == opcode);
int node_id = iterator->Next();
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
@@ -377,9 +476,7 @@
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
-#ifdef DEBUG
- output_frame->SetKind(Code::FUNCTION);
-#endif
+ output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
@@ -733,10 +830,7 @@
}
}
- // Set up the roots register.
- ExternalReference roots_array_start =
- ExternalReference::roots_array_start(isolate);
- __ li(roots, Operand(roots_array_start));
+ __ InitializeRootRegister();
__ pop(at); // Get continuation, leave pc on stack.
__ pop(ra);
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index 9e626f3..2ed358a 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -195,6 +195,9 @@
class StandardFrameConstants : public AllStatic {
public:
+ // Fixed part of the frame consists of return address, caller fp,
+ // context and function.
+ static const int kFixedFrameSize = 4 * kPointerSize;
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
@@ -230,6 +233,8 @@
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
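
A quick worked check of the new constants, assuming MIPS32's 4-byte pointers; the assertions merely restate the definitions above:

    #include <cassert>

    int main() {
      const int kPointerSize = 4;                             // MIPS32.
      const int kFixedFrameSize = 4 * kPointerSize;           // ra, fp, cp, function.
      const int kFrameSize = kFixedFrameSize + kPointerSize;  // Adds the length slot.
      const int kLengthOffset = -3 * kPointerSize;            // == kExpressionsOffset.
      assert(kFrameSize == 20);
      assert(kLengthOffset == -12);
      return 0;
    }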
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index f5b851d..da3be4c 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -125,7 +125,7 @@
// function.
//
// The live registers are:
-// o a1: the JS function object being called (ie, ourselves)
+// o a1: the JS function object being called (i.e. ourselves)
// o cp: our context
// o fp: our caller's frame pointer
// o sp: stack pointer
@@ -1017,7 +1017,7 @@
__ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumerationIndexOffset));
__ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset));
- // Setup the four remaining stack slots.
+ // Set up the four remaining stack slots.
__ push(v0); // Map.
__ lw(a1, FieldMemOperand(a2, FixedArray::kLengthOffset));
__ li(a0, Operand(Smi::FromInt(0)));
@@ -2958,8 +2958,12 @@
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- MathPowStub stub;
- __ CallStub(&stub);
+ if (CpuFeatures::IsSupported(FPU)) {
+ MathPowStub stub(MathPowStub::ON_STACK);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kMath_pow, 2);
+ }
context()->Plug(v0);
}
@@ -3651,7 +3655,7 @@
// One-character separator case.
__ bind(&one_char_separator);
- // Replace separator with its ascii character value.
+ // Replace separator with its ASCII character value.
__ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator.
@@ -3662,7 +3666,7 @@
// result_pos: the position to which we are currently copying characters.
// element: Current array element.
// elements_end: Array end.
- // separator: Single separator ascii char (in lower byte).
+ // separator: Single separator ASCII char (in lower byte).
// Copy the separator character to the result.
__ sb(separator, MemOperand(result_pos));
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index b057695..3489936 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -868,22 +868,26 @@
// -- lr : return address
// -----------------------------------
Label slow, notin;
+  // The register of MemOperand mapped_location holds the store address.
MemOperand mapped_location =
GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, ¬in, &slow);
__ sw(a0, mapped_location);
- __ Addu(t2, a3, t1);
__ mov(t5, a0);
- __ RecordWrite(a3, t2, t5, kRAHasNotBeenSaved, kDontSaveFPRegs);
+ ASSERT_EQ(mapped_location.offset(), 0);
+ __ RecordWrite(a3, mapped_location.rm(), t5,
+ kRAHasNotBeenSaved, kDontSaveFPRegs);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
__ bind(¬in);
// The unmapped lookup expects that the parameter map is in a3.
+  // The register of MemOperand unmapped_location holds the store address.
MemOperand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
__ sw(a0, unmapped_location);
- __ Addu(t2, a3, t0);
__ mov(t5, a0);
- __ RecordWrite(a3, t2, t5, kRAHasNotBeenSaved, kDontSaveFPRegs);
+ ASSERT_EQ(unmapped_location.offset(), 0);
+ __ RecordWrite(a3, unmapped_location.rm(), t5,
+ kRAHasNotBeenSaved, kDontSaveFPRegs);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
__ bind(&slow);
@@ -1029,19 +1033,32 @@
__ lw(t0, FieldMemOperand(a0, String::kHashFieldOffset));
__ sra(at, t0, String::kHashShift);
__ xor_(a3, a3, at);
- __ And(a3, a3, Operand(KeyedLookupCache::kCapacityMask));
+ int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+ __ And(a3, a3, Operand(mask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
ExternalReference cache_keys =
ExternalReference::keyed_lookup_cache_keys(isolate);
__ li(t0, Operand(cache_keys));
__ sll(at, a3, kPointerSizeLog2 + 1);
__ addu(t0, t0, at);
- __ lw(t1, MemOperand(t0)); // Move t0 to symbol.
- __ Addu(t0, t0, Operand(kPointerSize));
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+ __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
+ __ Branch(&try_next_entry, ne, a2, Operand(t1));
+ __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
+ __ Branch(&hit_on_nth_entry[i], eq, a0, Operand(t1));
+ __ bind(&try_next_entry);
+ }
+
+ __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
__ Branch(&slow, ne, a2, Operand(t1));
- __ lw(t1, MemOperand(t0));
+ __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
__ Branch(&slow, ne, a0, Operand(t1));
// Get field offset.
@@ -1051,15 +1068,24 @@
// a3 : lookup cache index
ExternalReference cache_field_offsets =
ExternalReference::keyed_lookup_cache_field_offsets(isolate);
- __ li(t0, Operand(cache_field_offsets));
- __ sll(at, a3, kPointerSizeLog2);
- __ addu(at, t0, at);
- __ lw(t1, MemOperand(at));
- __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
- __ Subu(t1, t1, t2);
- __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ bind(&hit_on_nth_entry[i]);
+ __ li(t0, Operand(cache_field_offsets));
+ __ sll(at, a3, kPointerSizeLog2);
+ __ addu(at, t0, at);
+ __ lw(t1, MemOperand(at, kPointerSize * i));
+ __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
+ __ Subu(t1, t1, t2);
+ __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
+ if (i != 0) {
+ __ Branch(&load_in_object_property);
+ }
+ }
// Load in-object property.
+ __ bind(&load_in_object_property);
__ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset));
__ addu(t2, t2, t1); // Index from start of object.
__ Subu(a1, a1, Operand(kHeapObjectTag)); // Remove the heap tag.
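
The unrolled probe above checks each (map, symbol) pair in a bucket before giving up; roughly, in C++, with illustrative parameter types and the bucket size as an assumption:

    // Keys are stored as (map, symbol) word pairs; field offsets live in a
    // parallel array indexed by entry.  A hit on entry i of the bucket at
    // `index` yields field_offsets[index + i], mirroring the assembly.
    static int ProbeKeyedLookupCache(const void* const* keys,
                                     const int* field_offsets,
                                     unsigned index,  // Hash, already masked.
                                     const void* map, const void* symbol) {
      const int kEntriesPerBucket = 2;  // Assumed bucket size.
      for (int i = 0; i < kEntriesPerBucket; i++) {
        if (keys[(index + i) * 2] == map &&
            keys[(index + i) * 2 + 1] == symbol) {
          return field_offsets[index + i];
        }
      }
      return -1;  // Miss: take the slow path.
    }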
@@ -1470,11 +1496,10 @@
// -- ra : return address
// -----------------------------------
//
- // This accepts as a receiver anything JSObject::SetElementsLength accepts
- // (currently anything except for external and pixel arrays which means
- // anything with elements of FixedArray type.), but currently is restricted
- // to JSArray.
- // Value must be a number, but only smis are accepted as the most common case.
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
Label miss;
@@ -1496,6 +1521,10 @@
__ GetObjectType(scratch, scratch, scratch);
__ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ // TODO(mstarzinger): Port this check to MIPS.
+
// Check that value is a smi.
__ JumpIfNotSmi(value, &miss);
@@ -1587,6 +1616,9 @@
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
rewritten = stub.GetCode();
}
set_target(*rewritten);
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 848cf3e..e886c4a 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -291,7 +291,22 @@
if (op->IsRegister()) {
return ToRegister(op->index());
} else if (op->IsConstantOperand()) {
- __ li(scratch, ToOperand(op));
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(literal->IsNumber());
+ __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
+ } else if (r.IsDouble()) {
+ Abort("EmitLoadRegister: Unsupported double immediate.");
+ } else {
+ ASSERT(r.IsTagged());
+ if (literal->IsSmi()) {
+ __ li(scratch, Operand(literal));
+ } else {
+ __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
+ }
+ }
return scratch;
} else if (op->IsStackSlot() || op->IsArgument()) {
__ lw(scratch, ToMemOperand(op));
@@ -338,6 +353,18 @@
}
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ Handle<Object> literal = chunk_->LookupLiteral(op);
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
+ return literal;
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsInteger32();
+}
+
+
int LCodeGen::ToInteger32(LConstantOperand* op) const {
Handle<Object> value = chunk_->LookupLiteral(op);
ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
@@ -420,7 +447,11 @@
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->BeginFrame(environment->ast_id(), closure_id, height);
+ if (environment->is_arguments_adaptor()) {
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ } else {
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ }
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
@@ -546,10 +577,14 @@
// |>------------ translation_size ------------<|
int frame_count = 0;
+ int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
+ if (!e->is_arguments_adaptor()) {
+ ++jsframe_count;
+ }
}
- Translation translation(&translations_, frame_count);
+ Translation translation(&translations_, frame_count, jsframe_count);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
@@ -1162,8 +1197,13 @@
void LCodeGen::DoConstantT(LConstantT* instr) {
- ASSERT(instr->result()->IsRegister());
- __ li(ToRegister(instr->result()), Operand(instr->value()));
+ Handle<Object> value = instr->value();
+ if (value->IsSmi()) {
+ __ li(ToRegister(instr->result()), Operand(value));
+ } else {
+ __ LoadHeapObject(ToRegister(instr->result()),
+ Handle<HeapObject>::cast(value));
+ }
}
@@ -1830,9 +1870,8 @@
}
-// Branches to a label or falls through with this instance class-name adr
-// returned in temp reg, available for comparison by the caller. Trashes the
-// temp registers, but not the input. Only input and temp2 may alias.
+// Branches to a label or falls through with the answer in flags. Trashes
+// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
Handle<String> class_name,
@@ -1840,7 +1879,9 @@
Register temp,
Register temp2) {
ASSERT(!input.is(temp));
- ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
+ ASSERT(!input.is(temp2));
+ ASSERT(!temp.is(temp2));
+
__ JumpIfSmi(input, is_false);
if (class_name->IsEqualTo(CStrVector("Function"))) {
@@ -2042,7 +2083,7 @@
// offset to the location of the map check.
Register temp = ToRegister(instr->TempAt(0));
ASSERT(temp.is(t0));
- __ li(InstanceofStub::right(), Operand(instr->function()));
+ __ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 7;
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
@@ -2124,26 +2165,26 @@
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->InputAt(0));
- Register scratch = scratch0();
- Register scratch2 = ToRegister(instr->TempAt(0));
+ Register value = ToRegister(instr->value());
+ Register cell = scratch0();
// Load the cell.
- __ li(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
+ __ li(cell, Operand(instr->hydrogen()->cell()));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted.
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lw(scratch2,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ // We use a temp to check the payload.
+ Register payload = ToRegister(instr->TempAt(0));
+ __ lw(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), scratch2, Operand(at));
+ DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
}
// Store the value.
- __ sw(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ sw(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
// Cells are always rescanned, so no write barrier here.
}
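
In rough pseudo-C++ (the cell type and sentinel are illustrative), the hole check above amounts to:

    struct CellSketch { const void* value; };
    static const void* const kTheHole = &kTheHole;  // Illustrative sentinel.

    // A deleted global leaves the hole in its property cell; storing over
    // it would resurrect the property without updating the dictionary
    // details, so optimized code deopts and lets the runtime handle it.
    static bool StoreGlobalCell(CellSketch* cell, const void* value,
                                bool requires_hole_check) {
      if (requires_hole_check && cell->value == kTheHole) {
        return false;  // Deoptimize.
      }
      cell->value = value;  // Cells are always rescanned: no write barrier.
      return true;
    }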
@@ -2163,14 +2204,42 @@
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
+
__ lw(result, ContextOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ } else {
+ Label is_not_hole;
+ __ Branch(&is_not_hole, ne, result, Operand(at));
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ bind(&is_not_hole);
+ }
+ }
}
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
+ Register scratch = scratch0();
MemOperand target = ContextOperand(context, instr->slot_index());
+
+ Label skip_assignment;
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ lw(scratch, target);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
+ } else {
+ __ Branch(&skip_assignment, ne, scratch, Operand(at));
+ }
+ }
+
__ sw(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
@@ -2185,6 +2254,8 @@
EMIT_REMEMBERED_SET,
check_needed);
}
+
+ __ bind(&skip_assignment);
}
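
The two hole-check flavors above (deoptimize versus degrade to undefined) can be sketched as follows; the sentinels and helper are hypothetical:

    static const void* const kHole = &kHole;  // Illustrative sentinels.
    static const void* const kUndefinedVal = &kUndefinedVal;

    // Mirrors DoLoadContextSlot: a hole in a checked slot either deopts
    // (DeoptimizesOnHole, e.g. for bindings that must be initialized) or
    // reads as undefined.  DoStoreContextSlot similarly deopts or skips
    // the assignment when the current slot value is the hole.
    static const void* LoadContextSlot(const void* slot_value,
                                       bool hole_check, bool deopt_on_hole,
                                       bool* deopt_out) {
      if (hole_check && slot_value == kHole) {
        if (deopt_on_hole) {
          *deopt_out = true;
          return nullptr;
        }
        return kUndefinedVal;
      }
      return slot_value;
    }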
@@ -2206,7 +2277,7 @@
Handle<String> name) {
LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsProperty() &&
+ ASSERT(lookup.IsFound() &&
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
if (lookup.type() == FIELD) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
@@ -2222,7 +2293,7 @@
}
} else {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ __ LoadHeapObject(result, function);
}
}
@@ -2656,7 +2727,7 @@
this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is a0, as expected
// by InvokeFunction.
- v8::internal::ParameterCount actual(receiver);
+ ParameterCount actual(receiver);
__ InvokeFunction(function, actual, CALL_FUNCTION,
safepoint_generator, CALL_AS_METHOD);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2676,7 +2747,7 @@
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- LoadHeapObject(result, instr->hydrogen()->closure());
+ __ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -2712,31 +2783,41 @@
int arity,
LInstruction* instr,
CallKind call_kind) {
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- }
-
- // Set a0 to arguments count if adaption is not needed. Assumes that a0
- // is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ li(a0, Operand(arity));
- }
+ bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
+ function->shared()->formal_parameter_count() == arity;
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- // Invoke function.
- __ SetCallKind(t1, call_kind);
- __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- __ Call(at);
+ if (can_invoke_directly) {
+ __ LoadHeapObject(a1, function);
+ // Change context if needed.
+ bool change_context =
+ (info()->closure()->context() != function->context()) ||
+ scope()->contains_with() ||
+ (scope()->num_heap_slots() > 0);
+ if (change_context) {
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ }
- // Setup deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    // Set a0 to arguments count if adaption is not needed. Assumes that a0
+    // is available to write to at this point.
+ if (!function->NeedsArgumentsAdaption()) {
+ __ li(a0, Operand(arity));
+ }
+
+ // Invoke function.
+ __ SetCallKind(t1, call_kind);
+ __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Call(at);
+
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ } else {
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(arity);
+ __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+ }
// Restore context.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
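
The new guard above can be restated as a one-line predicate; a hedged sketch:

    // Direct entry at the code-entry address skips the arguments adaptor,
    // so it is only safe when no adaption is needed or the formal
    // parameter count already matches the call-site arity.
    static bool CanInvokeDirectly(bool needs_arguments_adaption,
                                  int formal_parameter_count, int arity) {
      return !needs_arguments_adaption || formal_parameter_count == arity;
    }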
@@ -2746,7 +2827,6 @@
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
__ mov(a0, v0);
- __ li(a1, Operand(instr->function()));
CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD);
}
@@ -2825,7 +2905,7 @@
__ mov(result, input);
ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
__ subu(result, zero_reg, input);
- // Overflow if result is still negative, ie 0x80000000.
+ // Overflow if result is still negative, i.e. 0x80000000.
DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
__ bind(&done);
}
@@ -2985,69 +3065,75 @@
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister double_scratch = double_scratch0();
+ DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
+
+ ASSERT(!input.is(result));
+
+ // Note that according to ECMA-262 15.8.2.13:
+ // Math.pow(-Infinity, 0.5) == Infinity
+ // Math.sqrt(-Infinity) == NaN
+ Label done;
+ __ Move(temp, -V8_INFINITY);
+ __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
+ // Set up Infinity in the delay slot.
+ // result is overwritten if the branch is not taken.
+ __ neg_d(result, temp);
// Add +0 to convert -0 to +0.
- __ mtc1(zero_reg, double_scratch.low());
- __ mtc1(zero_reg, double_scratch.high());
- __ add_d(result, input, double_scratch);
+ __ add_d(result, input, kDoubleRegZero);
__ sqrt_d(result, result);
+ __ bind(&done);
}
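
A scalar sketch of the semantics the new -Infinity guard implements, per ECMA-262 15.8.2.13 (Math.pow(x, 0.5) differs from sqrt at -Infinity); the helper name is illustrative:

    #include <cmath>
    #include <limits>

    // Math.pow(-Infinity, 0.5) is +Infinity, while sqrt(-Infinity) is NaN.
    // Adding +0 first folds -0 into +0, just as the add_d above does.
    static double PowHalf(double x) {
      if (x == -std::numeric_limits<double>::infinity()) {
        return std::numeric_limits<double>::infinity();
      }
      return std::sqrt(x + 0.0);
    }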
void LCodeGen::DoPower(LPower* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- Register scratch = scratch0();
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
- if (exponent_type.IsDouble()) {
- // Prepare arguments and call C function.
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(ToDoubleRegister(left),
- ToDoubleRegister(right));
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 0, 2);
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+ ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+ ToDoubleRegister(instr->InputAt(1)).is(f4));
+ ASSERT(!instr->InputAt(1)->IsRegister() ||
+ ToRegister(instr->InputAt(1)).is(a2));
+ ASSERT(ToDoubleRegister(instr->InputAt(0)).is(f2));
+ ASSERT(ToDoubleRegister(instr->result()).is(f0));
+
+ if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(a2, &no_deopt);
+ __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
+ __ bind(&no_deopt);
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
- ASSERT(ToRegister(right).is(a0));
- // Prepare arguments and call C function.
- __ PrepareCallCFunction(1, 1, scratch);
- __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
- __ CallCFunction(
- ExternalReference::power_double_int_function(isolate()), 1, 1);
+ MathPowStub stub(MathPowStub::INTEGER);
+ __ CallStub(&stub);
} else {
- ASSERT(exponent_type.IsTagged());
- ASSERT(instr->hydrogen()->left()->representation().IsDouble());
-
- Register right_reg = ToRegister(right);
-
- // Check for smi on the right hand side.
- Label non_smi, call;
- __ JumpIfNotSmi(right_reg, &non_smi);
-
- // Untag smi and convert it to a double.
- __ SmiUntag(right_reg);
- FPURegister single_scratch = double_scratch0();
- __ mtc1(right_reg, single_scratch);
- __ cvt_d_w(result_reg, single_scratch);
- __ Branch(&call);
-
- // Heap number map check.
- __ bind(&non_smi);
- __ lw(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
- __ ldc1(result_reg, FieldMemOperand(right_reg, HeapNumber::kValueOffset));
-
- // Prepare arguments and call C function.
- __ bind(&call);
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 0, 2);
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(MathPowStub::DOUBLE);
+ __ CallStub(&stub);
}
- // Store the result in the result register.
- __ GetCFunctionDoubleResult(result_reg);
+}
+
+
+void LCodeGen::DoRandom(LRandom* instr) {
+ // Having marked this instruction as a call we can use any
+ // registers.
+ ASSERT(ToDoubleRegister(instr->result()).is(f0));
+ ASSERT(ToRegister(instr->InputAt(0)).is(a0));
+
+ __ PrepareCallCFunction(1, a1);
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+
+ // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+ __ li(a2, Operand(0x41300000));
+ // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
+ __ Move(f12, v0, a2);
+ // Move 0x4130000000000000 to FPU.
+ __ Move(f14, zero_reg, a2);
+ // Subtract to get the result.
+ __ sub_d(f0, f12, f14);
}
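
The exponent-bit trick used by DoRandom, sketched in portable C++ (the uint32 argument stands in for the runtime's random_uint32_function result):

    #include <cstdint>
    #include <cstring>

    // Builds the double with bit pattern 0x41300000'xxxxxxxx, whose value
    // is 2^20 + r * 2^-32, then subtracts 2^20 (0x4130000000000000) to
    // leave a uniform value in [0, 1).
    static double RandomBitsToDouble(uint32_t random_bits) {
      uint64_t bits = (static_cast<uint64_t>(0x41300000) << 32) | random_bits;
      double biased;
      std::memcpy(&biased, &bits, sizeof(biased));
      return biased - 1048576.0;  // 1048576.0 == 2^20.
    }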
@@ -3183,7 +3269,6 @@
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
- __ li(a1, Operand(instr->target()));
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -3279,13 +3364,6 @@
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Register scratch = scratch0();
- // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
- // conversion, so it deopts in that case.
- if (instr->hydrogen()->ValueNeedsSmiCheck()) {
- __ And(at, value, Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
- }
-
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3758,6 +3836,7 @@
void LCodeGen::EmitNumberUntagD(Register input_reg,
DoubleRegister result_reg,
bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
LEnvironment* env) {
Register scratch = scratch0();
@@ -3787,6 +3866,12 @@
}
// Heap number to double register conversion.
__ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ __ mfc1(at, result_reg.low());
+ __ Branch(&done, ne, at, Operand(zero_reg));
+ __ mfc1(scratch, result_reg.high());
+ DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+ }
__ Branch(&done);
// Smi to double register conversion
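
For reference, the minus-zero test performed above with mfc1 on the two halves of the FPU register corresponds to this plain-C++ check (helper name illustrative):

    #include <cstdint>
    #include <cstring>

    // A double is -0.0 exactly when its low word is zero and its high word
    // holds only the sign bit (HeapNumber::kSignMask, 0x80000000).
    static bool IsMinusZero(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return static_cast<uint32_t>(bits) == 0 &&
             static_cast<uint32_t>(bits >> 32) == 0x80000000u;
    }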
@@ -3918,6 +4003,7 @@
EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
+ instr->hydrogen()->deoptimize_on_minus_zero(),
instr->environment());
}
@@ -4011,10 +4097,32 @@
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- ASSERT(instr->InputAt(0)->IsRegister());
- Register reg = ToRegister(instr->InputAt(0));
- DeoptimizeIf(ne, instr->environment(), reg,
- Operand(instr->hydrogen()->target()));
+ Register reg = ToRegister(instr->value());
+ Handle<JSFunction> target = instr->hydrogen()->target();
+  if (isolate()->heap()->InNewSpace(*target)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(target);
+ __ li(at, Operand(Handle<Object>(cell)));
+ __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+    DeoptimizeIf(ne, instr->environment(), reg, Operand(at));
+ } else {
+    DeoptimizeIf(ne, instr->environment(), reg, Operand(target));
+ }
+}
+
+
+void LCodeGen::DoCheckMapCommon(Register reg,
+ Register scratch,
+ Handle<Map> map,
+ CompareMapMode mode,
+ LEnvironment* env) {
+ Label success;
+ __ CompareMapAndBranch(reg, scratch, map, &success, eq, &success, mode);
+ DeoptimizeIf(al, env);
+ __ bind(&success);
}
@@ -4023,11 +4131,9 @@
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- DeoptimizeIf(ne,
- instr->environment(),
- scratch,
- Operand(instr->hydrogen()->map()));
+ Handle<Map> map = instr->hydrogen()->map();
+ DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(),
+ instr->environment());
}
@@ -4083,19 +4189,6 @@
}
-void LCodeGen::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- if (heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(object);
- __ li(result, Operand(cell));
- __ lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
- } else {
- __ li(result, Operand(object));
- }
-}
-
-
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register temp1 = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));
@@ -4104,40 +4197,53 @@
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
- LoadHeapObject(temp1, current_prototype);
+ __ LoadHeapObject(temp1, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
- __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
- DeoptimizeIf(ne,
- instr->environment(),
- temp2,
- Operand(Handle<Map>(current_prototype->map())));
+ DoCheckMapCommon(temp1, temp2,
+ Handle<Map>(current_prototype->map()),
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
- LoadHeapObject(temp1, current_prototype);
+ __ LoadHeapObject(temp1, current_prototype);
}
// Check the holder map.
- __ lw(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
- DeoptimizeIf(ne,
- instr->environment(),
- temp2,
- Operand(Handle<Map>(current_prototype->map())));
+ DoCheckMapCommon(temp1, temp2,
+ Handle<Map>(current_prototype->map()),
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ Heap* heap = isolate()->heap();
+ ElementsKind boilerplate_elements_kind =
+ instr->hydrogen()->boilerplate_elements_kind();
+ // Deopt if the array literal boilerplate ElementsKind is of a type different
+ // than the expected one. The check isn't necessary if the boilerplate has
+ // already been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
+ // Load map into a2.
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ // Load the map's "bit field 2".
+ __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ DeoptimizeIf(ne,
+ instr->environment(),
+ a2,
+ Operand(boilerplate_elements_kind));
+ }
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ li(a1, Operand(constant_elements));
+ // Boilerplate already exists, constant elements are never accessed.
+ // Pass an empty fixed array.
+ __ li(a1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
__ Push(a3, a2, a1);
// Pick the right runtime function or stub to call.
@@ -4154,9 +4260,9 @@
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
- constant_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4195,10 +4301,10 @@
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ Addu(a2, result, Operand(*offset));
__ sw(a2, FieldMemOperand(result, total_offset));
- LoadHeapObject(source, value_object);
+ __ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
- LoadHeapObject(a2, Handle<HeapObject>::cast(value));
+ __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
__ sw(a2, FieldMemOperand(result, total_offset));
} else {
__ li(a2, Operand(value));
@@ -4224,7 +4330,7 @@
__ bind(&allocated);
int offset = 0;
- LoadHeapObject(a1, instr->hydrogen()->boilerplate());
+ __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
ASSERT_EQ(size, offset);
}
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index 32d4fb3..513992c 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -93,6 +93,9 @@
// Returns a MemOperand pointing to the high word of a DoubleStackSlot.
MemOperand ToHighMemOperand(LOperand* op) const;
+ bool IsInteger32(LConstantOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
@@ -112,6 +115,9 @@
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
+ void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
+ CompareMapMode mode, LEnvironment* env);
+
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
@@ -145,9 +151,9 @@
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
- Register scratch0() { return lithiumScratchReg; }
- Register scratch1() { return lithiumScratchReg2; }
- DoubleRegister double_scratch0() { return lithiumScratchDouble; }
+ Register scratch0() { return kLithiumScratchReg; }
+ Register scratch1() { return kLithiumScratchReg2; }
+ DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@@ -220,8 +226,8 @@
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc,
LEnvironment* environment,
- Register src1,
- const Operand& src2);
+ Register src1 = zero_reg,
+ const Operand& src2 = Operand(zero_reg));
void AddToTranslation(Translation* translation,
LOperand* op,
@@ -277,6 +283,7 @@
void EmitNumberUntagD(Register input,
DoubleRegister result,
bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
LEnvironment* env);
// Emits optimized code for typeof x == "y". Modifies input register.
@@ -420,7 +427,7 @@
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
- void SetExit(Label *exit) { external_exit_ = exit; }
+ void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
diff --git a/src/mips/lithium-gap-resolver-mips.cc b/src/mips/lithium-gap-resolver-mips.cc
index 8f7f89c..41b060d 100644
--- a/src/mips/lithium-gap-resolver-mips.cc
+++ b/src/mips/lithium-gap-resolver-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -33,8 +33,7 @@
namespace v8 {
namespace internal {
-static const Register kSavedValueRegister = lithiumScratchReg;
-static const DoubleRegister kSavedDoubleValueRegister = lithiumScratchDouble;
+static const Register kSavedValueRegister = kLithiumScratchReg;
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner),
@@ -175,9 +174,9 @@
} else if (source->IsStackSlot()) {
__ lw(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
- __ mov_d(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
+ __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
- __ ldc1(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
+ __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
@@ -190,16 +189,16 @@
ASSERT(in_cycle_);
ASSERT(saved_destination_ != NULL);
- // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
+ // Spilled value is in kSavedValueRegister or kLithiumScratchDouble.
if (saved_destination_->IsRegister()) {
__ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
} else if (saved_destination_->IsStackSlot()) {
__ sw(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
- kSavedDoubleValueRegister);
+ kLithiumScratchDouble);
} else if (saved_destination_->IsDoubleStackSlot()) {
- __ sdc1(kSavedDoubleValueRegister,
+ __ sdc1(kLithiumScratchDouble,
cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
@@ -239,8 +238,8 @@
// Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read.
// This uses only a single reg of the double reg-pair.
- __ lwc1(kSavedDoubleValueRegister, source_operand);
- __ swc1(kSavedDoubleValueRegister, destination_operand);
+ __ lwc1(kLithiumScratchDouble, source_operand);
+ __ swc1(kLithiumScratchDouble, destination_operand);
} else {
__ lw(at, source_operand);
__ sw(at, destination_operand);
@@ -252,14 +251,24 @@
}
} else if (source->IsConstantOperand()) {
- Operand source_operand = cgen_->ToOperand(source);
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
- __ li(cgen_->ToRegister(destination), source_operand);
+ Register dst = cgen_->ToRegister(destination);
+ if (cgen_->IsInteger32(constant_source)) {
+ __ li(dst, Operand(cgen_->ToInteger32(constant_source)));
+ } else {
+ __ LoadObject(dst, cgen_->ToHandle(constant_source));
+ }
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
- MemOperand destination_operand = cgen_->ToMemOperand(destination);
- __ li(kSavedValueRegister, source_operand);
+ if (cgen_->IsInteger32(constant_source)) {
+ __ li(kSavedValueRegister,
+ Operand(cgen_->ToInteger32(constant_source)));
+ } else {
+ __ LoadObject(kSavedValueRegister,
+ cgen_->ToHandle(constant_source));
+ }
__ sw(kSavedValueRegister, cgen_->ToMemOperand(destination));
}
@@ -281,7 +290,7 @@
ASSERT(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
- // kSavedDoubleValueRegister was used to break the cycle,
+ // kLithiumScratchDouble was used to break the cycle,
// but kSavedValueRegister is free.
MemOperand source_high_operand =
cgen_->ToHighMemOperand(source);
@@ -292,8 +301,8 @@
__ lw(kSavedValueRegister, source_high_operand);
__ sw(kSavedValueRegister, destination_high_operand);
} else {
- __ ldc1(kSavedDoubleValueRegister, source_operand);
- __ sdc1(kSavedDoubleValueRegister, destination_operand);
+ __ ldc1(kLithiumScratchDouble, source_operand);
+ __ sdc1(kLithiumScratchDouble, destination_operand);
}
}
} else {
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 19892fc..99e718c 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -1005,14 +1005,16 @@
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber);
+ ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor());
int value_count = hydrogen_env->length();
LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+ hydrogen_env->is_arguments_adaptor(),
ast_id,
hydrogen_env->parameter_count(),
argument_count_,
value_count,
outer);
+ int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1021,13 +1023,17 @@
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument((*argument_index_accumulator)++);
+ op = new LArgument(argument_index++);
} else {
op = UseAny(value);
}
result->AddValue(op, value->representation());
}
+ if (!hydrogen_env->is_arguments_adaptor()) {
+ *argument_index_accumulator = argument_index;
+ }
+
return result;
}
@@ -1038,14 +1044,23 @@
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* v = instr->value();
- if (v->EmitAtUses()) {
- HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+ HValue* value = instr->value();
+ if (value->EmitAtUses()) {
+ HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
}
- return AssignEnvironment(new LBranch(UseRegister(v)));
+
+ LBranch* result = new LBranch(UseRegister(value));
+ // Tagged values that are not known smis or booleans require a
+ // deoptimization environment.
+ Representation rep = value->representation();
+ HType type = value->type();
+ if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+ return AssignEnvironment(result);
+ }
+ return result;
}
@@ -1152,6 +1167,13 @@
LOperand* input = UseFixedDouble(instr->value(), f4);
LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, f4), instr);
+ } else if (op == kMathPowHalf) {
+ // Input cannot be the same as the result.
+ // See lithium-codegen-mips.cc::DoMathPowHalf.
+ LOperand* input = UseFixedDouble(instr->value(), f8);
+ LOperand* temp = FixedTemp(f6);
+ LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
+ return DefineFixedDouble(result, f4);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
@@ -1165,8 +1187,6 @@
return DefineAsRegister(result);
case kMathRound:
return AssignEnvironment(DefineAsRegister(result));
- case kMathPowHalf:
- return DefineAsRegister(result);
default:
UNREACHABLE();
return NULL;
@@ -1340,7 +1360,12 @@
} else {
left = UseRegisterAtStart(instr->LeastConstantOperand());
}
- return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp)));
+ LMulI* mul = new LMulI(left, right, temp);
+ if (instr->CheckFlag(HValue::kCanOverflow) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ AssignEnvironment(mul);
+ }
+ return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
@@ -1401,14 +1426,23 @@
LOperand* left = UseFixedDouble(instr->left(), f2);
LOperand* right = exponent_type.IsDouble() ?
UseFixedDouble(instr->right(), f4) :
- UseFixed(instr->right(), a0);
+ UseFixed(instr->right(), a2);
LPower* result = new LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, f6),
+ return MarkAsCall(DefineFixedDouble(result, f0),
instr,
CAN_DEOPTIMIZE_EAGERLY);
}
+LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->global_object()->representation().IsTagged());
+ LOperand* global_object = UseFixed(instr->global_object(), a0);
+ LRandom* result = new LRandom(global_object);
+ return MarkAsCall(DefineFixedDouble(result, f0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
Representation r = instr->GetInputRepresentation();
ASSERT(instr->left()->representation().IsTagged());
@@ -1526,7 +1560,7 @@
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ return new LClassOfTestAndBranch(UseRegister(instr->value()),
TempRegister());
}
@@ -1553,7 +1587,7 @@
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new LValueOf(object, TempRegister());
- return AssignEnvironment(DefineAsRegister(result));
+ return DefineAsRegister(result);
}
@@ -1777,11 +1811,12 @@
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LOperand* temp = TempRegister();
- LOperand* value = UseTempRegister(instr->value());
- LInstruction* result = new LStoreGlobalCell(value, temp);
- if (instr->RequiresHoleCheck()) result = AssignEnvironment(result);
- return result;
+ LOperand* value = UseRegister(instr->value());
+ // Use a temp to check the value in the cell in the case where we perform
+ // a hole check.
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(new LStoreGlobalCell(value, TempRegister()))
+ : new LStoreGlobalCell(value, NULL);
}
@@ -1796,7 +1831,8 @@
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadContextSlot(context));
+ LInstruction* result = DefineAsRegister(new LLoadContextSlot(context));
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1810,7 +1846,8 @@
context = UseRegister(instr->context());
value = UseRegister(instr->value());
}
- return new LStoreContextSlot(context, value);
+ LInstruction* result = new LStoreContextSlot(context, value);
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1869,7 +1906,8 @@
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
- return AssignEnvironment(DefineAsRegister(result));
+ if (instr->RequiresHoleCheck()) AssignEnvironment(result);
+ return DefineAsRegister(result);
}
@@ -1933,8 +1971,7 @@
LOperand* key = needs_write_barrier
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
-
- return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+ return new LStoreKeyedFastElement(obj, key, val);
}
@@ -2216,6 +2253,7 @@
HEnvironment* outer = current_block_->last_environment();
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->arguments_count(),
instr->function(),
undefined,
instr->call_kind());
@@ -2226,7 +2264,8 @@
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- HEnvironment* outer = current_block_->last_environment()->outer();
+ HEnvironment* outer = current_block_->last_environment()->
+ DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
return NULL;
}
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index eb85f10..069a025 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -141,6 +141,7 @@
V(Parameter) \
V(Power) \
V(PushArgument) \
+ V(Random) \
V(RegExpLiteral) \
V(Return) \
V(ShiftI) \
@@ -1026,6 +1027,17 @@
};
+class LRandom: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LRandom(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Random, "random")
+ DECLARE_HYDROGEN_ACCESSOR(Random)
+};
+
+
class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1242,6 +1254,8 @@
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+
+ LOperand* value() { return inputs_[0]; }
};
@@ -1793,6 +1807,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return InputAt(0); }
+
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index a6dcddc..941c7fe 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -81,6 +81,19 @@
}
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ li(result, Operand(cell));
+ lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
+ } else {
+ li(result, Operand(object));
+ }
+}
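
The reason for the cell indirection, sketched with an illustrative cell type: new-space objects may move during scavenges, so the code embeds a non-moving global property cell and reloads through it, while tenured objects can be embedded directly.

    struct GlobalCellSketch { const void* value; };  // GC keeps this current.

    static const void* DereferenceEmbedded(bool object_in_new_space,
                                           const void* direct_object,
                                           const GlobalCellSketch* cell) {
      return object_in_new_space ? cell->value : direct_object;
    }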
+
+
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
@@ -239,6 +252,12 @@
// registers are cp.
ASSERT(!address.is(cp) && !value.is(cp));
+ if (emit_debug_code()) {
+ lw(at, MemOperand(address));
+ Assert(
+ eq, "Wrong address or value passed to RecordWrite", at, Operand(value));
+ }
+
Label done;
if (smi_check == INLINE_SMI_CHECK) {
@@ -284,7 +303,7 @@
SaveFPRegsMode fp_mode,
RememberedSetFinalAction and_then) {
Label done;
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok);
stop("Remembered set pointer is in new space");
@@ -396,6 +415,46 @@
}
+void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
+  // First, load the hash seed into scratch.
+ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+ SmiUntag(scratch);
+
+  // Xor the original key with the seed.
+ xor_(reg0, reg0, scratch);
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ nor(scratch, reg0, zero_reg);
+ sll(at, reg0, 15);
+ addu(reg0, scratch, at);
+
+ // hash = hash ^ (hash >> 12);
+ srl(at, reg0, 12);
+ xor_(reg0, reg0, at);
+
+ // hash = hash + (hash << 2);
+ sll(at, reg0, 2);
+ addu(reg0, reg0, at);
+
+ // hash = hash ^ (hash >> 4);
+ srl(at, reg0, 4);
+ xor_(reg0, reg0, at);
+
+ // hash = hash * 2057;
+ sll(scratch, reg0, 11);
+ sll(at, reg0, 3);
+ addu(reg0, reg0, at);
+ addu(reg0, reg0, scratch);
+
+ // hash = hash ^ (hash >> 16);
+ srl(at, reg0, 16);
+ xor_(reg0, reg0, at);
+}
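
The sequence above is the standard 32-bit integer bit-mixing hash (and, per the comment, must stay in sync with ComputeIntegerHash in utils.h); a C++ rendering with the seed mixing included, helper name illustrative:

    #include <cstdint>

    static uint32_t SeededIntegerHash(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;  // Same as hash + (hash << 3) + (hash << 11).
      hash = hash ^ (hash >> 16);
      return hash;
    }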
+
+
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
@@ -427,36 +486,10 @@
// at - Temporary (avoid MacroAssembler instructions also using 'at').
Label done;
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- nor(reg1, reg0, zero_reg);
- sll(at, reg0, 15);
- addu(reg0, reg1, at);
-
- // hash = hash ^ (hash >> 12);
- srl(at, reg0, 12);
- xor_(reg0, reg0, at);
-
- // hash = hash + (hash << 2);
- sll(at, reg0, 2);
- addu(reg0, reg0, at);
-
- // hash = hash ^ (hash >> 4);
- srl(at, reg0, 4);
- xor_(reg0, reg0, at);
-
- // hash = hash * 2057;
- li(reg1, Operand(2057));
- mul(reg0, reg0, reg1);
-
- // hash = hash ^ (hash >> 16);
- srl(at, reg0, 16);
- xor_(reg0, reg0, at);
+ GetNumberHash(reg0, reg1);
// Compute the capacity mask.
- lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
+ lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
sra(reg1, reg1, kSmiTagSize);
Subu(reg1, reg1, Operand(1));
@@ -467,12 +500,12 @@
mov(reg2, reg0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
- Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
+ Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
}
and_(reg2, reg2, reg1);
// Scale the index by multiplying by the element size.
- ASSERT(NumberDictionary::kEntrySize == 3);
+ ASSERT(SeededNumberDictionary::kEntrySize == 3);
sll(at, reg2, 1); // 2x.
addu(reg2, reg2, at); // reg2 = reg2 * 3.
@@ -480,7 +513,7 @@
sll(at, reg2, kPointerSizeLog2);
addu(reg2, elements, at);
- lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
+ lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
if (i != kProbes - 1) {
Branch(&done, eq, key, Operand(at));
} else {
@@ -492,14 +525,14 @@
// Check that the value is a normal property.
// reg2: elements + (index * kPointerSize).
const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
Branch(miss, ne, at, Operand(zero_reg));
// Get the value at the masked, scaled index and return.
const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
+ SeededNumberDictionary::kElementsStartOffset + kPointerSize;
lw(result, FieldMemOperand(reg2, kValueOffset));
}
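
The probe loop above in sketch form; the (i + i*i)/2 offset formula is an assumption matching HashTable::GetProbeOffset, and the three-word entry size comes from the kEntrySize == 3 assertion above:

    #include <cstdint>

    // Each probe offsets the original hash and masks by capacity - 1; an
    // entry occupies three words (key, value, details).
    static int FindNumberDictionaryEntry(const uint32_t* entries,
                                         uint32_t capacity_mask,
                                         uint32_t hash, uint32_t key,
                                         int probes) {
      for (int i = 0; i < probes; i++) {
        uint32_t index = (hash + (i + i * i) / 2) & capacity_mask;
        if (entries[index * 3] == key) return static_cast<int>(index);
      }
      return -1;  // Miss.
    }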
@@ -1145,7 +1178,7 @@
Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
// We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
// it rounds to zero.
const uint32_t zero_exponent =
(HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
@@ -3282,17 +3315,51 @@
}
+void MacroAssembler::CompareMapAndBranch(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* early_success,
+ Condition cond,
+ Label* branch_to,
+ CompareMapMode mode) {
+ lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ Operand right = Operand(map);
+ if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
+ Map* transitioned_fast_element_map(
+ map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+ ASSERT(transitioned_fast_element_map == NULL ||
+ map->elements_kind() != FAST_ELEMENTS);
+ if (transitioned_fast_element_map != NULL) {
+ Branch(early_success, eq, scratch, right);
+ right = Operand(Handle<Map>(transitioned_fast_element_map));
+ }
+
+ Map* transitioned_double_map(
+ map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+ ASSERT(transitioned_double_map == NULL ||
+ map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+ if (transitioned_double_map != NULL) {
+ Branch(early_success, eq, scratch, right);
+ right = Operand(Handle<Map>(transitioned_double_map));
+ }
+ }
+
+ Branch(branch_to, cond, scratch, right);
+}
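
The lenient mode above in predicate form; the transition-map parameters stand for the (possibly absent) FAST_ELEMENTS and FAST_DOUBLE_ELEMENTS targets looked up from the expected map:

    // With ALLOW_ELEMENT_TRANSITION_MAPS, an object whose elements kind
    // has since transitioned (e.g. FAST_SMI_ONLY -> FAST or -> DOUBLE)
    // still passes the map check.
    static bool MapMatches(const void* object_map, const void* expected_map,
                           const void* fast_transition,    // May be null.
                           const void* double_transition,  // May be null.
                           bool allow_transitions) {
      if (object_map == expected_map) return true;
      if (!allow_transitions) return false;
      return (fast_transition != nullptr && object_map == fast_transition) ||
             (double_transition != nullptr && object_map == double_transition);
    }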
+
+
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type) {
+ SmiCheckType smi_check_type,
+ CompareMapMode mode) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
- lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
- li(at, Operand(map));
- Branch(fail, ne, scratch, Operand(at));
+ Label success;
+ CompareMapAndBranch(obj, scratch, map, &success, ne, fail, mode);
+ bind(&success);
}
@@ -3399,10 +3466,12 @@
Handle<Code> code_constant,
Register code_reg,
Label* done,
+ bool* definitely_mismatches,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
bool definitely_matches = false;
+ *definitely_mismatches = false;
Label regular_invoke;
// Check whether the expected and actual arguments count match. If not,
@@ -3433,6 +3502,7 @@
// arguments.
definitely_matches = true;
} else {
+ *definitely_mismatches = true;
li(a2, Operand(expected.immediate()));
}
}
@@ -3456,7 +3526,9 @@
SetCallKind(t1, call_kind);
Call(adaptor);
call_wrapper.AfterCall();
- jmp(done);
+ if (!*definitely_mismatches) {
+ Branch(done);
+ }
} else {
SetCallKind(t1, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
@@ -3477,21 +3549,25 @@
Label done;
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, Handle<Code>::null(), code,
+ &done, &definitely_mismatches, flag,
call_wrapper, call_kind);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(t1, call_kind);
- Call(code);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(t1, call_kind);
- Jump(code);
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ SetCallKind(t1, call_kind);
+ Call(code);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(t1, call_kind);
+ Jump(code);
+ }
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
}
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
}
@@ -3506,18 +3582,22 @@
Label done;
- InvokePrologue(expected, actual, code, no_reg, &done, flag,
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, code, no_reg,
+ &done, &definitely_mismatches, flag,
NullCallWrapper(), call_kind);
- if (flag == CALL_FUNCTION) {
- SetCallKind(t1, call_kind);
- Call(code, rmode);
- } else {
- SetCallKind(t1, call_kind);
- Jump(code, rmode);
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ SetCallKind(t1, call_kind);
+ Call(code, rmode);
+ } else {
+ SetCallKind(t1, call_kind);
+ Jump(code, rmode);
+ }
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
}
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
}
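The two InvokeCode overloads above share the new pattern: InvokePrologue reports through definitely_mismatches whether the immediate argument counts can never match, in which case it tail-calls the arguments adaptor and the caller skips emitting the now-unreachable direct call and done label. A small hedged sketch of the idea, using a fake emitter rather than the real assembler:

#include <cstdio>

// Sketch only: when both counts are compile-time immediates and differ, the
// mismatch is definite and the direct-call path would be dead code.
void EmitInvoke(int expected_immediate, int actual_immediate) {
  bool definitely_mismatches = (expected_immediate != actual_immediate);
  if (definitely_mismatches) {
    std::puts("emit: jump to arguments adaptor only");
  } else {
    std::puts("emit: adaptor fallback, direct call, then bind(done)");
  }
}

int main() {
  EmitInvoke(2, 2);  // counts match: the full invoke sequence is emitted
  EmitInvoke(2, 3);  // counts can never match: only the adaptor jump
}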
@@ -3550,12 +3630,13 @@
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
+ const CallWrapper& call_wrapper,
CallKind call_kind) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
- li(a1, Operand(function));
+ LoadHeapObject(a1, function);
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -3563,7 +3644,7 @@
// allow recompilation to take effect without changing any of the
// call sites.
lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- InvokeCode(a3, expected, actual, flag, NullCallWrapper(), call_kind);
+ InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
}
@@ -4248,7 +4329,7 @@
void MacroAssembler::EnterExitFrame(bool save_doubles,
int stack_space) {
- // Setup the frame structure on the stack.
+ // Set up the frame structure on the stack.
STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
@@ -4266,7 +4347,7 @@
addiu(sp, sp, -4 * kPointerSize);
sw(ra, MemOperand(sp, 3 * kPointerSize));
sw(fp, MemOperand(sp, 2 * kPointerSize));
- addiu(fp, sp, 2 * kPointerSize); // Setup new frame pointer.
+ addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
if (emit_debug_code()) {
sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
@@ -4841,7 +4922,7 @@
And(t8, mask_scratch, load_scratch);
Branch(&done, ne, t8, Operand(zero_reg));
- if (FLAG_debug_code) {
+ if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
// sll may overflow, making the check conservative.
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 454fe9e..b976f6e 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -51,16 +51,6 @@
// MIPS generated code calls C code, it must be via t9 register.
-// Register aliases.
-// cp is assumed to be a callee saved register.
-const Register lithiumScratchReg = s3; // Scratch register.
-const Register lithiumScratchReg2 = s4; // Scratch register.
-const Register condReg = s5; // Simulated (partial) condition code for mips.
-const Register roots = s6; // Roots array pointer.
-const Register cp = s7; // JavaScript context pointer.
-const Register fp = s8_fp; // Alias for fp.
-const DoubleRegister lithiumScratchDouble = f30; // Double scratch register.
-
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
// No special flags.
@@ -262,6 +252,15 @@
Heap::RootListIndex index,
Condition cond, Register src1, const Operand& src2);
+ void LoadHeapObject(Register dst, Handle<HeapObject> object);
+
+ void LoadObject(Register result, Handle<Object> object) {
+ if (object->IsHeapObject()) {
+ LoadHeapObject(result, Handle<HeapObject>::cast(object));
+ } else {
+ li(result, object);
+ }
+ }
// ---------------------------------------------------------------------------
// GC Support
@@ -331,7 +330,7 @@
Register scratch3,
Label* object_is_white_and_not_data);
- // Detects conservatively whether an object is data-only, ie it does need to
+ // Detects conservatively whether an object is data-only, i.e. it does not need to
// be scanned by the garbage collector.
void JumpIfDataObject(Register value,
Register scratch,
@@ -396,6 +395,7 @@
Register scratch,
Label* miss);
+ void GetNumberHash(Register reg0, Register scratch);
void LoadFromNumberDictionary(Label* miss,
Register elements,
@@ -411,7 +411,7 @@
}
// Check if the given instruction is a 'type' marker.
- // ie. check if it is a sll zero_reg, zero_reg, <type> (referenced as
+ // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
// nop(type)). These instructions are generated to mark special location in
// the code, like some special IC code.
static inline bool IsMarkedCode(Instr instr, int type) {
@@ -780,11 +780,16 @@
Register map,
Register scratch);
+ void InitializeRootRegister() {
+ ExternalReference roots_array_start =
+ ExternalReference::roots_array_start(isolate());
+ li(kRootRegister, Operand(roots_array_start));
+ }
// -------------------------------------------------------------------------
// JavaScript invokes.
- // Setup call kind marking in t1. The method takes t1 as an
+ // Set up call kind marking in t1. The method takes t1 as an
// explicit first parameter to make the code more readable at the
// call sites.
void SetCallKind(Register dst, CallKind kind);
@@ -815,6 +820,7 @@
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
+ const CallWrapper& call_wrapper,
CallKind call_kind);
@@ -925,15 +931,29 @@
Register scratch4,
Label* fail);
- // Check if the map of an object is equal to a specified map (either
- // given directly or as an index into the root list) and branch to
- // label if not. Skip the smi check if not required (object is known
- // to be a heap object).
+ // Compare an object's map with the specified map and its transitioned
+ // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
+ // "branch_to" if the result of the comparison is "cond". If multiple map
+ // compares are required, the compare sequence branches to early_success.
+ void CompareMapAndBranch(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* early_success,
+ Condition cond,
+ Label* branch_to,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
+
+ // Check if the map of an object is equal to a specified map and branch to
+ // label if not. Skip the smi check if not required (object is known to be a
+ // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+ // against maps that are ElementsKind transition maps of the specified map.
void CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type);
+ SmiCheckType smi_check_type,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
+
void CheckMap(Register obj,
Register scratch,
@@ -1117,7 +1137,7 @@
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
- // - space to be unwound on exit (includes the call js arguments space and
+ // - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
@@ -1358,6 +1378,7 @@
Handle<Code> code_constant,
Register code_reg,
Label* done,
+ bool* definitely_mismatches,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind);
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index f70775d..a158f04 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -888,7 +888,7 @@
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize(isolate);
- // Setup simulator support first. Some of this information is needed to
+ // Set up simulator support first. Some of this information is needed to
// set up the architecture state.
stack_ = reinterpret_cast<char*>(malloc(stack_size_));
pc_modified_ = false;
@@ -897,7 +897,7 @@
break_pc_ = NULL;
break_instr_ = 0;
- // Setup architecture state.
+ // Set up architecture state.
// All registers are initialized to zero to start with.
for (int i = 0; i < kNumSimuRegisters; i++) {
registers_[i] = 0;
@@ -1944,7 +1944,7 @@
// Next pc
int32_t next_pc = 0;
- // Setup the variables if needed before executing the instruction.
+ // Set up the variables if needed before executing the instruction.
ConfigureTypeRegister(instr,
alu_out,
i64hilo,
@@ -2291,7 +2291,7 @@
}
-// Type 2: instructions using a 16 bytes immediate. (eg: addi, beq).
+// Type 2: instructions using a 16-bit immediate. (e.g. addi, beq).
void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Instruction fields.
Opcode op = instr->OpcodeFieldRaw();
@@ -2614,7 +2614,7 @@
}
-// Type 3: instructions using a 26 bytes immediate. (eg: j, jal).
+// Type 3: instructions using a 26-bit immediate. (e.g. j, jal).
void Simulator::DecodeTypeJump(Instruction* instr) {
// Get current pc.
int32_t current_pc = get_pc();
@@ -2711,7 +2711,7 @@
int32_t Simulator::Call(byte* entry, int argument_count, ...) {
va_list parameters;
va_start(parameters, argument_count);
- // Setup arguments.
+ // Set up arguments.
// First four arguments passed in registers.
ASSERT(argument_count >= 4);
@@ -2758,7 +2758,7 @@
int32_t sp_val = get_register(sp);
int32_t fp_val = get_register(fp);
- // Setup the callee-saved registers with a known value. To be able to check
+ // Set up the callee-saved registers with a known value. To be able to check
// that they are preserved properly across JS execution.
int32_t callee_saved_value = icount_;
set_register(s0, callee_saved_value);
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 9f214a3..0051edf 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -377,13 +377,9 @@
Label* miss_label) {
// a0 : value.
Label exit;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver_reg, miss_label, scratch);
-
- // Check that the map of the receiver hasn't changed.
- __ lw(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
- __ Branch(miss_label, ne, scratch, Operand(Handle<Map>(object->map())));
+ // Check that the map of the object hasn't changed.
+ __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
+ DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -565,16 +561,16 @@
int argc) {
// ----------- S t a t e -------------
// -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee js function
+ // -- sp[4] : callee JS function
// -- sp[8] : call data
- // -- sp[12] : last js argument
+ // -- sp[12] : last JS argument
// -- ...
- // -- sp[(argc + 3) * 4] : first js argument
+ // -- sp[(argc + 3) * 4] : first JS argument
// -- sp[(argc + 4) * 4] : receiver
// -----------------------------------
// Get the function and set up the context.
Handle<JSFunction> function = optimization.constant_function();
- __ li(t1, Operand(function));
+ __ LoadHeapObject(t1, function);
__ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
@@ -587,7 +583,7 @@
__ li(t2, call_data);
}
- // Store js function and call data.
+ // Store JS function and call data.
__ sw(t1, MemOperand(sp, 1 * kPointerSize));
__ sw(t2, MemOperand(sp, 2 * kPointerSize));
@@ -747,7 +743,7 @@
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION, call_kind);
+ JUMP_FUNCTION, NullCallWrapper(), call_kind);
}
// Deferred code for fast API call case---clean preallocated space.
@@ -1037,9 +1033,8 @@
__ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
Handle<Map> current_map(current->map());
- __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- // Branch on the result of the map check.
- __ Branch(miss, ne, scratch1, Operand(current_map));
+ __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
+ ALLOW_ELEMENT_TRANSITION_MAPS);
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
@@ -1070,8 +1065,8 @@
LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
// Check the holder map.
- __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+ __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
+ DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -1115,7 +1110,7 @@
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1127,7 +1122,7 @@
scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ li(v0, Operand(value));
+ __ LoadHeapObject(v0, value);
__ Ret();
}
@@ -1173,7 +1168,7 @@
__ EnterExitFrame(false, kApiStackSpace);
// Create AccessorInfo instance on the stack above the exit frame with
- // scratch2 (internal::Object **args_) as the data.
+ // scratch2 (internal::Object** args_) as the data.
__ sw(a2, MemOperand(sp, kPointerSize));
// a2 (second argument - see note above) = AccessorInfo&
__ Addu(a2, sp, kPointerSize);
@@ -1209,7 +1204,7 @@
// and CALLBACKS, so inline only them, other cases may be added
// later.
bool compile_followup_inline = false;
- if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->IsFound() && lookup->IsCacheable()) {
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
@@ -1934,7 +1929,8 @@
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// a2: function name.
@@ -2067,7 +2063,8 @@
__ bind(&slow);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// a2: function name.
@@ -2167,7 +2164,8 @@
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// a2: function name.
@@ -2346,7 +2344,8 @@
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
// Handle call cache miss.
__ bind(&miss);
@@ -2430,7 +2429,7 @@
__ sw(a3, MemOperand(sp, argc * kPointerSize));
}
- // Setup the context (function already in r1).
+ // Set up the context (function already in r1).
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
@@ -2493,12 +2492,9 @@
// -----------------------------------
Label miss;
- // Check that the object isn't a smi.
- __ JumpIfSmi(a1, &miss);
-
// Check that the map of the object hasn't changed.
- __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
+ __ CheckMap(a1, a3, Handle<Map>(object->map()), &miss,
+ DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -2540,12 +2536,9 @@
// -----------------------------------
Label miss;
- // Check that the object isn't a smi.
- __ JumpIfSmi(a1, &miss);
-
// Check that the map of the object hasn't changed.
- __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ Branch(&miss, ne, a3, Operand(Handle<Map>(receiver->map())));
+ __ CheckMap(a1, a3, Handle<Map>(receiver->map()), &miss,
+ DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {
@@ -2701,7 +2694,7 @@
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name) {
// ----------- S t a t e -------------
// -- a0 : receiver
@@ -2839,7 +2832,7 @@
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value) {
+ Handle<JSFunction> value) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index bc0c2fc..82871c0 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -109,7 +109,7 @@
if (j != 0) {
fprintf(fp, ",");
}
- fprintf(fp, "%d", at(j));
+ fprintf(fp, "%u", static_cast<unsigned char>(at(j)));
}
}
char at(int i) { return data_[i]; }
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 64bda94..3a667a4 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -388,7 +388,7 @@
CHECK(this->first()->IsString());
CHECK(this->second() == GetHeap()->empty_string() ||
this->second()->IsString());
- CHECK(this->length() >= String::kMinNonFlatLength);
+ CHECK(this->length() >= ConsString::kMinLength);
if (this->IsFlat()) {
// A flat cons can only be created by String::SlowTryFlatten.
// Afterwards, the first part may be externalized.
@@ -602,6 +602,13 @@
}
+void AccessorPair::AccessorPairVerify() {
+ CHECK(IsAccessorPair());
+ VerifyPointer(getter());
+ VerifyPointer(setter());
+}
+
+
void AccessCheckInfo::AccessCheckInfoVerify() {
CHECK(IsAccessCheckInfo());
VerifyPointer(named_callback());
@@ -739,7 +746,7 @@
break;
}
case DICTIONARY_ELEMENTS: {
- NumberDictionary* dict = element_dictionary();
+ SeededNumberDictionary* dict = element_dictionary();
info->number_of_slow_used_elements_ += dict->NumberOfElements();
info->number_of_slow_unused_elements_ +=
dict->Capacity() - dict->NumberOfElements();
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 39d6e04..9457b04 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1115,7 +1115,7 @@
// Unsafe accessor omitting write barrier.
-void HeapObject::set_map_unsafe(Map* value) {
+void HeapObject::set_map_no_write_barrier(Map* value) {
set_map_word(MapWord::FromMap(value));
}
@@ -1183,6 +1183,22 @@
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
+Object** FixedArray::GetFirstElementAddress() {
+ return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
+}
+
+
+bool FixedArray::ContainsOnlySmisOrHoles() {
+ Object* the_hole = GetHeap()->the_hole_value();
+ Object** current = GetFirstElementAddress();
+ for (int i = 0; i < length(); ++i) {
+ Object* candidate = *current++;
+ if (!candidate->IsSmi() && candidate != the_hole) return false;
+ }
+ return true;
+}
+
+
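ContainsOnlySmisOrHoles above relies on smi tagging: a raw slot holds a smi exactly when its low tag bit is clear, so a single pass over the raw words suffices. A hedged standalone sketch of that scan (the tag values are illustrative, modeled on 32-bit V8's smi tag of 0):

#include <cstdint>
#include <cstdio>

const uintptr_t kTheHole = 0x5;  // hypothetical tagged hole sentinel

bool ContainsOnlySmisOrHoles(const uintptr_t* slots, int length) {
  for (int i = 0; i < length; ++i) {
    bool is_smi = (slots[i] & 1) == 0;  // low-bit smi tag check
    if (!is_smi && slots[i] != kTheHole) return false;
  }
  return true;
}

int main() {
  uintptr_t ok[] = {2, 4, kTheHole};   // smis 1 and 2, plus a hole
  uintptr_t bad[] = {2, 0x1001};       // second word is a heap pointer
  std::printf("%d %d\n", ContainsOnlySmisOrHoles(ok, 3),
              ContainsOnlySmisOrHoles(bad, 2));  // prints "1 0"
}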
FixedArrayBase* JSObject::elements() {
Object* array = READ_FIELD(this, kElementsOffset);
return static_cast<FixedArrayBase*>(array);
@@ -1203,7 +1219,7 @@
map != heap->free_space_map()) {
for (int i = 0; i < fixed_array->length(); i++) {
Object* current = fixed_array->get(i);
- ASSERT(current->IsSmi() || current == heap->the_hole_value());
+ ASSERT(current->IsSmi() || current->IsTheHole());
}
}
}
@@ -1211,57 +1227,100 @@
}
-MaybeObject* JSObject::EnsureCanContainNonSmiElements() {
+MaybeObject* JSObject::EnsureCanContainHeapObjectElements() {
#if DEBUG
ValidateSmiOnlyElements();
#endif
- if ((map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS)) {
- Object* obj;
- MaybeObject* maybe_obj = GetElementsTransitionMap(FAST_ELEMENTS);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- set_map(Map::cast(obj));
+ if ((map()->elements_kind() != FAST_ELEMENTS)) {
+ return TransitionElementsKind(FAST_ELEMENTS);
}
return this;
}
MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
- uint32_t count) {
- if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
- for (uint32_t i = 0; i < count; ++i) {
- Object* current = *objects++;
- if (!current->IsSmi() && current != GetHeap()->the_hole_value()) {
- return EnsureCanContainNonSmiElements();
+ uint32_t count,
+ EnsureElementsMode mode) {
+ ElementsKind current_kind = map()->elements_kind();
+ ElementsKind target_kind = current_kind;
+ ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
+ if (current_kind == FAST_ELEMENTS) return this;
+
+ Heap* heap = GetHeap();
+ Object* the_hole = heap->the_hole_value();
+ Object* heap_number_map = heap->heap_number_map();
+ for (uint32_t i = 0; i < count; ++i) {
+ Object* current = *objects++;
+ if (!current->IsSmi() && current != the_hole) {
+ if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS &&
+ HeapObject::cast(current)->map() == heap_number_map) {
+ target_kind = FAST_DOUBLE_ELEMENTS;
+ } else {
+ target_kind = FAST_ELEMENTS;
+ break;
}
}
}
+
+ if (target_kind != current_kind) {
+ return TransitionElementsKind(target_kind);
+ }
return this;
}
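The loop above computes the least general elements kind that can hold the incoming values: smis and holes keep FAST_SMI_ONLY_ELEMENTS, heap numbers may upgrade to FAST_DOUBLE_ELEMENTS when conversion is allowed, and any other heap object forces FAST_ELEMENTS and ends the scan. A hedged sketch of that decision, with simplified enums in place of V8's types:

#include <cstdio>

enum ElementsKind { FAST_SMI_ONLY, FAST_DOUBLE, FAST };
enum ValueClass { SMI, HOLE, HEAP_NUMBER, OTHER_HEAP_OBJECT };

ElementsKind TargetKind(const ValueClass* vals, int n, bool allow_converted) {
  ElementsKind target = FAST_SMI_ONLY;
  for (int i = 0; i < n; ++i) {
    if (vals[i] == SMI || vals[i] == HOLE) continue;
    if (allow_converted && vals[i] == HEAP_NUMBER) {
      target = FAST_DOUBLE;  // keep scanning: a later value may force FAST
    } else {
      return FAST;           // generic kind; nothing more general exists
    }
  }
  return target;
}

int main() {
  ValueClass v[] = {SMI, HEAP_NUMBER, SMI};
  std::printf("%d\n", TargetKind(v, 3, true));   // 1 == FAST_DOUBLE
  std::printf("%d\n", TargetKind(v, 3, false));  // 2 == FAST
}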
-MaybeObject* JSObject::EnsureCanContainElements(FixedArray* elements) {
- Object** objects = reinterpret_cast<Object**>(
- FIELD_ADDR(elements, elements->OffsetOfElementAt(0)));
- return EnsureCanContainElements(objects, elements->length());
+MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
+ EnsureElementsMode mode) {
+ if (elements->map() != GetHeap()->fixed_double_array_map()) {
+ ASSERT(elements->map() == GetHeap()->fixed_array_map() ||
+ elements->map() == GetHeap()->fixed_cow_array_map());
+ if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
+ mode = DONT_ALLOW_DOUBLE_ELEMENTS;
+ }
+ Object** objects = FixedArray::cast(elements)->GetFirstElementAddress();
+ return EnsureCanContainElements(objects, elements->length(), mode);
+ }
+
+ ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
+ if (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) {
+ return TransitionElementsKind(FAST_DOUBLE_ELEMENTS);
+ }
+
+ return this;
}
-void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
+void JSObject::set_map_and_elements(Map* new_map,
+ FixedArrayBase* value,
+ WriteBarrierMode mode) {
+ ASSERT(value->HasValidElements());
+#ifdef DEBUG
+ ValidateSmiOnlyElements();
+#endif
+ if (new_map != NULL) {
+ if (mode == UPDATE_WRITE_BARRIER) {
+ set_map(new_map);
+ } else {
+ ASSERT(mode == SKIP_WRITE_BARRIER);
+ set_map_no_write_barrier(new_map);
+ }
+ }
ASSERT((map()->has_fast_elements() ||
map()->has_fast_smi_only_elements()) ==
(value->map() == GetHeap()->fixed_array_map() ||
value->map() == GetHeap()->fixed_cow_array_map()));
ASSERT(map()->has_fast_double_elements() ==
value->IsFixedDoubleArray());
- ASSERT(value->HasValidElements());
-#ifdef DEBUG
- ValidateSmiOnlyElements();
-#endif
WRITE_FIELD(this, kElementsOffset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
}
+void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
+ set_map_and_elements(NULL, value, mode);
+}
+
+
void JSObject::initialize_properties() {
ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array());
@@ -1311,8 +1370,6 @@
// The write barrier is not used for global property cells.
ASSERT(!val->IsJSGlobalPropertyCell());
WRITE_FIELD(this, kValueOffset, val);
- GetHeap()->incremental_marking()->RecordWrite(
- this, HeapObject::RawField(this, kValueOffset), val);
}
@@ -1334,11 +1391,11 @@
case JS_VALUE_TYPE:
return JSValue::kSize;
case JS_ARRAY_TYPE:
- return JSValue::kSize;
+ return JSArray::kSize;
case JS_WEAK_MAP_TYPE:
return JSWeakMap::kSize;
case JS_REGEXP_TYPE:
- return JSValue::kSize;
+ return JSRegExp::kSize;
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
return JSObject::kHeaderSize;
case JS_MESSAGE_OBJECT_TYPE:
@@ -1668,7 +1725,7 @@
}
-void FixedDoubleArray::Initialize(NumberDictionary* from) {
+void FixedDoubleArray::Initialize(SeededNumberDictionary* from) {
int offset = kHeaderSize;
for (int current = 0; current < length(); ++current) {
WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
@@ -1703,6 +1760,20 @@
}
+void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
+ int index,
+ Object* value) {
+ ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
+ ASSERT(index >= 0 && index < array->length());
+ int offset = kHeaderSize + index * kPointerSize;
+ WRITE_FIELD(array, offset, value);
+ Heap* heap = array->GetHeap();
+ if (heap->InNewSpace(value)) {
+ heap->RecordWrite(array->address(), offset);
+ }
+}
+
+
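NoIncrementalWriteBarrierSet above skips the incremental-marking hook but keeps the generational half of the barrier: a store of a new-space value must still be recorded so the next scavenge can find the old-to-new pointer. A hedged standalone sketch of that check, with made-up address ranges in place of V8's heap layout:

#include <cstdint>
#include <cstdio>

struct Heap {
  uintptr_t new_space_start, new_space_end;
  bool InNewSpace(uintptr_t p) const {
    return p >= new_space_start && p < new_space_end;
  }
  void RecordWrite(uintptr_t slot) {  // models the remembered set
    std::printf("remembered set += slot %#lx\n",
                static_cast<unsigned long>(slot));
  }
};

void WriteField(Heap* heap, uintptr_t* slot, uintptr_t value) {
  *slot = value;  // the store itself
  if (heap->InNewSpace(value)) {
    heap->RecordWrite(reinterpret_cast<uintptr_t>(slot));
  }
}

int main() {
  Heap heap = {0x1000, 0x2000};
  uintptr_t slot = 0;
  WriteField(&heap, &slot, 0x1800);  // new-space value: gets recorded
  WriteField(&heap, &slot, 0x8000);  // old-space value: no record needed
}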
void FixedArray::NoWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
@@ -1797,12 +1868,12 @@
}
-void DescriptorArray::NoWriteBarrierSwap(FixedArray* array,
- int first,
- int second) {
+void DescriptorArray::NoIncrementalWriteBarrierSwap(FixedArray* array,
+ int first,
+ int second) {
Object* tmp = array->get(first);
- NoWriteBarrierSet(array, first, array->get(second));
- NoWriteBarrierSet(array, second, tmp);
+ NoIncrementalWriteBarrierSet(array, first, array->get(second));
+ NoIncrementalWriteBarrierSet(array, second, tmp);
}
@@ -1914,20 +1985,16 @@
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
- // Make sure none of the elements in desc are in new space.
- ASSERT(!HEAP->InNewSpace(desc->GetKey()));
- ASSERT(!HEAP->InNewSpace(desc->GetValue()));
-
- NoWriteBarrierSet(this,
- ToKeyIndex(descriptor_number),
- desc->GetKey());
+ NoIncrementalWriteBarrierSet(this,
+ ToKeyIndex(descriptor_number),
+ desc->GetKey());
FixedArray* content_array = GetContentArray();
- NoWriteBarrierSet(content_array,
- ToValueIndex(descriptor_number),
- desc->GetValue());
- NoWriteBarrierSet(content_array,
- ToDetailsIndex(descriptor_number),
- desc->GetDetails().AsSmi());
+ NoIncrementalWriteBarrierSet(content_array,
+ ToValueIndex(descriptor_number),
+ desc->GetValue());
+ NoIncrementalWriteBarrierSet(content_array,
+ ToDetailsIndex(descriptor_number),
+ desc->GetDetails().AsSmi());
}
@@ -1941,15 +2008,16 @@
}
-void DescriptorArray::NoWriteBarrierSwapDescriptors(int first, int second) {
- NoWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));
+void DescriptorArray::NoIncrementalWriteBarrierSwapDescriptors(
+ int first, int second) {
+ NoIncrementalWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));
FixedArray* content_array = GetContentArray();
- NoWriteBarrierSwap(content_array,
- ToValueIndex(first),
- ToValueIndex(second));
- NoWriteBarrierSwap(content_array,
- ToDetailsIndex(first),
- ToDetailsIndex(second));
+ NoIncrementalWriteBarrierSwap(content_array,
+ ToValueIndex(first),
+ ToValueIndex(second));
+ NoIncrementalWriteBarrierSwap(content_array,
+ ToDetailsIndex(first),
+ ToDetailsIndex(second));
}
@@ -1989,13 +2057,14 @@
template<typename Shape, typename Key>
int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) {
uint32_t capacity = Capacity();
- uint32_t entry = FirstProbe(Shape::Hash(key), capacity);
+ uint32_t entry = FirstProbe(HashTable<Shape, Key>::Hash(key), capacity);
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
while (true) {
Object* element = KeyAt(entry);
- if (element == isolate->heap()->undefined_value()) break; // Empty entry.
- if (element != isolate->heap()->the_hole_value() &&
+ // Empty entry.
+ if (element == isolate->heap()->raw_unchecked_undefined_value()) break;
+ if (element != isolate->heap()->raw_unchecked_the_hole_value() &&
Shape::IsMatch(key, element)) return entry;
entry = NextProbe(entry, count++, capacity);
}
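FindEntry above is classic open addressing over a power-of-two table: probe i advances the slot by i (so probes visit hash, hash+1, hash+3, hash+6, ...), the undefined sentinel means the key was never inserted, and the-hole marks deleted entries that must not stop the scan. A hedged sketch with plain integer keys in place of V8's Shape/Key machinery:

#include <cstdint>
#include <cstdio>
#include <vector>

const int kEmpty = -1;    // stands in for the undefined sentinel
const int kDeleted = -2;  // stands in for the-hole

int FindEntry(const std::vector<int>& table, int key, uint32_t hash) {
  uint32_t capacity = table.size();            // must be a power of two
  uint32_t entry = hash & (capacity - 1);      // FirstProbe
  for (uint32_t count = 1;; ++count) {
    int element = table[entry];
    if (element == kEmpty) return -1;          // never inserted: not found
    if (element != kDeleted && element == key) return static_cast<int>(entry);
    entry = (entry + count) & (capacity - 1);  // NextProbe
  }
}

int main() {
  std::vector<int> table = {kEmpty, 42, kDeleted, 7};
  std::printf("%d\n", FindEntry(table, 7, 3));  // hits slot 3 directly
  std::printf("%d\n", FindEntry(table, 9, 3));  // probes 3, then 0: not found
}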
@@ -2003,14 +2072,14 @@
}
-bool NumberDictionary::requires_slow_elements() {
+bool SeededNumberDictionary::requires_slow_elements() {
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi()) return false;
return 0 !=
(Smi::cast(max_index_object)->value() & kRequiresSlowElementsMask);
}
-uint32_t NumberDictionary::max_number_key() {
+uint32_t SeededNumberDictionary::max_number_key() {
ASSERT(!requires_slow_elements());
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi()) return 0;
@@ -2018,7 +2087,7 @@
return value >> kRequiresSlowElementsTagSize;
}
-void NumberDictionary::set_requires_slow_elements() {
+void SeededNumberDictionary::set_requires_slow_elements() {
set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask));
}
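The three accessors above decode a single smi-sized header field: the low bit is the requires-slow-elements flag and the remaining bits hold the largest number key seen so far. A hedged sketch of that packing, with illustrative constants rather than V8's exact ones:

#include <cstdio>

const int kRequiresSlowElementsMask = 1;
const int kRequiresSlowElementsTagSize = 1;

struct DictionaryHeader { int max_key_field; };

bool RequiresSlowElements(const DictionaryHeader& d) {
  return (d.max_key_field & kRequiresSlowElementsMask) != 0;
}

unsigned MaxNumberKey(const DictionaryHeader& d) {
  return static_cast<unsigned>(d.max_key_field) >> kRequiresSlowElementsTagSize;
}

int main() {
  DictionaryHeader fast = {17 << kRequiresSlowElementsTagSize};
  DictionaryHeader slow = {kRequiresSlowElementsMask};
  std::printf("%d %u\n", RequiresSlowElements(fast), MaxNumberKey(fast));  // 0 17
  std::printf("%d\n", RequiresSlowElements(slow));                         // 1
}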
@@ -3317,6 +3386,9 @@
ACCESSORS(AccessorInfo, name, Object, kNameOffset)
ACCESSORS(AccessorInfo, flag, Smi, kFlagOffset)
+ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
+ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
+
ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
ACCESSORS(AccessCheckInfo, data, Object, kDataOffset)
@@ -3970,8 +4042,7 @@
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-ACCESSORS(Code, next_code_flushing_candidate,
- Object, kNextCodeFlushingCandidateOffset)
+ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
byte* Code::instruction_start() {
@@ -4111,7 +4182,8 @@
(map == GetHeap()->fixed_array_map() ||
map == GetHeap()->fixed_cow_array_map())) ||
(kind == FAST_DOUBLE_ELEMENTS &&
- fixed_array->IsFixedDoubleArray()) ||
+ (fixed_array->IsFixedDoubleArray() ||
+ fixed_array == GetHeap()->empty_fixed_array())) ||
(kind == DICTIONARY_ELEMENTS &&
fixed_array->IsFixedArray() &&
fixed_array->IsDictionary()) ||
@@ -4202,14 +4274,6 @@
}
-bool JSObject::AllowsSetElementsLength() {
- bool result = elements()->IsFixedArray() ||
- elements()->IsFixedDoubleArray();
- ASSERT(result == !HasExternalArrayElements());
- return result;
-}
-
-
MaybeObject* JSObject::EnsureWritableFastElements() {
ASSERT(HasFastTypeElements());
FixedArray* elems = FixedArray::cast(elements());
@@ -4234,9 +4298,9 @@
}
-NumberDictionary* JSObject::element_dictionary() {
+SeededNumberDictionary* JSObject::element_dictionary() {
ASSERT(HasDictionaryElements());
- return NumberDictionary::cast(elements());
+ return SeededNumberDictionary::cast(elements());
}
@@ -4259,13 +4323,15 @@
}
-StringHasher::StringHasher(int length)
+StringHasher::StringHasher(int length, uint32_t seed)
: length_(length),
- raw_running_hash_(0),
+ raw_running_hash_(seed),
array_index_(0),
is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize),
is_first_char_(true),
- is_valid_(true) { }
+ is_valid_(true) {
+ ASSERT(FLAG_randomize_hashes || raw_running_hash_ == 0);
+}
bool StringHasher::has_trivial_hash() {
@@ -4317,7 +4383,7 @@
result += (result << 3);
result ^= (result >> 11);
result += (result << 15);
- if (result == 0) {
+ if ((result & String::kHashBitMask) == 0) {
result = 27;
}
return result;
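The StringHasher changes above thread a seed into the Jenkins-style one-at-a-time running hash and fix the zero check to look only at the bits that fit in the hash field. A hedged end-to-end sketch (the mask width and seed handling are illustrative, not V8's exact constants):

#include <cstdint>
#include <cstdio>

const uint32_t kHashBitMask = (1u << 30) - 1;  // illustrative field width

uint32_t SeededStringHash(const char* chars, int length, uint32_t seed) {
  uint32_t h = seed;  // the accumulator previously started at 0
  for (int i = 0; i < length; ++i) {
    h += static_cast<unsigned char>(chars[i]);
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;
  h ^= h >> 11;
  h += h << 15;
  if ((h & kHashBitMask) == 0) h = 27;  // zero is reserved for "not computed"
  return h;
}

int main() {
  // Different per-heap seeds give different hashes for the same string,
  // which is the point of randomized hashing (hash-flooding hardening).
  std::printf("%u\n", SeededStringHash("foo", 3, 0));
  std::printf("%u\n", SeededStringHash("foo", 3, 0x5eed));
}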
@@ -4325,8 +4391,8 @@
template <typename schar>
-uint32_t HashSequentialString(const schar* chars, int length) {
- StringHasher hasher(length);
+uint32_t HashSequentialString(const schar* chars, int length, uint32_t seed) {
+ StringHasher hasher(length, seed);
if (!hasher.has_trivial_hash()) {
int i;
for (i = 0; hasher.is_array_index() && (i < length); i++) {
@@ -4471,16 +4537,27 @@
}
-uint32_t NumberDictionaryShape::Hash(uint32_t key) {
- return ComputeIntegerHash(key);
+uint32_t UnseededNumberDictionaryShape::Hash(uint32_t key) {
+ return ComputeIntegerHash(key, 0);
}
-uint32_t NumberDictionaryShape::HashForObject(uint32_t key, Object* other) {
+uint32_t UnseededNumberDictionaryShape::HashForObject(uint32_t key,
+ Object* other) {
ASSERT(other->IsNumber());
- return ComputeIntegerHash(static_cast<uint32_t>(other->Number()));
+ return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), 0);
}
+uint32_t SeededNumberDictionaryShape::SeededHash(uint32_t key, uint32_t seed) {
+ return ComputeIntegerHash(key, seed);
+}
+
+uint32_t SeededNumberDictionaryShape::SeededHashForObject(uint32_t key,
+ uint32_t seed,
+ Object* other) {
+ ASSERT(other->IsNumber());
+ return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), seed);
+}
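SeededHash and SeededHashForObject above simply forward the per-heap seed into ComputeIntegerHash. A hedged sketch of a seeded integer mix in the same spirit (a Thomas Wang style 32-bit finalizer with the seed folded in first; the exact V8 formula may differ):

#include <cstdint>
#include <cstdio>

uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;  // fold the seed in before mixing
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}

int main() {
  // The same key hashes differently under different heap seeds.
  std::printf("%u %u\n", ComputeIntegerHashSketch(1, 0),
              ComputeIntegerHashSketch(1, 0xdecafbadu));
}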
MaybeObject* NumberDictionaryShape::AsObject(uint32_t key) {
return Isolate::Current()->heap()->NumberFromUint32(key);
@@ -4570,11 +4647,25 @@
}
-MaybeObject* JSArray::SetContent(FixedArray* storage) {
- MaybeObject* maybe_object = EnsureCanContainElements(storage);
- if (maybe_object->IsFailure()) return maybe_object;
- set_length(Smi::FromInt(storage->length()));
+bool JSArray::AllowsSetElementsLength() {
+ bool result = elements()->IsFixedArray() || elements()->IsFixedDoubleArray();
+ ASSERT(result == !HasExternalArrayElements());
+ return result;
+}
+
+
+MaybeObject* JSArray::SetContent(FixedArrayBase* storage) {
+ MaybeObject* maybe_result = EnsureCanContainElements(
+ storage, ALLOW_COPIED_DOUBLE_ELEMENTS);
+ if (maybe_result->IsFailure()) return maybe_result;
+ ASSERT((storage->map() == GetHeap()->fixed_double_array_map() &&
+ GetElementsKind() == FAST_DOUBLE_ELEMENTS) ||
+ ((storage->map() != GetHeap()->fixed_double_array_map()) &&
+ ((GetElementsKind() == FAST_ELEMENTS) ||
+ (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
+ FixedArray::cast(storage)->ContainsOnlySmisOrHoles()))));
set_elements(storage);
+ set_length(Smi::FromInt(storage->length()));
return this;
}
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 4b5d049..eca9bab 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -627,7 +627,7 @@
// This method is only meant to be called from gdb for debugging purposes.
-// Since the string can also be in two-byte encoding, non-ascii characters
+// Since the string can also be in two-byte encoding, non-ASCII characters
// will be ignored in the output.
char* String::ToAsciiArray() {
// Static so that subsequent calls free previously allocated space.
@@ -786,6 +786,15 @@
}
+void AccessorPair::AccessorPairPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "AccessorPair");
+ PrintF(out, "\n - getter: ");
+ getter()->ShortPrint(out);
+ PrintF(out, "\n - setter: ");
+ setter()->ShortPrint(out);
+}
+
+
void AccessCheckInfo::AccessCheckInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "AccessCheckInfo");
PrintF(out, "\n - named_callback: ");
diff --git a/src/objects.cc b/src/objects.cc
index 1565504..9f596c4 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -246,8 +246,8 @@
}
// __defineGetter__ callback
- if (structure->IsFixedArray()) {
- Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
+ if (structure->IsAccessorPair()) {
+ Object* getter = AccessorPair::cast(structure)->getter();
if (getter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
@@ -485,6 +485,16 @@
}
+Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object,
+ Handle<String> key,
+ Handle<Object> value,
+ PropertyDetails details) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->SetNormalizedProperty(*key, *value, details),
+ Object);
+}
+
+
MaybeObject* JSObject::SetNormalizedProperty(String* name,
Object* value,
PropertyDetails details) {
@@ -961,14 +971,14 @@
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
if (size >= ExternalString::kSize) {
- this->set_map(
+ this->set_map_no_write_barrier(
is_symbol
? (is_ascii ? heap->external_symbol_with_ascii_data_map()
: heap->external_symbol_map())
: (is_ascii ? heap->external_string_with_ascii_data_map()
: heap->external_string_map()));
} else {
- this->set_map(
+ this->set_map_no_write_barrier(
is_symbol
? (is_ascii ? heap->short_external_symbol_with_ascii_data_map()
: heap->short_external_symbol_map())
@@ -983,7 +993,8 @@
int new_size = this->Size(); // Byte size of the external String object.
heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytes(this->address(), new_size - size);
+ MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
+ new_size - size);
}
return true;
}
@@ -1011,11 +1022,13 @@
// Morph the object to an external string by adjusting the map and
// reinitializing the fields. Use short version if space is limited.
if (size >= ExternalString::kSize) {
- this->set_map(is_symbol ? heap->external_ascii_symbol_map()
- : heap->external_ascii_string_map());
+ this->set_map_no_write_barrier(
+ is_symbol ? heap->external_ascii_symbol_map()
+ : heap->external_ascii_string_map());
} else {
- this->set_map(is_symbol ? heap->short_external_ascii_symbol_map()
- : heap->short_external_ascii_string_map());
+ this->set_map_no_write_barrier(
+ is_symbol ? heap->short_external_ascii_symbol_map()
+ : heap->short_external_ascii_string_map());
}
ExternalAsciiString* self = ExternalAsciiString::cast(this);
self->set_resource(resource);
@@ -1025,7 +1038,8 @@
int new_size = this->Size(); // Byte size of the external String object.
heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytes(this->address(), new_size - size);
+ MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
+ new_size - size);
}
return true;
}
@@ -1640,8 +1654,6 @@
String* name,
JSFunction* function,
PropertyAttributes attributes) {
- ASSERT(!GetHeap()->InNewSpace(function));
-
// Allocate new instance descriptors with (name, function) added
ConstantFunctionDescriptor d(name, function, attributes);
Object* new_descriptors;
@@ -1756,7 +1768,7 @@
// Ensure the descriptor array does not get too big.
if (map_of_this->instance_descriptors()->number_of_descriptors() <
DescriptorArray::kMaxNumberOfDescriptors) {
- if (value->IsJSFunction() && !heap->InNewSpace(value)) {
+ if (value->IsJSFunction()) {
return AddConstantFunctionProperty(name,
JSFunction::cast(value),
attributes);
@@ -1961,6 +1973,17 @@
}
+Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
+ Handle<String> key,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->SetProperty(*key, *value, attributes, strict_mode),
+ Object);
+}
+
+
MaybeObject* JSReceiver::SetProperty(String* name,
Object* value,
PropertyAttributes attributes,
@@ -2018,8 +2041,8 @@
return *value_handle;
}
- if (structure->IsFixedArray()) {
- Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
+ if (structure->IsAccessorPair()) {
+ Object* setter = AccessorPair::cast(structure)->setter();
if (setter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return SetPropertyWithDefinedSetter(JSReceiver::cast(setter), value);
@@ -2107,9 +2130,10 @@
if (!JSObject::cast(pt)->HasDictionaryElements()) {
continue;
}
- NumberDictionary* dictionary = JSObject::cast(pt)->element_dictionary();
+ SeededNumberDictionary* dictionary =
+ JSObject::cast(pt)->element_dictionary();
int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
+ if (entry != SeededNumberDictionary::kNotFound) {
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
*found = true;
@@ -2342,7 +2366,9 @@
if (details.type() == ELEMENTS_TRANSITION) {
return descriptors->GetValue(index);
} else {
- *safe_to_add_transition = false;
+ if (safe_to_add_transition != NULL) {
+ *safe_to_add_transition = false;
+ }
}
}
return NULL;
@@ -2995,7 +3021,6 @@
ASSERT(target_descriptors->GetType(number) == CONSTANT_FUNCTION);
JSFunction* function =
JSFunction::cast(target_descriptors->GetValue(number));
- ASSERT(!HEAP->InNewSpace(function));
if (value == function) {
set_map(target_map);
return value;
@@ -3025,6 +3050,18 @@
// Note that this method cannot be used to set the prototype of a function
// because ConvertDescriptorToField() which is called in "case CALLBACKS:"
// doesn't handle function prototypes correctly.
+Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
+ Handle<JSObject> object,
+ Handle<String> key,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ CALL_HEAP_FUNCTION(
+ object->GetIsolate(),
+ object->SetLocalPropertyIgnoreAttributes(*key, *value, attributes),
+ Object);
+}
+
+
MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
String* name,
Object* value,
@@ -3315,6 +3352,15 @@
}
+void JSObject::NormalizeProperties(Handle<JSObject> object,
+ PropertyNormalizationMode mode,
+ int expected_additional_properties) {
+ CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
+ object->NormalizeProperties(
+ mode, expected_additional_properties));
+}
+
+
MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
int expected_additional_properties) {
if (!HasFastProperties()) return this;
@@ -3416,7 +3462,8 @@
current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
instance_size_delta);
if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytes(this->address(), -instance_size_delta);
+ MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
+ -instance_size_delta);
}
@@ -3437,6 +3484,14 @@
}
+void JSObject::TransformToFastProperties(Handle<JSObject> object,
+ int unused_property_fields) {
+ CALL_HEAP_FUNCTION_VOID(
+ object->GetIsolate(),
+ object->TransformToFastProperties(unused_property_fields));
+}
+
+
MaybeObject* JSObject::TransformToFastProperties(int unused_property_fields) {
if (HasFastProperties()) return this;
ASSERT(!IsGlobalObject());
@@ -3445,6 +3500,14 @@
}
+Handle<SeededNumberDictionary> JSObject::NormalizeElements(
+ Handle<JSObject> object) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->NormalizeElements(),
+ SeededNumberDictionary);
+}
+
+
MaybeObject* JSObject::NormalizeElements() {
ASSERT(!HasExternalArrayElements());
@@ -3469,11 +3532,11 @@
int old_capacity = 0;
int used_elements = 0;
GetElementsCapacityAndUsage(&old_capacity, &used_elements);
- NumberDictionary* dictionary = NULL;
+ SeededNumberDictionary* dictionary = NULL;
{ Object* object;
- MaybeObject* maybe = NumberDictionary::Allocate(used_elements);
+ MaybeObject* maybe = SeededNumberDictionary::Allocate(used_elements);
if (!maybe->ToObject(&object)) return maybe;
- dictionary = NumberDictionary::cast(object);
+ dictionary = SeededNumberDictionary::cast(object);
}
// Copy the elements to the new backing store.
@@ -3504,7 +3567,7 @@
MaybeObject* maybe_result =
dictionary->AddNumberEntry(i, value, details);
if (!maybe_result->ToObject(&result)) return maybe_result;
- dictionary = NumberDictionary::cast(result);
+ dictionary = SeededNumberDictionary::cast(result);
}
}
@@ -3561,6 +3624,14 @@
}
+int JSObject::GetIdentityHash(Handle<JSObject> obj) {
+ CALL_AND_RETRY(obj->GetIsolate(),
+ obj->GetIdentityHash(ALLOW_CREATION),
+ return Smi::cast(__object__)->value(),
+ return 0);
+}
+
+
MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) {
Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_symbol());
if (stored_value->IsSmi()) return stored_value;
@@ -3613,6 +3684,15 @@
}
+Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> obj,
+ Handle<String> key,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(obj->GetIsolate(),
+ obj->SetHiddenProperty(*key, *value),
+ Object);
+}
+
+
MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
if (IsJSGlobalProxy()) {
// For a proxy, use the prototype as target object.
@@ -3840,6 +3920,14 @@
}
+Handle<Object> JSObject::DeleteElement(Handle<JSObject> obj,
+ uint32_t index) {
+ CALL_HEAP_FUNCTION(obj->GetIsolate(),
+ obj->DeleteElement(index, JSObject::NORMAL_DELETION),
+ Object);
+}
+
+
MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
Isolate* isolate = GetIsolate();
// Check access rights if needed.
@@ -3868,19 +3956,11 @@
}
-MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode);
- }
- return JSObject::cast(this)->DeleteProperty(name, mode);
-}
-
-
-MaybeObject* JSReceiver::DeleteElement(uint32_t index, DeleteMode mode) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->DeleteElementWithHandler(index, mode);
- }
- return JSObject::cast(this)->DeleteElement(index, mode);
+Handle<Object> JSObject::DeleteProperty(Handle<JSObject> obj,
+ Handle<String> prop) {
+ CALL_HEAP_FUNCTION(obj->GetIsolate(),
+ obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
+ Object);
}
@@ -3941,6 +4021,22 @@
}
+MaybeObject* JSReceiver::DeleteElement(uint32_t index, DeleteMode mode) {
+ if (IsJSProxy()) {
+ return JSProxy::cast(this)->DeleteElementWithHandler(index, mode);
+ }
+ return JSObject::cast(this)->DeleteElement(index, mode);
+}
+
+
+MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
+ if (IsJSProxy()) {
+ return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode);
+ }
+ return JSObject::cast(this)->DeleteProperty(name, mode);
+}
+
+
bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
Object* object) {
@@ -3955,7 +4051,8 @@
if (!element->IsTheHole() && element == object) return true;
}
} else {
- Object* key = NumberDictionary::cast(elements)->SlowReverseLookup(object);
+ Object* key =
+ SeededNumberDictionary::cast(elements)->SlowReverseLookup(object);
if (!key->IsUndefined()) return true;
}
return false;
@@ -4067,6 +4164,11 @@
}
+Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object);
+}
+
+
MaybeObject* JSObject::PreventExtensions() {
Isolate* isolate = GetIsolate();
if (IsAccessCheckNeeded() &&
@@ -4096,9 +4198,9 @@
}
// If there are fast elements we normalize.
- NumberDictionary* dictionary = NULL;
+ SeededNumberDictionary* dictionary = NULL;
{ MaybeObject* maybe = NormalizeElements();
- if (!maybe->To<NumberDictionary>(&dictionary)) return maybe;
+ if (!maybe->To<SeededNumberDictionary>(&dictionary)) return maybe;
}
ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
// Make sure that we never go back to fast case.
@@ -4251,27 +4353,28 @@
current != heap->null_value() && current->IsJSObject();
current = JSObject::cast(current)->GetPrototype()) {
JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
- if (result->IsProperty() && result->type() == CALLBACKS) return;
+ if (result->IsFound() && result->type() == CALLBACKS) return;
}
result->NotFound();
}
// Search for a getter or setter in an elements dictionary and update its
-// attributes. Returns either undefined if the element is read-only, or the
-// getter/setter pair (fixed array) if there is an existing one, or the hole
-// value if the element does not exist or is a normal non-getter/setter data
-// element.
-static Object* UpdateGetterSetterInDictionary(NumberDictionary* dictionary,
- uint32_t index,
- PropertyAttributes attributes,
- Heap* heap) {
+// attributes. Returns undefined if the element is non-deletable, the
+// getter/setter pair if there is an existing one, or the hole value if the
+// element does not exist or is a normal non-getter/setter data element.
+static Object* UpdateGetterSetterInDictionary(
+ SeededNumberDictionary* dictionary,
+ uint32_t index,
+ PropertyAttributes attributes,
+ Heap* heap) {
int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
+ if (entry != SeededNumberDictionary::kNotFound) {
Object* result = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
- if (details.IsReadOnly()) return heap->undefined_value();
- if (details.type() == CALLBACKS && result->IsFixedArray()) {
+ // TODO(mstarzinger): We should check for details.IsDontDelete() here once
+ // we only call into the runtime once to set both getter and setter.
+ if (details.type() == CALLBACKS && result->IsAccessorPair()) {
if (details.attributes() != attributes) {
dictionary->DetailsAtPut(entry,
PropertyDetails(attributes, CALLBACKS, index));
@@ -4338,7 +4441,8 @@
if (probe == NULL || probe->IsTheHole()) {
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
if (arguments->IsDictionary()) {
- NumberDictionary* dictionary = NumberDictionary::cast(arguments);
+ SeededNumberDictionary* dictionary =
+ SeededNumberDictionary::cast(arguments);
probe = UpdateGetterSetterInDictionary(dictionary,
index,
attributes,
@@ -4352,13 +4456,14 @@
} else {
// Lookup the name.
LookupResult result(heap->isolate());
- LocalLookup(name, &result);
- if (result.IsProperty()) {
- if (result.IsReadOnly()) return heap->undefined_value();
+ LocalLookupRealNamedProperty(name, &result);
+ if (result.IsFound()) {
+ // TODO(mstarzinger): We should check for result.IsDontDelete() here once
+ // we only call into the runtime once to set both getter and setter.
if (result.type() == CALLBACKS) {
Object* obj = result.GetCallbackObject();
// Need to preserve old getters/setters.
- if (obj->IsFixedArray()) {
+ if (obj->IsAccessorPair()) {
// Use set to update attributes.
return SetPropertyCallback(name, obj, attributes);
}
@@ -4366,16 +4471,15 @@
}
}
- // Allocate the fixed array to hold getter and setter.
- Object* structure;
- { MaybeObject* maybe_structure = heap->AllocateFixedArray(2, TENURED);
- if (!maybe_structure->ToObject(&structure)) return maybe_structure;
+ AccessorPair* accessors;
+ { MaybeObject* maybe_accessors = heap->AllocateAccessorPair();
+ if (!maybe_accessors->To<AccessorPair>(&accessors)) return maybe_accessors;
}
if (is_element) {
- return SetElementCallback(index, structure, attributes);
+ return SetElementCallback(index, accessors, attributes);
} else {
- return SetPropertyCallback(name, structure, attributes);
+ return SetPropertyCallback(name, accessors, attributes);
}
}
@@ -4410,11 +4514,11 @@
PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
// Normalize elements to make this operation simple.
- NumberDictionary* dictionary = NULL;
+ SeededNumberDictionary* dictionary = NULL;
{ Object* result;
MaybeObject* maybe = NormalizeElements();
if (!maybe->ToObject(&result)) return maybe;
- dictionary = NumberDictionary::cast(result);
+ dictionary = SeededNumberDictionary::cast(result);
}
ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
@@ -4422,7 +4526,7 @@
{ Object* result;
MaybeObject* maybe = dictionary->Set(index, structure, details);
if (!maybe->ToObject(&result)) return maybe;
- dictionary = NumberDictionary::cast(result);
+ dictionary = SeededNumberDictionary::cast(result);
}
dictionary->set_requires_slow_elements();
@@ -4511,12 +4615,16 @@
fun, attributes);
}
- Object* array;
- { MaybeObject* maybe_array = DefineGetterSetter(name, attributes);
- if (!maybe_array->ToObject(&array)) return maybe_array;
+ Object* accessors;
+ { MaybeObject* maybe_accessors = DefineGetterSetter(name, attributes);
+ if (!maybe_accessors->To<Object>(&accessors)) return maybe_accessors;
}
- if (array->IsUndefined()) return array;
- FixedArray::cast(array)->set(is_getter ? 0 : 1, fun);
+ if (accessors->IsUndefined()) return accessors;
+ if (is_getter) {
+ AccessorPair::cast(accessors)->set_getter(fun);
+ } else {
+ AccessorPair::cast(accessors)->set_setter(fun);
+ }
return this;
}
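The hunk above is the heart of this file's AccessorPair migration: getter/setter callbacks move from an untyped two-element FixedArray indexed by kGetterIndex/kSetterIndex to a dedicated object with named fields, so call sites can test IsAccessorPair() instead of the ambiguous IsFixedArray(). A hedged sketch of the shape of that change:

#include <cstdio>

struct Object {};  // stand-in for a V8 heap object

struct AccessorPair {       // replaces FixedArray::get(0)/get(1)
  Object* getter;
  Object* setter;
};

void DefineAccessor(AccessorPair* pair, bool is_getter, Object* fun) {
  if (is_getter) {
    pair->getter = fun;     // was: array->set(kGetterIndex, fun)
  } else {
    pair->setter = fun;     // was: array->set(kSetterIndex, fun)
  }
}

int main() {
  Object get_fn, set_fn;
  AccessorPair pair = {nullptr, nullptr};
  DefineAccessor(&pair, true, &get_fn);
  DefineAccessor(&pair, false, &set_fn);
  std::printf("%d %d\n", pair.getter == &get_fn, pair.setter == &set_fn);  // 1 1
}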
@@ -4620,11 +4728,6 @@
}
// Make the lookup and include prototypes.
- // Introducing constants below makes static constants usage purely static
- // and avoids linker errors in debug build using gcc.
- const int getter_index = kGetterIndex;
- const int setter_index = kSetterIndex;
- int accessor_index = is_getter ? getter_index : setter_index;
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
for (Object* obj = this;
@@ -4632,14 +4735,15 @@
obj = JSObject::cast(obj)->GetPrototype()) {
JSObject* js_object = JSObject::cast(obj);
if (js_object->HasDictionaryElements()) {
- NumberDictionary* dictionary = js_object->element_dictionary();
+ SeededNumberDictionary* dictionary = js_object->element_dictionary();
int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
+ if (entry != SeededNumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
- if (element->IsFixedArray()) {
- return FixedArray::cast(element)->get(accessor_index);
+ if (element->IsAccessorPair()) {
+ AccessorPair* accessors = AccessorPair::cast(element);
+ return is_getter ? accessors->getter() : accessors->setter();
}
}
}
@@ -4655,8 +4759,9 @@
if (result.IsReadOnly()) return heap->undefined_value();
if (result.type() == CALLBACKS) {
Object* obj = result.GetCallbackObject();
- if (obj->IsFixedArray()) {
- return FixedArray::cast(obj)->get(accessor_index);
+ if (obj->IsAccessorPair()) {
+ AccessorPair* accessors = AccessorPair::cast(obj);
+ return is_getter ? accessors->getter() : accessors->setter();
}
}
}
@@ -4830,75 +4935,219 @@
}
-void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
- // Traverse the transition tree without using a stack. We do this by
- // reversing the pointers in the maps and descriptor arrays.
- Map* current = this;
- Map* meta_map = GetHeap()->meta_map();
- Object** map_or_index_field = NULL;
- while (current != meta_map) {
- DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
- *RawField(current, Map::kInstanceDescriptorsOrBitField3Offset));
- if (!d->IsEmpty()) {
- FixedArray* contents = reinterpret_cast<FixedArray*>(
- d->get(DescriptorArray::kContentArrayIndex));
- map_or_index_field = RawField(contents, HeapObject::kMapOffset);
- Object* map_or_index = *map_or_index_field;
- bool map_done = true; // Controls a nested continue statement.
- for (int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : 0;
- i < contents->length();
- i += 2) {
- PropertyDetails details(Smi::cast(contents->get(i + 1)));
- if (details.IsTransition()) {
- // Found a map in the transition array. We record our progress in
- // the transition array by recording the current map in the map field
- // of the next map and recording the index in the transition array in
- // the map field of the array.
- Map* next = Map::cast(contents->get(i));
- next->set_map_unsafe(current);
- *map_or_index_field = Smi::FromInt(i + 2);
- current = next;
- map_done = false;
+// An iterator over all map transitions in a descriptor array, reusing the map
+// field of the contents array while it is running.
+class IntrusiveMapTransitionIterator {
+ public:
+ explicit IntrusiveMapTransitionIterator(DescriptorArray* descriptor_array)
+ : descriptor_array_(descriptor_array) { }
+
+ void Start() {
+ ASSERT(!IsIterating());
+ if (HasContentArray()) *ContentHeader() = Smi::FromInt(0);
+ }
+
+ bool IsIterating() {
+ return HasContentArray() && (*ContentHeader())->IsSmi();
+ }
+
+ Map* Next() {
+ ASSERT(IsIterating());
+ FixedArray* contents = ContentArray();
+ // Attention, tricky index manipulation ahead: Every entry in the contents
+ // array consists of a value/details pair, so the index is typically even.
+ // An exception is made for CALLBACKS entries: An even index means we look
+ // at its getter, and an odd index means we look at its setter.
+ int index = Smi::cast(*ContentHeader())->value();
+ while (index < contents->length()) {
+ PropertyDetails details(Smi::cast(contents->get(index | 1)));
+ switch (details.type()) {
+ case MAP_TRANSITION:
+ case CONSTANT_TRANSITION:
+ case ELEMENTS_TRANSITION:
+ // We definitely have a map transition.
+ *ContentHeader() = Smi::FromInt(index + 2);
+ return static_cast<Map*>(contents->get(index));
+ case CALLBACKS: {
+ // We might have a map transition in a getter or in a setter.
+ AccessorPair* accessors =
+ static_cast<AccessorPair*>(contents->get(index & ~1));
+ Object* accessor =
+ ((index & 1) == 0) ? accessors->getter() : accessors->setter();
+ index++;
+ if (accessor->IsMap()) {
+ *ContentHeader() = Smi::FromInt(index);
+ return static_cast<Map*>(accessor);
+ }
break;
}
- }
- if (!map_done) continue;
- } else {
- map_or_index_field = NULL;
- }
- // That was the regular transitions, now for the prototype transitions.
- FixedArray* prototype_transitions =
- current->unchecked_prototype_transitions();
- Object** proto_map_or_index_field =
- RawField(prototype_transitions, HeapObject::kMapOffset);
- Object* map_or_index = *proto_map_or_index_field;
- const int start = kProtoTransitionHeaderSize + kProtoTransitionMapOffset;
- int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : start;
- if (i < prototype_transitions->length()) {
- // Found a map in the prototype transition array. Record progress in
- // an analogous way to the regular transitions array above.
- Object* perhaps_map = prototype_transitions->get(i);
- if (perhaps_map->IsMap()) {
- Map* next = Map::cast(perhaps_map);
- next->set_map_unsafe(current);
- *proto_map_or_index_field =
- Smi::FromInt(i + kProtoTransitionElementsPerEntry);
- current = next;
- continue;
+ case NORMAL:
+ case FIELD:
+ case CONSTANT_FUNCTION:
+ case HANDLER:
+ case INTERCEPTOR:
+ case NULL_DESCRIPTOR:
+ // We definitely have no map transition.
+ index += 2;
+ break;
}
}
- *proto_map_or_index_field = GetHeap()->fixed_array_map();
- if (map_or_index_field != NULL) {
- *map_or_index_field = GetHeap()->fixed_array_map();
- }
+ *ContentHeader() = descriptor_array_->GetHeap()->fixed_array_map();
+ return NULL;
+ }
- // The callback expects a map to have a real map as its map, so we save
- // the map field, which is being used to track the traversal and put the
- // correct map (the meta_map) in place while we do the callback.
- Map* prev = current->map();
- current->set_map_unsafe(meta_map);
- callback(current, data);
- current = prev;
+ private:
+ bool HasContentArray() {
+ return descriptor_array_->length() > DescriptorArray::kContentArrayIndex;
+ }
+
+ FixedArray* ContentArray() {
+ Object* array = descriptor_array_->get(DescriptorArray::kContentArrayIndex);
+ return static_cast<FixedArray*>(array);
+ }
+
+ Object** ContentHeader() {
+ return HeapObject::RawField(ContentArray(), DescriptorArray::kMapOffset);
+ }
+
+ DescriptorArray* descriptor_array_;
+};
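
The "Attention, tricky index manipulation" comment above boils down to a two-level cursor encoding: the Smi stored in the content array's map slot normally advances in steps of two (one value/details pair per step), but for CALLBACKS entries the low bit is borrowed to distinguish the getter from the setter of the AccessorPair. A minimal standalone decoding sketch (illustrative only, not patch code):

// Decode one cursor value as consumed by IntrusiveMapTransitionIterator::Next().
// Even cursors inspect the getter, odd cursors the setter, of the pair stored
// at the even slot.
void DecodeCursor(int index, int* pair_slot, bool* is_setter) {
  *pair_slot = index & ~1;         // value slot of the value/details pair
  *is_setter = (index & 1) != 0;   // even index -> getter, odd -> setter
}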
+
+
+// An iterator over all prototype transitions, reusing the map field of the
+// underlying array while it is running.
+class IntrusivePrototypeTransitionIterator {
+ public:
+ explicit IntrusivePrototypeTransitionIterator(FixedArray* proto_trans)
+ : proto_trans_(proto_trans) { }
+
+ void Start() {
+ ASSERT(!IsIterating());
+ if (HasTransitions()) *Header() = Smi::FromInt(0);
+ }
+
+ bool IsIterating() {
+ return HasTransitions() && (*Header())->IsSmi();
+ }
+
+ Map* Next() {
+ ASSERT(IsIterating());
+ int transition_number = Smi::cast(*Header())->value();
+ if (transition_number < NumberOfTransitions()) {
+ *Header() = Smi::FromInt(transition_number + 1);
+ return GetTransition(transition_number);
+ }
+ *Header() = proto_trans_->GetHeap()->fixed_array_map();
+ return NULL;
+ }
+
+ private:
+ bool HasTransitions() {
+ return proto_trans_->length() >= Map::kProtoTransitionHeaderSize;
+ }
+
+ Object** Header() {
+ return HeapObject::RawField(proto_trans_, FixedArray::kMapOffset);
+ }
+
+ int NumberOfTransitions() {
+ Object* num = proto_trans_->get(Map::kProtoTransitionNumberOfEntriesOffset);
+ return Smi::cast(num)->value();
+ }
+
+ Map* GetTransition(int transition_number) {
+ return Map::cast(proto_trans_->get(IndexFor(transition_number)));
+ }
+
+ int IndexFor(int transition_number) {
+ return Map::kProtoTransitionHeaderSize +
+ Map::kProtoTransitionMapOffset +
+ transition_number * Map::kProtoTransitionElementsPerEntry;
+ }
+
+ FixedArray* proto_trans_;
+};
+
+
+// To traverse the transition tree iteratively, we have to store two kinds of
+// information in a map: The parent map in the traversal and which children of a
+// node have already been visited. To do this without additional memory, we
+// temporarily reuse two maps with known values:
+//
+// (1) The map of the map temporarily holds the parent, and is restored to the
+// meta map afterwards.
+//
+// (2) The info about which children have already been visited depends on
+// which part of the map we are currently iterating over:
+//
+// (a) If we currently follow normal map transitions, we temporarily store
+// the current index in the map of the FixedArray of the descriptor
+// array's contents, and restore it to the fixed array map afterwards.
+// Note that a single descriptor can have 0, 1, or 2 transitions.
+//
+// (b) If we currently follow prototype transitions, we temporarily store
+// the current index in the map of the FixedArray holding the prototype
+// transitions, and restore it to the fixed array map afterwards.
+//
+// Note that the child iterator is just a concatenation of two iterators: one
+// iterating over map transitions and one iterating over prototype transitions.
+class TraversableMap : public Map {
+ public:
+ // Record the parent in the traversal within this map. Note that this destroys
+ // this map's map!
+ void SetParent(TraversableMap* parent) { set_map_no_write_barrier(parent); }
+
+ // Reset the current map's map, returning the parent previously stored in it.
+ TraversableMap* GetAndResetParent() {
+ TraversableMap* old_parent = static_cast<TraversableMap*>(map());
+ set_map_no_write_barrier(GetHeap()->meta_map());
+ return old_parent;
+ }
+
+ // Start iterating over this map's children, possibly destroying a FixedArray
+ // map (see explanation above).
+ void ChildIteratorStart() {
+ IntrusiveMapTransitionIterator(instance_descriptors()).Start();
+ IntrusivePrototypeTransitionIterator(
+ unchecked_prototype_transitions()).Start();
+ }
+
+ // If we have an unvisited child map, return that one and advance. If we have
+ // none, return NULL and reset any destroyed FixedArray maps.
+ TraversableMap* ChildIteratorNext() {
+ IntrusiveMapTransitionIterator descriptor_iterator(instance_descriptors());
+ if (descriptor_iterator.IsIterating()) {
+ Map* next = descriptor_iterator.Next();
+ if (next != NULL) return static_cast<TraversableMap*>(next);
+ }
+ IntrusivePrototypeTransitionIterator
+ proto_iterator(unchecked_prototype_transitions());
+ if (proto_iterator.IsIterating()) {
+ Map* next = proto_iterator.Next();
+ if (next != NULL) return static_cast<TraversableMap*>(next);
+ }
+ return NULL;
+ }
+};
+
+
+// Traverse the transition tree in postorder without using the C++ stack by
+// doing pointer reversal.
+void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
+ TraversableMap* current = static_cast<TraversableMap*>(this);
+ current->ChildIteratorStart();
+ while (true) {
+ TraversableMap* child = current->ChildIteratorNext();
+ if (child != NULL) {
+ child->ChildIteratorStart();
+ child->SetParent(current);
+ current = child;
+ } else {
+ TraversableMap* parent = current->GetAndResetParent();
+ callback(current, data);
+ if (current == this) break;
+ current = parent;
+ }
}
}
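
The loop above is classic pointer reversal (Deutsch-Schorr-Waite style): while a child is being visited, its map slot holds the parent instead of a real map, so no auxiliary stack is needed and the traversal runs in constant extra space. A free-standing sketch of the same shape, assuming a hypothetical Node type that can lend one pointer-sized field for the duration of the walk:

// Illustrative postorder traversal without a stack. NextUnvisitedChild() is
// assumed to remember its progress inside the node, as the intrusive
// iterators above do.
struct Node {
  Node* borrowed_parent;        // field reused to store the traversal parent
  Node* NextUnvisitedChild();   // hypothetical resumable child iterator
};

void TraversePostorder(Node* root, void (*visit)(Node*)) {
  Node* current = root;
  while (true) {
    Node* child = current->NextUnvisitedChild();
    if (child != NULL) {
      child->borrowed_parent = current;  // reverse the pointer
      current = child;
    } else {
      Node* parent = current->borrowed_parent;  // restore before the callback
      visit(current);                           // children first: postorder
      if (current == root) break;
      current = parent;
    }
  }
}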
@@ -5395,7 +5644,9 @@
AssertNoAllocation no_gc;
int len = length();
if (new_length < len) len = new_length;
- result->set_map(map());
+ // We are taking the map from the old fixed array, so it is sure to
+ // be an immortal immutable object.
+ result->set_map_no_write_barrier(map());
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len; i++) {
result->set(i, get(i), mode);
@@ -5635,7 +5886,7 @@
}
}
if (child_hash <= parent_hash) break;
- NoWriteBarrierSwapDescriptors(parent_index, child_index);
+ NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
// Now element at child_index could be < its children.
parent_index = child_index; // parent_hash remains correct.
}
@@ -5644,7 +5895,7 @@
// Extract elements and create sorted array.
for (int i = len - 1; i > 0; --i) {
// Put max element at the back of the array.
- NoWriteBarrierSwapDescriptors(0, i);
+ NoIncrementalWriteBarrierSwapDescriptors(0, i);
// Shift down the new top element.
int parent_index = 0;
const uint32_t parent_hash = GetKey(parent_index)->Hash();
@@ -5660,7 +5911,7 @@
}
}
if (child_hash <= parent_hash) break;
- NoWriteBarrierSwapDescriptors(parent_index, child_index);
+ NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
parent_index = child_index;
}
}
@@ -6679,6 +6930,20 @@
// Fast check: if hash code is computed for both strings
// a fast negative check can be performed.
if (HasHashCode() && other->HasHashCode()) {
+#ifdef DEBUG
+ if (FLAG_enable_slow_asserts) {
+ if (Hash() != other->Hash()) {
+ bool found_difference = false;
+ for (int i = 0; i < len; i++) {
+ if (Get(i) != other->Get(i)) {
+ found_difference = true;
+ break;
+ }
+ }
+ ASSERT(found_difference);
+ }
+ }
+#endif
if (Hash() != other->Hash()) return false;
}
@@ -6814,12 +7079,16 @@
// Compute the hash code.
uint32_t field = 0;
if (StringShape(this).IsSequentialAscii()) {
- field = HashSequentialString(SeqAsciiString::cast(this)->GetChars(), len);
+ field = HashSequentialString(SeqAsciiString::cast(this)->GetChars(),
+ len,
+ GetHeap()->HashSeed());
} else if (StringShape(this).IsSequentialTwoByte()) {
- field = HashSequentialString(SeqTwoByteString::cast(this)->GetChars(), len);
+ field = HashSequentialString(SeqTwoByteString::cast(this)->GetChars(),
+ len,
+ GetHeap()->HashSeed());
} else {
StringInputBuffer buffer(this);
- field = ComputeHashField(&buffer, len);
+ field = ComputeHashField(&buffer, len, GetHeap()->HashSeed());
}
// Store the hash code in the object.
@@ -6910,8 +7179,9 @@
uint32_t String::ComputeHashField(unibrow::CharacterStream* buffer,
- int length) {
- StringHasher hasher(length);
+ int length,
+ uint32_t seed) {
+ StringHasher hasher(length, seed);
// Very long strings have a trivial hash that doesn't inspect the
// string contents.
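
Every string-hash entry point in this hunk sequence now takes a per-heap seed (GetHeap()->HashSeed()) instead of implicitly starting from zero; the TwoCharHashTableKey change further down shows the concrete mixing steps. A sketch of the seeded one-at-a-time hash those steps imply (illustrative; String::kHashBitMask and String::kZeroHash are the V8 constants used in the zero-hash guard below):

#include <stdint.h>

// Seeded Jenkins one-at-a-time hash, mirroring the steps visible in
// TwoCharHashTableKey. Before this patch the accumulator started at 0.
uint32_t SeededHash(const uint8_t* chars, int length, uint32_t seed) {
  uint32_t hash = seed;
  for (int i = 0; i < length; i++) {
    hash += chars[i];
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;   // finalization
  hash ^= hash >> 11;
  hash += hash << 15;
  // A (masked) hash of 0 is reserved, so remap it, as the patched key does:
  // if ((hash & String::kHashBitMask) == 0) hash = String::kZeroHash;
  return hash;
}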
@@ -7346,7 +7616,7 @@
LookupResult result(heap->isolate());
String* name = GetThisPropertyAssignmentName(i);
js_object->LocalLookupRealNamedProperty(name, &result);
- if (result.IsProperty() && result.type() == CALLBACKS) {
+ if (result.IsFound() && result.type() == CALLBACKS) {
return false;
}
}
@@ -7639,6 +7909,22 @@
}
+#define DECLARE_TAG(ignore1, name, ignore2) name,
+const char* const VisitorSynchronization::kTags[
+ VisitorSynchronization::kNumberOfSyncTags] = {
+ VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
+};
+#undef DECLARE_TAG
+
+
+#define DECLARE_TAG(ignore1, ignore2, name) name,
+const char* const VisitorSynchronization::kTagNames[
+ VisitorSynchronization::kNumberOfSyncTags] = {
+ VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
+};
+#undef DECLARE_TAG
+
+
void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -7855,8 +8141,11 @@
static_cast<Translation::Opcode>(iterator.Next());
ASSERT(Translation::BEGIN == opcode);
int frame_count = iterator.Next();
- PrintF(out, " %s {count=%d}\n", Translation::StringFor(opcode),
- frame_count);
+ int jsframe_count = iterator.Next();
+ PrintF(out, " %s {frame count=%d, js frame count=%d}\n",
+ Translation::StringFor(opcode),
+ frame_count,
+ jsframe_count);
while (iterator.HasNext() &&
Translation::BEGIN !=
@@ -7868,7 +8157,7 @@
UNREACHABLE();
break;
- case Translation::FRAME: {
+ case Translation::JS_FRAME: {
int ast_id = iterator.Next();
int function_id = iterator.Next();
JSFunction* function =
@@ -7880,6 +8169,12 @@
break;
}
+ case Translation::ARGUMENTS_ADAPTOR_FRAME: {
+ unsigned height = iterator.Next();
+ PrintF(out, "{arguments adaptor, height=%d}", height);
+ break;
+ }
+
case Translation::DUPLICATE:
break;
@@ -8115,21 +8410,35 @@
static void CopyFastElementsToFast(FixedArray* source,
FixedArray* destination,
WriteBarrierMode mode) {
- uint32_t count = static_cast<uint32_t>(source->length());
- for (uint32_t i = 0; i < count; ++i) {
- destination->set(i, source->get(i), mode);
+ int count = source->length();
+ int copy_size = Min(count, destination->length());
+ if (mode == SKIP_WRITE_BARRIER ||
+ !Page::FromAddress(destination->address())->IsFlagSet(
+ MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)) {
+ Address to = destination->address() + FixedArray::kHeaderSize;
+ Address from = source->address() + FixedArray::kHeaderSize;
+ memcpy(reinterpret_cast<void*>(to),
+ reinterpret_cast<void*>(from),
+ kPointerSize * copy_size);
+ } else {
+ for (int i = 0; i < copy_size; ++i) {
+ destination->set(i, source->get(i), mode);
+ }
}
}
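
The rewritten copy above adds a destination bounds clamp (copy_size) and a fast path: when the write barrier can be skipped, or the destination page is not flagged as holding pointers interesting to the incremental marker, the tagged slots are block-copied with memcpy; otherwise it falls back to per-slot set() so the barrier fires. Reduced to its skeleton (StoreWithWriteBarrier is a hypothetical stand-in for the barriered store):

#include <string.h>  // memcpy

void StoreWithWriteBarrier(void** slot, void* value);  // hypothetical

// Illustrative fast-path/slow-path split for copying tagged slots.
void CopyTaggedSlots(void** dst, void** src, int n, bool barrier_needed) {
  if (!barrier_needed) {
    memcpy(dst, src, n * sizeof(void*));  // raw word copy, no per-slot hook
  } else {
    for (int i = 0; i < n; ++i) {
      StoreWithWriteBarrier(dst + i, src[i]);  // barriered per-slot store
    }
  }
}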
-static void CopySlowElementsToFast(NumberDictionary* source,
+static void CopySlowElementsToFast(SeededNumberDictionary* source,
FixedArray* destination,
WriteBarrierMode mode) {
+ int destination_length = destination->length();
for (int i = 0; i < source->Capacity(); ++i) {
Object* key = source->KeyAt(i);
if (key->IsNumber()) {
uint32_t entry = static_cast<uint32_t>(key->Number());
- destination->set(entry, source->ValueAt(i), mode);
+ if (entry < static_cast<uint32_t>(destination_length)) {
+ destination->set(entry, source->ValueAt(i), mode);
+ }
}
}
}
@@ -8155,10 +8464,13 @@
Map* new_map = NULL;
if (elements()->map() != heap->non_strict_arguments_elements_map()) {
Object* object;
+ // The resized array has FAST_SMI_ONLY_ELEMENTS if the capacity mode forces
+ // it, or if it's allowed and the old elements array contained only SMIs.
bool has_fast_smi_only_elements =
- (set_capacity_mode == kAllowSmiOnlyElements) &&
- (elements()->map()->has_fast_smi_only_elements() ||
- elements() == heap->empty_fixed_array());
+ (set_capacity_mode == kForceSmiOnlyElements) ||
+ ((set_capacity_mode == kAllowSmiOnlyElements) &&
+ (elements()->map()->has_fast_smi_only_elements() ||
+ elements() == heap->empty_fixed_array()));
ElementsKind elements_kind = has_fast_smi_only_elements
? FAST_SMI_ONLY_ELEMENTS
: FAST_ELEMENTS;
@@ -8176,18 +8488,16 @@
WriteBarrierMode mode(new_elements->GetWriteBarrierMode(no_gc));
CopyFastElementsToFast(FixedArray::cast(old_elements_raw),
new_elements, mode);
- set_map(new_map);
- set_elements(new_elements);
+ set_map_and_elements(new_map, new_elements);
break;
}
case DICTIONARY_ELEMENTS: {
AssertNoAllocation no_gc;
WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
- CopySlowElementsToFast(NumberDictionary::cast(old_elements_raw),
+ CopySlowElementsToFast(SeededNumberDictionary::cast(old_elements_raw),
new_elements,
mode);
- set_map(new_map);
- set_elements(new_elements);
+ set_map_and_elements(new_map, new_elements);
break;
}
case NON_STRICT_ARGUMENTS_ELEMENTS: {
@@ -8198,7 +8508,7 @@
FixedArray* parameter_map = FixedArray::cast(old_elements_raw);
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
if (arguments->IsDictionary()) {
- CopySlowElementsToFast(NumberDictionary::cast(arguments),
+ CopySlowElementsToFast(SeededNumberDictionary::cast(arguments),
new_elements,
mode);
} else {
@@ -8294,7 +8604,7 @@
break;
}
case DICTIONARY_ELEMENTS: {
- elems->Initialize(NumberDictionary::cast(old_elements));
+ elems->Initialize(SeededNumberDictionary::cast(old_elements));
break;
}
default:
@@ -8340,18 +8650,12 @@
void JSArray::Expand(int required_size) {
- Handle<JSArray> self(this);
- Handle<FixedArray> old_backing(FixedArray::cast(elements()));
- int old_size = old_backing->length();
- int new_size = required_size > old_size ? required_size : old_size;
- Handle<FixedArray> new_backing = FACTORY->NewFixedArray(new_size);
- // Can't use this any more now because we may have had a GC!
- for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
- GetIsolate()->factory()->SetContent(self, new_backing);
+ GetIsolate()->factory()->SetElementsCapacityAndLength(
+ Handle<JSArray>(this), required_size, required_size);
}
-MaybeObject* JSObject::SetElementsLength(Object* len) {
+MaybeObject* JSArray::SetElementsLength(Object* len) {
// We should never end in here with a pixel or external array.
ASSERT(AllowsSetElementsLength());
return GetElementsAccessor()->SetLength(this, len);
@@ -8501,13 +8805,14 @@
MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
uint32_t first_arg,
- uint32_t arg_count) {
+ uint32_t arg_count,
+ EnsureElementsMode mode) {
// Elements in |Arguments| are ordered backwards (because they're on the
// stack), but the method that's called here iterates over them in forward
// direction.
return EnsureCanContainElements(
args->arguments() - first_arg - (arg_count - 1),
- arg_count);
+ arg_count, mode);
}
@@ -8559,7 +8864,7 @@
}
case DICTIONARY_ELEMENTS: {
if (element_dictionary()->FindEntry(index)
- != NumberDictionary::kNotFound) {
+ != SeededNumberDictionary::kNotFound) {
return true;
}
break;
@@ -8697,7 +9002,7 @@
}
case DICTIONARY_ELEMENTS: {
if (element_dictionary()->FindEntry(index) !=
- NumberDictionary::kNotFound) {
+ SeededNumberDictionary::kNotFound) {
return DICTIONARY_ELEMENT;
}
break;
@@ -8714,8 +9019,9 @@
// If not aliased, check the arguments.
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
if (arguments->IsDictionary()) {
- NumberDictionary* dictionary = NumberDictionary::cast(arguments);
- if (dictionary->FindEntry(index) != NumberDictionary::kNotFound) {
+ SeededNumberDictionary* dictionary =
+ SeededNumberDictionary::cast(arguments);
+ if (dictionary->FindEntry(index) != SeededNumberDictionary::kNotFound) {
return DICTIONARY_ELEMENT;
}
} else {
@@ -8744,8 +9050,8 @@
return true;
}
} else {
- if (NumberDictionary::cast(elements)->FindEntry(index) !=
- NumberDictionary::kNotFound) {
+ if (SeededNumberDictionary::cast(elements)->FindEntry(index) !=
+ SeededNumberDictionary::kNotFound) {
return true;
}
}
@@ -8812,7 +9118,7 @@
}
case DICTIONARY_ELEMENTS: {
if (element_dictionary()->FindEntry(index)
- != NumberDictionary::kNotFound) {
+ != SeededNumberDictionary::kNotFound) {
return true;
}
break;
@@ -8916,8 +9222,8 @@
}
// __defineGetter__ callback
- if (structure->IsFixedArray()) {
- Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
+ if (structure->IsAccessorPair()) {
+ Object* getter = AccessorPair::cast(structure)->getter();
if (getter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
@@ -8973,8 +9279,8 @@
return *value_handle;
}
- if (structure->IsFixedArray()) {
- Handle<Object> setter(FixedArray::cast(structure)->get(kSetterIndex));
+ if (structure->IsAccessorPair()) {
+ Handle<Object> setter(AccessorPair::cast(structure)->setter());
if (setter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value);
@@ -9135,15 +9441,15 @@
FixedArray* elements = FixedArray::cast(this->elements());
bool is_arguments =
(elements->map() == heap->non_strict_arguments_elements_map());
- NumberDictionary* dictionary = NULL;
+ SeededNumberDictionary* dictionary = NULL;
if (is_arguments) {
- dictionary = NumberDictionary::cast(elements->get(1));
+ dictionary = SeededNumberDictionary::cast(elements->get(1));
} else {
- dictionary = NumberDictionary::cast(elements);
+ dictionary = SeededNumberDictionary::cast(elements);
}
int entry = dictionary->FindEntry(index);
- if (entry != NumberDictionary::kNotFound) {
+ if (entry != SeededNumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
@@ -9188,13 +9494,13 @@
FixedArrayBase* new_dictionary;
MaybeObject* maybe = dictionary->AtNumberPut(index, value);
if (!maybe->To<FixedArrayBase>(&new_dictionary)) return maybe;
- if (dictionary != NumberDictionary::cast(new_dictionary)) {
+ if (dictionary != SeededNumberDictionary::cast(new_dictionary)) {
if (is_arguments) {
elements->set(1, new_dictionary);
} else {
set_elements(new_dictionary);
}
- dictionary = NumberDictionary::cast(new_dictionary);
+ dictionary = SeededNumberDictionary::cast(new_dictionary);
}
}
@@ -9213,11 +9519,20 @@
} else {
new_length = dictionary->max_number_key() + 1;
}
- MaybeObject* result = CanConvertToFastDoubleElements()
+ SetFastElementsCapacityMode set_capacity_mode = FLAG_smi_only_arrays
+ ? kAllowSmiOnlyElements
+ : kDontAllowSmiOnlyElements;
+ bool has_smi_only_elements = false;
+ bool should_convert_to_fast_double_elements =
+ ShouldConvertToFastDoubleElements(&has_smi_only_elements);
+ if (has_smi_only_elements) {
+ set_capacity_mode = kForceSmiOnlyElements;
+ }
+ MaybeObject* result = should_convert_to_fast_double_elements
? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
: SetFastElementsCapacityAndLength(new_length,
new_length,
- kDontAllowSmiOnlyElements);
+ set_capacity_mode);
if (result->IsFailure()) return result;
#ifdef DEBUG
if (FLAG_trace_normalization) {
@@ -9331,6 +9646,35 @@
}
+Handle<Object> JSObject::SetOwnElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode) {
+ ASSERT(!object->HasExternalArrayElements());
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->SetElement(index, *value, strict_mode, false),
+ Object);
+}
+
+
+Handle<Object> JSObject::SetElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode) {
+ if (object->HasExternalArrayElements()) {
+ if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
+ bool has_exception;
+ Handle<Object> number = Execution::ToNumber(value, &has_exception);
+ if (has_exception) return Handle<Object>();
+ value = number;
+ }
+ }
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->SetElement(index, *value, strict_mode, true),
+ Object);
+}
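+
The handle-returning SetElement above may come back empty: if coercing a value for an external array via Execution::ToNumber throws, it returns Handle<Object>() rather than a failure object. A hedged sketch of the caller-side contract (the propagation shown is illustrative, not a specific V8 call site):

// Callers must check for the empty handle before using the result.
Handle<Object> result =
    JSObject::SetElement(object, index, value, strict_mode);
if (result.is_null()) {
  // ToNumber raised an exception; bail out and let it propagate.
  return result;
}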
+
+
MaybeObject* JSObject::SetElement(uint32_t index,
Object* value,
StrictModeFlag strict_mode,
@@ -9453,37 +9797,59 @@
}
+Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->TransitionElementsKind(to_kind),
+ Object);
+}
+
+
MUST_USE_RESULT MaybeObject* JSObject::TransitionElementsKind(
ElementsKind to_kind) {
ElementsKind from_kind = map()->elements_kind();
FixedArrayBase* elms = FixedArrayBase::cast(elements());
uint32_t capacity = static_cast<uint32_t>(elms->length());
uint32_t length = capacity;
+
if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
- }
- if (from_kind == FAST_SMI_ONLY_ELEMENTS) {
- if (to_kind == FAST_DOUBLE_ELEMENTS) {
- MaybeObject* maybe_result =
- SetFastDoubleElementsCapacityAndLength(capacity, length);
- if (maybe_result->IsFailure()) return maybe_result;
- return this;
- } else if (to_kind == FAST_ELEMENTS) {
- MaybeObject* maybe_new_map = GetElementsTransitionMap(FAST_ELEMENTS);
- Map* new_map;
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, from_kind, elms, FAST_ELEMENTS, elms);
- }
- set_map(new_map);
- return this;
+ Object* raw_length = JSArray::cast(this)->length();
+ if (raw_length->IsUndefined()) {
+ // If length is undefined, the JSArray is being initialized and has no
+ // elements; assume a length of zero.
+ length = 0;
+ } else {
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
}
- } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ }
+
+ if ((from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) ||
+ (length == 0)) {
+ MaybeObject* maybe_new_map = GetElementsTransitionMap(to_kind);
+ Map* new_map;
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ if (FLAG_trace_elements_transitions) {
+ PrintElementsTransition(stdout, from_kind, elms, to_kind, elms);
+ }
+ set_map(new_map);
+ return this;
+ }
+
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
+ MaybeObject* maybe_result =
+ SetFastDoubleElementsCapacityAndLength(capacity, length);
+ if (maybe_result->IsFailure()) return maybe_result;
+ return this;
+ }
+
+ if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
MaybeObject* maybe_result = SetFastElementsCapacityAndLength(
capacity, length, kDontAllowSmiOnlyElements);
if (maybe_result->IsFailure()) return maybe_result;
return this;
}
+
// This method should never be called for any other case than the ones
// handled above.
UNREACHABLE();
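
After the restructuring, TransitionElementsKind handles exactly four situations before falling through to UNREACHABLE(); in summary (derived from the branches above):

// FAST_SMI_ONLY_ELEMENTS  -> FAST_ELEMENTS          swap in the new map only
// any from_kind, length 0 -> to_kind                swap in the new map only
// FAST_SMI_ONLY_ELEMENTS  -> FAST_DOUBLE_ELEMENTS   reallocate as doubles
// FAST_DOUBLE_ELEMENTS    -> FAST_ELEMENTS          reallocate as tagged values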
@@ -9582,7 +9948,8 @@
FixedArray::cast(FixedArray::cast(backing_store_base)->get(1));
backing_store = FixedArray::cast(backing_store_base);
if (backing_store->IsDictionary()) {
- NumberDictionary* dictionary = NumberDictionary::cast(backing_store);
+ SeededNumberDictionary* dictionary =
+ SeededNumberDictionary::cast(backing_store);
*capacity = dictionary->Capacity();
*used = dictionary->NumberOfElements();
break;
@@ -9597,8 +9964,8 @@
}
break;
case DICTIONARY_ELEMENTS: {
- NumberDictionary* dictionary =
- NumberDictionary::cast(FixedArray::cast(elements()));
+ SeededNumberDictionary* dictionary =
+ SeededNumberDictionary::cast(FixedArray::cast(elements()));
*capacity = dictionary->Capacity();
*used = dictionary->NumberOfElements();
break;
@@ -9643,8 +10010,8 @@
int old_capacity = 0;
int used_elements = 0;
GetElementsCapacityAndUsage(&old_capacity, &used_elements);
- int dictionary_size = NumberDictionary::ComputeCapacity(used_elements) *
- NumberDictionary::kEntrySize;
+ int dictionary_size = SeededNumberDictionary::ComputeCapacity(used_elements) *
+ SeededNumberDictionary::kEntrySize;
return 3 * dictionary_size <= new_capacity;
}
@@ -9658,11 +10025,11 @@
if (IsAccessCheckNeeded()) return false;
FixedArray* elements = FixedArray::cast(this->elements());
- NumberDictionary* dictionary = NULL;
+ SeededNumberDictionary* dictionary = NULL;
if (elements->map() == GetHeap()->non_strict_arguments_elements_map()) {
- dictionary = NumberDictionary::cast(elements->get(1));
+ dictionary = SeededNumberDictionary::cast(elements->get(1));
} else {
- dictionary = NumberDictionary::cast(elements);
+ dictionary = SeededNumberDictionary::cast(elements);
}
// If an element has been added at a very high index in the elements
// dictionary, we cannot go back to fast case.
@@ -9677,22 +10044,31 @@
array_size = dictionary->max_number_key();
}
uint32_t dictionary_size = static_cast<uint32_t>(dictionary->Capacity()) *
- NumberDictionary::kEntrySize;
+ SeededNumberDictionary::kEntrySize;
return 2 * dictionary_size >= array_size;
}
-bool JSObject::CanConvertToFastDoubleElements() {
+bool JSObject::ShouldConvertToFastDoubleElements(
+ bool* has_smi_only_elements) {
+ *has_smi_only_elements = false;
if (FLAG_unbox_double_arrays) {
ASSERT(HasDictionaryElements());
- NumberDictionary* dictionary = NumberDictionary::cast(elements());
+ SeededNumberDictionary* dictionary =
+ SeededNumberDictionary::cast(elements());
+ bool found_double = false;
for (int i = 0; i < dictionary->Capacity(); i++) {
Object* key = dictionary->KeyAt(i);
if (key->IsNumber()) {
- if (!dictionary->ValueAt(i)->IsNumber()) return false;
+ Object* value = dictionary->ValueAt(i);
+ if (!value->IsNumber()) return false;
+ if (!value->IsSmi()) {
+ found_double = true;
+ }
}
}
- return true;
+ *has_smi_only_elements = !found_double;
+ return found_double;
} else {
return false;
}
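
The renamed predicate now answers two questions at once: the return value says "every element is a number and at least one of them needs a double", while *has_smi_only_elements says "every element is a Smi", letting the caller earlier in this file force kForceSmiOnlyElements. Illustrative outcomes, assuming FLAG_unbox_double_arrays is on:

// values {1, 2, 3}  -> returns false, *has_smi_only_elements == true
//                      (caller forces a SMI-only fast array)
// values {1, 2.5}   -> returns true,  *has_smi_only_elements == false
//                      (caller converts to FAST_DOUBLE_ELEMENTS)
// values {1, "x"}   -> returns false early; the flag stays false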
@@ -9899,7 +10275,7 @@
}
case DICTIONARY_ELEMENTS: {
return element_dictionary()->FindEntry(index)
- != NumberDictionary::kNotFound;
+ != SeededNumberDictionary::kNotFound;
}
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNIMPLEMENTED();
@@ -9923,7 +10299,7 @@
LookupResult result(isolate);
LocalLookupRealNamedProperty(key, &result);
- return result.IsProperty() && (result.type() == CALLBACKS);
+ return result.IsFound() && (result.type() == CALLBACKS);
}
@@ -10168,7 +10544,7 @@
if (storage != NULL) {
element_dictionary()->CopyKeysTo(storage,
filter,
- NumberDictionary::SORTED);
+ SeededNumberDictionary::SORTED);
}
counter += element_dictionary()->NumberOfElementsFilterAttributes(filter);
break;
@@ -10180,9 +10556,11 @@
if (arguments->IsDictionary()) {
// Copy the keys from arguments first, because Dictionary::CopyKeysTo
// will insert in storage starting at index 0.
- NumberDictionary* dictionary = NumberDictionary::cast(arguments);
+ SeededNumberDictionary* dictionary =
+ SeededNumberDictionary::cast(arguments);
if (storage != NULL) {
- dictionary->CopyKeysTo(storage, filter, NumberDictionary::UNSORTED);
+ dictionary->CopyKeysTo(
+ storage, filter, SeededNumberDictionary::UNSORTED);
}
counter += dictionary->NumberOfElementsFilterAttributes(filter);
for (int i = 0; i < mapped_length; ++i) {
@@ -10396,8 +10774,8 @@
// Utf8SymbolKey carries a vector of chars as key.
class Utf8SymbolKey : public HashTableKey {
public:
- explicit Utf8SymbolKey(Vector<const char> string)
- : string_(string), hash_field_(0) { }
+ explicit Utf8SymbolKey(Vector<const char> string, uint32_t seed)
+ : string_(string), hash_field_(0), seed_(seed) { }
bool IsMatch(Object* string) {
return String::cast(string)->IsEqualTo(string_);
@@ -10408,7 +10786,7 @@
unibrow::Utf8InputBuffer<> buffer(string_.start(),
static_cast<unsigned>(string_.length()));
chars_ = buffer.Length();
- hash_field_ = String::ComputeHashField(&buffer, chars_);
+ hash_field_ = String::ComputeHashField(&buffer, chars_, seed_);
uint32_t result = hash_field_ >> String::kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
@@ -10427,17 +10805,18 @@
Vector<const char> string_;
uint32_t hash_field_;
int chars_; // Caches the number of characters when computing the hash code.
+ uint32_t seed_;
};
template <typename Char>
class SequentialSymbolKey : public HashTableKey {
public:
- explicit SequentialSymbolKey(Vector<const Char> string)
- : string_(string), hash_field_(0) { }
+ explicit SequentialSymbolKey(Vector<const Char> string, uint32_t seed)
+ : string_(string), hash_field_(0), seed_(seed) { }
uint32_t Hash() {
- StringHasher hasher(string_.length());
+ StringHasher hasher(string_.length(), seed_);
// Very long strings have a trivial hash that doesn't inspect the
// string contents.
@@ -10473,14 +10852,15 @@
Vector<const Char> string_;
uint32_t hash_field_;
+ uint32_t seed_;
};
class AsciiSymbolKey : public SequentialSymbolKey<char> {
public:
- explicit AsciiSymbolKey(Vector<const char> str)
- : SequentialSymbolKey<char>(str) { }
+ AsciiSymbolKey(Vector<const char> str, uint32_t seed)
+ : SequentialSymbolKey<char>(str, seed) { }
bool IsMatch(Object* string) {
return String::cast(string)->IsAsciiEqualTo(string_);
@@ -10497,13 +10877,14 @@
public:
explicit SubStringAsciiSymbolKey(Handle<SeqAsciiString> string,
int from,
- int length)
- : string_(string), from_(from), length_(length) { }
+ int length,
+ uint32_t seed)
+ : string_(string), from_(from), length_(length), seed_(seed) { }
uint32_t Hash() {
ASSERT(length_ >= 0);
ASSERT(from_ + length_ <= string_->length());
- StringHasher hasher(length_);
+ StringHasher hasher(length_, string_->GetHeap()->HashSeed());
// Very long strings have a trivial hash that doesn't inspect the
// string contents.
@@ -10555,13 +10936,14 @@
int from_;
int length_;
uint32_t hash_field_;
+ uint32_t seed_;
};
class TwoByteSymbolKey : public SequentialSymbolKey<uc16> {
public:
- explicit TwoByteSymbolKey(Vector<const uc16> str)
- : SequentialSymbolKey<uc16>(str) { }
+ explicit TwoByteSymbolKey(Vector<const uc16> str, uint32_t seed)
+ : SequentialSymbolKey<uc16>(str, seed) { }
bool IsMatch(Object* string) {
return String::cast(string)->IsTwoByteEqualTo(string_);
@@ -10598,7 +10980,7 @@
// Transform string to symbol if possible.
Map* map = heap->SymbolMapForString(string_);
if (map != NULL) {
- string_->set_map(map);
+ string_->set_map_no_write_barrier(map);
ASSERT(string_->IsSymbol());
return string_;
}
@@ -10659,7 +11041,7 @@
// Optimized for symbol key. Knowledge of the key type allows:
// 1. Move the check if the key is a symbol out of the loop.
- // 2. Avoid comparing hash codes in symbol to symbol comparision.
+ // 2. Avoid comparing hash codes in symbol to symbol comparison.
// 3. Detect a case when a dictionary key is not a symbol but the key is.
// In case of positive result the dictionary key may be replaced by
// the symbol with minimal performance penalty. It gives a chance to
@@ -10711,7 +11093,7 @@
uint32_t from_index = EntryToIndex(i);
Object* k = get(from_index);
if (IsKey(k)) {
- uint32_t hash = Shape::HashForObject(key, k);
+ uint32_t hash = HashTable<Shape, Key>::HashForObject(key, k);
uint32_t insertion_index =
EntryToIndex(new_table->FindInsertionEntry(hash));
for (int j = 0; j < Shape::kEntrySize; j++) {
@@ -10809,38 +11191,49 @@
template class Dictionary<StringDictionaryShape, String*>;
-template class Dictionary<NumberDictionaryShape, uint32_t>;
+template class Dictionary<SeededNumberDictionaryShape, uint32_t>;
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::Allocate(
- int);
+template class Dictionary<UnseededNumberDictionaryShape, uint32_t>;
+
+template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
+ Allocate(int at_least_space_for);
+
+template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
+ Allocate(int at_least_space_for);
template MaybeObject* Dictionary<StringDictionaryShape, String*>::Allocate(
int);
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::AtPut(
+template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::AtPut(
uint32_t, Object*);
-template Object* Dictionary<NumberDictionaryShape, uint32_t>::SlowReverseLookup(
- Object*);
+template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
+ AtPut(uint32_t, Object*);
+
+template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>::
+ SlowReverseLookup(Object* value);
+
+template Object* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
+ SlowReverseLookup(Object* value);
template Object* Dictionary<StringDictionaryShape, String*>::SlowReverseLookup(
Object*);
-template void Dictionary<NumberDictionaryShape, uint32_t>::CopyKeysTo(
+template void Dictionary<SeededNumberDictionaryShape, uint32_t>::CopyKeysTo(
FixedArray*,
PropertyAttributes,
- Dictionary<NumberDictionaryShape, uint32_t>::SortMode);
+ Dictionary<SeededNumberDictionaryShape, uint32_t>::SortMode);
template Object* Dictionary<StringDictionaryShape, String*>::DeleteProperty(
int, JSObject::DeleteMode);
-template Object* Dictionary<NumberDictionaryShape, uint32_t>::DeleteProperty(
- int, JSObject::DeleteMode);
+template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>::
+ DeleteProperty(int, JSObject::DeleteMode);
template MaybeObject* Dictionary<StringDictionaryShape, String*>::Shrink(
String*);
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::Shrink(
+template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::Shrink(
uint32_t);
template void Dictionary<StringDictionaryShape, String*>::CopyKeysTo(
@@ -10859,32 +11252,41 @@
Dictionary<StringDictionaryShape, String*>::GenerateNewEnumerationIndices();
template int
-Dictionary<NumberDictionaryShape, uint32_t>::NumberOfElementsFilterAttributes(
- PropertyAttributes);
+Dictionary<SeededNumberDictionaryShape, uint32_t>::
+ NumberOfElementsFilterAttributes(PropertyAttributes);
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::Add(
+template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::Add(
uint32_t, Object*, PropertyDetails);
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::
+template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::Add(
+ uint32_t, Object*, PropertyDetails);
+
+template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
+ EnsureCapacity(int, uint32_t);
+
+template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
EnsureCapacity(int, uint32_t);
template MaybeObject* Dictionary<StringDictionaryShape, String*>::
EnsureCapacity(int, String*);
-template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::AddEntry(
- uint32_t, Object*, PropertyDetails, uint32_t);
+template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
+ AddEntry(uint32_t, Object*, PropertyDetails, uint32_t);
+
+template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
+ AddEntry(uint32_t, Object*, PropertyDetails, uint32_t);
template MaybeObject* Dictionary<StringDictionaryShape, String*>::AddEntry(
String*, Object*, PropertyDetails, uint32_t);
template
-int Dictionary<NumberDictionaryShape, uint32_t>::NumberOfEnumElements();
+int Dictionary<SeededNumberDictionaryShape, uint32_t>::NumberOfEnumElements();
template
int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements();
template
-int HashTable<NumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
+int HashTable<SeededNumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
// Collates undefined and nonexistent elements below limit from position
@@ -10894,7 +11296,7 @@
// Must stay in dictionary mode, either because of requires_slow_elements,
// or because we are not going to sort (and therefore compact) all of the
// elements.
- NumberDictionary* dict = element_dictionary();
+ SeededNumberDictionary* dict = element_dictionary();
HeapNumber* result_double = NULL;
if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
// Allocate space for result before we start mutating the object.
@@ -10907,10 +11309,10 @@
Object* obj;
{ MaybeObject* maybe_obj =
- NumberDictionary::Allocate(dict->NumberOfElements());
+ SeededNumberDictionary::Allocate(dict->NumberOfElements());
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- NumberDictionary* new_dict = NumberDictionary::cast(obj);
+ SeededNumberDictionary* new_dict = SeededNumberDictionary::cast(obj);
AssertNoAllocation no_alloc;
@@ -10994,7 +11396,7 @@
if (HasDictionaryElements()) {
// Convert to fast elements containing only the existing properties.
// Ordering is irrelevant, since we are going to sort anyway.
- NumberDictionary* dict = element_dictionary();
+ SeededNumberDictionary* dict = element_dictionary();
if (IsJSArray() || dict->requires_slow_elements() ||
dict->max_number_key() >= limit) {
return PrepareSlowElementsForSort(limit);
@@ -11346,10 +11748,12 @@
// algorithm.
class TwoCharHashTableKey : public HashTableKey {
public:
- TwoCharHashTableKey(uint32_t c1, uint32_t c2)
+ TwoCharHashTableKey(uint32_t c1, uint32_t c2, uint32_t seed)
: c1_(c1), c2_(c2) {
// Char 1.
- uint32_t hash = c1 + (c1 << 10);
+ uint32_t hash = seed;
+ hash += c1;
+ hash += hash << 10;
hash ^= hash >> 6;
// Char 2.
hash += c2;
@@ -11359,9 +11763,9 @@
hash += hash << 3;
hash ^= hash >> 11;
hash += hash << 15;
- if (hash == 0) hash = 27;
+ if ((hash & String::kHashBitMask) == 0) hash = String::kZeroHash;
#ifdef DEBUG
- StringHasher hasher(2);
+ StringHasher hasher(2, seed);
hasher.AddCharacter(c1);
hasher.AddCharacter(c2);
// If this assert fails then we failed to reproduce the two-character
@@ -11418,7 +11822,7 @@
bool SymbolTable::LookupTwoCharsSymbolIfExists(uint32_t c1,
uint32_t c2,
String** symbol) {
- TwoCharHashTableKey key(c1, c2);
+ TwoCharHashTableKey key(c1, c2, GetHeap()->HashSeed());
int entry = FindEntry(&key);
if (entry == kNotFound) {
return false;
@@ -11431,15 +11835,16 @@
}
-MaybeObject* SymbolTable::LookupSymbol(Vector<const char> str, Object** s) {
- Utf8SymbolKey key(str);
+MaybeObject* SymbolTable::LookupSymbol(Vector<const char> str,
+ Object** s) {
+ Utf8SymbolKey key(str, GetHeap()->HashSeed());
return LookupKey(&key, s);
}
MaybeObject* SymbolTable::LookupAsciiSymbol(Vector<const char> str,
Object** s) {
- AsciiSymbolKey key(str);
+ AsciiSymbolKey key(str, GetHeap()->HashSeed());
return LookupKey(&key, s);
}
@@ -11448,14 +11853,14 @@
int from,
int length,
Object** s) {
- SubStringAsciiSymbolKey key(str, from, length);
+ SubStringAsciiSymbolKey key(str, from, length, GetHeap()->HashSeed());
return LookupKey(&key, s);
}
MaybeObject* SymbolTable::LookupTwoByteSymbol(Vector<const uc16> str,
Object** s) {
- TwoByteSymbolKey key(str);
+ TwoByteSymbolKey key(str, GetHeap()->HashSeed());
return LookupKey(&key, s);
}
@@ -11794,8 +12199,9 @@
if (!maybe_k->ToObject(&k)) return maybe_k;
}
PropertyDetails details = PropertyDetails(NONE, NORMAL);
- return Dictionary<Shape, Key>::cast(obj)->
- AddEntry(key, value, details, Shape::Hash(key));
+
+ return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details,
+ Dictionary<Shape, Key>::Hash(key));
}
@@ -11810,8 +12216,9 @@
{ MaybeObject* maybe_obj = EnsureCapacity(1, key);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- return Dictionary<Shape, Key>::cast(obj)->
- AddEntry(key, value, details, Shape::Hash(key));
+
+ return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details,
+ Dictionary<Shape, Key>::Hash(key));
}
@@ -11844,7 +12251,7 @@
}
-void NumberDictionary::UpdateMaxNumberKey(uint32_t key) {
+void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key) {
// If the dictionary requires slow elements an element has already
// been added at a high index.
if (requires_slow_elements()) return;
@@ -11863,31 +12270,65 @@
}
-MaybeObject* NumberDictionary::AddNumberEntry(uint32_t key,
- Object* value,
- PropertyDetails details) {
+MaybeObject* SeededNumberDictionary::AddNumberEntry(uint32_t key,
+ Object* value,
+ PropertyDetails details) {
UpdateMaxNumberKey(key);
SLOW_ASSERT(this->FindEntry(key) == kNotFound);
return Add(key, value, details);
}
-MaybeObject* NumberDictionary::AtNumberPut(uint32_t key, Object* value) {
+MaybeObject* UnseededNumberDictionary::AddNumberEntry(uint32_t key,
+ Object* value) {
+ SLOW_ASSERT(this->FindEntry(key) == kNotFound);
+ return Add(key, value, PropertyDetails(NONE, NORMAL));
+}
+
+
+MaybeObject* SeededNumberDictionary::AtNumberPut(uint32_t key, Object* value) {
UpdateMaxNumberKey(key);
return AtPut(key, value);
}
-MaybeObject* NumberDictionary::Set(uint32_t key,
- Object* value,
- PropertyDetails details) {
+MaybeObject* UnseededNumberDictionary::AtNumberPut(uint32_t key,
+ Object* value) {
+ return AtPut(key, value);
+}
+
+
+Handle<SeededNumberDictionary> SeededNumberDictionary::Set(
+ Handle<SeededNumberDictionary> dictionary,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyDetails details) {
+ CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
+ dictionary->Set(index, *value, details),
+ SeededNumberDictionary);
+}
+
+
+Handle<UnseededNumberDictionary> UnseededNumberDictionary::Set(
+ Handle<UnseededNumberDictionary> dictionary,
+ uint32_t index,
+ Handle<Object> value) {
+ CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
+ dictionary->Set(index, *value),
+ UnseededNumberDictionary);
+}
+
+
+MaybeObject* SeededNumberDictionary::Set(uint32_t key,
+ Object* value,
+ PropertyDetails details) {
int entry = FindEntry(key);
if (entry == kNotFound) return AddNumberEntry(key, value, details);
// Preserve enumeration index.
details = PropertyDetails(details.attributes(),
details.type(),
DetailsAt(entry).index());
- MaybeObject* maybe_object_key = NumberDictionaryShape::AsObject(key);
+ MaybeObject* maybe_object_key = SeededNumberDictionaryShape::AsObject(key);
Object* object_key;
if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key;
SetEntry(entry, object_key, value, details);
@@ -11895,6 +12336,18 @@
}
+MaybeObject* UnseededNumberDictionary::Set(uint32_t key,
+ Object* value) {
+ int entry = FindEntry(key);
+ if (entry == kNotFound) return AddNumberEntry(key, value);
+ MaybeObject* maybe_object_key = UnseededNumberDictionaryShape::AsObject(key);
+ Object* object_key;
+ if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key;
+ SetEntry(entry, object_key, value);
+ return this;
+}
+
+
template<typename Shape, typename Key>
int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes(
diff --git a/src/objects.h b/src/objects.h
index 49aa2f7..d6d1057 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -107,6 +107,7 @@
// - SharedFunctionInfo
// - Struct
// - AccessorInfo
+// - AccessorPair
// - AccessCheckInfo
// - InterceptorInfo
// - CallHandlerInfo
@@ -162,6 +163,11 @@
LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS
};
+enum CompareMapMode {
+ REQUIRE_EXACT_MAP,
+ ALLOW_ELEMENT_TRANSITION_MAPS
+};
+
const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
void PrintElementsKind(FILE* out, ElementsKind kind);
@@ -211,7 +217,7 @@
// encoding is considered TWO_BYTE. It is not mentioned in the name. ASCII
// encoding is mentioned explicitly in the name. Likewise, the default
// representation is considered sequential. It is not mentioned in the
-// name. The other representations (eg, CONS, EXTERNAL) are explicitly
+// name. The other representations (e.g. CONS, EXTERNAL) are explicitly
// mentioned. Finally, the string is either a SYMBOL_TYPE (if it is a
// symbol) or a STRING_TYPE (if it is not a symbol).
//
@@ -270,6 +276,7 @@
V(FILLER_TYPE) \
\
V(ACCESSOR_INFO_TYPE) \
+ V(ACCESSOR_PAIR_TYPE) \
V(ACCESS_CHECK_INFO_TYPE) \
V(INTERCEPTOR_INFO_TYPE) \
V(CALL_HANDLER_INFO_TYPE) \
@@ -417,6 +424,7 @@
// manually.
#define STRUCT_LIST_ALL(V) \
V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
+ V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \
V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info) \
@@ -484,7 +492,7 @@
STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask) && kSlicedNotConsMask != 0);
// If bit 7 is clear, then bit 3 indicates whether this two-byte
-// string actually contains ascii data.
+// string actually contains ASCII data.
const uint32_t kAsciiDataHintMask = 0x08;
const uint32_t kAsciiDataHintTag = 0x08;
@@ -570,6 +578,7 @@
// Structs.
ACCESSOR_INFO_TYPE,
+ ACCESSOR_PAIR_TYPE,
ACCESS_CHECK_INFO_TYPE,
INTERCEPTOR_INFO_TYPE,
CALL_HANDLER_INFO_TYPE,
@@ -1078,7 +1087,7 @@
// Heap objects typically have a map pointer in their first word. However,
-// during GC other data (eg, mark bits, forwarding addresses) is sometimes
+// during GC other data (e.g. mark bits, forwarding addresses) is sometimes
// encoded in the first word. The class MapWord is an abstraction of the
// value in a heap object's first word.
class MapWord BASE_EMBEDDED {
@@ -1097,7 +1106,7 @@
// True if this map word is a forwarding address for a scavenge
// collection. Only valid during a scavenge collection (specifically,
- // when all map words are heap object pointers, ie. not during a full GC).
+ // when all map words are heap object pointers, i.e. not during a full GC).
inline bool IsForwardingAddress();
// Create a map word from a forwarding address.
@@ -1132,7 +1141,10 @@
// information.
inline Map* map();
inline void set_map(Map* value);
- inline void set_map_unsafe(Map* value);
+ // The no-write-barrier version. This is OK if the object is white and in
+ // new space, or if the value is an immortal immutable object, like the maps
+ // of primitive (non-JS) objects like strings, heap numbers, etc.
+ inline void set_map_no_write_barrier(Map* value);
// During garbage collection, the map word of a heap object does not
// necessarily contain a map pointer.
@@ -1320,6 +1332,13 @@
};
+enum EnsureElementsMode {
+ DONT_ALLOW_DOUBLE_ELEMENTS,
+ ALLOW_COPIED_DOUBLE_ELEMENTS,
+ ALLOW_CONVERTED_DOUBLE_ELEMENTS
+};
+
+
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
class JSReceiver: public HeapObject {
@@ -1333,6 +1352,11 @@
// Casting.
static inline JSReceiver* cast(Object* obj);
+ static Handle<Object> SetProperty(Handle<JSReceiver> object,
+ Handle<String> key,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ StrictModeFlag strict_mode);
// Can cause GC.
MUST_USE_RESULT MaybeObject* SetProperty(String* key,
Object* value,
@@ -1462,8 +1486,12 @@
inline bool HasExternalDoubleElements();
bool HasFastArgumentsElements();
bool HasDictionaryArgumentsElements();
- inline bool AllowsSetElementsLength();
- inline NumberDictionary* element_dictionary(); // Gets slow elements.
+ inline SeededNumberDictionary* element_dictionary(); // Gets slow elements.
+
+ inline void set_map_and_elements(
+ Map* map,
+ FixedArrayBase* value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Requires: HasFastElements().
MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements();
@@ -1508,6 +1536,14 @@
Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
+
+ static Handle<Object> SetLocalPropertyIgnoreAttributes(
+ Handle<JSObject> object,
+ Handle<String> key,
+ Handle<Object> value,
+ PropertyAttributes attributes);
+
+ // Can cause GC.
MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
String* key,
Object* value,
@@ -1523,6 +1559,11 @@
// Sets the property value in a normalized object given (key, value, details).
// Handles the special representation of JS global objects.
+ static Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
+ Handle<String> key,
+ Handle<Object> value,
+ PropertyDetails details);
+
MUST_USE_RESULT MaybeObject* SetNormalizedProperty(String* name,
Object* value,
PropertyDetails details);
@@ -1592,8 +1633,11 @@
// hidden properties.
// Sets a hidden property on this object. Returns this object if successful,
- // undefined if called on a detached proxy, and a failure if a GC
- // is required
+ // undefined if called on a detached proxy.
+ static Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
+ Handle<String> key,
+ Handle<Object> value);
+ // Returns a failure if a GC is required.
MaybeObject* SetHiddenProperty(String* key, Object* value);
// Gets the value of a hidden property with the given key. Returns undefined
// if the property doesn't exist (or if called on a detached proxy),
@@ -1605,24 +1649,32 @@
// Returns true if the object has a property with the hidden symbol as name.
bool HasHiddenProperties();
+ static int GetIdentityHash(Handle<JSObject> obj);
MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
MUST_USE_RESULT MaybeObject* SetIdentityHash(Object* hash, CreationFlag flag);
+ static Handle<Object> DeleteProperty(Handle<JSObject> obj,
+ Handle<String> name);
MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
+
+ static Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
inline void ValidateSmiOnlyElements();
- // Makes sure that this object can contain non-smi Object as elements.
- inline MaybeObject* EnsureCanContainNonSmiElements();
+ // Makes sure that this object can contain HeapObject as elements.
+ inline MaybeObject* EnsureCanContainHeapObjectElements();
// Makes sure that this object can contain the specified elements.
inline MaybeObject* EnsureCanContainElements(Object** elements,
- uint32_t count);
- inline MaybeObject* EnsureCanContainElements(FixedArray* elements);
+ uint32_t count,
+ EnsureElementsMode mode);
+ inline MaybeObject* EnsureCanContainElements(FixedArrayBase* elements,
+ EnsureElementsMode mode);
MaybeObject* EnsureCanContainElements(Arguments* arguments,
uint32_t first_arg,
- uint32_t arg_count);
+ uint32_t arg_count,
+ EnsureElementsMode mode);
// Do we want to keep the elements in fast case when increasing the
// capacity?
@@ -1633,8 +1685,9 @@
// elements.
bool ShouldConvertToFastElements();
// Returns true if the elements of the JSObject contain only values that can be
- // represented in a FixedDoubleArray.
- bool CanConvertToFastDoubleElements();
+ // represented in a FixedDoubleArray, and at least one value that can only
+ // be represented as a double and not a Smi.
+ bool ShouldConvertToFastDoubleElements(bool* has_smi_only_elements);
// Tells whether the index'th element is present.
bool HasElementWithReceiver(JSReceiver* receiver, uint32_t index);
@@ -1684,7 +1737,18 @@
StrictModeFlag strict_mode,
bool check_prototype = true);
- // Set the index'th array element.
+
+ static Handle<Object> SetOwnElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode);
+
+ // Empty handle is returned if the element cannot be set to the given value.
+ static MUST_USE_RESULT Handle<Object> SetElement(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ StrictModeFlag strict_mode);
+
// A Failure object is returned if GC is needed.
MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
Object* value,
@@ -1697,6 +1761,7 @@
enum SetFastElementsCapacityMode {
kAllowSmiOnlyElements,
+ kForceSmiOnlyElements,
kDontAllowSmiOnlyElements
};
@@ -1721,9 +1786,6 @@
bool HasRealElementProperty(uint32_t index);
bool HasRealNamedCallbackProperty(String* key);
- // Initializes the array to a certain length
- MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length);
-
// Get the header size for a JSObject. Used to compute the index of
// internal fields as well as the number of internal fields.
inline int GetHeaderSize();
@@ -1796,6 +1858,9 @@
MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
ElementsKind elements_kind);
+ static Handle<Object> TransitionElementsKind(Handle<JSObject> object,
+ ElementsKind to_kind);
+
MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
// Converts a descriptor of any other type to a real field,
@@ -1836,12 +1901,19 @@
// representation. If the object is expected to have additional properties
// added this number can be indicated to have the backing store allocated to
// an initial capacity for holding these properties.
+ static void NormalizeProperties(Handle<JSObject> object,
+ PropertyNormalizationMode mode,
+ int expected_additional_properties);
+
MUST_USE_RESULT MaybeObject* NormalizeProperties(
PropertyNormalizationMode mode,
int expected_additional_properties);
- // Convert and update the elements backing store to be a NumberDictionary
- // dictionary. Returns the backing after conversion.
+ // Convert and update the elements backing store to be a
+ // SeededNumberDictionary dictionary. Returns the backing after conversion.
+ static Handle<SeededNumberDictionary> NormalizeElements(
+ Handle<JSObject> object);
+
MUST_USE_RESULT MaybeObject* NormalizeElements();
static void UpdateMapCodeCache(Handle<JSObject> object,
@@ -1852,6 +1924,9 @@
// Transform slow named properties to fast variants.
// Returns failure if allocation failed.
+ static void TransformToFastProperties(Handle<JSObject> object,
+ int unused_property_fields);
+
MUST_USE_RESULT MaybeObject* TransformToFastProperties(
int unused_property_fields);
@@ -1883,6 +1958,7 @@
static inline JSObject* cast(Object* obj);
// Disallow further properties to be added to the object.
+ static Handle<Object> PreventExtensions(Handle<JSObject> object);
MUST_USE_RESULT MaybeObject* PreventExtensions();
@@ -1936,11 +2012,6 @@
#endif
Object* SlowReverseLookup(Object* value);
- // Getters and setters are stored in a fixed array property.
- // These are constants for their indices.
- static const int kGetterIndex = 0;
- static const int kSetterIndex = 1;
-
// Maximal number of fast properties for the JSObject. Used to
// restrict the number of map transitions to avoid an explosion in
// the number of maps for objects used as dictionaries.
@@ -2122,6 +2193,9 @@
// Gives access to raw memory which stores the array's data.
inline Object** data_start();
+ inline Object** GetFirstElementAddress();
+ inline bool ContainsOnlySmisOrHoles();
+
// Copy operations.
MUST_USE_RESULT inline MaybeObject* Copy();
MUST_USE_RESULT MaybeObject* CopySize(int new_length);
@@ -2188,6 +2262,13 @@
int index,
Object* value);
+ // Set operation on FixedArray without incremental write barrier. Can
+ // only be used if the object is guaranteed to be white (whiteness witness
+ // is present).
+ static inline void NoIncrementalWriteBarrierSet(FixedArray* array,
+ int index,
+ Object* value);
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
};
@@ -2198,7 +2279,7 @@
public:
inline void Initialize(FixedArray* from);
inline void Initialize(FixedDoubleArray* from);
- inline void Initialize(NumberDictionary* from);
+ inline void Initialize(SeededNumberDictionary* from);
// Setter and getter for elements.
inline double get_scalar(int index);
@@ -2466,12 +2547,12 @@
NULL_DESCRIPTOR;
}
// Swap operation on FixedArray without using write barriers.
- static inline void NoWriteBarrierSwap(FixedArray* array,
- int first,
- int second);
+ static inline void NoIncrementalWriteBarrierSwap(
+ FixedArray* array, int first, int second);
// Swap descriptor first and second.
- inline void NoWriteBarrierSwapDescriptors(int first, int second);
+ inline void NoIncrementalWriteBarrierSwapDescriptors(
+ int first, int second);
FixedArray* GetContentArray() {
return FixedArray::cast(get(kContentArrayIndex));
@@ -2513,9 +2594,44 @@
// beginning of the backing storage that can be used for non-element
// information by subclasses.
+template<typename Key>
+class BaseShape {
+ public:
+ static const bool UsesSeed = false;
+ static uint32_t Hash(Key key) { return 0; }
+ static uint32_t SeededHash(Key key, uint32_t seed) {
+ ASSERT(UsesSeed);
+ return Hash(key);
+ }
+ static uint32_t HashForObject(Key key, Object* object) { return 0; }
+ static uint32_t SeededHashForObject(Key key, uint32_t seed, Object* object) {
+ ASSERT(UsesSeed);
+ return HashForObject(key, object);
+ }
+};
+
template<typename Shape, typename Key>
class HashTable: public FixedArray {
public:
+ // Wrapper methods
+ inline uint32_t Hash(Key key) {
+ if (Shape::UsesSeed) {
+ return Shape::SeededHash(key,
+ GetHeap()->HashSeed());
+ } else {
+ return Shape::Hash(key);
+ }
+ }
+
+ inline uint32_t HashForObject(Key key, Object* object) {
+ if (Shape::UsesSeed) {
+ return Shape::SeededHashForObject(key,
+ GetHeap()->HashSeed(), object);
+ } else {
+ return Shape::HashForObject(key, object);
+ }
+ }
+
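  [Editorial sketch, not part of the change: a hypothetical shape illustrating
  the dispatch above. A shape that opts into seeded hashing sets UsesSeed and
  supplies the Seeded* hooks; unseeded shapes keep BaseShape's asserting
  defaults. ComputeIntegerHash with a seed is used elsewhere in this patch.]

    class ExampleSeededShape : public BaseShape<uint32_t> {
     public:
      static const bool UsesSeed = true;
      static inline bool IsMatch(uint32_t key, Object* other);
      // HashTable<ExampleSeededShape, uint32_t>::Hash() routes here because
      // UsesSeed is true.
      static uint32_t SeededHash(uint32_t key, uint32_t seed) {
        return ComputeIntegerHash(key, seed);
      }
      static uint32_t SeededHashForObject(uint32_t key,
                                          uint32_t seed,
                                          Object* object) {
        return ComputeIntegerHash(key, seed);
      }
    };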
// Returns the number of elements in the hash table.
int NumberOfElements() {
return Smi::cast(get(kNumberOfElementsIndex))->value();
@@ -2657,7 +2773,6 @@
};
-
// HashTableKey is an abstract superclass for virtual key behavior.
class HashTableKey {
public:
@@ -2674,7 +2789,8 @@
virtual ~HashTableKey() {}
};
-class SymbolTableShape {
+
+class SymbolTableShape : public BaseShape<HashTableKey*> {
public:
static inline bool IsMatch(HashTableKey* key, Object* value) {
return key->IsMatch(value);
@@ -2733,7 +2849,7 @@
};
-class MapCacheShape {
+class MapCacheShape : public BaseShape<HashTableKey*> {
public:
static inline bool IsMatch(HashTableKey* key, Object* value) {
return key->IsMatch(value);
@@ -2889,7 +3005,7 @@
};
-class StringDictionaryShape {
+class StringDictionaryShape : public BaseShape<String*> {
public:
static inline bool IsMatch(String* key, Object* other);
static inline uint32_t Hash(String* key);
@@ -2922,23 +3038,42 @@
};
-class NumberDictionaryShape {
+class NumberDictionaryShape : public BaseShape<uint32_t> {
public:
static inline bool IsMatch(uint32_t key, Object* other);
- static inline uint32_t Hash(uint32_t key);
- static inline uint32_t HashForObject(uint32_t key, Object* object);
MUST_USE_RESULT static inline MaybeObject* AsObject(uint32_t key);
- static const int kPrefixSize = 2;
static const int kEntrySize = 3;
static const bool kIsEnumerable = false;
};
-class NumberDictionary: public Dictionary<NumberDictionaryShape, uint32_t> {
+class SeededNumberDictionaryShape : public NumberDictionaryShape {
public:
- static NumberDictionary* cast(Object* obj) {
+ static const bool UsesSeed = true;
+ static const int kPrefixSize = 2;
+
+ static inline uint32_t SeededHash(uint32_t key, uint32_t seed);
+ static inline uint32_t SeededHashForObject(uint32_t key,
+ uint32_t seed,
+ Object* object);
+};
+
+
+class UnseededNumberDictionaryShape : public NumberDictionaryShape {
+ public:
+ static const int kPrefixSize = 0;
+
+ static inline uint32_t Hash(uint32_t key);
+ static inline uint32_t HashForObject(uint32_t key, Object* object);
+};
+
+
+class SeededNumberDictionary
+ : public Dictionary<SeededNumberDictionaryShape, uint32_t> {
+ public:
+ static SeededNumberDictionary* cast(Object* obj) {
ASSERT(obj->IsDictionary());
- return reinterpret_cast<NumberDictionary*>(obj);
+ return reinterpret_cast<SeededNumberDictionary*>(obj);
}
// Type specific at put (default NONE attributes is used when adding).
@@ -2948,6 +3083,13 @@
PropertyDetails details);
// Set an existing entry or add a new one if needed.
+ // Return the updated dictionary.
+ MUST_USE_RESULT static Handle<SeededNumberDictionary> Set(
+ Handle<SeededNumberDictionary> dictionary,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyDetails details);
+
MUST_USE_RESULT MaybeObject* Set(uint32_t key,
Object* value,
PropertyDetails details);
@@ -2974,8 +3116,31 @@
};
+class UnseededNumberDictionary
+ : public Dictionary<UnseededNumberDictionaryShape, uint32_t> {
+ public:
+ static UnseededNumberDictionary* cast(Object* obj) {
+ ASSERT(obj->IsDictionary());
+ return reinterpret_cast<UnseededNumberDictionary*>(obj);
+ }
+
+ // Type specific at put (default NONE attributes is used when adding).
+ MUST_USE_RESULT MaybeObject* AtNumberPut(uint32_t key, Object* value);
+ MUST_USE_RESULT MaybeObject* AddNumberEntry(uint32_t key, Object* value);
+
+ // Set an existing entry or add a new one if needed.
+ // Return the updated dictionary.
+ MUST_USE_RESULT static Handle<UnseededNumberDictionary> Set(
+ Handle<UnseededNumberDictionary> dictionary,
+ uint32_t index,
+ Handle<Object> value);
+
+ MUST_USE_RESULT MaybeObject* Set(uint32_t key, Object* value);
+};
+
+
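  [Editorial sketch: a hypothetical call site for the new handle-based
  setters. The handle names and the PropertyDetails(NONE, NORMAL) argument are
  illustrative; the static Set() may reallocate the backing store, so the
  returned handle must replace the old one.]

    // dict, unseeded, index and value are assumed to be in scope.
    dict = SeededNumberDictionary::Set(dict, index, value,
                                       PropertyDetails(NONE, NORMAL));
    unseeded = UnseededNumberDictionary::Set(unseeded, index, value);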
template <int entrysize>
-class ObjectHashTableShape {
+class ObjectHashTableShape : public BaseShape<Object*> {
public:
static inline bool IsMatch(Object* key, Object* other);
static inline uint32_t Hash(Object* key);
@@ -3886,11 +4051,10 @@
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
- // [code_flushing_candidate]: Field only used during garbage
- // collection to hold code flushing candidates. The contents of this
+ // [gc_metadata]: Field used to hold GC related metadata. The contents of this
// field do not have to be traced during garbage collection since
// they are only used by the garbage collector itself.
- DECL_ACCESSORS(next_code_flushing_candidate, Object)
+ DECL_ACCESSORS(gc_metadata, Object)
// Unchecked accessors to be used during GC.
inline ByteArray* unchecked_relocation_info();
@@ -4114,10 +4278,8 @@
static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
static const int kDeoptimizationDataOffset =
kHandlerTableOffset + kPointerSize;
- static const int kNextCodeFlushingCandidateOffset =
- kDeoptimizationDataOffset + kPointerSize;
- static const int kFlagsOffset =
- kNextCodeFlushingCandidateOffset + kPointerSize;
+ static const int kGCMetadataOffset = kDeoptimizationDataOffset + kPointerSize;
+ static const int kFlagsOffset = kGCMetadataOffset + kPointerSize;
static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
static const int kKindSpecificFlagsSize = 2 * kIntSize;
@@ -4328,6 +4490,11 @@
return elements_kind() == DICTIONARY_ELEMENTS;
}
+ inline bool has_slow_elements_kind() {
+ return elements_kind() == DICTIONARY_ELEMENTS
+ || elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+ }
+
static bool IsValidElementsTransition(ElementsKind from_kind,
ElementsKind to_kind);
@@ -4766,7 +4933,10 @@
V(Math, atan, MathATan) \
V(Math, exp, MathExp) \
V(Math, sqrt, MathSqrt) \
- V(Math, pow, MathPow)
+ V(Math, pow, MathPow) \
+ V(Math, random, MathRandom) \
+ V(Math, max, MathMax) \
+ V(Math, min, MathMin)
enum BuiltinFunctionId {
@@ -5887,7 +6057,7 @@
};
-class CompilationCacheShape {
+class CompilationCacheShape : public BaseShape<HashTableKey*> {
public:
static inline bool IsMatch(HashTableKey* key, Object* value) {
return key->IsMatch(value);
@@ -5991,7 +6161,7 @@
};
-class CodeCacheHashTableShape {
+class CodeCacheHashTableShape : public BaseShape<HashTableKey*> {
public:
static inline bool IsMatch(HashTableKey* key, Object* value) {
return key->IsMatch(value);
@@ -6092,7 +6262,7 @@
class StringHasher {
public:
- explicit inline StringHasher(int length);
+ explicit inline StringHasher(int length, uint32_t seed);
// Returns true if the hash of this string can be computed without
// looking at the contents.
@@ -6123,6 +6293,11 @@
// value is represented as a decimal value.
static uint32_t MakeArrayIndexHash(uint32_t value, int length);
+ // No string is allowed to have a hash of zero. That value is reserved
+ // for internal properties. If the hash calculation yields zero then we
+ // use 27 instead.
+ static const int kZeroHash = 27;
+
private:
uint32_t array_index() {
ASSERT(is_array_index());
@@ -6143,7 +6318,9 @@
// Calculates string hash.
template <typename schar>
-inline uint32_t HashSequentialString(const schar* chars, int length);
+inline uint32_t HashSequentialString(const schar* chars,
+ int length,
+ uint32_t seed);
// The characteristics of a string are stored in its map. Retrieving these
@@ -6315,7 +6492,7 @@
inline String* GetUnderlying();
// Mark the string as an undetectable object. It only applies to
- // ascii and two byte string types.
+ // ASCII and two byte string types.
bool MarkAsUndetectable();
// Return a substring.
@@ -6366,7 +6543,8 @@
inline uint32_t Hash();
static uint32_t ComputeHashField(unibrow::CharacterStream* buffer,
- int length);
+ int length,
+ uint32_t seed);
static bool ComputeArrayIndex(unibrow::CharacterStream* buffer,
uint32_t* index,
@@ -6411,14 +6589,11 @@
// value into an array index.
static const int kMaxArrayIndexSize = 10;
- // Max ascii char code.
+ // Max ASCII char code.
static const int kMaxAsciiCharCode = unibrow::Utf8::kMaxOneByteChar;
static const unsigned kMaxAsciiCharCodeU = unibrow::Utf8::kMaxOneByteChar;
static const int kMaxUC16CharCode = 0xffff;
- // Minimum length for a cons string.
- static const int kMinNonFlatLength = 13;
-
// Mask constant for checking if a string has a computed hash code
// and if it is an array index. The least significant bit indicates
// whether a hash code has been computed. If the hash code has been
@@ -6431,6 +6606,10 @@
// Shift constant retrieving hash code from hash field.
static const int kHashShift = kNofHashBitFields;
+ // Only these bits are relevant in the hash, since the top two are shifted
+ // out.
+ static const uint32_t kHashBitMask = 0xffffffffu >> kHashShift;
+
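  [Editorial sketch of how kZeroHash (above) and kHashBitMask fit together;
  the shape of this helper is an assumption, the real logic lives in
  StringHasher.]

    uint32_t FinalizeStringHash(uint32_t hash) {
      // Keep only the bits that survive the kHashShift extraction.
      hash &= String::kHashBitMask;
      // No string may hash to zero; remap to the reserved replacement (27).
      if (hash == 0) hash = StringHasher::kZeroHash;
      return hash;
    }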
// Array index strings this short can keep their index in the hash
// field.
static const int kMaxCachedArrayIndexLength = 7;
@@ -6593,8 +6772,8 @@
};
-// The AsciiString class captures sequential ascii string objects.
-// Each character in the AsciiString is an ascii character.
+// The AsciiString class captures sequential ASCII string objects.
+// Each character in the AsciiString is an ASCII character.
class SeqAsciiString: public SeqString {
public:
static const bool kHasAsciiEncoding = true;
@@ -7376,8 +7555,12 @@
// capacity is non-zero.
MUST_USE_RESULT MaybeObject* Initialize(int capacity);
+ // Initializes the array to a certain length.
+ inline bool AllowsSetElementsLength();
+ MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length);
+
// Set the content of the array to the content of storage.
- inline MaybeObject* SetContent(FixedArray* storage);
+ inline MaybeObject* SetContent(FixedArrayBase* storage);
// Casting.
static inline JSArray* cast(Object* obj);
@@ -7492,6 +7675,35 @@
};
+// Support for JavaScript accessors: A pair of a getter and a setter. Each
+// accessor can either be
+// * a pointer to a JavaScript function or proxy: a real accessor
+// * undefined: considered an accessor by the spec, too, strangely enough
+// * the hole: an accessor which has not been set
+// * a pointer to a map: a transition used to ensure map sharing
+class AccessorPair: public Struct {
+ public:
+ DECL_ACCESSORS(getter, Object)
+ DECL_ACCESSORS(setter, Object)
+
+ static inline AccessorPair* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+ void AccessorPairPrint(FILE* out = stdout);
+#endif
+#ifdef DEBUG
+ void AccessorPairVerify();
+#endif
+
+ static const int kGetterOffset = HeapObject::kHeaderSize;
+ static const int kSetterOffset = kGetterOffset + kPointerSize;
+ static const int kSize = kSetterOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorPair);
+};
+
+
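  [Editorial note: consumption of AccessorPair, as exercised by the
  profile-generator change later in this same diff.]

    AccessorPair* accessors = AccessorPair::cast(callback_obj);
    Object* getter = accessors->getter();  // a function/proxy, undefined,
    Object* setter = accessors->setter();  // the hole, or a transition map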
class AccessCheckInfo: public Struct {
public:
DECL_ACCESSORS(named_callback, Object)
@@ -7592,7 +7804,8 @@
static const int kTagOffset = HeapObject::kHeaderSize;
static const int kPropertyListOffset = kTagOffset + kPointerSize;
static const int kHeaderSize = kPropertyListOffset + kPointerSize;
- protected:
+
+ private:
DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
};
@@ -7858,6 +8071,34 @@
#undef DECL_BOOLEAN_ACCESSORS
#undef DECL_ACCESSORS
+#define VISITOR_SYNCHRONIZATION_TAGS_LIST(V) \
+ V(kSymbolTable, "symbol_table", "(Symbols)") \
+ V(kExternalStringsTable, "external_strings_table", "(External strings)") \
+ V(kStrongRootList, "strong_root_list", "(Strong roots)") \
+ V(kSymbol, "symbol", "(Symbol)") \
+ V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
+ V(kTop, "top", "(Isolate)") \
+ V(kRelocatable, "relocatable", "(Relocatable)") \
+ V(kDebug, "debug", "(Debugger)") \
+ V(kCompilationCache, "compilationcache", "(Compilation cache)") \
+ V(kHandleScope, "handlescope", "(Handle scope)") \
+ V(kBuiltins, "builtins", "(Builtins)") \
+ V(kGlobalHandles, "globalhandles", "(Global handles)") \
+ V(kThreadManager, "threadmanager", "(Thread manager)") \
+ V(kExtensions, "Extensions", "(Extensions)")
+
+class VisitorSynchronization : public AllStatic {
+ public:
+#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
+ enum SyncTag {
+ VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_ENUM)
+ kNumberOfSyncTags
+ };
+#undef DECLARE_ENUM
+
+ static const char* const kTags[kNumberOfSyncTags];
+ static const char* const kTagNames[kNumberOfSyncTags];
+};
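  [Editorial note: the DECLARE_ENUM application expands to one enumerator per
  V(...) entry in list order, i.e. roughly:]

    enum SyncTag {
      kSymbolTable,
      kExternalStringsTable,
      kStrongRootList,
      // ... remaining tags in list order ...
      kExtensions,
      kNumberOfSyncTags  // == 14 for the list above
    };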
// Abstract base class for visiting, and optionally modifying, the
// pointers contained in Objects. Used in GC and serialization/deserialization.
@@ -7913,13 +8154,10 @@
// Visits a handle that has an embedder-assigned class ID.
virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {}
-#ifdef DEBUG
// Intended for serialization/deserialization checking: insert, or
// check for the presence of, a tag at this position in the stream.
- virtual void Synchronize(const char* tag) {}
-#else
- inline void Synchronize(const char* tag) {}
-#endif
+ // Also used for marking up GC roots in heap snapshots.
+ virtual void Synchronize(VisitorSynchronization::SyncTag tag) {}
};
diff --git a/src/parser.cc b/src/parser.cc
index 51036c9..35cc1c9 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -1186,8 +1186,8 @@
if (directive_prologue) {
// A shot at a directive.
- ExpressionStatement *e_stat;
- Literal *literal;
+ ExpressionStatement* e_stat;
+ Literal* literal;
// Still processing directive prologue?
if ((e_stat = stat->AsExpressionStatement()) != NULL &&
(literal = e_stat->expression()->AsLiteral()) != NULL &&
@@ -1562,7 +1562,7 @@
// TODO(1240846): It's weird that native function declarations are
// introduced dynamically when we meet their declarations, whereas
- // other functions are setup when entering the surrounding scope.
+ // other functions are set up when entering the surrounding scope.
SharedFunctionInfoLiteral* lit =
new(zone()) SharedFunctionInfoLiteral(isolate(), shared);
VariableProxy* var = Declare(name, VAR, NULL, true, CHECK_OK);
@@ -2695,6 +2695,7 @@
// Assignment to eval or arguments is disallowed in strict mode.
CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
}
+ MarkAsLValue(expression);
Token::Value op = Next(); // Get assignment operator.
int pos = scanner().location().beg_pos;
@@ -2928,6 +2929,7 @@
// Prefix expression operand in strict mode may not be eval or arguments.
CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
}
+ MarkAsLValue(expression);
int position = scanner().location().beg_pos;
return new(zone()) CountOperation(isolate(),
@@ -2963,6 +2965,7 @@
// Postfix expression operand in strict mode may not be eval or arguments.
CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
}
+ MarkAsLValue(expression);
Token::Value next = Next();
int position = scanner().location().beg_pos;
@@ -3389,6 +3392,7 @@
isolate()->factory()->NewFixedArray(values->length(), TENURED);
Handle<FixedDoubleArray> double_literals;
ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
+ bool has_only_undefined_values = true;
// Fill in the literals.
bool is_simple = true;
@@ -3412,6 +3416,7 @@
// FAST_DOUBLE_ELEMENTS and FAST_ELEMENTS as necessary. Always remember
// the tagged value, no matter what the ElementsKind is in case we
// ultimately end up in FAST_ELEMENTS.
+ has_only_undefined_values = false;
object_literals->set(i, *boilerplate_value);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
// Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
@@ -3450,6 +3455,13 @@
}
}
+ // Very small array literals that don't have a concrete hint about their type
+ // from a constant value should default to the slow case to avoid lots of
+ // elements transitions on really small objects.
+ if (has_only_undefined_values && values->length() <= 2) {
+ elements_kind = FAST_ELEMENTS;
+ }
+
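+  // Sketch of the effect (illustrative JS):
+  //   var small = [f(), g()];  // <= 2 elements, no constant-value type hint
+  // now starts as FAST_ELEMENTS, so a later non-smi store does not force a
+  // FAST_SMI_ONLY -> FAST elements-kind transition on a tiny object.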
// Simple and shallow arrays can be lazily copied, we transform the
// elements array to a copy-on-write array.
if (is_simple && depth == 1 && values->length() > 0 &&
@@ -3595,7 +3607,7 @@
ASSERT(property != NULL);
- Literal *lit = property->key();
+ Literal* lit = property->key();
Handle<Object> handle = lit->handle();
uint32_t hash;
@@ -3745,7 +3757,7 @@
RelocInfo::kNoPosition,
FunctionLiteral::ANONYMOUS_EXPRESSION,
CHECK_OK);
- // Allow any number of parameters for compatiabilty with JSC.
+ // Allow any number of parameters for compatibility with JSC.
// Specification only allows zero parameters for get and one for set.
ObjectLiteral::Property* property =
new(zone()) ObjectLiteral::Property(is_getter, value);
@@ -4493,6 +4505,15 @@
}
+void Parser::MarkAsLValue(Expression* expression) {
+ VariableProxy* proxy = expression != NULL
+ ? expression->AsVariableProxy()
+ : NULL;
+
+ if (proxy != NULL) proxy->MarkAsLValue();
+}
+
+
// Checks LHS expression for assignment and prefix/postfix increment/decrement
// in strict mode.
void Parser::CheckStrictModeLValue(Expression* expression,
diff --git a/src/parser.h b/src/parser.h
index 75f8e10..16c2eff 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -186,7 +186,7 @@
// ----------------------------------------------------------------------------
// REGEXP PARSING
-// A BuffferedZoneList is an automatically growing list, just like (and backed
+// A BufferedZoneList is an automatically growing list, just like (and backed
// by) a ZoneList, that is optimized for the case of adding and removing
// a single element. The last element added is stored outside the backing list,
// and if no more than one element is ever added, the ZoneList isn't even
@@ -661,6 +661,11 @@
bool* is_set,
bool* ok);
+ // Determine if the expression is a variable proxy and mark it as being used
+ // in an assignment or with an increment/decrement operator. This is
+ // currently used for statically checking assignments to harmony const
+ // bindings.
+ void MarkAsLValue(Expression* expression);
+
// Strict mode validation of LValue expressions
void CheckStrictModeLValue(Expression* expression,
const char* error,
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 942e764..028da77 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -61,7 +61,7 @@
static Mutex* limit_mutex = NULL;
-void OS::Setup() {
+void OS::SetUp() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
// to an unsigned. Going directly can cause an overflow and the seed to be
@@ -114,7 +114,7 @@
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
+// and verification). The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -290,7 +290,7 @@
}
LOG(isolate, SharedLibraryEvent(lib_name, start, end));
} else {
- // Entry not describing executable data. Skip to end of line to setup
+ // Entry not describing executable data. Skip to end of line to set up
// reading the next entry.
do {
c = getc(fp);
@@ -733,6 +733,7 @@
static Mutex* mutex_;
static SamplerThread* instance_;
+ private:
DISALLOW_COPY_AND_ASSIGN(SamplerThread);
};
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 61fc1b5..95089d6 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -79,7 +79,7 @@
static Mutex* limit_mutex = NULL;
-void OS::Setup() {
+void OS::SetUp() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
// to an unsigned. Going directly can cause an overflow and the seed to be
@@ -128,7 +128,7 @@
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
+// and verification). The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -470,15 +470,8 @@
Thread::Thread(const Options& options)
: data_(new PlatformData),
- stack_size_(options.stack_size) {
- set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
- : data_(new PlatformData),
- stack_size_(0) {
- set_name(name);
+ stack_size_(options.stack_size()) {
+ set_name(options.name());
}
@@ -723,8 +716,10 @@
FULL_INTERVAL
};
+ static const int kSignalSenderStackSize = 32 * KB;
+
explicit SignalSender(int interval)
- : Thread("SignalSender"),
+ : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
interval_(interval) {}
static void AddActiveSampler(Sampler* sampler) {
@@ -846,6 +841,7 @@
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
+ private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 408e0c0..89a76da 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -78,7 +78,7 @@
static Mutex* limit_mutex = NULL;
-void OS::Setup() {
+void OS::SetUp() {
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = Ticks() ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
@@ -326,7 +326,7 @@
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
+// and verification). The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -512,7 +512,7 @@
}
LOG(isolate, SharedLibraryEvent(lib_name, start, end));
} else {
- // Entry not describing executable data. Skip to end of line to setup
+ // Entry not describing executable data. Skip to end of line to set up
// reading the next entry.
do {
c = getc(fp);
@@ -726,15 +726,8 @@
Thread::Thread(const Options& options)
: data_(new PlatformData()),
- stack_size_(options.stack_size) {
- set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
- : data_(new PlatformData()),
- stack_size_(0) {
- set_name(name);
+ stack_size_(options.stack_size()) {
+ set_name(options.name());
}
@@ -1073,8 +1066,10 @@
FULL_INTERVAL
};
+ static const int kSignalSenderStackSize = 32 * KB;
+
explicit SignalSender(int interval)
- : Thread("SignalSender"),
+ : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
vm_tgid_(getpid()),
interval_(interval) {}
@@ -1189,6 +1184,9 @@
// occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
if (full_or_half == HALF_INTERVAL) interval /= 2;
+#if defined(ANDROID)
+ usleep(interval);
+#else
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
@@ -1198,8 +1196,9 @@
errno);
ASSERT(result == 0 || errno == EINTR);
}
-#endif
+#endif // DEBUG
USE(result);
+#endif // ANDROID
}
const int vm_tgid_;
@@ -1212,6 +1211,7 @@
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
+ private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 0f9b958..497118d 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -75,7 +75,7 @@
namespace v8 {
namespace internal {
-// 0 is never a valid thread id on MacOSX since a ptread_t is
+// 0 is never a valid thread id on MacOSX since a pthread_t is
// a pointer.
static const pthread_t kNoThread = (pthread_t) 0;
@@ -93,7 +93,7 @@
static Mutex* limit_mutex = NULL;
-void OS::Setup() {
+void OS::SetUp() {
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = Ticks() ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
@@ -103,7 +103,7 @@
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
+// and verification). The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -479,17 +479,11 @@
pthread_t thread_; // Thread handle for pthread.
};
+
Thread::Thread(const Options& options)
: data_(new PlatformData),
- stack_size_(options.stack_size) {
- set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
- : data_(new PlatformData),
- stack_size_(0) {
- set_name(name);
+ stack_size_(options.stack_size()) {
+ set_name(options.name());
}
@@ -742,10 +736,13 @@
thread_act_t profiled_thread_;
};
+
class SamplerThread : public Thread {
public:
+ static const int kSamplerThreadStackSize = 32 * KB;
+
explicit SamplerThread(int interval)
- : Thread("SamplerThread"),
+ : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
interval_(interval) {}
static void AddActiveSampler(Sampler* sampler) {
@@ -860,6 +857,7 @@
static Mutex* mutex_;
static SamplerThread* instance_;
+ private:
DISALLOW_COPY_AND_ASSIGN(SamplerThread);
};
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index a59a926..918327a 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -56,7 +56,7 @@
// Initialize OS class early in the V8 startup.
-void OS::Setup() {
+void OS::SetUp() {
// Seed the random number generator.
UNIMPLEMENTED();
}
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index f044a6e..ca4aab4 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Platform specific code for OpenBSD goes here. For the POSIX comaptible parts
-// the implementation is in platform-posix.cc.
+// Platform specific code for OpenBSD and NetBSD goes here. For the POSIX
+// compatible parts the implementation is in platform-posix.cc.
#include <pthread.h>
#include <semaphore.h>
@@ -99,7 +99,7 @@
}
-void OS::Setup() {
+void OS::SetUp() {
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = Ticks() ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
@@ -146,7 +146,7 @@
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
+// and verification). The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -312,7 +312,7 @@
}
LOG(isolate, SharedLibraryEvent(lib_name, start, end));
} else {
- // Entry not describing executable data. Skip to end of line to setup
+ // Entry not describing executable data. Skip to end of line to set up
// reading the next entry.
do {
c = getc(fp);
@@ -518,15 +518,8 @@
Thread::Thread(const Options& options)
: data_(new PlatformData()),
- stack_size_(options.stack_size) {
- set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
- : data_(new PlatformData()),
- stack_size_(0) {
- set_name(name);
+ stack_size_(options.stack_size()) {
+ set_name(options.name());
}
@@ -748,8 +741,20 @@
if (sample == NULL) sample = &sample_obj;
// Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
sample->state = isolate->current_vm_state();
+ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+#ifdef __NetBSD__
+ mcontext_t& mcontext = ucontext->uc_mcontext;
+#if V8_HOST_ARCH_IA32
+ sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
+#elif V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
+#endif // V8_HOST_ARCH
+#else // OpenBSD
#if V8_HOST_ARCH_IA32
sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
@@ -758,7 +763,8 @@
sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
-#endif
+#endif // V8_HOST_ARCH
+#endif // __NetBSD__
sampler->SampleStack(sample);
sampler->Tick(sample);
}
@@ -782,8 +788,10 @@
FULL_INTERVAL
};
+ static const int kSignalSenderStackSize = 32 * KB;
+
explicit SignalSender(int interval)
- : Thread("SignalSender"),
+ : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
vm_tgid_(getpid()),
interval_(interval) {}
@@ -916,6 +924,7 @@
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
+ private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 08417ff..34fd5c4 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -461,7 +461,7 @@
}
-bool Socket::Setup() {
+bool Socket::SetUp() {
// Nothing to do on POSIX.
return true;
}
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index ca6443b..4ba927f 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -53,6 +53,7 @@
#include "v8.h"
#include "platform.h"
+#include "v8threads.h"
#include "vm-state-inl.h"
@@ -89,7 +90,7 @@
static Mutex* limit_mutex = NULL;
-void OS::Setup() {
+void OS::SetUp() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
// to an unsigned. Going directly will cause an overflow and the seed to be
@@ -139,7 +140,7 @@
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
+// and verification). The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -322,23 +323,81 @@
static const int kMmapFdOffset = 0;
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(NULL, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
+ address_ = ReserveRegion(size);
size_ = size;
}
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(OS::GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ Address base = static_cast<Address>(reservation);
+ Address aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
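+
+// Worked instance of the trim above (illustrative numbers, assumed 4 KB
+// pages): reserving 40 KB aligned to 64 KB.
+//   request_size = RoundUp(40 KB + 64 KB, 4 KB) = 104 KB
+//   base         = 0x7f0000003000  (mmap result, not 64 KB aligned)
+//   aligned_base = 0x7f0000010000  (rounded up to the 64 KB boundary)
+//   prefix freed = aligned_base - base = 52 KB
+//   suffix freed = 104 KB - 52 KB - 40 KB = 12 KB
+// leaving exactly 40 KB starting at a 64 KB boundary, so the final
+// ASSERT(aligned_size == request_size) holds.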
+
+
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
}
}
bool VirtualMemory::IsReserved() {
- return address_ != MAP_FAILED;
+ return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
}
@@ -373,15 +432,23 @@
return false;
}
- UpdateAllocatedSpaceLimits(address, size);
+ UpdateAllocatedSpaceLimits(base, size);
return true;
}
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mmap(base,
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
}
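[Editorial note on the split above: Uncommit remaps the pages PROT_NONE but
keeps the reservation, while Release actually munmaps. Assumed lifecycle,
as a sketch:]

  VirtualMemory vm(size);                // ReserveRegion: PROT_NONE mmap
  vm.Commit(vm.address(), size, false);  // CommitRegion: readable/writable
  vm.Uncommit(vm.address(), size);       // back to PROT_NONE, still reserved
  // ~VirtualMemory(): ReleaseRegion, i.e. munmap, returns it to the OS.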
@@ -392,17 +459,11 @@
pthread_t thread_; // Thread handle for pthread.
};
+
Thread::Thread(const Options& options)
: data_(new PlatformData()),
- stack_size_(options.stack_size) {
- set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
- : data_(new PlatformData()),
- stack_size_(0) {
- set_name(name);
+ stack_size_(options.stack_size()) {
+ set_name(options.name());
}
@@ -649,8 +710,10 @@
FULL_INTERVAL
};
+ static const int kSignalSenderStackSize = 32 * KB;
+
explicit SignalSender(int interval)
- : Thread("SignalSender"),
+ : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
interval_(interval) {}
static void InstallSignalHandler() {
@@ -782,6 +845,7 @@
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
+ private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 8bbdcb2..01f3017 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -198,7 +198,7 @@
// ----------------------------------------------------------------------------
// The Time class represents time on win32. A timestamp is represented as
-// a 64-bit integer in 100 nano-seconds since January 1, 1601 (UTC). JavaScript
+// a 64-bit integer in 100 nanoseconds since January 1, 1601 (UTC). JavaScript
// timestamps are represented as doubles in milliseconds since 00:00:00 UTC,
// January 1, 1970.
@@ -528,7 +528,7 @@
}
-void OS::Setup() {
+void OS::SetUp() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
// to an unsigned. Going directly can cause an overflow and the seed to be
@@ -776,7 +776,7 @@
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
-// and verification). The estimate is conservative, ie, not all addresses in
+// and verification). The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -831,11 +831,6 @@
}
-intptr_t OS::CommitPageSize() {
- return 4096;
-}
-
-
static void* GetRandomAddr() {
Isolate* isolate = Isolate::UncheckedCurrent();
// Note that the current isolate isn't set up in a call path via
@@ -913,6 +908,11 @@
}
+intptr_t OS::CommitPageSize() {
+ return 4096;
+}
+
+
void OS::ProtectCode(void* address, const size_t size) {
DWORD old_protect;
VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
@@ -1555,16 +1555,9 @@
// handle until it is started.
Thread::Thread(const Options& options)
- : stack_size_(options.stack_size) {
+ : stack_size_(options.stack_size()) {
data_ = new PlatformData(kNoThread);
- set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
- : stack_size_(0) {
- data_ = new PlatformData(kNoThread);
- set_name(name);
+ set_name(options.name());
}
@@ -1854,7 +1847,7 @@
}
-bool Socket::Setup() {
+bool Socket::SetUp() {
// Initialize Winsock32
int err;
WSADATA winsock_data;
@@ -1930,8 +1923,10 @@
class SamplerThread : public Thread {
public:
+ static const int kSamplerThreadStackSize = 32 * KB;
+
explicit SamplerThread(int interval)
- : Thread("SamplerThread"),
+ : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
interval_(interval) {}
static void AddActiveSampler(Sampler* sampler) {
@@ -2035,6 +2030,7 @@
static Mutex* mutex_;
static SamplerThread* instance_;
+ private:
DISALLOW_COPY_AND_ASSIGN(SamplerThread);
};
diff --git a/src/platform.h b/src/platform.h
index 726f9ca..38e633a 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -109,7 +109,7 @@
class OS {
public:
// Initializes the platform OS support. Called once at VM startup.
- static void Setup();
+ static void SetUp();
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
@@ -172,6 +172,10 @@
bool is_executable);
static void Free(void* address, const size_t size);
+ // This is the granularity at which the ProtectCode(...) call can set page
+ // permissions.
+ static intptr_t CommitPageSize();
+
// Mark code segments non-writable.
static void ProtectCode(void* address, const size_t size);
@@ -184,8 +188,6 @@
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
- static intptr_t CommitPageSize();
-
// Returns an indication of whether a pointer is in a space that
// has been allocated by Allocate(). This method may conservatively
// always return false, but giving more accurate information may
@@ -413,16 +415,22 @@
LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
};
- struct Options {
- Options() : name("v8:<unknown>"), stack_size(0) {}
+ class Options {
+ public:
+ Options() : name_("v8:<unknown>"), stack_size_(0) {}
+ Options(const char* name, int stack_size = 0)
+ : name_(name), stack_size_(stack_size) {}
- const char* name;
- int stack_size;
+ const char* name() const { return name_; }
+ int stack_size() const { return stack_size_; }
+
+ private:
+ const char* name_;
+ int stack_size_;
};
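  [Editorial note: usage mirrors the sampler-thread call sites earlier in this
  diff; the worker name and stack size here are illustrative.]

    Thread::Options options("v8:Worker", 256 * KB);  // hypothetical values
    // Thread subclasses now pass Options explicitly; the removed
    // Thread(const char* name) overload previously implied stack_size == 0,
    // i.e. the OS default.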
// Create new thread.
explicit Thread(const Options& options);
- explicit Thread(const char* name);
virtual ~Thread();
// Start new thread by calling the Run() method in the new thread.
@@ -478,7 +486,7 @@
PlatformData* data() { return data_; }
private:
- void set_name(const char *name);
+ void set_name(const char* name);
PlatformData* data_;
@@ -554,7 +562,7 @@
virtual void Wait() = 0;
// Suspends the calling thread until the counter is non zero or the timeout
- // time has passsed. If timeout happens the return value is false and the
+ // time has passed. If timeout happens the return value is false and the
// counter is unchanged. Otherwise the semaphore counter is decremented and
// true is returned. The timeout value is specified in microseconds.
virtual bool Wait(int timeout) = 0;
@@ -594,7 +602,7 @@
virtual bool IsValid() const = 0;
- static bool Setup();
+ static bool SetUp();
static int LastError();
static uint16_t HToN(uint16_t value);
static uint16_t NToH(uint16_t value);
diff --git a/src/preparser.h b/src/preparser.h
index fc8a4a0..f17bac2 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -630,7 +630,7 @@
void SetStrictModeViolation(i::Scanner::Location,
const char* type,
- bool *ok);
+ bool* ok);
void CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok);
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 37c76ce..a3a328c 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -447,6 +447,7 @@
void PrettyPrinter::PrintStatements(ZoneList<Statement*>* statements) {
+ if (statements == NULL) return;
for (int i = 0; i < statements->length(); i++) {
if (i != 0) Print(" ");
Visit(statements->at(i));
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index 88d6e87..7a70b01 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -95,6 +95,26 @@
}
+uint64_t HeapObjectsMap::GetNthGcSubrootId(int delta) {
+ return kGcRootsFirstSubrootId + delta * kObjectIdStep;
+}
+
+
+HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) {
+ return reinterpret_cast<HeapObject*>(
+ reinterpret_cast<char*>(kFirstGcSubrootObject) +
+ delta * HeapObjectsMap::kObjectIdStep);
+}
+
+
+int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
+ return static_cast<int>(
+ (reinterpret_cast<char*>(subroot) -
+ reinterpret_cast<char*>(kFirstGcSubrootObject)) /
+ HeapObjectsMap::kObjectIdStep);
+}
+
+
uint64_t HeapEntry::id() {
union {
Id stored_id;
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 5626aca..97de08e 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -110,7 +110,8 @@
Vector<char> dst = Vector<char>::New(len + 1);
OS::StrNCpy(dst, src, len);
dst[len] = '\0';
- uint32_t hash = HashSequentialString(dst.start(), len);
+ uint32_t hash =
+ HashSequentialString(dst.start(), len, HEAP->HashSeed());
return AddOrDisposeString(dst.start(), hash);
}
@@ -143,7 +144,8 @@
DeleteArray(str.start());
return format;
}
- uint32_t hash = HashSequentialString(str.start(), len);
+ uint32_t hash = HashSequentialString(
+ str.start(), len, HEAP->HashSeed());
return AddOrDisposeString(str.start(), hash);
}
@@ -153,7 +155,8 @@
int length = Min(kMaxNameSize, name->length());
SmartArrayPointer<char> data =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length);
- uint32_t hash = HashSequentialString(*data, length);
+ uint32_t hash =
+ HashSequentialString(*data, length, name->GetHeap()->HashSeed());
return AddOrDisposeString(data.Detach(), hash);
}
return "";
@@ -178,18 +181,21 @@
uint32_t CodeEntry::GetCallUid() const {
- uint32_t hash = ComputeIntegerHash(tag_);
+ uint32_t hash = ComputeIntegerHash(tag_, v8::internal::kZeroHashSeed);
if (shared_id_ != 0) {
- hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(shared_id_));
+ hash ^= ComputeIntegerHash(static_cast<uint32_t>(shared_id_),
+ v8::internal::kZeroHashSeed);
} else {
hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)));
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
+ v8::internal::kZeroHashSeed);
hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
+ v8::internal::kZeroHashSeed);
hash ^= ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
- hash ^= ComputeIntegerHash(line_number_);
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
+ v8::internal::kZeroHashSeed);
+ hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
}
return hash;
}
@@ -898,7 +904,7 @@
entry++;
}
- for (const Address *stack_pos = sample.stack,
+ for (const Address* stack_pos = sample.stack,
*stack_end = stack_pos + sample.frames_count;
stack_pos != stack_end;
++stack_pos) {
@@ -938,7 +944,7 @@
void HeapGraphEdge::Init(int child_index, Type type, int index, HeapEntry* to) {
- ASSERT(type == kElement || type == kHidden);
+ ASSERT(type == kElement || type == kHidden || type == kWeak);
child_index_ = child_index;
type_ = type;
index_ = index;
@@ -1053,8 +1059,11 @@
}
-void HeapEntry::Print(int max_depth, int indent) {
- OS::Print("%6d %6d [%llu] ", self_size(), RetainedSize(false), id());
+void HeapEntry::Print(
+ const char* prefix, const char* edge_name, int max_depth, int indent) {
+ OS::Print("%6d %7d @%6llu %*c %s%s: ",
+ self_size(), RetainedSize(false), id(),
+ indent, ' ', prefix, edge_name);
if (type() != kString) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
} else {
@@ -1073,29 +1082,40 @@
Vector<HeapGraphEdge> ch = children();
for (int i = 0; i < ch.length(); ++i) {
HeapGraphEdge& edge = ch[i];
+ const char* edge_prefix = "";
+ ScopedVector<char> index(64);
+ const char* edge_name = index.start();
switch (edge.type()) {
case HeapGraphEdge::kContextVariable:
- OS::Print(" %*c #%s: ", indent, ' ', edge.name());
+ edge_prefix = "#";
+ edge_name = edge.name();
break;
case HeapGraphEdge::kElement:
- OS::Print(" %*c %d: ", indent, ' ', edge.index());
+ OS::SNPrintF(index, "%d", edge.index());
break;
case HeapGraphEdge::kInternal:
- OS::Print(" %*c $%s: ", indent, ' ', edge.name());
+ edge_prefix = "$";
+ edge_name = edge.name();
break;
case HeapGraphEdge::kProperty:
- OS::Print(" %*c %s: ", indent, ' ', edge.name());
+ edge_name = edge.name();
break;
case HeapGraphEdge::kHidden:
- OS::Print(" %*c $%d: ", indent, ' ', edge.index());
+ edge_prefix = "$";
+ OS::SNPrintF(index, "%d", edge.index());
break;
case HeapGraphEdge::kShortcut:
- OS::Print(" %*c ^%s: ", indent, ' ', edge.name());
+ edge_prefix = "^";
+ edge_name = edge.name();
+ break;
+ case HeapGraphEdge::kWeak:
+ edge_prefix = "w";
+ OS::SNPrintF(index, "%d", edge.index());
break;
default:
- OS::Print("!!! unknown edge type: %d ", edge.type());
+ OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type());
}
- edge.to()->Print(max_depth, indent + 2);
+ edge.to()->Print(edge_prefix, edge_name, max_depth, indent + 2);
}
}
@@ -1211,10 +1231,13 @@
entries_sorted_(false) {
STATIC_ASSERT(
sizeof(HeapGraphEdge) ==
- SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapGraphEdgeSize); // NOLINT
+ SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize);
STATIC_ASSERT(
sizeof(HeapEntry) ==
- SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapEntrySize); // NOLINT
+ SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);
+ for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
+ gc_subroot_entries_[i] = NULL;
+ }
}
HeapSnapshot::~HeapSnapshot() {
@@ -1270,6 +1293,21 @@
}
+HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag,
+ int children_count,
+ int retainers_count) {
+ ASSERT(gc_subroot_entries_[tag] == NULL);
+ ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
+ return (gc_subroot_entries_[tag] = AddEntry(
+ HeapEntry::kObject,
+ VisitorSynchronization::kTagNames[tag],
+ HeapObjectsMap::GetNthGcSubrootId(tag),
+ 0,
+ children_count,
+ retainers_count));
+}
+
+
HeapEntry* HeapSnapshot::AddNativesRootEntry(int children_count,
int retainers_count) {
ASSERT(natives_root_entry_ == NULL);
@@ -1355,17 +1393,22 @@
void HeapSnapshot::Print(int max_depth) {
- root()->Print(max_depth, 0);
+ root()->Print("", "", max_depth, 0);
}
// We split IDs on evens for embedder objects (see
// HeapObjectsMap::GenerateId) and odds for native objects.
const uint64_t HeapObjectsMap::kInternalRootObjectId = 1;
-const uint64_t HeapObjectsMap::kGcRootsObjectId = 3;
-const uint64_t HeapObjectsMap::kNativesRootObjectId = 5;
-// Increase kFirstAvailableObjectId if new 'special' objects appear.
-const uint64_t HeapObjectsMap::kFirstAvailableObjectId = 7;
+const uint64_t HeapObjectsMap::kGcRootsObjectId =
+ HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep;
+const uint64_t HeapObjectsMap::kNativesRootObjectId =
+ HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
+const uint64_t HeapObjectsMap::kGcRootsFirstSubrootId =
+ HeapObjectsMap::kNativesRootObjectId + HeapObjectsMap::kObjectIdStep;
+const uint64_t HeapObjectsMap::kFirstAvailableObjectId =
+ HeapObjectsMap::kGcRootsFirstSubrootId +
+ VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
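+
+// Assuming kObjectIdStep == 2 (the literal it replaces in GetObjectId below),
+// the reserved IDs line up as:
+//   kInternalRootObjectId   = 1
+//   kGcRootsObjectId        = 3
+//   kNativesRootObjectId    = 5
+//   kGcRootsFirstSubrootId  = 7
+//   kFirstAvailableObjectId = 7 + 14 * 2 = 35  // 14 == kNumberOfSyncTags
+// keeping the other parity free for GenerateId() below, which shifts its
+// result left by one.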
HeapObjectsMap::HeapObjectsMap()
: initial_fill_mode_(true),
@@ -1391,7 +1434,7 @@
if (existing != 0) return existing;
}
uint64_t id = next_id_;
- next_id_ += 2;
+ next_id_ += kObjectIdStep;
AddEntry(addr, id);
return id;
}
@@ -1464,10 +1507,13 @@
uint64_t HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
uint64_t id = static_cast<uint64_t>(info->GetHash());
const char* label = info->GetLabel();
- id ^= HashSequentialString(label, static_cast<int>(strlen(label)));
+ id ^= HashSequentialString(label,
+ static_cast<int>(strlen(label)),
+ HEAP->HashSeed());
intptr_t element_count = info->GetElementCount();
if (element_count != -1)
- id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count));
+ id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count),
+ v8::internal::kZeroHashSeed);
return id << 1;
}
@@ -1549,7 +1595,7 @@
}
-HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder =
+HeapEntry* const HeapEntriesMap::kHeapEntryPlaceholder =
reinterpret_cast<HeapEntry*>(1);
HeapEntriesMap::HeapEntriesMap()
@@ -1678,12 +1724,18 @@
}
-HeapObject *const V8HeapExplorer::kInternalRootObject =
+HeapObject* const V8HeapExplorer::kInternalRootObject =
reinterpret_cast<HeapObject*>(
static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
-HeapObject *const V8HeapExplorer::kGcRootsObject =
+HeapObject* const V8HeapExplorer::kGcRootsObject =
reinterpret_cast<HeapObject*>(
static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
+HeapObject* const V8HeapExplorer::kFirstGcSubrootObject =
+ reinterpret_cast<HeapObject*>(
+ static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId));
+HeapObject* const V8HeapExplorer::kLastGcSubrootObject =
+ reinterpret_cast<HeapObject*>(
+ static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId));
V8HeapExplorer::V8HeapExplorer(
@@ -1716,6 +1768,11 @@
return snapshot_->AddRootEntry(children_count);
} else if (object == kGcRootsObject) {
return snapshot_->AddGcRootsEntry(children_count, retainers_count);
+ } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
+ return snapshot_->AddGcSubrootEntry(
+ GetGcSubrootOrder(object),
+ children_count,
+ retainers_count);
} else if (object->IsJSGlobalObject()) {
const char* tag = objects_tags_.GetTag(object);
const char* name = collection_->names()->GetName(
@@ -1779,6 +1836,18 @@
: "",
children_count,
retainers_count);
+ } else if (object->IsGlobalContext()) {
+ return AddEntry(object,
+ HeapEntry::kHidden,
+ "system / GlobalContext",
+ children_count,
+ retainers_count);
+ } else if (object->IsContext()) {
+ return AddEntry(object,
+ HeapEntry::kHidden,
+ "system / Context",
+ children_count,
+ retainers_count);
} else if (object->IsFixedArray() ||
object->IsFixedDoubleArray() ||
object->IsByteArray() ||
@@ -1818,9 +1887,38 @@
}
+class GcSubrootsEnumerator : public ObjectVisitor {
+ public:
+ GcSubrootsEnumerator(
+ SnapshotFillerInterface* filler, V8HeapExplorer* explorer)
+ : filler_(filler),
+ explorer_(explorer),
+ previous_object_count_(0),
+ object_count_(0) {
+ }
+ void VisitPointers(Object** start, Object** end) {
+ object_count_ += end - start;
+ }
+ void Synchronize(VisitorSynchronization::SyncTag tag) {
+ // Skip empty subroots.
+ if (previous_object_count_ != object_count_) {
+ previous_object_count_ = object_count_;
+ filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_);
+ }
+ }
+ private:
+ SnapshotFillerInterface* filler_;
+ V8HeapExplorer* explorer_;
+ intptr_t previous_object_count_;
+ intptr_t object_count_;
+};
+
+
void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
filler->AddEntry(kInternalRootObject, this);
filler->AddEntry(kGcRootsObject, this);
+ GcSubrootsEnumerator enumerator(filler, this);
+ heap_->IterateRoots(&enumerator, VISIT_ALL);
}
@@ -1939,6 +2037,11 @@
"literals_or_bindings",
js_fun->literals_or_bindings(),
JSFunction::kLiteralsOffset);
+ for (int i = JSFunction::kNonWeakFieldsEndOffset;
+ i < JSFunction::kSize;
+ i += kPointerSize) {
+ SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
+ }
}
TagObject(js_obj->properties(), "(object properties)");
SetInternalReference(obj, entry,
@@ -1965,8 +2068,14 @@
"(context func. result caches)");
TagObject(context->normalized_map_cache(), "(context norm. map cache)");
TagObject(context->runtime_context(), "(runtime context)");
- TagObject(context->map_cache(), "(context map cache)");
TagObject(context->data(), "(context data)");
+ for (int i = Context::FIRST_WEAK_SLOT;
+ i < Context::GLOBAL_CONTEXT_SLOTS;
+ ++i) {
+ SetWeakReference(obj, entry,
+ i, context->get(i),
+ FixedArray::OffsetOfElementAt(i));
+ }
} else if (obj->IsMap()) {
Map* map = Map::cast(obj);
SetInternalReference(obj, entry,
@@ -2009,6 +2118,9 @@
SetInternalReference(obj, entry,
"script", shared->script(),
SharedFunctionInfo::kScriptOffset);
+ SetWeakReference(obj, entry,
+ 1, shared->initial_map(),
+ SharedFunctionInfo::kInitialMapOffset);
} else if (obj->IsScript()) {
Script* script = Script::cast(obj);
SetInternalReference(obj, entry,
@@ -2118,13 +2230,13 @@
break;
case CALLBACKS: {
Object* callback_obj = descs->GetValue(i);
- if (callback_obj->IsFixedArray()) {
- FixedArray* accessors = FixedArray::cast(callback_obj);
- if (Object* getter = accessors->get(JSObject::kGetterIndex)) {
+ if (callback_obj->IsAccessorPair()) {
+ AccessorPair* accessors = AccessorPair::cast(callback_obj);
+ if (Object* getter = accessors->getter()) {
SetPropertyReference(js_obj, entry, descs->GetKey(i),
getter, "get-%s");
}
- if (Object* setter = accessors->get(JSObject::kSetterIndex)) {
+ if (Object* setter = accessors->setter()) {
SetPropertyReference(js_obj, entry, descs->GetKey(i),
setter, "set-%s");
}
@@ -2177,7 +2289,7 @@
}
}
} else if (js_obj->HasDictionaryElements()) {
- NumberDictionary* dictionary = js_obj->element_dictionary();
+ SeededNumberDictionary* dictionary = js_obj->element_dictionary();
int length = dictionary->Capacity();
for (int i = 0; i < length; ++i) {
Object* k = dictionary->KeyAt(i);
@@ -2235,15 +2347,66 @@
class RootsReferencesExtractor : public ObjectVisitor {
- public:
- explicit RootsReferencesExtractor(V8HeapExplorer* explorer)
- : explorer_(explorer) {
- }
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) explorer_->SetGcRootsReference(*p);
- }
private:
- V8HeapExplorer* explorer_;
+ struct IndexTag {
+ IndexTag(int index, VisitorSynchronization::SyncTag tag)
+ : index(index), tag(tag) { }
+ int index;
+ VisitorSynchronization::SyncTag tag;
+ };
+
+ public:
+ RootsReferencesExtractor()
+ : collecting_all_references_(false),
+ previous_reference_count_(0) {
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ if (collecting_all_references_) {
+ for (Object** p = start; p < end; p++) all_references_.Add(*p);
+ } else {
+ for (Object** p = start; p < end; p++) strong_references_.Add(*p);
+ }
+ }
+
+ void SetCollectingAllReferences() { collecting_all_references_ = true; }
+
+ void FillReferences(V8HeapExplorer* explorer) {
+ ASSERT(strong_references_.length() <= all_references_.length());
+ for (int i = 0; i < reference_tags_.length(); ++i) {
+ explorer->SetGcRootsReference(reference_tags_[i].tag);
+ }
+ int strong_index = 0, all_index = 0, tags_index = 0;
+ while (all_index < all_references_.length()) {
+ if (strong_index < strong_references_.length() &&
+ strong_references_[strong_index] == all_references_[all_index]) {
+ explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
+ false,
+ all_references_[all_index++]);
+ ++strong_index;
+ } else {
+ explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
+ true,
+ all_references_[all_index++]);
+ }
+ if (reference_tags_[tags_index].index == all_index) ++tags_index;
+ }
+ }
+
+ void Synchronize(VisitorSynchronization::SyncTag tag) {
+ if (collecting_all_references_ &&
+ previous_reference_count_ != all_references_.length()) {
+ previous_reference_count_ = all_references_.length();
+ reference_tags_.Add(IndexTag(previous_reference_count_, tag));
+ }
+ }
+
+ private:
+ bool collecting_all_references_;
+ List<Object*> strong_references_;
+ List<Object*> all_references_;
+ int previous_reference_count_;
+ List<IndexTag> reference_tags_;
};
@@ -2268,8 +2431,11 @@
return false;
}
SetRootGcRootsReference();
- RootsReferencesExtractor extractor(this);
+ RootsReferencesExtractor extractor;
+ heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
+ extractor.SetCollectingAllReferences();
heap_->IterateRoots(&extractor, VISIT_ALL);
+ extractor.FillReferences(this);
filler_ = NULL;
return progress_->ProgressReport(false);
}
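
RootsReferencesExtractor now classifies each root edge as strong or weak by diffing the two passes: the strong-only pass yields an ordered subsequence of the all-roots pass (both walk the roots in the same order), so a single lockstep merge in FillReferences tags every unmatched entry as weak. A compact sketch of that merge on plain integers (not V8 code):

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> strong = {1, 4, 7};
      std::vector<int> all = {1, 2, 4, 5, 7};  // 2 and 5 are weak roots.
      size_t strong_index = 0;
      for (size_t all_index = 0; all_index < all.size(); ++all_index) {
        bool is_weak = true;
        // A root is strong iff it matches the next unconsumed strong entry.
        if (strong_index < strong.size() &&
            strong[strong_index] == all[all_index]) {
          is_weak = false;
          ++strong_index;
        }
        std::printf("%d: %s\n", all[all_index], is_weak ? "weak" : "strong");
      }
      return 0;
    }
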
@@ -2359,6 +2525,24 @@
}
+void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
+ HeapEntry* parent_entry,
+ int index,
+ Object* child_obj,
+ int field_offset) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ filler_->SetIndexedReference(HeapGraphEdge::kWeak,
+ parent_obj,
+ parent_entry,
+ index,
+ child_obj,
+ child_entry);
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ }
+}
+
+
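
SetWeakReference reports the edge itself and then calls MarkVisitedField so the generic IndexedReferencesExtractor pass over the object's fields does not emit a second edge for the same slot. A toy illustration of that skip-what-was-already-reported idea (the std::set is a stand-in; the real extractor tracks visited offsets differently):

    #include <cstdio>
    #include <set>

    struct Extractor {
      std::set<int> visited_offsets;
      void MarkVisitedField(int offset) { visited_offsets.insert(offset); }
      void ScanFields(int object_size) {
        for (int offset = 0; offset < object_size; offset += 8) {
          if (visited_offsets.count(offset)) continue;  // Already reported.
          std::printf("generic edge for field at offset %d\n", offset);
        }
      }
    };

    int main() {
      Extractor extractor;
      extractor.MarkVisitedField(16);  // e.g. a weak field reported explicitly.
      extractor.ScanFields(32);        // Emits offsets 0, 8 and 24 only.
      return 0;
    }
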
void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
HeapEntry* parent_entry,
String* reference_name,
@@ -2421,12 +2605,21 @@
}
-void V8HeapExplorer::SetGcRootsReference(Object* child_obj) {
+void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
+ filler_->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement,
+ kGcRootsObject, snapshot_->gc_roots(),
+ GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag));
+}
+
+
+void V8HeapExplorer::SetGcSubrootReference(
+ VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- kGcRootsObject, snapshot_->gc_roots(),
+ is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
+ GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag),
child_obj, child_entry);
}
}
@@ -3235,7 +3428,8 @@
writer_->AddNumber(edge->type());
writer_->AddCharacter(',');
if (edge->type() == HeapGraphEdge::kElement
- || edge->type() == HeapGraphEdge::kHidden) {
+ || edge->type() == HeapGraphEdge::kHidden
+ || edge->type() == HeapGraphEdge::kWeak) {
writer_->AddNumber(edge->index());
} else {
writer_->AddNumber(GetStringId(edge->name()));
@@ -3315,7 +3509,8 @@
"," JSON_S("property")
"," JSON_S("internal")
"," JSON_S("hidden")
- "," JSON_S("shortcut"))
+ "," JSON_S("shortcut")
+ "," JSON_S("weak"))
"," JSON_S("string_or_number")
"," JSON_S("node"))))));
#undef JSON_S
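
Per the schema above, an edge's second JSON field is either a numeric index (element, hidden, and now weak edges) or an id into the snapshot's string table (all other types). A tiny sketch of that branching; the enum values and the string-table id are stand-ins:

    #include <cstdio>
    #include <string>

    enum EdgeType {
      kContext, kElement, kProperty, kInternal, kHidden, kShortcut, kWeak
    };

    // Second field of a serialized edge: an index for indexed edge types,
    // otherwise an id into the snapshot's string table.
    std::string SecondField(EdgeType type, int index, int string_id) {
      bool indexed = type == kElement || type == kHidden || type == kWeak;
      return std::to_string(indexed ? index : string_id);
    }

    int main() {
      std::printf("%s\n", SecondField(kWeak, 3, 17).c_str());      // "3"
      std::printf("%s\n", SecondField(kProperty, 3, 17).c_str());  // "17"
      return 0;
    }
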
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 44be3db..aefe1a0 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -455,7 +455,8 @@
kProperty = v8::HeapGraphEdge::kProperty,
kInternal = v8::HeapGraphEdge::kInternal,
kHidden = v8::HeapGraphEdge::kHidden,
- kShortcut = v8::HeapGraphEdge::kShortcut
+ kShortcut = v8::HeapGraphEdge::kShortcut,
+ kWeak = v8::HeapGraphEdge::kWeak
};
HeapGraphEdge() { }
@@ -465,7 +466,7 @@
Type type() { return static_cast<Type>(type_); }
int index() {
- ASSERT(type_ == kElement || type_ == kHidden);
+ ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak);
return index_;
}
const char* name() {
@@ -588,7 +589,8 @@
int EntrySize() { return EntriesSize(1, children_count_, retainers_count_); }
int RetainedSize(bool exact);
- void Print(int max_depth, int indent);
+ void Print(
+ const char* prefix, const char* edge_name, int max_depth, int indent);
Handle<HeapObject> GetHeapObject();
@@ -661,6 +663,7 @@
HeapEntry* root() { return root_entry_; }
HeapEntry* gc_roots() { return gc_roots_entry_; }
HeapEntry* natives_root() { return natives_root_entry_; }
+ HeapEntry* gc_subroot(int index) { return gc_subroot_entries_[index]; }
List<HeapEntry*>* entries() { return &entries_; }
int raw_entries_size() { return raw_entries_size_; }
@@ -674,6 +677,9 @@
int retainers_count);
HeapEntry* AddRootEntry(int children_count);
HeapEntry* AddGcRootsEntry(int children_count, int retainers_count);
+ HeapEntry* AddGcSubrootEntry(int tag,
+ int children_count,
+ int retainers_count);
HeapEntry* AddNativesRootEntry(int children_count, int retainers_count);
void ClearPaint();
HeapEntry* GetEntryById(uint64_t id);
@@ -695,6 +701,7 @@
HeapEntry* root_entry_;
HeapEntry* gc_roots_entry_;
HeapEntry* natives_root_entry_;
+ HeapEntry* gc_subroot_entries_[VisitorSynchronization::kNumberOfSyncTags];
char* raw_entries_;
List<HeapEntry*> entries_;
bool entries_sorted_;
@@ -716,10 +723,13 @@
void MoveObject(Address from, Address to);
static uint64_t GenerateId(v8::RetainedObjectInfo* info);
+ static inline uint64_t GetNthGcSubrootId(int delta);
+ static const int kObjectIdStep = 2;
static const uint64_t kInternalRootObjectId;
static const uint64_t kGcRootsObjectId;
static const uint64_t kNativesRootObjectId;
+ static const uint64_t kGcRootsFirstSubrootId;
static const uint64_t kFirstAvailableObjectId;
private:
@@ -740,7 +750,8 @@
static uint32_t AddressHash(Address addr) {
return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)));
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)),
+ v8::internal::kZeroHashSeed);
}
bool initial_fill_mode_;
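
These hash-site changes all thread an explicit seed through ComputeIntegerHash; passing kZeroHashSeed keeps the profiler tables at their previous, deterministic hash values while other tables in the VM can use a randomized seed. A sketch of a seeded integer hash in this spirit (the mixing steps follow the widely used Wang 32-bit mix and are illustrative; folding in the seed first is the point):

    #include <cstdint>
    #include <cstdio>

    static uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;    // Fold in the seed before mixing.
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash;
    }

    int main() {
      const uint32_t kZeroHashSeed = 0;  // Deterministic across runs.
      std::printf("%u\n", ComputeIntegerHash(0xdeadbeef, kZeroHashSeed));
      return 0;
    }
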
@@ -823,7 +834,7 @@
int total_children_count() { return total_children_count_; }
int total_retainers_count() { return total_retainers_count_; }
- static HeapEntry *const kHeapEntryPlaceholder;
+ static HeapEntry* const kHeapEntryPlaceholder;
private:
struct EntryInfo {
@@ -841,7 +852,8 @@
static uint32_t Hash(HeapThing thing) {
return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)));
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)),
+ v8::internal::kZeroHashSeed);
}
static bool HeapThingsMatch(HeapThing key1, HeapThing key2) {
return key1 == key2;
@@ -969,6 +981,11 @@
HeapEntry* parent,
int index,
Object* child);
+ void SetWeakReference(HeapObject* parent_obj,
+ HeapEntry* parent_entry,
+ int index,
+ Object* child_obj,
+ int field_offset);
void SetPropertyReference(HeapObject* parent_obj,
HeapEntry* parent,
String* reference_name,
@@ -981,11 +998,16 @@
Object* child);
void SetRootShortcutReference(Object* child);
void SetRootGcRootsReference();
- void SetGcRootsReference(Object* child);
+ void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
+ void SetGcSubrootReference(
+ VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
void TagObject(Object* obj, const char* tag);
HeapEntry* GetEntry(Object* obj);
+ static inline HeapObject* GetNthGcSubrootObject(int delta);
+ static inline int GetGcSubrootOrder(HeapObject* subroot);
+
Heap* heap_;
HeapSnapshot* snapshot_;
HeapSnapshotsCollection* collection_;
@@ -994,8 +1016,11 @@
HeapObjectsSet objects_tags_;
static HeapObject* const kGcRootsObject;
+ static HeapObject* const kFirstGcSubrootObject;
+ static HeapObject* const kLastGcSubrootObject;
friend class IndexedReferencesExtractor;
+ friend class GcSubrootsEnumerator;
friend class RootsReferencesExtractor;
DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
@@ -1024,7 +1049,8 @@
void VisitSubtreeWrapper(Object** p, uint16_t class_id);
static uint32_t InfoHash(v8::RetainedObjectInfo* info) {
- return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()));
+ return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()),
+ v8::internal::kZeroHashSeed);
}
static bool RetainedInfosMatch(void* key1, void* key2) {
return key1 == key2 ||
@@ -1102,7 +1128,8 @@
INLINE(static uint32_t ObjectHash(const void* key)) {
return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)));
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)),
+ v8::internal::kZeroHashSeed);
}
void EnumerateNodes();
diff --git a/src/property.h b/src/property.h
index 3203dd1..120734d 100644
--- a/src/property.h
+++ b/src/property.h
@@ -265,11 +265,6 @@
return IsFound() && GetPropertyDetails().IsProperty();
}
- // Is the result a property or a transition?
- bool IsPropertyOrTransition() {
- return IsFound() && (type() != NULL_DESCRIPTOR);
- }
-
bool IsCacheable() { return cacheable_; }
void DisallowCaching() { cacheable_ = false; }
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 99f3a37..b6fb3c5 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -133,7 +133,7 @@
subject_ptr = slice->parent();
slice_offset = slice->offset();
}
- // Ensure that an underlying string has the same ascii-ness.
+ // Ensure that an underlying string has the same ASCII-ness.
bool is_ascii = subject_ptr->IsAsciiRepresentation();
ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
// String is now either Sequential or External
diff --git a/src/regexp.js b/src/regexp.js
index 596c185..00dd7f1 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -436,8 +436,8 @@
// value is set in SpiderMonkey, the value it is set to is coerced to a
// boolean. We mimic that behavior with a slight difference: in SpiderMonkey
// the value of the expression 'RegExp.multiline = null' (for instance) is the
- // boolean false (ie, the value after coercion), while in V8 it is the value
- // null (ie, the value before coercion).
+ // boolean false (i.e., the value after coercion), while in V8 it is the value
+ // null (i.e., the value before coercion).
// Getter and setter for multiline.
var multiline = false;
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 9837ce7..efb252f 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -65,7 +65,7 @@
Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
#ifdef DEBUG
-bool RuntimeProfiler::has_been_globally_setup_ = false;
+bool RuntimeProfiler::has_been_globally_set_up_ = false;
#endif
bool RuntimeProfiler::enabled_ = false;
@@ -82,10 +82,10 @@
void RuntimeProfiler::GlobalSetup() {
- ASSERT(!has_been_globally_setup_);
+ ASSERT(!has_been_globally_set_up_);
enabled_ = V8::UseCrankshaft() && FLAG_opt;
#ifdef DEBUG
- has_been_globally_setup_ = true;
+ has_been_globally_set_up_ = true;
#endif
}
@@ -246,8 +246,8 @@
}
-void RuntimeProfiler::Setup() {
- ASSERT(has_been_globally_setup_);
+void RuntimeProfiler::SetUp() {
+ ASSERT(has_been_globally_set_up_);
ClearSampleBuffer();
// If the ticker hasn't already started, make sure to do so to get
// the ticks for the runtime profiler.
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index 15c2097..d35b5df 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -46,7 +46,7 @@
static void GlobalSetup();
static inline bool IsEnabled() {
- ASSERT(has_been_globally_setup_);
+ ASSERT(has_been_globally_set_up_);
return enabled_;
}
@@ -54,7 +54,7 @@
void NotifyTick();
- void Setup();
+ void SetUp();
void Reset();
void TearDown();
@@ -126,7 +126,7 @@
static Semaphore* semaphore_;
#ifdef DEBUG
- static bool has_been_globally_setup_;
+ static bool has_been_globally_set_up_;
#endif
static bool enabled_;
};
diff --git a/src/runtime.cc b/src/runtime.cc
index a2e569b..f95ecdf 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -228,7 +228,7 @@
break;
}
case DICTIONARY_ELEMENTS: {
- NumberDictionary* element_dictionary = copy->element_dictionary();
+ SeededNumberDictionary* element_dictionary = copy->element_dictionary();
int capacity = element_dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = element_dictionary->KeyAt(i);
@@ -355,7 +355,7 @@
Handle<JSObject> boilerplate = isolate->factory()->NewJSObjectFromMap(map);
// Normalize the elements of the boilerplate to save space if needed.
- if (!should_have_fast_elements) NormalizeElements(boilerplate);
+ if (!should_have_fast_elements) JSObject::NormalizeElements(boilerplate);
// Add the constant properties to the boilerplate.
int length = constant_properties->length();
@@ -365,7 +365,8 @@
// Normalize the properties of object to avoid n^2 behavior
// when extending the object multiple properties. Indicate the number of
// properties to be added.
- NormalizeProperties(boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
+ JSObject::NormalizeProperties(
+ boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
}
for (int index = 0; index < length; index += 2) {
@@ -383,22 +384,18 @@
if (key->IsSymbol()) {
if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
// Array index as string (uint32).
- result = SetOwnElement(boilerplate,
- element_index,
- value,
- kNonStrictMode);
+ result = JSObject::SetOwnElement(
+ boilerplate, element_index, value, kNonStrictMode);
} else {
Handle<String> name(String::cast(*key));
ASSERT(!name->AsArrayIndex(&element_index));
- result = SetLocalPropertyIgnoreAttributes(boilerplate, name,
- value, NONE);
+ result = JSObject::SetLocalPropertyIgnoreAttributes(
+ boilerplate, name, value, NONE);
}
} else if (key->ToArrayIndex(&element_index)) {
// Array index (uint32).
- result = SetOwnElement(boilerplate,
- element_index,
- value,
- kNonStrictMode);
+ result = JSObject::SetOwnElement(
+ boilerplate, element_index, value, kNonStrictMode);
} else {
// Non-uint32 number.
ASSERT(key->IsNumber());
@@ -408,8 +405,8 @@
const char* str = DoubleToCString(num, buffer);
Handle<String> name =
isolate->factory()->NewStringFromAscii(CStrVector(str));
- result = SetLocalPropertyIgnoreAttributes(boilerplate, name,
- value, NONE);
+ result = JSObject::SetLocalPropertyIgnoreAttributes(
+ boilerplate, name, value, NONE);
}
// If setting the property on the boilerplate throws an
// exception, the exception is converted to an empty handle in
@@ -423,8 +420,8 @@
// computed properties have been assigned so that we can generate
// constant function properties.
if (should_transform && !has_function_literal) {
- TransformToFastProperties(boilerplate,
- boilerplate->map()->unused_property_fields());
+ JSObject::TransformToFastProperties(
+ boilerplate, boilerplate->map()->unused_property_fields());
}
return boilerplate;
@@ -434,7 +431,7 @@
static const int kSmiOnlyLiteralMinimumLength = 1024;
-static Handle<Object> CreateArrayLiteralBoilerplate(
+Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
Isolate* isolate,
Handle<FixedArray> literals,
Handle<FixedArray> elements) {
@@ -536,7 +533,8 @@
false,
kHasNoFunctionLiteral);
case CompileTimeValue::ARRAY_LITERAL:
- return CreateArrayLiteralBoilerplate(isolate, literals, elements);
+ return Runtime::CreateArrayLiteralBoilerplate(
+ isolate, literals, elements);
default:
UNREACHABLE();
return Handle<Object>::null();
@@ -606,7 +604,8 @@
// Check if boilerplate exists. If not, create it first.
Handle<Object> boilerplate(literals->get(literals_index), isolate);
if (*boilerplate == isolate->heap()->undefined_value()) {
- boilerplate = CreateArrayLiteralBoilerplate(isolate, literals, elements);
+ boilerplate =
+ Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
if (boilerplate.is_null()) return Failure::Exception();
// Update the functions literal and return the boilerplate.
literals->set(literals_index, *boilerplate);
@@ -625,7 +624,9 @@
// Check if boilerplate exists. If not, create it first.
Handle<Object> boilerplate(literals->get(literals_index), isolate);
if (*boilerplate == isolate->heap()->undefined_value()) {
- boilerplate = CreateArrayLiteralBoilerplate(isolate, literals, elements);
+ ASSERT(*elements != isolate->heap()->empty_fixed_array());
+ boilerplate =
+ Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
if (boilerplate.is_null()) return Failure::Exception();
// Update the functions literal and return the boilerplate.
literals->set(literals_index, *boilerplate);
@@ -1043,26 +1044,26 @@
holder = Handle<JSObject>(JSObject::cast(proto));
}
FixedArray* elements = FixedArray::cast(holder->elements());
- NumberDictionary* dictionary = NULL;
+ SeededNumberDictionary* dictionary = NULL;
if (elements->map() == heap->non_strict_arguments_elements_map()) {
- dictionary = NumberDictionary::cast(elements->get(1));
+ dictionary = SeededNumberDictionary::cast(elements->get(1));
} else {
- dictionary = NumberDictionary::cast(elements);
+ dictionary = SeededNumberDictionary::cast(elements);
}
int entry = dictionary->FindEntry(index);
- ASSERT(entry != NumberDictionary::kNotFound);
+ ASSERT(entry != SeededNumberDictionary::kNotFound);
PropertyDetails details = dictionary->DetailsAt(entry);
switch (details.type()) {
case CALLBACKS: {
// This is an accessor property with getter and/or setter.
- FixedArray* callbacks =
- FixedArray::cast(dictionary->ValueAt(entry));
+ AccessorPair* accessors =
+ AccessorPair::cast(dictionary->ValueAt(entry));
elms->set(IS_ACCESSOR_INDEX, heap->true_value());
if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
- elms->set(GETTER_INDEX, callbacks->get(0));
+ elms->set(GETTER_INDEX, accessors->getter());
}
if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) {
- elms->set(SETTER_INDEX, callbacks->get(1));
+ elms->set(SETTER_INDEX, accessors->setter());
}
break;
}
@@ -1101,18 +1102,18 @@
elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!result.IsDontDelete()));
bool is_js_accessor = (result.type() == CALLBACKS) &&
- (result.GetCallbackObject()->IsFixedArray());
+ (result.GetCallbackObject()->IsAccessorPair());
if (is_js_accessor) {
// __defineGetter__/__defineSetter__ callback.
elms->set(IS_ACCESSOR_INDEX, heap->true_value());
- FixedArray* structure = FixedArray::cast(result.GetCallbackObject());
+ AccessorPair* accessors = AccessorPair::cast(result.GetCallbackObject());
if (CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
- elms->set(GETTER_INDEX, structure->get(0));
+ elms->set(GETTER_INDEX, accessors->getter());
}
if (CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
- elms->set(SETTER_INDEX, structure->get(1));
+ elms->set(SETTER_INDEX, accessors->setter());
}
} else {
elms->set(IS_ACCESSOR_INDEX, heap->false_value());
@@ -1330,21 +1331,19 @@
}
PropertyAttributes attributes = static_cast<PropertyAttributes>(attr);
- RETURN_IF_EMPTY_HANDLE(isolate,
- SetLocalPropertyIgnoreAttributes(global,
- name,
- value,
- attributes));
+ RETURN_IF_EMPTY_HANDLE(
+ isolate,
+ JSObject::SetLocalPropertyIgnoreAttributes(global, name, value,
+ attributes));
} else {
LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
? kNonStrictMode : kStrictMode;
- RETURN_IF_EMPTY_HANDLE(isolate,
- SetProperty(global,
- name,
- value,
- static_cast<PropertyAttributes>(attr),
- strict_mode_flag));
+ RETURN_IF_EMPTY_HANDLE(
+ isolate,
+ JSReceiver::SetProperty(global, name, value,
+ static_cast<PropertyAttributes>(attr),
+ strict_mode_flag));
}
}
@@ -1398,7 +1397,8 @@
Handle<JSObject> object = Handle<JSObject>::cast(holder);
RETURN_IF_EMPTY_HANDLE(
isolate,
- SetProperty(object, name, initial_value, mode, kNonStrictMode));
+ JSReceiver::SetProperty(object, name, initial_value, mode,
+ kNonStrictMode));
}
}
@@ -1434,13 +1434,13 @@
!object->IsJSContextExtensionObject()) {
LookupResult lookup(isolate);
object->Lookup(*name, &lookup);
- if (lookup.IsProperty() && (lookup.type() == CALLBACKS)) {
+ if (lookup.IsFound() && (lookup.type() == CALLBACKS)) {
return ThrowRedeclarationError(isolate, "const", name);
}
}
- RETURN_IF_EMPTY_HANDLE(isolate,
- SetProperty(object, name, value, mode,
- kNonStrictMode));
+ RETURN_IF_EMPTY_HANDLE(
+ isolate,
+ JSReceiver::SetProperty(object, name, value, mode, kNonStrictMode));
}
return isolate->heap()->undefined_value();
@@ -1482,7 +1482,7 @@
JSObject::cast(object)->map()->is_hidden_prototype()) {
JSObject* raw_holder = JSObject::cast(object);
raw_holder->LocalLookup(*name, &lookup);
- if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
+ if (lookup.IsFound() && lookup.type() == INTERCEPTOR) {
HandleScope handle_scope(isolate);
Handle<JSObject> holder(raw_holder);
PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
@@ -1549,12 +1549,10 @@
// property through an interceptor and only do it if it's
// uninitialized, e.g. the hole. Nirk...
// Passing non-strict mode because the property is writable.
- RETURN_IF_EMPTY_HANDLE(isolate,
- SetProperty(global,
- name,
- value,
- attributes,
- kNonStrictMode));
+ RETURN_IF_EMPTY_HANDLE(
+ isolate,
+ JSReceiver::SetProperty(global, name, value, attributes,
+ kNonStrictMode));
return *value;
}
@@ -1624,7 +1622,7 @@
// Strict mode not needed (const disallowed in strict mode).
RETURN_IF_EMPTY_HANDLE(
isolate,
- SetProperty(global, name, value, NONE, kNonStrictMode));
+ JSReceiver::SetProperty(global, name, value, NONE, kNonStrictMode));
return *value;
}
@@ -1650,7 +1648,7 @@
// GetProperty() to get the current value as it 'unholes' the value.
LookupResult lookup(isolate);
object->LocalLookupRealNamedProperty(*name, &lookup);
- ASSERT(lookup.IsProperty()); // the property was declared
+ ASSERT(lookup.IsFound()); // the property was declared
ASSERT(lookup.IsReadOnly()); // and it was declared as read-only
PropertyType type = lookup.type();
@@ -1676,7 +1674,8 @@
// Strict mode not needed (const disallowed in strict mode).
RETURN_IF_EMPTY_HANDLE(
isolate,
- SetProperty(object, name, value, attributes, kNonStrictMode));
+ JSReceiver::SetProperty(object, name, value, attributes,
+ kNonStrictMode));
}
}
@@ -1691,7 +1690,7 @@
CONVERT_ARG_CHECKED(JSObject, object, 0);
CONVERT_SMI_ARG_CHECKED(properties, 1);
if (object->HasFastProperties()) {
- NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
+ JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
}
return *object;
}
@@ -1847,7 +1846,7 @@
code,
false);
optimized->shared()->DontAdaptArguments();
- SetProperty(holder, key, optimized, NONE, kStrictMode);
+ JSReceiver::SetProperty(holder, key, optimized, NONE, kStrictMode);
return optimized;
}
@@ -1870,9 +1869,19 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
- NoHandleAllocation handle_free;
ASSERT(args.length() == 1);
- CONVERT_CHECKED(JSFunction, function, args[0]);
+ CONVERT_CHECKED(JSReceiver, callable, args[0]);
+
+ if (!callable->IsJSFunction()) {
+ HandleScope scope(isolate);
+ bool threw = false;
+ Handle<Object> delegate =
+ Execution::TryGetFunctionDelegate(Handle<JSReceiver>(callable), &threw);
+ if (threw) return Failure::Exception();
+ callable = JSFunction::cast(*delegate);
+ }
+ JSFunction* function = JSFunction::cast(callable);
+
SharedFunctionInfo* shared = function->shared();
if (shared->native() || !shared->is_classic_mode()) {
return isolate->heap()->undefined_value();
@@ -3179,7 +3188,7 @@
Address end_of_string = answer->address() + string_size;
isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
if (Marking::IsBlack(Marking::MarkBitFrom(*answer))) {
- MemoryChunk::IncrementLiveBytes(answer->address(), -delta);
+ MemoryChunk::IncrementLiveBytesFromMutator(answer->address(), -delta);
}
return *answer;
@@ -3234,6 +3243,79 @@
}
+Handle<String> Runtime::StringReplaceOneCharWithString(Isolate* isolate,
+ Handle<String> subject,
+ Handle<String> search,
+ Handle<String> replace,
+ bool* found,
+ int recursion_limit) {
+ if (recursion_limit == 0) return Handle<String>::null();
+ if (subject->IsConsString()) {
+ ConsString* cons = ConsString::cast(*subject);
+ Handle<String> first = Handle<String>(cons->first());
+ Handle<String> second = Handle<String>(cons->second());
+ Handle<String> new_first =
+ StringReplaceOneCharWithString(isolate,
+ first,
+ search,
+ replace,
+ found,
+ recursion_limit - 1);
+ if (*found) return isolate->factory()->NewConsString(new_first, second);
+ if (new_first.is_null()) return new_first;
+
+ Handle<String> new_second =
+ StringReplaceOneCharWithString(isolate,
+ second,
+ search,
+ replace,
+ found,
+ recursion_limit - 1);
+ if (*found) return isolate->factory()->NewConsString(first, new_second);
+ if (new_second.is_null()) return new_second;
+
+ return subject;
+ } else {
+ int index = StringMatch(isolate, subject, search, 0);
+ if (index == -1) return subject;
+ *found = true;
+ Handle<String> first = isolate->factory()->NewSubString(subject, 0, index);
+ Handle<String> cons1 = isolate->factory()->NewConsString(first, replace);
+ Handle<String> second =
+ isolate->factory()->NewSubString(subject, index + 1, subject->length());
+ return isolate->factory()->NewConsString(cons1, second);
+ }
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceOneCharWithString) {
+ ASSERT(args.length() == 3);
+ HandleScope scope(isolate);
+ CONVERT_ARG_CHECKED(String, subject, 0);
+ CONVERT_ARG_CHECKED(String, search, 1);
+ CONVERT_ARG_CHECKED(String, replace, 2);
+
+ // If the cons string tree is too deep, we simply abort the recursion and
+ // retry with a flattened subject string.
+ const int kRecursionLimit = 0x1000;
+ bool found = false;
+ Handle<String> result =
+ Runtime::StringReplaceOneCharWithString(isolate,
+ subject,
+ search,
+ replace,
+ &found,
+ kRecursionLimit);
+ if (!result.is_null()) return *result;
+ return *Runtime::StringReplaceOneCharWithString(isolate,
+ FlattenGetString(subject),
+ search,
+ replace,
+ &found,
+ kRecursionLimit);
+}
+
+
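
The helper above replaces the first occurrence of a one-character search string without flattening the subject: it recurses into the halves of each ConsString, rebuilds only the spine that leads to the match, and shares every untouched subtree. When the recursion budget runs out it returns a null handle and the runtime entry retries on a flattened copy. A sketch of the same strategy over a plain binary tree of std::string pieces (Node and ReplaceOneChar are illustrative, not V8 types):

    #include <memory>
    #include <string>

    struct Node {
      std::string leaf;                     // Used when first/second are null.
      std::shared_ptr<Node> first, second;  // Used for interior (cons) nodes.
    };

    // Returns the rewritten tree, or nullptr if the recursion budget ran out.
    std::shared_ptr<Node> ReplaceOneChar(const std::shared_ptr<Node>& subject,
                                         char search,
                                         const std::string& replace,
                                         bool* found, int budget) {
      if (budget == 0) return nullptr;
      if (subject->first) {  // Cons node: try the left half, then the right.
        auto new_first = ReplaceOneChar(subject->first, search, replace,
                                        found, budget - 1);
        if (*found) {
          auto node = std::make_shared<Node>();
          node->first = new_first;
          node->second = subject->second;  // Right half shared untouched.
          return node;
        }
        if (!new_first) return nullptr;  // Budget exhausted below us.
        auto new_second = ReplaceOneChar(subject->second, search, replace,
                                         found, budget - 1);
        if (*found) {
          auto node = std::make_shared<Node>();
          node->first = subject->first;    // Left half shared untouched.
          node->second = new_second;
          return node;
        }
        if (!new_second) return nullptr;
        return subject;  // No match anywhere: share the whole old tree.
      }
      size_t index = subject->leaf.find(search);
      if (index == std::string::npos) return subject;
      *found = true;
      auto node = std::make_shared<Node>();
      node->leaf = subject->leaf.substr(0, index) + replace +
                   subject->leaf.substr(index + 1);
      return node;
    }
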
// Perform string match of pattern on subject, starting at start index.
// Caller must ensure that 0 <= start_index <= sub->length(),
// and should check that pat->length() + start_index <= sub->length().
@@ -4061,8 +4143,7 @@
}
if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- Handle<Object> prototype = GetPrototype(object);
- return prototype->GetElement(index);
+ return object->GetPrototype()->GetElement(index);
}
return object->GetElement(index);
@@ -4129,8 +4210,8 @@
ElementsKind from_kind =
Handle<JSObject>::cast(object)->map()->elements_kind();
if (Map::IsValidElementsTransition(from_kind, to_kind)) {
- Handle<Object> result =
- TransitionElementsKind(Handle<JSObject>::cast(object), to_kind);
+ Handle<Object> result = JSObject::TransitionElementsKind(
+ Handle<JSObject>::cast(object), to_kind);
if (result.is_null()) return isolate->ThrowIllegalOperation();
return *result;
}
@@ -4175,7 +4256,7 @@
// appropriate.
LookupResult result(isolate);
receiver->LocalLookup(key, &result);
- if (result.IsProperty() && result.type() == FIELD) {
+ if (result.IsFound() && result.type() == FIELD) {
int offset = result.GetFieldIndex();
keyed_lookup_cache->Update(receiver_map, key, offset);
return receiver->FastPropertyAt(offset);
@@ -4243,27 +4324,14 @@
CONVERT_CHECKED(String, name, args[1]);
CONVERT_CHECKED(Smi, flag_setter, args[2]);
Object* fun = args[3];
- RUNTIME_ASSERT(fun->IsSpecFunction() || fun->IsUndefined());
CONVERT_CHECKED(Smi, flag_attr, args[4]);
+
int unchecked = flag_attr->value();
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
- RUNTIME_ASSERT(!obj->IsNull());
- LookupResult result(isolate);
- obj->LocalLookupRealNamedProperty(name, &result);
-
PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
- // If an existing property is either FIELD, NORMAL or CONSTANT_FUNCTION
- // delete it to avoid running into trouble in DefineAccessor, which
- // handles this incorrectly if the property is readonly (does nothing)
- if (result.IsProperty() &&
- (result.type() == FIELD || result.type() == NORMAL
- || result.type() == CONSTANT_FUNCTION)) {
- Object* ok;
- { MaybeObject* maybe_ok =
- obj->DeleteProperty(name, JSReceiver::NORMAL_DELETION);
- if (!maybe_ok->ToObject(&ok)) return maybe_ok;
- }
- }
+
+ RUNTIME_ASSERT(!obj->IsNull());
+ RUNTIME_ASSERT(fun->IsSpecFunction() || fun->IsUndefined());
return obj->DefineAccessor(name, flag_setter->value() == 0, fun, attr);
}
@@ -4279,11 +4347,10 @@
CONVERT_ARG_CHECKED(JSObject, js_object, 0);
CONVERT_ARG_CHECKED(String, name, 1);
Handle<Object> obj_value = args.at<Object>(2);
-
CONVERT_CHECKED(Smi, flag, args[3]);
+
int unchecked = flag->value();
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
-
PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
// Check if this is an element.
@@ -4316,12 +4383,13 @@
return isolate->Throw(*error);
}
- Handle<NumberDictionary> dictionary = NormalizeElements(js_object);
+ Handle<SeededNumberDictionary> dictionary =
+ JSObject::NormalizeElements(js_object);
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
PropertyDetails details = PropertyDetails(attr, NORMAL);
- Handle<NumberDictionary> extended_dictionary =
- NumberDictionarySet(dictionary, index, obj_value, details);
+ Handle<SeededNumberDictionary> extended_dictionary =
+ SeededNumberDictionary::Set(dictionary, index, obj_value, details);
if (*extended_dictionary != *dictionary) {
if (js_object->GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
FixedArray::cast(js_object->elements())->set(1, *extended_dictionary);
@@ -4335,12 +4403,26 @@
LookupResult result(isolate);
js_object->LocalLookupRealNamedProperty(*name, &result);
- // To be compatible with safari we do not change the value on API objects
- // in defineProperty. Firefox disagrees here, and actually changes the value.
- if (result.IsProperty() &&
- (result.type() == CALLBACKS) &&
- result.GetCallbackObject()->IsAccessorInfo()) {
- return isolate->heap()->undefined_value();
+ // Special case for callback properties.
+ if (result.IsFound() && result.type() == CALLBACKS) {
+ Object* callback = result.GetCallbackObject();
+ // To be compatible with Safari we do not change the value on API objects
+ // in Object.defineProperty(). Firefox disagrees here, and actually changes
+ // the value.
+ if (callback->IsAccessorInfo()) {
+ return isolate->heap()->undefined_value();
+ }
+ // Avoid redefining foreign callback as data property, just use the stored
+ // setter to update the value instead.
+ // TODO(mstarzinger): So far this only works if property attributes don't
+ // change, this should be fixed once we cleanup the underlying code.
+ if (callback->IsForeign() && result.GetAttributes() == attr) {
+ return js_object->SetPropertyWithCallback(callback,
+ *name,
+ *obj_value,
+ result.holder(),
+ kStrictMode);
+ }
}
// Take special care when attributes are different and there is already
@@ -4357,7 +4439,7 @@
// we don't have to check for null.
js_object = Handle<JSObject>(JSObject::cast(js_object->GetPrototype()));
}
- NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
+ JSObject::NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
// Use IgnoreAttributes version since a readonly property may be
// overridden and SetProperty does not allow this.
return js_object->SetLocalPropertyIgnoreAttributes(*name,
@@ -4382,12 +4464,13 @@
Handle<Object> value,
PropertyAttributes attr) {
// Normalize the elements to enable attributes on the property.
- Handle<NumberDictionary> dictionary = NormalizeElements(js_object);
+ Handle<SeededNumberDictionary> dictionary =
+ JSObject::NormalizeElements(js_object);
// Make sure that we never go back to fast case.
dictionary->set_requires_slow_elements();
PropertyDetails details = PropertyDetails(attr, NORMAL);
- Handle<NumberDictionary> extended_dictionary =
- NumberDictionarySet(dictionary, index, value, details);
+ Handle<SeededNumberDictionary> extended_dictionary =
+ SeededNumberDictionary::Set(dictionary, index, value, details);
if (*extended_dictionary != *dictionary) {
js_object->set_elements(*extended_dictionary);
}
@@ -4442,7 +4525,8 @@
return NormalizeObjectSetElement(isolate, js_object, index, value, attr);
}
- Handle<Object> result = SetElement(js_object, index, value, strict_mode);
+ Handle<Object> result =
+ JSObject::SetElement(js_object, index, value, strict_mode);
if (result.is_null()) return Failure::Exception();
return *value;
}
@@ -4457,11 +4541,13 @@
value,
attr);
}
- result = SetElement(js_object, index, value, strict_mode);
+ result =
+ JSObject::SetElement(js_object, index, value, strict_mode);
} else {
Handle<String> key_string = Handle<String>::cast(key);
key_string->TryFlatten();
- result = SetProperty(js_object, key_string, value, attr, strict_mode);
+ result = JSReceiver::SetProperty(
+ js_object, key_string, value, attr, strict_mode);
}
if (result.is_null()) return Failure::Exception();
return *value;
@@ -4650,7 +4736,8 @@
if (value->IsNumber()) {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
- TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
+ JSObject::TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
+ JSObject::TransitionElementsKind(boilerplate_object, FAST_DOUBLE_ELEMENTS);
ASSERT(object->GetElementsKind() == FAST_DOUBLE_ELEMENTS);
FixedDoubleArray* double_array =
FixedDoubleArray::cast(object->elements());
@@ -4659,7 +4746,8 @@
} else {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS ||
elements_kind == FAST_DOUBLE_ELEMENTS);
- TransitionElementsKind(object, FAST_ELEMENTS);
+ JSObject::TransitionElementsKind(object, FAST_ELEMENTS);
+ JSObject::TransitionElementsKind(boilerplate_object, FAST_ELEMENTS);
FixedArray* object_array =
FixedArray::cast(object->elements());
object_array->set(store_index, *value);
@@ -4811,15 +4899,15 @@
object = JSObject::cast(proto);
}
FixedArray* elements = FixedArray::cast(object->elements());
- NumberDictionary* dictionary = NULL;
+ SeededNumberDictionary* dictionary = NULL;
if (elements->map() ==
isolate->heap()->non_strict_arguments_elements_map()) {
- dictionary = NumberDictionary::cast(elements->get(1));
+ dictionary = SeededNumberDictionary::cast(elements->get(1));
} else {
- dictionary = NumberDictionary::cast(elements);
+ dictionary = SeededNumberDictionary::cast(elements);
}
int entry = dictionary->FindEntry(index);
- ASSERT(entry != NumberDictionary::kNotFound);
+ ASSERT(entry != SeededNumberDictionary::kNotFound);
PropertyDetails details = dictionary->DetailsAt(entry);
return isolate->heap()->ToBoolean(!details.IsDontEnum());
}
@@ -5137,31 +5225,20 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
- HandleScope scope(isolate);
-
ASSERT(args.length() == 1);
- Handle<Object> object = args.at<Object>(0);
- if (object->IsJSObject()) {
- Handle<JSObject> js_object = Handle<JSObject>::cast(object);
- if (!js_object->HasFastProperties() && !js_object->IsGlobalObject()) {
- MaybeObject* ok = js_object->TransformToFastProperties(0);
- if (ok->IsRetryAfterGC()) return ok;
- }
- }
- return *object;
+ Object* object = args[0];
+ return (object->IsJSObject() && !object->IsGlobalObject())
+ ? JSObject::cast(object)->TransformToFastProperties(0)
+ : object;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_ToSlowProperties) {
- HandleScope scope(isolate);
-
ASSERT(args.length() == 1);
- Handle<Object> object = args.at<Object>(0);
- if (object->IsJSObject() && !object->IsJSGlobalProxy()) {
- Handle<JSObject> js_object = Handle<JSObject>::cast(object);
- NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
- }
- return *object;
+ Object* obj = args[0];
+ return (obj->IsJSObject() && !obj->IsJSGlobalProxy())
+ ? JSObject::cast(obj)->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0)
+ : obj;
}
@@ -5932,8 +6009,8 @@
//
// Allocate the resulting string.
//
- // NOTE: This assumes that the upper/lower case of an ascii
- // character is also ascii. This is currently the case, but it
+ // NOTE: This assumes that the upper/lower case of an ASCII
+ // character is also ASCII. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
Object* o;
@@ -6033,9 +6110,9 @@
// This function is only useful when it can be inlined and the
// boundaries are statically known.
// Requires: all bytes in the input word and the boundaries must be
-// ascii (less than 0x7F).
+// ASCII (less than 0x7F).
static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
- // Every byte in an ascii string is less than or equal to 0x7F.
+ // Every byte in an ASCII string is less than or equal to 0x7F.
ASSERT((w & (kOneInEveryByte * 0x7F)) == w);
// Use strict inequalities since in edge cases the function could be
// further simplified.
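
The trick works because every byte is known to be at most 0x7F, so per-byte carries cannot cross lanes: adding (0x7F - m) drives a byte's high bit to 1 exactly when the byte exceeds m, and subtracting the byte from (0x7F + n) drives it to 1 exactly when the byte is below n. A standalone demonstration under those assumptions (little-endian layout assumed in the printout; this is a re-derivation of the idea, not the V8 function body):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static const uintptr_t kOneInEveryByte = ~static_cast<uintptr_t>(0) / 0xFF;

    // High bit of each byte of the result is set iff m < byte < n.
    // Requires: every byte of w, and m and n, are ASCII (<= 0x7F).
    static uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
      uintptr_t above_m = w + kOneInEveryByte * (0x7F - m);  // byte > m
      uintptr_t below_n = kOneInEveryByte * (0x7F + n) - w;  // byte < n
      return above_m & below_n & (kOneInEveryByte * 0x80);
    }

    int main() {
      char buf[] = {'H', 'e', 'l', 'l', 'o', '4', '2', '!'};
      uintptr_t w;
      std::memcpy(&w, buf, sizeof(w));
      // Mark lowercase letters: strictly between 'a' - 1 and 'z' + 1.
      uintptr_t mask = AsciiRangeMask(w, 'a' - 1, 'z' + 1);
      for (size_t i = 0; i < sizeof(w); ++i) {
        std::printf("%c -> %s\n", buf[i],
                    (mask >> (8 * i + 7)) & 1 ? "lower" : "other");
      }
      return 0;
    }
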
@@ -6163,10 +6240,10 @@
// Assume that the string is not empty; we need this assumption later
if (length == 0) return s;
- // Simpler handling of ascii strings.
+ // Simpler handling of ASCII strings.
//
- // NOTE: This assumes that the upper/lower case of an ascii
- // character is also ascii. This is currently the case, but it
+ // NOTE: This assumes that the upper/lower case of an ASCII
+ // character is also ASCII. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
if (s->IsSeqAsciiString()) {
@@ -6293,7 +6370,7 @@
int part_count = indices.length();
Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
- MaybeObject* maybe_result = result->EnsureCanContainNonSmiElements();
+ MaybeObject* maybe_result = result->EnsureCanContainHeapObjectElements();
if (maybe_result->IsFailure()) return maybe_result;
result->set_length(Smi::FromInt(part_count));
@@ -6329,7 +6406,7 @@
}
-// Copies ascii characters to the given fixed array looking up
+// Copies ASCII characters to the given fixed array looking up
// one-char strings in the cache. Gives up on the first char that is
// not in the cache and fills the remainder with smi zeros. Returns
// the length of the successfully copied prefix.
@@ -6669,7 +6746,7 @@
// This assumption is used by the slice encoding in one or two smis.
ASSERT(Smi::kMaxValue >= String::kMaxLength);
- MaybeObject* maybe_result = array->EnsureCanContainNonSmiElements();
+ MaybeObject* maybe_result = array->EnsureCanContainHeapObjectElements();
if (maybe_result->IsFailure()) return maybe_result;
int special_length = special->length();
@@ -7395,7 +7472,8 @@
return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
}
-
+// Slow version of Math.pow. We check for fast paths for special cases.
+// Used if SSE2/VFP3 is not available.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -7411,22 +7489,36 @@
}
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return isolate->heap()->AllocateHeapNumber(power_double_double(x, y));
+ int y_int = static_cast<int>(y);
+ double result;
+ if (y == y_int) {
+ result = power_double_int(x, y_int); // Returns 1 if exponent is 0.
+ } else if (y == 0.5) {
+ result = (isinf(x)) ? V8_INFINITY : sqrt(x + 0.0); // Convert -0 to +0.
+ } else if (y == -0.5) {
+ result = (isinf(x)) ? 0 : 1.0 / sqrt(x + 0.0); // Convert -0 to +0.
+ } else {
+ result = power_double_double(x, y);
+ }
+ if (isnan(result)) return isolate->heap()->nan_value();
+ return isolate->heap()->AllocateHeapNumber(result);
}
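
The ±0.5 branches cannot simply call sqrt: sqrt(-0.0) is -0.0 in IEEE 754, so without the "+ 0.0" (which turns -0 into +0) pow(-0, -0.5) would come out as 1/-0 = -Infinity instead of the required +Infinity; and sqrt(-Infinity) is NaN while pow(-Infinity, 0.5) must be +Infinity, hence the isinf() guard. A short standalone check of both edge cases (plain C++, not V8 code):

    #include <cmath>
    #include <cstdio>

    int main() {
      double neg_zero = -0.0;
      std::printf("sqrt(-0.0)       = %g\n", std::sqrt(neg_zero));       // -0
      std::printf("1/sqrt(-0.0)     = %g\n",
                  1.0 / std::sqrt(neg_zero));                           // -inf
      std::printf("1/sqrt(-0.0+0.0) = %g\n",
                  1.0 / std::sqrt(neg_zero + 0.0));                     // inf
      std::printf("sqrt(-inf)       = %g\n", std::sqrt(-INFINITY));     // nan
      std::printf("pow(-inf, 0.5)   = %g\n", std::pow(-INFINITY, 0.5)); // inf
      return 0;
    }
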
-// Fast version of Math.pow if we know that y is not an integer and
-// y is not -0.5 or 0.5. Used as slowcase from codegen.
+// Fast version of Math.pow if we know that y is not an integer and y is not
+// -0.5 or 0.5. Used as slow case from full codegen.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
+ isolate->counters()->math_pow()->Increment();
+
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
if (y == 0) {
return Smi::FromInt(1);
- } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
- return isolate->heap()->nan_value();
} else {
- return isolate->heap()->AllocateHeapNumber(pow(x, y));
+ double result = power_double_double(x, y);
+ if (isnan(result)) return isolate->heap()->nan_value();
+ return isolate->heap()->AllocateHeapNumber(result);
}
}
@@ -7456,7 +7548,7 @@
// We compare with kSmiValueSize - 2 because (2^30 - 0.1) has exponent 29 and
// should be rounded to 2^30, which is not smi (for 31-bit smis, similar
- // agument holds for 32-bit smis).
+ // argument holds for 32-bit smis).
if (!sign && exponent < kSmiValueSize - 2) {
return Smi::FromInt(static_cast<int>(value + 0.5));
}
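
To make the bound concrete: with 31-bit smis the largest smi is 2^30 - 1, and 2^30 - 0.1 has binary exponent 29 yet rounds up to 2^30, which no longer fits, so only exponents strictly below kSmiValueSize - 2 = 29 are safe. A quick numeric check (assumes ordinary IEEE doubles):

    #include <cmath>
    #include <cstdio>

    int main() {
      const int kSmiValueSize = 31;
      double value = std::pow(2.0, 30) - 0.1;
      int exponent;
      std::frexp(value, &exponent);  // value = m * 2^exponent, 0.5 <= m < 1.
      std::printf("binary exponent: %d\n", exponent - 1);  // Prints 29.
      std::printf("rounded: %.0f, max smi: %.0f\n",
                  value + 0.5, std::pow(2.0, 30) - 1);
      return 0;
    }
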
@@ -7991,7 +8083,7 @@
AssertNoAllocation no_gc;
FixedArray* array = reinterpret_cast<FixedArray*>(obj);
- array->set_map(isolate->heap()->fixed_array_map());
+ array->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
array->set_length(length);
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
@@ -8034,13 +8126,15 @@
List<JSFunction*> functions(2);
frame->GetFunctions(&functions);
if (functions.length() > 1) {
- int inlined_frame_index = functions.length() - 1;
- JSFunction* inlined_function = functions[inlined_frame_index];
- int args_count = inlined_function->shared()->formal_parameter_count();
- ScopedVector<SlotRef> args_slots(args_count);
- SlotRef::ComputeSlotMappingForArguments(frame,
- inlined_frame_index,
- &args_slots);
+ int inlined_jsframe_index = functions.length() - 1;
+ JSFunction* inlined_function = functions[inlined_jsframe_index];
+ Vector<SlotRef> args_slots =
+ SlotRef::ComputeSlotMappingForArguments(
+ frame,
+ inlined_jsframe_index,
+ inlined_function->shared()->formal_parameter_count());
+
+ int args_count = args_slots.length();
*total_argc = prefix_argc + args_count;
SmartArrayPointer<Handle<Object> > param_data(
@@ -8049,6 +8143,9 @@
Handle<Object> val = args_slots[i].GetValue();
param_data[prefix_argc + i] = val;
}
+
+ args_slots.Dispose();
+
return param_data;
} else {
it.AdvanceToArgumentsFrame();
@@ -8111,7 +8208,8 @@
for (int j = 0; j < argc; j++, i++) {
new_bindings->set(i, *arguments[j + 1]);
}
- new_bindings->set_map(isolate->heap()->fixed_cow_array_map());
+ new_bindings->set_map_no_write_barrier(
+ isolate->heap()->fixed_cow_array_map());
bound_function->set_function_bindings(*new_bindings);
// Update length.
@@ -8393,14 +8491,14 @@
static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
ASSERT(isolate->heap()->IsAllocationAllowed());
- int frames = deoptimizer->output_count();
+ int jsframes = deoptimizer->jsframe_count();
deoptimizer->MaterializeHeapNumbers();
delete deoptimizer;
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = NULL;
- for (int i = 0; i < frames - 1; i++) it.Advance();
+ for (int i = 0; i < jsframes - 1; i++) it.Advance();
frame = it.frame();
RUNTIME_ASSERT(frame->function()->IsJSFunction());
@@ -8684,14 +8782,11 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
HandleScope scope(isolate);
ASSERT(args.length() == 5);
- CONVERT_CHECKED(JSReceiver, fun, args[0]);
- Object* receiver = args[1];
- CONVERT_CHECKED(JSObject, arguments, args[2]);
- CONVERT_CHECKED(Smi, shift, args[3]);
- CONVERT_CHECKED(Smi, arity, args[4]);
-
- int offset = shift->value();
- int argc = arity->value();
+ CONVERT_ARG_CHECKED(JSReceiver, fun, 0);
+ Handle<Object> receiver = args.at<Object>(1);
+ CONVERT_ARG_CHECKED(JSObject, arguments, 2);
+ CONVERT_SMI_ARG_CHECKED(offset, 3);
+ CONVERT_SMI_ARG_CHECKED(argc, 4);
ASSERT(offset >= 0);
ASSERT(argc >= 0);
@@ -8707,17 +8802,12 @@
}
for (int i = 0; i < argc; ++i) {
- MaybeObject* maybe = arguments->GetElement(offset + i);
- Object* object;
- if (!maybe->To<Object>(&object)) return maybe;
- argv[i] = Handle<Object>(object);
+ argv[i] = Object::GetElement(arguments, offset + i);
}
bool threw;
- Handle<JSReceiver> hfun(fun);
- Handle<Object> hreceiver(receiver);
Handle<Object> result =
- Execution::Call(hfun, hreceiver, argc, argv, &threw, true);
+ Execution::Call(fun, receiver, argc, argv, &threw, true);
if (threw) return Failure::Exception();
return *result;
@@ -9118,7 +9208,7 @@
(object->GetLocalPropertyAttribute(*name) == ABSENT)) {
RETURN_IF_EMPTY_HANDLE(
isolate,
- SetProperty(object, name, value, NONE, strict_mode));
+ JSReceiver::SetProperty(object, name, value, NONE, strict_mode));
} else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) {
// Setting read only property in strict mode.
Handle<Object> error =
@@ -9206,22 +9296,6 @@
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceElementsKindTransition) {
- ASSERT(args.length() == 5);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
- CONVERT_SMI_ARG_CHECKED(from_kind, 1);
- CONVERT_ARG_CHECKED(FixedArrayBase, from_elements, 2);
- CONVERT_SMI_ARG_CHECKED(to_kind, 3);
- CONVERT_ARG_CHECKED(FixedArrayBase, to_elements, 4);
- NoHandleAllocation ha;
- PrintF("*");
- obj->PrintElementsTransition(stdout,
- static_cast<ElementsKind>(from_kind), *from_elements,
- static_cast<ElementsKind>(to_kind), *to_elements);
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) {
ASSERT(args.length() == 0);
NoHandleAllocation ha;
@@ -9299,7 +9373,7 @@
CONVERT_ARG_CHECKED(JSArray, output, 1);
MaybeObject* maybe_result_array =
- output->EnsureCanContainNonSmiElements();
+ output->EnsureCanContainHeapObjectElements();
if (maybe_result_array->IsFailure()) return maybe_result_array;
RUNTIME_ASSERT(output->HasFastElements());
@@ -9369,7 +9443,7 @@
CONVERT_ARG_CHECKED(String, source, 0);
source = Handle<String>(source->TryFlattenGetString());
- // Optimized fast case where we only have ascii characters.
+ // Optimized fast case where we only have ASCII characters.
Handle<Object> result;
if (source->IsSeqAsciiString()) {
result = JsonParser<true>::Parse(source);
@@ -9387,20 +9461,18 @@
bool CodeGenerationFromStringsAllowed(Isolate* isolate,
Handle<Context> context) {
- if (context->allow_code_gen_from_strings()->IsFalse()) {
- // Check with callback if set.
- AllowCodeGenerationFromStringsCallback callback =
- isolate->allow_code_gen_callback();
- if (callback == NULL) {
- // No callback set and code generation disallowed.
- return false;
- } else {
- // Callback set. Let it decide if code generation is allowed.
- VMState state(isolate, EXTERNAL);
- return callback(v8::Utils::ToLocal(context));
- }
+ ASSERT(context->allow_code_gen_from_strings()->IsFalse());
+ // Check with callback if set.
+ AllowCodeGenerationFromStringsCallback callback =
+ isolate->allow_code_gen_callback();
+ if (callback == NULL) {
+ // No callback set and code generation disallowed.
+ return false;
+ } else {
+ // Callback set. Let it decide if code generation is allowed.
+ VMState state(isolate, EXTERNAL);
+ return callback(v8::Utils::ToLocal(context));
}
- return true;
}
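
The refactored helper now assumes the cheap flag test already failed; callers check allow_code_gen_from_strings()->IsFalse() first, so the common case (code generation allowed) never reaches the callback machinery. A minimal sketch of that fast-path-first split (names are stand-ins):

    #include <cstdio>

    typedef bool (*Callback)();

    static bool SlowPathAllowed(Callback callback) {
      // Flag is already known to be false here; consult the callback if any.
      return callback != NULL && callback();
    }

    static bool Allowed(bool flag_allows, Callback callback) {
      return flag_allows || SlowPathAllowed(callback);  // Fast path first.
    }

    int main() {
      std::printf("%d\n", Allowed(true, NULL));   // 1: callback never consulted.
      std::printf("%d\n", Allowed(false, NULL));  // 0: disallowed, no callback.
      return 0;
    }
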
@@ -9414,7 +9486,8 @@
// Check if global context allows code generation from
// strings. Throw an exception if it doesn't.
- if (!CodeGenerationFromStringsAllowed(isolate, context)) {
+ if (context->allow_code_gen_from_strings()->IsFalse() &&
+ !CodeGenerationFromStringsAllowed(isolate, context)) {
return isolate->Throw(*isolate->factory()->NewError(
"code_gen_from_strings", HandleVector<Object>(NULL, 0)));
}
@@ -9441,7 +9514,8 @@
// Check if global context allows code generation from
// strings. Throw an exception if it doesn't.
- if (!CodeGenerationFromStringsAllowed(isolate, global_context)) {
+ if (global_context->allow_code_gen_from_strings()->IsFalse() &&
+ !CodeGenerationFromStringsAllowed(isolate, global_context)) {
isolate->Throw(*isolate->factory()->NewError(
"code_gen_from_strings", HandleVector<Object>(NULL, 0)));
return MakePair(Failure::Exception(), NULL);
@@ -9597,8 +9671,9 @@
// Fall-through to dictionary mode.
}
ASSERT(!fast_elements_);
- Handle<NumberDictionary> dict(NumberDictionary::cast(*storage_));
- Handle<NumberDictionary> result =
+ Handle<SeededNumberDictionary> dict(
+ SeededNumberDictionary::cast(*storage_));
+ Handle<SeededNumberDictionary> result =
isolate_->factory()->DictionaryAtNumberPut(dict, index, elm);
if (!result.is_identical_to(dict)) {
// Dictionary needed to grow.
@@ -9638,14 +9713,15 @@
void SetDictionaryMode(uint32_t index) {
ASSERT(fast_elements_);
Handle<FixedArray> current_storage(*storage_);
- Handle<NumberDictionary> slow_storage(
- isolate_->factory()->NewNumberDictionary(current_storage->length()));
+ Handle<SeededNumberDictionary> slow_storage(
+ isolate_->factory()->NewSeededNumberDictionary(
+ current_storage->length()));
uint32_t current_length = static_cast<uint32_t>(current_storage->length());
for (uint32_t i = 0; i < current_length; i++) {
HandleScope loop_scope;
Handle<Object> element(current_storage->get(i));
if (!element->IsTheHole()) {
- Handle<NumberDictionary> new_storage =
+ Handle<SeededNumberDictionary> new_storage =
isolate_->factory()->DictionaryAtNumberPut(slow_storage, i, element);
if (!new_storage.is_identical_to(slow_storage)) {
slow_storage = loop_scope.CloseAndEscape(new_storage);
@@ -9697,8 +9773,8 @@
UNREACHABLE();
break;
case DICTIONARY_ELEMENTS: {
- Handle<NumberDictionary> dictionary(
- NumberDictionary::cast(array->elements()));
+ Handle<SeededNumberDictionary> dictionary(
+ SeededNumberDictionary::cast(array->elements()));
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Handle<Object> key(dictionary->KeyAt(i));
@@ -9801,7 +9877,8 @@
break;
}
case DICTIONARY_ELEMENTS: {
- Handle<NumberDictionary> dict(NumberDictionary::cast(object->elements()));
+ Handle<SeededNumberDictionary> dict(
+ SeededNumberDictionary::cast(object->elements()));
uint32_t capacity = dict->Capacity();
for (uint32_t j = 0; j < capacity; j++) {
HandleScope loop_scope;
@@ -9936,7 +10013,7 @@
break;
}
case DICTIONARY_ELEMENTS: {
- Handle<NumberDictionary> dict(receiver->element_dictionary());
+ Handle<SeededNumberDictionary> dict(receiver->element_dictionary());
List<uint32_t> indices(dict->Capacity() / 2);
// Collect all indices in the object and the prototypes less
// than length. This might introduce duplicates in the indices list.
@@ -10050,7 +10127,7 @@
// FAST_ELEMENTS.
if (array->HasFastDoubleElements()) {
array = Handle<JSArray>::cast(
- TransitionElementsKind(array, FAST_ELEMENTS));
+ JSObject::TransitionElementsKind(array, FAST_ELEMENTS));
}
length_estimate =
static_cast<uint32_t>(array->length()->Number());
@@ -10092,7 +10169,7 @@
uint32_t at_least_space_for = estimate_nof_elements +
(estimate_nof_elements >> 2);
storage = Handle<FixedArray>::cast(
- isolate->factory()->NewNumberDictionary(at_least_space_for));
+ isolate->factory()->NewSeededNumberDictionary(at_least_space_for));
}
ArrayConcatVisitor visitor(isolate, storage, fast_case);
@@ -10180,7 +10257,8 @@
CONVERT_CHECKED(JSObject, object, args[0]);
HeapObject* elements = object->elements();
if (elements->IsDictionary()) {
- return Smi::FromInt(NumberDictionary::cast(elements)->NumberOfElements());
+ int result = SeededNumberDictionary::cast(elements)->NumberOfElements();
+ return Smi::FromInt(result);
} else if (object->IsJSArray()) {
return JSArray::cast(object)->length();
} else {
@@ -10210,10 +10288,10 @@
Handle<Object> tmp2 = Object::GetElement(jsobject, index2);
RETURN_IF_EMPTY_HANDLE(isolate, tmp2);
- RETURN_IF_EMPTY_HANDLE(isolate,
- SetElement(jsobject, index1, tmp2, kStrictMode));
- RETURN_IF_EMPTY_HANDLE(isolate,
- SetElement(jsobject, index2, tmp1, kStrictMode));
+ RETURN_IF_EMPTY_HANDLE(
+ isolate, JSObject::SetElement(jsobject, index1, tmp2, kStrictMode));
+ RETURN_IF_EMPTY_HANDLE(
+ isolate, JSObject::SetElement(jsobject, index2, tmp1, kStrictMode));
return isolate->heap()->undefined_value();
}
@@ -10267,7 +10345,7 @@
// DefineAccessor takes an optional final argument which is the
-// property attributes (eg, DONT_ENUM, DONT_DELETE). IMPORTANT: due
+// property attributes (e.g. DONT_ENUM, DONT_DELETE). IMPORTANT: due
// to the way accessors are implemented, it is set for both the getter
// and setter on the first call to DefineAccessor and ignored on
// subsequent calls.
@@ -10489,15 +10567,15 @@
- // If the callback object is a fixed array then it contains JavaScript
+ // If the callback object is an AccessorPair then it contains JavaScript
// getter and/or setter.
bool hasJavaScriptAccessors = result_type == CALLBACKS &&
- result_callback_obj->IsFixedArray();
+ result_callback_obj->IsAccessorPair();
Handle<FixedArray> details =
isolate->factory()->NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
details->set(0, *value);
details->set(1, property_details);
if (hasJavaScriptAccessors) {
details->set(2, isolate->heap()->ToBoolean(caught_exception));
- details->set(3, FixedArray::cast(*result_callback_obj)->get(0));
- details->set(4, FixedArray::cast(*result_callback_obj)->get(1));
+ details->set(3, AccessorPair::cast(*result_callback_obj)->getter());
+ details->set(4, AccessorPair::cast(*result_callback_obj)->setter());
}
return *isolate->factory()->NewJSArrayWithElements(details);
@@ -10630,13 +10708,13 @@
class FrameInspector {
public:
FrameInspector(JavaScriptFrame* frame,
- int inlined_frame_index,
+ int inlined_jsframe_index,
Isolate* isolate)
: frame_(frame), deoptimized_frame_(NULL), isolate_(isolate) {
// Calculate the deoptimized frame.
if (frame->is_optimized()) {
deoptimized_frame_ = Deoptimizer::DebuggerInspectableFrame(
- frame, inlined_frame_index, isolate);
+ frame, inlined_jsframe_index, isolate);
}
has_adapted_arguments_ = frame_->has_adapted_arguments();
is_optimized_ = frame_->is_optimized();
@@ -10752,8 +10830,6 @@
return heap->undefined_value();
}
- int inlined_frame_index = 0; // Inlined frame index in optimized frame.
-
int count = 0;
JavaScriptFrameIterator it(isolate, id);
for (; !it.done(); it.Advance()) {
@@ -10762,11 +10838,14 @@
}
if (it.done()) return heap->undefined_value();
- if (it.frame()->is_optimized()) {
- inlined_frame_index =
+ bool is_optimized = it.frame()->is_optimized();
+
+ int inlined_jsframe_index = 0; // Inlined frame index in optimized frame.
+ if (is_optimized) {
+ inlined_jsframe_index =
it.frame()->GetInlineCount() - (index - count) - 1;
}
- FrameInspector frame_inspector(it.frame(), inlined_frame_index, isolate);
+ FrameInspector frame_inspector(it.frame(), inlined_jsframe_index, isolate);
// Traverse the saved contexts chain to find the active context for the
// selected frame.
@@ -10780,12 +10859,11 @@
it.frame()->LookupCode()->SourcePosition(it.frame()->pc());
// Check for constructor frame. Inlined frames cannot be construct calls.
- bool inlined_frame =
- it.frame()->is_optimized() && inlined_frame_index != 0;
+ bool inlined_frame = is_optimized && inlined_jsframe_index != 0;
bool constructor = !inlined_frame && it.frame()->IsConstructor();
// Get scope info and read from it for local variable information.
- Handle<JSFunction> function(JSFunction::cast(it.frame()->function()));
+ Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
ASSERT(*scope_info != ScopeInfo::Empty());
@@ -10822,7 +10900,7 @@
// Check whether this frame is positioned at return. If not top
// frame or if the frame is optimized it cannot be at a return.
bool at_return = false;
- if (!it.frame()->is_optimized() && index == 0) {
+ if (!is_optimized && index == 0) {
at_return = isolate->debug()->IsBreakAtReturn(it.frame());
}
@@ -10862,7 +10940,7 @@
// the provided parameters whereas the function frame always has the number
// of arguments matching the function's parameters. The rest of the
// information (except for what is collected above) is the same.
- if (it.frame()->has_adapted_arguments()) {
+ if ((inlined_jsframe_index == 0) && it.frame()->has_adapted_arguments()) {
it.AdvanceToArgumentsFrame();
frame_inspector.SetArgumentsFrame(it.frame());
}
@@ -10873,11 +10951,6 @@
if (argument_count < frame_inspector.GetParametersCount()) {
argument_count = frame_inspector.GetParametersCount();
}
-#ifdef DEBUG
- if (it.frame()->is_optimized()) {
- ASSERT_EQ(argument_count, frame_inspector.GetParametersCount());
- }
-#endif
// Calculate the size of the result.
int details_size = kFrameDetailsFirstDynamicIndex +
@@ -10919,9 +10992,9 @@
if (*save->context() == *isolate->debug()->debug_context()) {
flags |= 1 << 0;
}
- if (it.frame()->is_optimized()) {
+ if (is_optimized) {
flags |= 1 << 1;
- flags |= inlined_frame_index << 2;
+ flags |= inlined_jsframe_index << 2;
}
details->set(kFrameDetailsFlagsIndex, Smi::FromInt(flags));
@@ -10938,7 +11011,7 @@
}
// Parameter value.
- if (i < it.frame()->ComputeParametersCount()) {
+ if (i < frame_inspector.GetParametersCount()) {
// Get the value from the stack.
details->set(details_index++, frame_inspector.GetParameter(i));
} else {
@@ -11011,14 +11084,13 @@
// Create a plain JSObject which materializes the local scope for the specified
// frame.
-static Handle<JSObject> MaterializeLocalScope(
+static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
Isolate* isolate,
JavaScriptFrame* frame,
- int inlined_frame_index) {
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ FrameInspector* frame_inspector) {
+ Handle<JSFunction> function(JSFunction::cast(frame_inspector->GetFunction()));
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
- FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
// Allocate and initialize a JSObject with all the arguments, stack locals
// heap locals and extension properties of the debugged function.
@@ -11027,11 +11099,15 @@
// First fill all parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+ Handle<Object> value(
+ i < frame_inspector->GetParametersCount() ?
+ frame_inspector->GetParameter(i) : isolate->heap()->undefined_value());
+
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
SetProperty(local_scope,
Handle<String>(scope_info->ParameterName(i)),
- Handle<Object>(frame_inspector.GetParameter(i)),
+ value,
NONE,
kNonStrictMode),
Handle<JSObject>());
@@ -11043,7 +11119,7 @@
isolate,
SetProperty(local_scope,
Handle<String>(scope_info->StackLocalName(i)),
- Handle<Object>(frame_inspector.GetExpression(i)),
+ Handle<Object>(frame_inspector->GetExpression(i)),
NONE,
kNonStrictMode),
Handle<JSObject>());
@@ -11090,6 +11166,17 @@
}
+static Handle<JSObject> MaterializeLocalScope(
+ Isolate* isolate,
+ JavaScriptFrame* frame,
+ int inlined_jsframe_index) {
+ FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
+ return MaterializeLocalScopeWithFrameInspector(isolate,
+ frame,
+ &frame_inspector);
+}
+
+
// Create a plain JSObject which materializes the closure content for the
// context.
static Handle<JSObject> MaterializeClosure(Isolate* isolate,
@@ -11099,7 +11186,7 @@
Handle<SharedFunctionInfo> shared(context->closure()->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
- // Allocate and initialize a JSObject with all the content of theis function
+ // Allocate and initialize a JSObject with all the content of this function
// closure.
Handle<JSObject> closure_scope =
isolate->factory()->NewJSObject(isolate->object_function());
@@ -11195,10 +11282,10 @@
ScopeIterator(Isolate* isolate,
JavaScriptFrame* frame,
- int inlined_frame_index)
+ int inlined_jsframe_index)
: isolate_(isolate),
frame_(frame),
- inlined_frame_index_(inlined_frame_index),
+ inlined_jsframe_index_(inlined_jsframe_index),
function_(JSFunction::cast(frame->function())),
context_(Context::cast(frame->context())),
nested_scope_chain_(4) {
@@ -11355,7 +11442,7 @@
case ScopeIterator::ScopeTypeLocal:
// Materialize the content of the local scope into a JSObject.
ASSERT(nested_scope_chain_.length() == 1);
- return MaterializeLocalScope(isolate_, frame_, inlined_frame_index_);
+ return MaterializeLocalScope(isolate_, frame_, inlined_jsframe_index_);
case ScopeIterator::ScopeTypeWith:
// Return the with object.
return Handle<JSObject>(JSObject::cast(CurrentContext()->extension()));
@@ -11451,7 +11538,7 @@
private:
Isolate* isolate_;
JavaScriptFrame* frame_;
- int inlined_frame_index_;
+ int inlined_jsframe_index_;
Handle<JSFunction> function_;
Handle<Context> context_;
List<Handle<ScopeInfo> > nested_scope_chain_;
@@ -11513,7 +11600,7 @@
if (!maybe_check->ToObject(&check)) return maybe_check;
}
CONVERT_CHECKED(Smi, wrapped_id, args[1]);
- CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]);
+ CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
// Get the frame where the debugging is performed.
@@ -11523,7 +11610,7 @@
// Find the requested scope.
int n = 0;
- ScopeIterator it(isolate, frame, inlined_frame_index);
+ ScopeIterator it(isolate, frame, inlined_jsframe_index);
for (; !it.Done() && n < index; it.Next()) {
n++;
}
@@ -11921,12 +12008,12 @@
Handle<JSFunction> function,
Handle<Context> base,
JavaScriptFrame* frame,
- int inlined_frame_index) {
+ int inlined_jsframe_index) {
HandleScope scope(isolate);
List<Handle<ScopeInfo> > scope_chain;
List<Handle<Context> > context_chain;
- ScopeIterator it(isolate, frame, inlined_frame_index);
+ ScopeIterator it(isolate, frame, inlined_jsframe_index);
for (; it.Type() != ScopeIterator::ScopeTypeGlobal &&
it.Type() != ScopeIterator::ScopeTypeLocal ; it.Next()) {
ASSERT(!it.Done());
@@ -11983,8 +12070,7 @@
// Runtime_DebugEvaluate.
static Handle<Object> GetArgumentsObject(Isolate* isolate,
JavaScriptFrame* frame,
- int inlined_frame_index,
- Handle<JSFunction> function,
+ FrameInspector* frame_inspector,
Handle<ScopeInfo> scope_info,
Handle<Context> function_context) {
// Try to find the value of 'arguments' to pass as parameter. If it is not
@@ -12008,9 +12094,8 @@
}
}
- FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
-
- int length = frame_inspector.GetParametersCount();
+ Handle<JSFunction> function(JSFunction::cast(frame_inspector->GetFunction()));
+ int length = frame_inspector->GetParametersCount();
Handle<JSObject> arguments =
isolate->factory()->NewArgumentsObject(function, length);
Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
@@ -12018,7 +12103,7 @@
AssertNoAllocation no_gc;
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
for (int i = 0; i < length; i++) {
- array->set(i, frame_inspector.GetParameter(i), mode);
+ array->set(i, frame_inspector->GetParameter(i), mode);
}
arguments->set_elements(*array);
return arguments;
@@ -12054,7 +12139,7 @@
}
}
CONVERT_CHECKED(Smi, wrapped_id, args[1]);
- CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]);
+ CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
CONVERT_ARG_CHECKED(String, source, 3);
CONVERT_BOOLEAN_CHECKED(disable_break, args[4]);
Handle<Object> additional_context(args[5]);
@@ -12066,7 +12151,8 @@
StackFrame::Id id = UnwrapFrameId(wrapped_id);
JavaScriptFrameIterator it(isolate, id);
JavaScriptFrame* frame = it.frame();
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
+ Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
Handle<ScopeInfo> scope_info(function->shared()->scope_info());
// Traverse the saved contexts chain to find the active context for the
@@ -12093,8 +12179,8 @@
#endif
// Materialize the content of the local scope into a JSObject.
- Handle<JSObject> local_scope = MaterializeLocalScope(
- isolate, frame, inlined_frame_index);
+ Handle<JSObject> local_scope = MaterializeLocalScopeWithFrameInspector(
+ isolate, frame, &frame_inspector);
RETURN_IF_EMPTY_HANDLE(isolate, local_scope);
// Allocate a new context for the debug evaluation and set the extension
@@ -12114,7 +12200,7 @@
go_between,
context,
frame,
- inlined_frame_index);
+ inlined_jsframe_index);
if (additional_context->IsJSObject()) {
Handle<JSObject> extension = Handle<JSObject>::cast(additional_context);
@@ -12154,8 +12240,7 @@
Handle<Object> arguments = GetArgumentsObject(isolate,
frame,
- inlined_frame_index,
- function,
+ &frame_inspector,
scope_info,
function_context);
@@ -12267,7 +12352,7 @@
// because using
// instances->set(i, *GetScriptWrapper(script))
// is unsafe as GetScriptWrapper might call GC and the C++ compiler might
- // already have deferenced the instances handle.
+ // already have dereferenced the instances handle.
Handle<JSValue> wrapper = GetScriptWrapper(script);
instances->set(i, *wrapper);
}
diff --git a/src/runtime.h b/src/runtime.h
index b13662d..c0c7b13 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -197,6 +197,7 @@
F(StringLocaleCompare, 2, 1) \
F(SubString, 3, 1) \
F(StringReplaceRegExpWithString, 4, 1) \
+ F(StringReplaceOneCharWithString, 3, 1) \
F(StringMatch, 3, 1) \
F(StringTrim, 3, 1) \
F(StringToArray, 2, 1) \
@@ -341,7 +342,6 @@
/* Debugging */ \
F(DebugPrint, 1, 1) \
F(DebugTrace, 0, 1) \
- F(TraceElementsKindTransition, 5, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
F(Abort, 2, 1) \
@@ -630,6 +630,13 @@
// Get the intrinsic function with the given FunctionId.
static const Function* FunctionForId(FunctionId id);
+ static Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
+ Handle<String> subject,
+ Handle<String> search,
+ Handle<String> replace,
+ bool* found,
+ int recursion_limit);
+
// General-purpose helper functions for runtime system.
static int StringMatch(Isolate* isolate,
Handle<String> sub,
@@ -679,6 +686,12 @@
// Helper functions used by stubs.
static void PerformGC(Object* result);
+
+ // Used in runtime.cc and hydrogen's VisitArrayLiteral.
+ static Handle<Object> CreateArrayLiteralBoilerplate(
+ Isolate* isolate,
+ Handle<FixedArray> literals,
+ Handle<FixedArray> elements);
};
diff --git a/src/scanner.cc b/src/scanner.cc
old mode 100644
new mode 100755
diff --git a/src/scopes.cc b/src/scopes.cc
index ad8b6a5..a7ff287 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -31,6 +31,7 @@
#include "bootstrapper.h"
#include "compiler.h"
+#include "messages.h"
#include "scopeinfo.h"
#include "allocation-inl.h"
@@ -282,8 +283,25 @@
}
#endif
+ if (FLAG_harmony_scoping) {
+ VariableProxy* proxy = scope->CheckAssignmentToConst();
+ if (proxy != NULL) {
+ // Found an assignment to const. Throw a syntax error.
+ MessageLocation location(info->script(),
+ proxy->position(),
+ proxy->position());
+ Isolate* isolate = info->isolate();
+ Factory* factory = isolate->factory();
+ Handle<JSArray> array = factory->NewJSArray(0);
+ Handle<Object> result =
+ factory->NewSyntaxError("harmony_const_assign", array);
+ isolate->Throw(*result, &location);
+ return false;
+ }
+ }
+
info->SetScope(scope);
- return true; // Can not fail.
+ return true;
}
@@ -552,6 +570,29 @@
}
+VariableProxy* Scope::CheckAssignmentToConst() {
+ // Check this scope.
+ if (is_extended_mode()) {
+ for (int i = 0; i < unresolved_.length(); i++) {
+ ASSERT(unresolved_[i]->var() != NULL);
+ if (unresolved_[i]->var()->is_const_mode() &&
+ unresolved_[i]->IsLValue()) {
+ return unresolved_[i];
+ }
+ }
+ }
+
+ // Check inner scopes.
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ VariableProxy* proxy = inner_scopes_[i]->CheckAssignmentToConst();
+ if (proxy != NULL) return proxy;
+ }
+
+ // No assignments to const found.
+ return NULL;
+}
+
+
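Scope::CheckAssignmentToConst above is a plain depth-first search: report the first unresolved proxy in this scope that is an lvalue bound to a const-mode variable, otherwise recurse into inner scopes. A minimal standalone sketch of the same traversal, using simplified stand-in types rather than the real V8 classes:

#include <cstddef>
#include <vector>

struct Proxy {
  bool is_const_mode;  // proxy resolves to a const-bound variable
  bool is_lvalue;      // proxy is the target of an assignment
};

struct Scope {
  bool is_extended_mode;  // only harmony (extended) mode is checked
  std::vector<Proxy*> unresolved;
  std::vector<Scope*> inner;

  // Returns the first offending proxy, or NULL if none exists.
  Proxy* CheckAssignmentToConst() {
    if (is_extended_mode) {
      for (size_t i = 0; i < unresolved.size(); i++) {
        if (unresolved[i]->is_const_mode && unresolved[i]->is_lvalue) {
          return unresolved[i];
        }
      }
    }
    for (size_t i = 0; i < inner.size(); i++) {
      Proxy* proxy = inner[i]->CheckAssignmentToConst();
      if (proxy != NULL) return proxy;
    }
    return NULL;  // no assignment to const anywhere in this subtree
  }
};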
void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
ZoneList<Variable*>* context_locals) {
ASSERT(stack_locals != NULL);
diff --git a/src/scopes.h b/src/scopes.h
index 523a251..af0449e 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -187,6 +187,11 @@
// scope over a let binding of the same name.
Declaration* CheckConflictingVarDeclarations();
+ // For harmony block scoping mode: Check if the scope has variable proxies
+ // that are used as lvalues and point to const variables. Assumes that scopes
+ // have been analyzed and variables have been resolved.
+ VariableProxy* CheckAssignmentToConst();
+
// ---------------------------------------------------------------------------
// Scope-specific info.
diff --git a/src/serialize.cc b/src/serialize.cc
index 5830c64..81a94dd 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -1081,36 +1081,6 @@
PutSection(static_cast<int>(integer & 0x7f), "IntLastPart");
}
-#ifdef DEBUG
-
-void Deserializer::Synchronize(const char* tag) {
- int data = source_->Get();
- // If this assert fails then that indicates that you have a mismatch between
- // the number of GC roots when serializing and deserializing.
- ASSERT_EQ(kSynchronize, data);
- do {
- int character = source_->Get();
- if (character == 0) break;
- if (FLAG_debug_serialization) {
- PrintF("%c", character);
- }
- } while (true);
- if (FLAG_debug_serialization) {
- PrintF("\n");
- }
-}
-
-
-void Serializer::Synchronize(const char* tag) {
- sink_->Put(kSynchronize, tag);
- int character;
- do {
- character = *tag++;
- sink_->PutSection(character, "TagCharacter");
- } while (character != 0);
-}
-
-#endif
Serializer::Serializer(SnapshotByteSink* sink)
: sink_(sink),
@@ -1365,6 +1335,13 @@
CHECK(o->IsHeapObject());
HeapObject* heap_object = HeapObject::cast(o);
+ if (heap_object->IsMap()) {
+ // The code-caches link to context-specific code objects, which
+ // the startup and context serializers cannot currently handle.
+ ASSERT(Map::cast(heap_object)->code_cache() ==
+ heap_object->GetHeap()->raw_unchecked_empty_fixed_array());
+ }
+
int root_index;
if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
PutRoot(root_index, heap_object, how_to_code, where_to_point);
diff --git a/src/serialize.h b/src/serialize.h
index bd9c0d8..02bf58a 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -228,7 +228,7 @@
kFromStart = 0x20, // Object is described relative to start.
// 0x21-0x28 One per space.
// 0x29-0x2f Free.
- // 0x30-0x3f Used by misc tags below.
+ // 0x30-0x3f Used by misc. tags below.
kPointedToMask = 0x3f
};
@@ -341,10 +341,6 @@
// Deserialize a single object and the objects reachable from it.
void DeserializePartial(Object** root);
-#ifdef DEBUG
- virtual void Synchronize(const char* tag);
-#endif
-
private:
virtual void VisitPointers(Object** start, Object** end);
@@ -359,8 +355,8 @@
// Fills in some heap data in an area from start to end (non-inclusive). The
// space id is used for the write barrier. The object_address is the address
// of the object we are writing into, or NULL if we are not writing into an
- // object, ie if we are writing a series of tagged values that are not on the
- // heap.
+ // object, i.e. if we are writing a series of tagged values that are not on
+ // the heap.
void ReadChunk(
Object** start, Object** end, int space, Address object_address);
HeapObject* GetAddressFromStart(int space);
@@ -485,9 +481,6 @@
SerializationAddressMapper* address_mapper() { return &address_mapper_; }
void PutRoot(
int index, HeapObject* object, HowToCode how, WhereToPoint where);
-#ifdef DEBUG
- virtual void Synchronize(const char* tag);
-#endif
protected:
static const int kInvalidRootIndex = -1;
@@ -584,6 +577,7 @@
friend class ObjectSerializer;
friend class Deserializer;
+ private:
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
@@ -635,7 +629,7 @@
// Serialize the current state of the heap. The order is:
// 1) Strong references.
// 2) Partial snapshot cache.
- // 3) Weak references (eg the symbol table).
+ // 3) Weak references (e.g. the symbol table).
virtual void SerializeStrongReferences();
virtual void SerializeObject(Object* o,
HowToCode how_to_code,
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index c662980..3709009 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -246,7 +246,7 @@
// Try linear allocation in the page of alloc_info's allocation top. Does
-// not contain slow case logic (eg, move to the next page or try free list
+// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
@@ -330,7 +330,7 @@
string->set_length(length);
if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
int delta = static_cast<int>(old_top - allocation_info_.top);
- MemoryChunk::IncrementLiveBytes(string->address(), -delta);
+ MemoryChunk::IncrementLiveBytesFromMutator(string->address(), -delta);
}
}
diff --git a/src/spaces.cc b/src/spaces.cc
index 1ee3359..de738fb 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -132,7 +132,7 @@
}
-bool CodeRange::Setup(const size_t requested) {
+bool CodeRange::SetUp(const size_t requested) {
ASSERT(code_range_ == NULL);
code_range_ = new VirtualMemory(requested);
@@ -270,7 +270,7 @@
}
-bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
+bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
capacity_ = RoundUp(capacity, Page::kPageSize);
capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
ASSERT_GE(capacity_, capacity_executable_);
@@ -750,6 +750,17 @@
// -----------------------------------------------------------------------------
+// MemoryChunk implementation
+
+void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+ if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
+ static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+ }
+ chunk->IncrementLiveBytes(by);
+}
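IncrementLiveBytesFromMutator exists because the mutator can shrink live objects (for example, right-trimming a string in spaces-inl.h above) on pages the lazy sweeper has not reached yet; the freed bytes become reclaimable, so the space's unswept-free-bytes estimate must move opposite to the live-byte count. A small sketch of that invariant, with simplified names (not the V8 types):

// Sketch of the bookkeeping rule (simplified names, not the V8 types).
struct PageAccounting {
  long live_bytes;          // per-chunk live-byte counter
  long unswept_free_bytes;  // space-level lazy-sweep estimate

  // `by` is negative when the mutator frees bytes on an unswept page,
  // matching the IncrementLiveBytes(by) call in the function above.
  void IncrementLiveBytesFromMutator(long by) {
    unswept_free_bytes -= by;  // freed bytes become reclaimable
    live_bytes += by;
  }
};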
+
+// -----------------------------------------------------------------------------
// PagedSpace implementation
PagedSpace::PagedSpace(Heap* heap,
@@ -759,7 +770,8 @@
: Space(heap, id, executable),
free_list_(this),
was_swept_conservatively_(false),
- first_unswept_page_(Page::FromAddress(NULL)) {
+ first_unswept_page_(Page::FromAddress(NULL)),
+ unswept_free_bytes_(0) {
if (id == CODE_SPACE) {
area_size_ = heap->isolate()->memory_allocator()->
CodePageAreaSize();
@@ -777,12 +789,12 @@
}
-bool PagedSpace::Setup() {
+bool PagedSpace::SetUp() {
return true;
}
-bool PagedSpace::HasBeenSetup() {
+bool PagedSpace::HasBeenSetUp() {
return true;
}
@@ -872,6 +884,8 @@
intptr_t size = free_list_.EvictFreeListItems(page);
accounting_stats_.AllocateBytes(size);
ASSERT_EQ(AreaSize(), static_cast<int>(size));
+ } else {
+ DecreaseUnsweptFreeBytes(page);
}
if (Page::FromAllocationTop(allocation_info_.top) == page) {
@@ -981,9 +995,9 @@
// NewSpace implementation
-bool NewSpace::Setup(int reserved_semispace_capacity,
+bool NewSpace::SetUp(int reserved_semispace_capacity,
int maximum_semispace_capacity) {
- // Setup new space based on the preallocated memory block defined by
+ // Set up new space based on the preallocated memory block defined by
// start and size. The provided space is divided into two semi-spaces.
// To support fast containment testing in the new space, the size of
// this chunk must be a power of two and it must be aligned to its size.
@@ -1002,7 +1016,7 @@
ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
ASSERT(IsPowerOf2(maximum_semispace_capacity));
- // Allocate and setup the histogram arrays if necessary.
+ // Allocate and set up the histogram arrays if necessary.
allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
@@ -1016,14 +1030,13 @@
2 * heap()->ReservedSemiSpaceSize());
ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
- if (!to_space_.Setup(chunk_base_,
- initial_semispace_capacity,
- maximum_semispace_capacity)) {
- return false;
- }
- if (!from_space_.Setup(chunk_base_ + reserved_semispace_capacity,
- initial_semispace_capacity,
- maximum_semispace_capacity)) {
+ to_space_.SetUp(chunk_base_,
+ initial_semispace_capacity,
+ maximum_semispace_capacity);
+ from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
+ initial_semispace_capacity,
+ maximum_semispace_capacity);
+ if (!to_space_.Commit()) {
return false;
}
@@ -1256,7 +1269,7 @@
// -----------------------------------------------------------------------------
// SemiSpace implementation
-bool SemiSpace::Setup(Address start,
+void SemiSpace::SetUp(Address start,
int initial_capacity,
int maximum_capacity) {
// Creates a space in the young generation. The constructor does not
@@ -1275,8 +1288,6 @@
object_mask_ = address_mask_ | kHeapObjectTagMask;
object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
age_mark_ = start_;
-
- return Commit();
}
@@ -1326,6 +1337,9 @@
bool SemiSpace::GrowTo(int new_capacity) {
+ if (!is_committed()) {
+ if (!Commit()) return false;
+ }
ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
ASSERT(new_capacity <= maximum_capacity_);
ASSERT(new_capacity > capacity_);
@@ -1364,24 +1378,29 @@
ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
ASSERT(new_capacity >= initial_capacity_);
ASSERT(new_capacity < capacity_);
- // Semispaces grow backwards from the end of their allocated capacity,
- // so we find the before and after start addresses relative to the
- // end of the space.
- Address space_end = start_ + maximum_capacity_;
- Address old_start = space_end - capacity_;
- size_t delta = capacity_ - new_capacity;
- ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- if (!heap()->isolate()->memory_allocator()->UncommitBlock(old_start, delta)) {
- return false;
- }
- capacity_ = new_capacity;
+ if (is_committed()) {
+ // Semispaces grow backwards from the end of their allocated capacity,
+ // so we find the before and after start addresses relative to the
+ // end of the space.
+ Address space_end = start_ + maximum_capacity_;
+ Address old_start = space_end - capacity_;
+ size_t delta = capacity_ - new_capacity;
+ ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- int pages_after = capacity_ / Page::kPageSize;
- NewSpacePage* new_last_page =
- NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
- new_last_page->set_next_page(anchor());
- anchor()->set_prev_page(new_last_page);
- ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
+ MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
+ if (!allocator->UncommitBlock(old_start, delta)) {
+ return false;
+ }
+
+ int pages_after = new_capacity / Page::kPageSize;
+ NewSpacePage* new_last_page =
+ NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
+ new_last_page->set_next_page(anchor());
+ anchor()->set_prev_page(new_last_page);
+ ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
+ }
+
+ capacity_ = new_capacity;
return true;
}
@@ -1764,14 +1783,14 @@
// field and a next pointer, we give it a filler map that gives it the
// correct size.
if (size_in_bytes > FreeSpace::kHeaderSize) {
- set_map_unsafe(heap->raw_unchecked_free_space_map());
+ set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
// Can't use FreeSpace::cast because it fails during deserialization.
FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
this_as_free_space->set_size(size_in_bytes);
} else if (size_in_bytes == kPointerSize) {
- set_map_unsafe(heap->raw_unchecked_one_pointer_filler_map());
+ set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
- set_map_unsafe(heap->raw_unchecked_two_pointer_filler_map());
+ set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
} else {
UNREACHABLE();
}
@@ -2165,6 +2184,7 @@
} while (p != anchor());
}
first_unswept_page_ = Page::FromAddress(NULL);
+ unswept_free_bytes_ = 0;
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
@@ -2213,6 +2233,7 @@
PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
reinterpret_cast<intptr_t>(p));
}
+ DecreaseUnsweptFreeBytes(p);
freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
}
p = next_page;
@@ -2250,6 +2271,16 @@
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
+ // If there are unswept pages, advance the lazy sweeper to sweep at least
+ // one page before allocating a new page.
+ if (first_unswept_page_->is_valid()) {
+ AdvanceSweeper(size_in_bytes);
+
+ // Retry the free list allocation.
+ HeapObject* object = free_list_.Allocate(size_in_bytes);
+ if (object != NULL) return object;
+ }
+
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
@@ -2258,26 +2289,19 @@
return NULL;
}
- // If there are unswept pages advance lazy sweeper.
- if (first_unswept_page_->is_valid()) {
- AdvanceSweeper(size_in_bytes);
+ // Try to expand the space and allocate in the new next page.
+ if (Expand()) {
+ return free_list_.Allocate(size_in_bytes);
+ }
+
+ // As a last ditch effort, sweep all the remaining pages to try to find
+ // space. This may cause a pause.
+ if (!IsSweepingComplete()) {
+ AdvanceSweeper(kMaxInt);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
if (object != NULL) return object;
-
- if (!IsSweepingComplete()) {
- AdvanceSweeper(kMaxInt);
-
- // Retry the free list allocation.
- object = free_list_.Allocate(size_in_bytes);
- if (object != NULL) return object;
- }
- }
-
- // Try to expand the space and allocate in the new next page.
- if (Expand()) {
- return free_list_.Allocate(size_in_bytes);
}
// Finally, fail.
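The reorganized slow path now tries the cheap remedies first and the expensive ones last: sweep a little and retry the free list, bail out if the old-generation limit was hit (so a full GC can run), expand the space, and only then sweep everything, which may pause. A condensed, runnable sketch of the ladder with placeholder stand-ins for the real space methods:

#include <cstddef>

// Placeholder state and helpers; stand-ins for the real space methods.
static bool has_unswept_pages = true;
static bool old_gen_limit_hit = false;
static bool sweeping_complete = false;
static const int kMaxInt = 0x7fffffff;

static void AdvanceSweeperBy(int /* bytes */) { /* lazy sweeping */ }
static void* FreeListAllocate(int /* size */) { return NULL; /* miss */ }
static bool ExpandSpace() { return false; /* at the heap limit */ }

void* SlowAllocateRaw(int size_in_bytes) {
  if (has_unswept_pages) {
    AdvanceSweeperBy(size_in_bytes);           // 1. sweep a little...
    void* object = FreeListAllocate(size_in_bytes);
    if (object != NULL) return object;         // ...and retry the free list
  }
  if (old_gen_limit_hit) return NULL;          // 2. fail so a GC can run
  if (ExpandSpace()) {                         // 3. grab a fresh page
    return FreeListAllocate(size_in_bytes);
  }
  if (!sweeping_complete) {
    AdvanceSweeperBy(kMaxInt);                 // 4. last ditch: sweep it all
    void* object = FreeListAllocate(size_in_bytes);
    if (object != NULL) return object;
  }
  return NULL;                                 // 5. finally, fail
}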
@@ -2508,7 +2532,7 @@
objects_size_(0) {}
-bool LargeObjectSpace::Setup() {
+bool LargeObjectSpace::SetUp() {
first_page_ = NULL;
size_ = 0;
page_count_ = 0;
@@ -2528,7 +2552,7 @@
space, kAllocationActionFree, page->size());
heap()->isolate()->memory_allocator()->Free(page);
}
- Setup();
+ SetUp();
}
@@ -2610,7 +2634,7 @@
MarkBit mark_bit = Marking::MarkBitFrom(object);
if (mark_bit.Get()) {
mark_bit.Clear();
- MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
previous = current;
current = current->next_page();
} else {
diff --git a/src/spaces.h b/src/spaces.h
index 0ca8c39..b2fc0a1 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -295,7 +295,7 @@
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
-// 1MB aligned. Start of the body is aligned so it can accomodate
+// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
public:
@@ -466,10 +466,13 @@
ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
return live_byte_count_;
}
- static void IncrementLiveBytes(Address address, int by) {
+
+ static void IncrementLiveBytesFromGC(Address address, int by) {
MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
}
+ static void IncrementLiveBytesFromMutator(Address address, int by);
+
static const intptr_t kAlignment =
(static_cast<uintptr_t>(1) << kPageSizeBits);
@@ -809,7 +812,7 @@
// Reserves a range of virtual memory, but does not commit any of it.
// Can only be called once, at heap initialization time.
// Returns false on failure.
- bool Setup(const size_t requested_size);
+ bool SetUp(const size_t requested_size);
// Frees the range of virtual memory, and frees the data structures used to
// manage it.
@@ -937,7 +940,7 @@
// Initializes its internal bookkeeping structures.
// Max capacity of the total space and executable memory limit.
- bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
+ bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
void TearDown();
@@ -1189,11 +1192,11 @@
// An abstraction of the accounting statistics of a page-structured space.
-// The 'capacity' of a space is the number of object-area bytes (ie, not
+// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
-// allocation without reorganizing the space via a GC (eg, small blocks due
+// allocation without reorganizing the space via a GC (e.g. small blocks due
// to internal fragmentation, top of page areas in map space), and the bytes
// 'available' is the number of unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
@@ -1206,7 +1209,7 @@
public:
AllocationStats() { Clear(); }
- // Zero out all the allocation statistics (ie, no capacity).
+ // Zero out all the allocation statistics (i.e., no capacity).
void Clear() {
capacity_ = 0;
size_ = 0;
@@ -1218,7 +1221,7 @@
waste_ = 0;
}
- // Reset the allocation statistics (ie, available = capacity with no
+ // Reset the allocation statistics (i.e., available = capacity with no
// wasted or allocated bytes).
void Reset() {
size_ = 0;
@@ -1349,7 +1352,7 @@
// starting at 'start' is placed on the free list. The return value is the
// number of bytes that have been lost due to internal fragmentation by
// freeing the block. Bookkeeping information will be written to the block,
- // ie, its contents will be destroyed. The start address should be word
+ // i.e., its contents will be destroyed. The start address should be word
// aligned, and the size should be a non-zero multiple of the word size.
int Free(Address start, int size_in_bytes);
@@ -1427,11 +1430,11 @@
// the memory allocator's initial chunk) if possible. If the block of
// addresses is not big enough to contain a single page-aligned page, a
// fresh chunk will be allocated.
- bool Setup();
+ bool SetUp();
// Returns true if the space has been successfully set up and not
// subsequently torn down.
- bool HasBeenSetup();
+ bool HasBeenSetUp();
// Cleans up the space, frees all pages in this space except those belonging
// to the initial chunk, uncommits addresses in the initial chunk.
@@ -1477,9 +1480,12 @@
// linear allocation area (between top and limit) are also counted here.
virtual intptr_t Size() { return accounting_stats_.Size(); }
- // As size, but the bytes in the current linear allocation area are not
- // included.
- virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); }
+ // As size, but the bytes in lazily swept pages are estimated and the bytes
+ // in the current linear allocation area are not included.
+ virtual intptr_t SizeOfObjects() {
+ ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0));
+ return Size() - unswept_free_bytes_ - (limit() - top());
+ }
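The new SizeOfObjects subtracts an estimate of what lazy sweeping will still reclaim, so heap-size heuristics do not count unswept garbage as live data. A quick worked example under assumed numbers:

// Worked example with assumed numbers (illustrative only).
long EstimatedSizeOfObjects() {
  const long kMB = 1024 * 1024;
  long size = 10 * kMB;               // accounting_stats_.Size()
  long unswept_free_bytes = 1 * kMB;  // estimated lazy-sweep reclaim
  long linear_area = 64 * 1024;       // limit() - top()
  return size - unswept_free_bytes - linear_area;  // about 8.94 MB
}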
// Wasted bytes in this space. These are just the bytes that were thrown away
// due to being too small to use for allocation. They do not include the
@@ -1487,9 +1493,7 @@
virtual intptr_t Waste() { return accounting_stats_.Waste(); }
// Returns the allocation pointer in this space.
- Address top() {
- return allocation_info_.top;
- }
+ Address top() { return allocation_info_.top; }
Address limit() { return allocation_info_.limit; }
// Allocate the requested number of bytes in the space if possible, return a
@@ -1565,10 +1569,25 @@
}
void SetPagesToSweep(Page* first) {
+ ASSERT(unswept_free_bytes_ == 0);
if (first == &anchor_) first = NULL;
first_unswept_page_ = first;
}
+ void IncrementUnsweptFreeBytes(int by) {
+ unswept_free_bytes_ += by;
+ }
+
+ void IncreaseUnsweptFreeBytes(Page* p) {
+ ASSERT(ShouldBeSweptLazily(p));
+ unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
+ }
+
+ void DecreaseUnsweptFreeBytes(Page* p) {
+ ASSERT(ShouldBeSweptLazily(p));
+ unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
+ }
+
bool AdvanceSweeper(intptr_t bytes_to_sweep);
bool IsSweepingComplete() {
@@ -1662,10 +1681,18 @@
bool was_swept_conservatively_;
+ // The first page to be swept when the lazy sweeper advances. Is set
+ // to NULL when all pages have been swept.
Page* first_unswept_page_;
+ // The number of free bytes which could be reclaimed by advancing the
+ // lazy sweeper. This is only an estimation because lazy sweeping is
+ // done conservatively.
+ intptr_t unswept_free_bytes_;
+
// Expands the space by allocating a fixed number of pages. Returns false if
- // it cannot allocate requested number of pages from OS.
+ // it cannot allocate the requested number of pages from the OS, or if the
+ // hard heap size limit has been hit.
bool Expand();
// Generic fast case allocation function that tries linear allocation at the
@@ -1824,14 +1851,14 @@
current_page_(NULL) { }
// Sets up the semispace using the given chunk.
- bool Setup(Address start, int initial_capacity, int maximum_capacity);
+ void SetUp(Address start, int initial_capacity, int maximum_capacity);
// Tear down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
void TearDown();
// True if the space has been set up but not torn down.
- bool HasBeenSetup() { return start_ != NULL; }
+ bool HasBeenSetUp() { return start_ != NULL; }
// Grow the semispace to the new capacity. The new capacity
// requested must be larger than the current capacity and less than
@@ -2070,15 +2097,15 @@
inline_allocation_limit_step_(0) {}
// Sets up the new space using the given chunk.
- bool Setup(int reserved_semispace_size_, int max_semispace_size);
+ bool SetUp(int reserved_semispace_size_, int max_semispace_size);
// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
void TearDown();
// True if the space has been set up but not torn down.
- bool HasBeenSetup() {
- return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
+ bool HasBeenSetUp() {
+ return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
}
// Flip the pair of spaces.
@@ -2477,7 +2504,7 @@
virtual ~LargeObjectSpace() {}
// Initializes internal data structures.
- bool Setup();
+ bool SetUp();
// Releases internal resources, frees objects in this space.
void TearDown();
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
index 863b69b..3852155 100644
--- a/src/store-buffer.cc
+++ b/src/store-buffer.cc
@@ -41,6 +41,7 @@
old_start_(NULL),
old_limit_(NULL),
old_top_(NULL),
+ old_reserved_limit_(NULL),
old_buffer_is_sorted_(false),
old_buffer_is_filtered_(false),
during_gc_(false),
@@ -54,16 +55,31 @@
}
-void StoreBuffer::Setup() {
+void StoreBuffer::SetUp() {
virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
uintptr_t start_as_int =
reinterpret_cast<uintptr_t>(virtual_memory_->address());
start_ =
reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
- limit_ = start_ + (kStoreBufferSize / sizeof(*start_));
+ limit_ = start_ + (kStoreBufferSize / kPointerSize);
- old_top_ = old_start_ = new Address[kOldStoreBufferLength];
- old_limit_ = old_start_ + kOldStoreBufferLength;
+ old_virtual_memory_ =
+ new VirtualMemory(kOldStoreBufferLength * kPointerSize);
+ old_top_ = old_start_ =
+ reinterpret_cast<Address*>(old_virtual_memory_->address());
+ // We don't know the OS's exact alignment requirements, but committed
+ // memory is certainly at least 4K-aligned (hence the 0xfff mask below).
+ ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
+ int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
+ ASSERT(initial_length > 0);
+ ASSERT(initial_length <= kOldStoreBufferLength);
+ old_limit_ = old_start_ + initial_length;
+ old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
+
+ CHECK(old_virtual_memory_->Commit(
+ reinterpret_cast<void*>(old_start_),
+ (old_limit_ - old_start_) * kPointerSize,
+ false));
ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
@@ -77,9 +93,9 @@
ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
0);
- virtual_memory_->Commit(reinterpret_cast<Address>(start_),
- kStoreBufferSize,
- false); // Not executable.
+ CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
+ kStoreBufferSize,
+ false)); // Not executable.
heap_->public_set_store_buffer_top(start_);
hash_set_1_ = new uintptr_t[kHashSetLength];
@@ -92,10 +108,10 @@
void StoreBuffer::TearDown() {
delete virtual_memory_;
+ delete old_virtual_memory_;
delete[] hash_set_1_;
delete[] hash_set_2_;
- delete[] old_start_;
- old_start_ = old_top_ = old_limit_ = NULL;
+ old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
start_ = limit_ = NULL;
heap_->public_set_store_buffer_top(start_);
}
@@ -151,7 +167,18 @@
}
-void StoreBuffer::HandleFullness() {
+void StoreBuffer::EnsureSpace(intptr_t space_needed) {
+ while (old_limit_ - old_top_ < space_needed &&
+ old_limit_ < old_reserved_limit_) {
+ size_t grow = old_limit_ - old_start_; // Double size.
+ CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
+ grow * kPointerSize,
+ false));
+ old_limit_ += grow;
+ }
+
+ if (old_limit_ - old_top_ >= space_needed) return;
+
if (old_buffer_is_filtered_) return;
ASSERT(may_move_store_buffer_entries_);
Compact();
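EnsureSpace replaces the old all-or-nothing HandleFullness: the old buffer now lives in a larger reserved-but-uncommitted region, and the committed part doubles until the request fits or the reservation runs out; only then does the code fall back to filtering and compaction. A standalone sketch of the doubling loop, with a simplified stand-in for VirtualMemory (assumed API, not V8's):

#include <cassert>
#include <cstddef>

// Simplified stand-in for VirtualMemory (assumed, not the real API).
struct VirtualRegion {
  bool Commit(void* /* address */, size_t /* size */) {
    return true;  // mmap/VirtualAlloc would go here
  }
};

struct OldBuffer {
  VirtualRegion* memory;
  char* start;           // base of the reserved region
  char* top;             // next free slot
  char* limit;           // end of the committed region
  char* reserved_limit;  // end of the reserved region

  // Assumes the reservation is a power-of-two multiple of the initial
  // commit, as in the patch, so doubling never overshoots the reserve.
  void EnsureSpace(ptrdiff_t space_needed) {
    while (limit - top < space_needed && limit < reserved_limit) {
      size_t grow = limit - start;  // double the committed size
      bool ok = memory->Commit(limit, grow);
      assert(ok);
      (void)ok;  // the real code uses CHECK(), so the commit never elides
      limit += grow;
    }
    // If it still does not fit, fall back to compaction as above.
  }
};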
@@ -644,9 +671,7 @@
// the worst case (compaction doesn't eliminate any pointers).
ASSERT(top <= limit_);
heap_->public_set_store_buffer_top(start_);
- if (top - start_ > old_limit_ - old_top_) {
- HandleFullness();
- }
+ EnsureSpace(top - start_);
ASSERT(may_move_store_buffer_entries_);
// Goes through the addresses in the store buffer attempting to remove
// duplicates. In the interest of speed this is a lossy operation. Some
@@ -663,9 +688,9 @@
int hash1 =
((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
if (hash_set_1_[hash1] == int_addr) continue;
- int hash2 =
- ((int_addr - (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
+ uintptr_t hash2 = (int_addr - (int_addr >> kHashSetLengthLog2));
hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
+ hash2 &= (kHashSetLength - 1);
if (hash_set_2_[hash2] == int_addr) continue;
if (hash_set_1_[hash1] == 0) {
hash_set_1_[hash1] = int_addr;
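The rewritten hash2 is a subtle correctness fix: the mixing now happens in uintptr_t, so the high bits of a 64-bit address are folded in by the extra xor-shift before the table mask is applied, instead of being truncated to int early. A minimal sketch of the new mixing with an assumed table size:

#include <stdint.h>
#include <stdio.h>

static const int kHashSetLengthLog2 = 5;  // assumed for illustration
static const uintptr_t kHashSetLength = uintptr_t(1) << kHashSetLengthLog2;

// Second-level hash used by the store-buffer deduplication above.
uintptr_t Hash2(uintptr_t int_addr) {
  uintptr_t hash2 = int_addr - (int_addr >> kHashSetLengthLog2);
  hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);  // fold in higher bits
  return hash2 & (kHashSetLength - 1);         // mask to table size last
}

int main() {
  printf("%lu\n", (unsigned long)Hash2(0xdeadbeef));
  return 0;
}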
@@ -688,9 +713,7 @@
void StoreBuffer::CheckForFullBuffer() {
- if (old_limit_ - old_top_ < kStoreBufferSize * 2) {
- HandleFullness();
- }
+ EnsureSpace(kStoreBufferSize * 2);
}
} } // namespace v8::internal
diff --git a/src/store-buffer.h b/src/store-buffer.h
index c3f77c3..951a9ca 100644
--- a/src/store-buffer.h
+++ b/src/store-buffer.h
@@ -54,7 +54,7 @@
inline Address TopAddress();
- void Setup();
+ void SetUp();
void TearDown();
// This is used by the mutator to enter addresses into the store buffer.
@@ -109,7 +109,7 @@
// been promoted. Rebuilds the store buffer completely if it overflowed.
void SortUniq();
- void HandleFullness();
+ void EnsureSpace(intptr_t space_needed);
void Verify();
bool PrepareForIteration();
@@ -134,6 +134,8 @@
Address* old_start_;
Address* old_limit_;
Address* old_top_;
+ Address* old_reserved_limit_;
+ VirtualMemory* old_virtual_memory_;
bool old_buffer_is_sorted_;
bool old_buffer_is_filtered_;
diff --git a/src/string-search.h b/src/string-search.h
index f540583..8c3456a 100644
--- a/src/string-search.h
+++ b/src/string-search.h
@@ -369,6 +369,10 @@
shift_table[pattern_length] = 1;
suffix_table[pattern_length] = pattern_length + 1;
+ if (pattern_length <= start) {
+ return;
+ }
+
// Find suffixes.
PatternChar last_char = pattern[pattern_length - 1];
int suffix = pattern_length + 1;
diff --git a/src/string.js b/src/string.js
index 3608bac..2d68961 100644
--- a/src/string.js
+++ b/src/string.js
@@ -244,6 +244,15 @@
// Convert the search argument to a string and search for it.
search = TO_STRING_INLINE(search);
+ if (search.length == 1 &&
+ subject.length > 0xFF &&
+ IS_STRING(replace) &&
+ %StringIndexOf(replace, '$', 0) < 0) {
+ // Searching by traversing a cons string tree and replacing with a cons of
+ // slices works only when the search string is a single character that is
+ // replaced by a simple string, and it only pays off for long subjects.
+ return %StringReplaceOneCharWithString(subject, search, replace);
+ }
var start = %StringIndexOf(subject, search, 0);
if (start < 0) return subject;
var end = start + search.length;
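The fast path added above only fires when it can win: a single-character search, a subject long enough (over 0xFF characters) that building a cons of slices beats flattening, and a plain string replacement with no '$' substitution patterns. The same gate restated as a predicate, sketched in C++ (the JS code additionally requires `replace` to be a string rather than a function):

#include <string>

// Illustrative restatement of the guard in string.js above.
bool UseOneCharReplaceFastPath(const std::string& subject,
                               const std::string& search,
                               const std::string& replace) {
  return search.length() == 1 &&                  // single-char search
         subject.length() > 0xFF &&               // long subjects only
         replace.find('$') == std::string::npos;  // no $1/$& patterns
}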
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index ea7d74f..c7f4f94 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -184,7 +184,7 @@
Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value) {
+ Handle<JSFunction> value) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
@@ -266,7 +266,7 @@
Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value) {
+ Handle<JSFunction> value) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
@@ -678,11 +678,10 @@
static void FillCache(Isolate* isolate, Handle<Code> code) {
- Handle<NumberDictionary> dictionary =
- NumberDictionarySet(isolate->factory()->non_monomorphic_cache(),
- code->flags(),
- code,
- PropertyDetails(NONE, NORMAL));
+ Handle<UnseededNumberDictionary> dictionary =
+ UnseededNumberDictionary::Set(isolate->factory()->non_monomorphic_cache(),
+ code->flags(),
+ code);
isolate->heap()->public_set_non_monomorphic_cache(*dictionary);
}
@@ -697,7 +696,7 @@
Code::ComputeFlags(kind, UNINITIALIZED, extra_state, NORMAL, argc);
// Use raw_unchecked... so we don't get assert failures during GC.
- NumberDictionary* dictionary =
+ UnseededNumberDictionary* dictionary =
isolate()->heap()->raw_unchecked_non_monomorphic_cache();
int entry = dictionary->FindEntry(isolate(), flags);
ASSERT(entry != -1);
@@ -716,7 +715,8 @@
CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
Code::Flags flags =
Code::ComputeFlags(kind, UNINITIALIZED, extra_state, NORMAL, argc);
- Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ Handle<UnseededNumberDictionary> cache =
+ isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
@@ -744,7 +744,8 @@
Code::ExtraICState extra_state) {
Code::Flags flags =
Code::ComputeFlags(kind, PREMONOMORPHIC, extra_state, NORMAL, argc);
- Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ Handle<UnseededNumberDictionary> cache =
+ isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
@@ -760,7 +761,8 @@
Code::ExtraICState extra_state) {
Code::Flags flags =
Code::ComputeFlags(kind, MONOMORPHIC, extra_state, NORMAL, argc);
- Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ Handle<UnseededNumberDictionary> cache =
+ isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
@@ -776,7 +778,8 @@
Code::Flags flags =
Code::ComputeFlags(kind, MEGAMORPHIC, Code::kNoExtraICState,
NORMAL, argc);
- Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ Handle<UnseededNumberDictionary> cache =
+ isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
@@ -794,7 +797,8 @@
Code::Flags flags =
Code::ComputeFlags(kind, MEGAMORPHIC, extra_state,
NORMAL, argc);
- Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ Handle<UnseededNumberDictionary> cache =
+ isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
@@ -813,7 +817,8 @@
Code::Flags flags =
Code::ComputeFlags(kind, MONOMORPHIC_PROTOTYPE_FAILURE, extra_state,
NORMAL, argc, OWN_MAP);
- Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ Handle<UnseededNumberDictionary> cache =
+ isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
@@ -832,7 +837,8 @@
Code::Flags flags =
Code::ComputeFlags(kind, DEBUG_BREAK, Code::kNoExtraICState,
NORMAL, argc);
- Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ Handle<UnseededNumberDictionary> cache =
+ isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
@@ -850,7 +856,8 @@
Code::Flags flags =
Code::ComputeFlags(kind, DEBUG_PREPARE_STEP_IN, Code::kNoExtraICState,
NORMAL, argc);
- Handle<NumberDictionary> cache = isolate_->factory()->non_monomorphic_cache();
+ Handle<UnseededNumberDictionary> cache =
+ isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
@@ -1445,13 +1452,13 @@
CallOptimization::CallOptimization(LookupResult* lookup) {
- if (!lookup->IsProperty() ||
- !lookup->IsCacheable() ||
- lookup->type() != CONSTANT_FUNCTION) {
- Initialize(Handle<JSFunction>::null());
- } else {
+ if (lookup->IsFound() &&
+ lookup->IsCacheable() &&
+ lookup->type() == CONSTANT_FUNCTION) {
// We only optimize constant function calls.
Initialize(Handle<JSFunction>(lookup->GetConstantFunction()));
+ } else {
+ Initialize(Handle<JSFunction>::null());
}
}
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 0843925..720ad8b 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -92,7 +92,7 @@
Handle<Code> ComputeLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value);
+ Handle<JSFunction> value);
Handle<Code> ComputeLoadInterceptor(Handle<String> name,
Handle<JSObject> receiver,
@@ -121,7 +121,7 @@
Handle<Code> ComputeKeyedLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value);
+ Handle<JSFunction> value);
Handle<Code> ComputeKeyedLoadInterceptor(Handle<String> name,
Handle<JSObject> receiver,
@@ -519,7 +519,7 @@
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name,
Label* miss);
@@ -569,7 +569,7 @@
Handle<Code> CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name);
Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
@@ -604,7 +604,7 @@
Handle<Code> CompileLoadConstant(Handle<String> name,
Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<Object> value);
+ Handle<JSFunction> value);
Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
diff --git a/src/type-info.cc b/src/type-info.cc
index e5f7b3e..fcdc610 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -71,7 +71,7 @@
Handle<Object> TypeFeedbackOracle::GetInfo(unsigned ast_id) {
int entry = dictionary_->FindEntry(ast_id);
- return entry != NumberDictionary::kNotFound
+ return entry != UnseededNumberDictionary::kNotFound
? Handle<Object>(dictionary_->ValueAt(entry))
: Handle<Object>::cast(isolate_->factory()->undefined_value());
}
@@ -268,6 +268,7 @@
case CompareIC::STRINGS:
return TypeInfo::String();
case CompareIC::OBJECTS:
+ case CompareIC::KNOWN_OBJECTS:
// TODO(kasperl): We really need a type for JS objects here.
return TypeInfo::NonPrimitive();
case CompareIC::GENERIC:
@@ -287,6 +288,23 @@
}
+Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
+ Handle<Object> object = GetInfo(expr->id());
+ if (!object->IsCode()) return Handle<Map>::null();
+ Handle<Code> code = Handle<Code>::cast(object);
+ if (!code->is_compare_ic_stub()) return Handle<Map>::null();
+ CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+ if (state != CompareIC::KNOWN_OBJECTS) {
+ return Handle<Map>::null();
+ }
+ Map* first_map = code->FindFirstMap();
+ ASSERT(first_map != NULL);
+ return CanRetainOtherContext(first_map, *global_context_)
+ ? Handle<Map>::null()
+ : Handle<Map>(first_map);
+}
+
+
TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
Handle<Object> object = GetInfo(expr->id());
TypeInfo unknown = TypeInfo::Unknown();
@@ -376,6 +394,7 @@
case CompareIC::HEAP_NUMBERS:
return TypeInfo::Number();
case CompareIC::OBJECTS:
+ case CompareIC::KNOWN_OBJECTS:
// TODO(kasperl): We really need a type for JS objects here.
return TypeInfo::NonPrimitive();
case CompareIC::GENERIC:
@@ -540,7 +559,7 @@
ZoneList<RelocInfo>* infos) {
DisableAssertNoAllocation allocation_allowed;
byte* old_start = code->instruction_start();
- dictionary_ = FACTORY->NewNumberDictionary(infos->length());
+ dictionary_ = FACTORY->NewUnseededNumberDictionary(infos->length());
byte* new_start = code->instruction_start();
RelocateRelocInfos(infos, old_start, new_start);
}
@@ -620,7 +639,7 @@
void TypeFeedbackOracle::SetInfo(unsigned ast_id, Object* target) {
- ASSERT(dictionary_->FindEntry(ast_id) == NumberDictionary::kNotFound);
+ ASSERT(dictionary_->FindEntry(ast_id) == UnseededNumberDictionary::kNotFound);
MaybeObject* maybe_result = dictionary_->AtNumberPut(ast_id, target);
USE(maybe_result);
#ifdef DEBUG
diff --git a/src/type-info.h b/src/type-info.h
index 167494c..80ebf0c 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -223,6 +223,7 @@
class CompareOperation;
class CompilationInfo;
class CountOperation;
+class Expression;
class Property;
class SmallMapList;
class UnaryOperation;
@@ -277,6 +278,7 @@
TypeInfo BinaryType(BinaryOperation* expr);
TypeInfo CompareType(CompareOperation* expr);
bool IsSymbolCompare(CompareOperation* expr);
+ Handle<Map> GetCompareMap(CompareOperation* expr);
TypeInfo SwitchType(CaseClause* clause);
TypeInfo IncrementType(CountOperation* expr);
@@ -302,7 +304,7 @@
Handle<Context> global_context_;
Isolate* isolate_;
- Handle<NumberDictionary> dictionary_;
+ Handle<UnseededNumberDictionary> dictionary_;
DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
};
diff --git a/src/unicode.cc b/src/unicode.cc
index 6e0ac1a..147f716 100644
--- a/src/unicode.cc
+++ b/src/unicode.cc
@@ -210,7 +210,7 @@
uchar Utf8::CalculateValue(const byte* str,
unsigned length,
unsigned* cursor) {
- // We only get called for non-ascii characters.
+ // We only get called for non-ASCII characters.
if (length == 1) {
*cursor += 1;
return kBadChar;
@@ -286,8 +286,8 @@
}
const byte* data = reinterpret_cast<const byte*>(str.data());
if (data[offset] <= kMaxOneByteChar) {
- // The next character is an ascii char so we scan forward over
- // the following ascii characters and return the next pure ascii
+ // The next character is an ASCII char so we scan forward over
+ // the following ASCII characters and return the next pure ASCII
// substring
const byte* result = data + offset;
offset++;
@@ -297,13 +297,13 @@
*offset_ptr = offset;
return result;
} else {
- // The next character is non-ascii so we just fill the buffer
+ // The next character is non-ASCII so we just fill the buffer
unsigned cursor = 0;
unsigned chars_read = 0;
while (offset < str.length()) {
uchar c = data[offset];
if (c <= kMaxOneByteChar) {
- // Fast case for ascii characters
+ // Fast case for ASCII characters
if (!CharacterStream::EncodeAsciiCharacter(c,
buffer,
capacity,
diff --git a/src/utils.h b/src/utils.h
index 68b1517..1d40c98 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -252,10 +252,13 @@
// ----------------------------------------------------------------------------
// Hash function.
+static const uint32_t kZeroHashSeed = 0;
+
// Thomas Wang, Integer Hash Functions.
// http://www.concentric.net/~Ttwang/tech/inthash.htm
-inline uint32_t ComputeIntegerHash(uint32_t key) {
+inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
uint32_t hash = key;
+ hash = hash ^ seed;
hash = ~hash + (hash << 15); // hash = (hash << 15) - hash - 1;
hash = hash ^ (hash >> 12);
hash = hash + (hash << 2);
@@ -280,7 +283,8 @@
inline uint32_t ComputePointerHash(void* ptr) {
return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)));
+ static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)),
+ v8::internal::kZeroHashSeed);
}
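With the new seed parameter, each isolate can perturb the Thomas Wang integer hash, a defense against hash-flooding attacks on numeric dictionaries; seeding is simply an xor before the usual avalanche steps. A runnable sketch that completes the function from the hunk above (the remaining steps follow V8's utils.h):

#include <stdint.h>
#include <stdio.h>

uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key;
  hash = hash ^ seed;           // per-isolate perturbation
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash = (hash + (hash << 3)) + (hash << 11);
  hash = hash ^ (hash >> 16);
  return hash;
}

int main() {
  // The same key lands in different buckets under different seeds.
  printf("%u %u\n", ComputeIntegerHash(42, 0), ComputeIntegerHash(42, 0x5eed));
  return 0;
}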
@@ -927,9 +931,17 @@
explicit EnumSet(T bits = 0) : bits_(bits) {}
bool IsEmpty() const { return bits_ == 0; }
bool Contains(E element) const { return (bits_ & Mask(element)) != 0; }
+ bool ContainsAnyOf(const EnumSet& set) const {
+ return (bits_ & set.bits_) != 0;
+ }
void Add(E element) { bits_ |= Mask(element); }
+ void Add(const EnumSet& set) { bits_ |= set.bits_; }
void Remove(E element) { bits_ &= ~Mask(element); }
+ void Remove(const EnumSet& set) { bits_ &= ~set.bits_; }
+ void RemoveAll() { bits_ = 0; }
+ void Intersect(const EnumSet& set) { bits_ &= set.bits_; }
T ToIntegral() const { return bits_; }
+ bool operator==(const EnumSet& set) { return bits_ == set.bits_; }
private:
T Mask(E element) const {
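The added operations round EnumSet out into a small bit-set value type. A quick usage sketch with a hypothetical enum (assumes the EnumSet template above is in scope):

// Usage sketch; Flag is a hypothetical enum, EnumSet is from utils.h.
enum Flag { kA, kB, kC };

void EnumSetDemo() {
  EnumSet<Flag> set;
  set.Add(kA);
  set.Add(kB);
  EnumSet<Flag> other;
  other.Add(kB);
  other.Add(kC);
  bool any = set.ContainsAnyOf(other);  // true: both contain kB
  set.Intersect(other);                 // set is now {kB}
  set.Remove(other);                    // set is now empty
  (void)any;
}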
diff --git a/src/v8.cc b/src/v8.cc
index 66c65e7..36ee221 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -47,24 +47,18 @@
static bool init_once_called = false;
bool V8::is_running_ = false;
-bool V8::has_been_setup_ = false;
+bool V8::has_been_set_up_ = false;
bool V8::has_been_disposed_ = false;
bool V8::has_fatal_error_ = false;
bool V8::use_crankshaft_ = true;
+List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL;
static Mutex* entropy_mutex = OS::CreateMutex();
static EntropySource entropy_source;
bool V8::Initialize(Deserializer* des) {
- // Setting --harmony implies all other harmony flags.
- // TODO(rossberg): Is there a better place to put this?
- if (FLAG_harmony) {
- FLAG_harmony_typeof = true;
- FLAG_harmony_scoping = true;
- FLAG_harmony_proxies = true;
- FLAG_harmony_collections = true;
- }
+ FlagList::EnforceFlagImplications();
InitializeOncePerProcess();
@@ -88,7 +82,7 @@
if (isolate->IsInitialized()) return true;
is_running_ = true;
- has_been_setup_ = true;
+ has_been_set_up_ = true;
has_fatal_error_ = false;
has_been_disposed_ = false;
@@ -106,11 +100,14 @@
Isolate* isolate = Isolate::Current();
ASSERT(isolate->IsDefaultIsolate());
- if (!has_been_setup_ || has_been_disposed_) return;
+ if (!has_been_set_up_ || has_been_disposed_) return;
isolate->TearDown();
is_running_ = false;
has_been_disposed_ = true;
+
+ delete call_completed_callbacks_;
+ call_completed_callbacks_ = NULL;
}
@@ -166,13 +163,48 @@
}
-bool V8::IdleNotification() {
+bool V8::IdleNotification(int hint) {
// Returning true tells the caller that there is no need to call
// IdleNotification again.
if (!FLAG_use_idle_notification) return true;
// Tell the heap that it may want to adjust.
- return HEAP->IdleNotification();
+ return HEAP->IdleNotification(hint);
+}
+
+
+void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
+ if (call_completed_callbacks_ == NULL) { // Lazy init.
+ call_completed_callbacks_ = new List<CallCompletedCallback>();
+ }
+ for (int i = 0; i < call_completed_callbacks_->length(); i++) {
+ if (callback == call_completed_callbacks_->at(i)) return;
+ }
+ call_completed_callbacks_->Add(callback);
+}
+
+
+void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
+ if (call_completed_callbacks_ == NULL) return;
+ for (int i = 0; i < call_completed_callbacks_->length(); i++) {
+ if (callback == call_completed_callbacks_->at(i)) {
+ call_completed_callbacks_->Remove(i);
+ }
+ }
+}
+
+
+void V8::FireCallCompletedCallback(Isolate* isolate) {
+ if (call_completed_callbacks_ == NULL) return;
+ HandleScopeImplementer* handle_scope_implementer =
+ isolate->handle_scope_implementer();
+ if (!handle_scope_implementer->CallDepthIsZero()) return;
+ // Fire callbacks. Increase call depth to prevent recursive callbacks.
+ handle_scope_implementer->IncrementCallDepth();
+ for (int i = 0; i < call_completed_callbacks_->length(); i++) {
+ call_completed_callbacks_->at(i)();
+ }
+ handle_scope_implementer->DecrementCallDepth();
}
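
FireCallCompletedCallback only runs the registered callbacks once the outermost API call unwinds (CallDepthIsZero), and bumps the call depth while iterating so a callback that re-enters V8 cannot retrigger the list. From the embedder side the feature is reached through the matching public v8::V8 statics added in this version; a minimal sketch, assuming that public wrapper simply forwards to the internal list managed here:

#include <cstdio>
#include "v8.h"

static void OnCallCompleted() {
  // Runs after the outermost script or function call returns to the embedder.
  std::printf("V8 call completed\n");
}

void InstallCallCompletedHook() {
  v8::V8::AddCallCompletedCallback(OnCallCompleted);
  // Registering the same function pointer twice is a no-op; the internal
  // list above is deduplicated on Add.
  v8::V8::AddCallCompletedCallback(OnCallCompleted);
}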
@@ -207,8 +239,8 @@
if (init_once_called) return;
init_once_called = true;
- // Setup the platform OS support.
- OS::Setup();
+ // Set up the platform OS support.
+ OS::SetUp();
use_crankshaft_ = FLAG_crankshaft;
@@ -216,7 +248,7 @@
use_crankshaft_ = false;
}
- CPU::Setup();
+ CPU::SetUp();
if (!CPU::SupportsCrankshaft()) {
use_crankshaft_ = false;
}
diff --git a/src/v8.h b/src/v8.h
index 01feefc..adfdb3e 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -106,7 +106,11 @@
Context* context);
// Idle notification directly from the API.
- static bool IdleNotification();
+ static bool IdleNotification(int hint);
+
+ static void AddCallCompletedCallback(CallCompletedCallback callback);
+ static void RemoveCallCompletedCallback(CallCompletedCallback callback);
+ static void FireCallCompletedCallback(Isolate* isolate);
private:
static void InitializeOncePerProcess();
@@ -114,7 +118,7 @@
// True if engine is currently running
static bool is_running_;
// True if V8 has ever been run
- static bool has_been_setup_;
+ static bool has_been_set_up_;
// True if error has been signaled for current engine
// (reset to false if engine is restarted)
static bool has_fatal_error_;
@@ -123,6 +127,8 @@
static bool has_been_disposed_;
// True if we are using the crankshaft optimizing compiler.
static bool use_crankshaft_;
+ // List of callbacks invoked when a call completes.
+ static List<CallCompletedCallback>* call_completed_callbacks_;
};
diff --git a/src/v8globals.h b/src/v8globals.h
index 005cdbd..ff3ad8d 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -107,14 +107,12 @@
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
-// (sorted alphabetically)
class AccessorInfo;
class Allocation;
class Arguments;
class Assembler;
class AssertNoAllocation;
-class BreakableStatement;
class Code;
class CodeGenerator;
class CodeStub;
@@ -124,20 +122,18 @@
class DebugInfo;
class Descriptor;
class DescriptorArray;
-class Expression;
class ExternalReference;
class FixedArray;
-class FunctionLiteral;
class FunctionTemplateInfo;
class MemoryChunk;
-class NumberDictionary;
+class SeededNumberDictionary;
+class UnseededNumberDictionary;
class StringDictionary;
template <typename T> class Handle;
class Heap;
class HeapObject;
class IC;
class InterceptorInfo;
-class IterationStatement;
class JSArray;
class JSFunction;
class JSObject;
@@ -148,31 +144,19 @@
class MapSpace;
class MarkCompactCollector;
class NewSpace;
-class NodeVisitor;
class Object;
class MaybeObject;
class OldSpace;
-class Property;
class Foreign;
-class RegExpNode;
-struct RegExpCompileData;
-class RegExpTree;
-class RegExpCompiler;
-class RegExpVisitor;
class Scope;
class ScopeInfo;
class Script;
-class Slot;
class Smi;
template <typename Config, class Allocator = FreeStoreAllocationPolicy>
class SplayTree;
-class Statement;
class String;
class Struct;
-class SwitchStatement;
-class AstVisitor;
class Variable;
-class VariableProxy;
class RelocInfo;
class Deserializer;
class MessageLocation;
diff --git a/src/v8natives.js b/src/v8natives.js
index 11b1a7e..1d54e28 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -660,6 +660,21 @@
}
+// ES5 section 8.12.7.
+function Delete(obj, p, should_throw) {
+ var desc = GetOwnProperty(obj, p);
+ if (IS_UNDEFINED(desc)) return true;
+ if (desc.isConfigurable()) {
+ %DeleteProperty(obj, p, 0);
+ return true;
+ } else if (should_throw) {
+ throw MakeTypeError("define_disallowed", [p]);
+ } else {
+ return;
+ }
+}
+
+
// Harmony proxies.
function DefineProxyProperty(obj, p, attributes, should_throw) {
var handler = %GetHandler(obj);
@@ -677,12 +692,7 @@
// ES5 8.12.9.
-function DefineOwnProperty(obj, p, desc, should_throw) {
- if (%IsJSProxy(obj)) {
- var attributes = FromGenericPropertyDescriptor(desc);
- return DefineProxyProperty(obj, p, attributes, should_throw);
- }
-
+function DefineObjectProperty(obj, p, desc, should_throw) {
var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
// A false value here means that access checks failed.
if (current_or_access === false) return void 0;
@@ -846,6 +856,90 @@
}
+// ES5 section 15.4.5.1.
+function DefineArrayProperty(obj, p, desc, should_throw) {
+ // Note that the length of an array is not actually stored as part of the
+ // property, hence we use generated code throughout this function instead of
+ // DefineObjectProperty() to modify its value.
+
+ // Step 3 - Special handling for length property.
+ if (p == "length") {
+ var length = obj.length;
+ if (!desc.hasValue()) {
+ return DefineObjectProperty(obj, "length", desc, should_throw);
+ }
+ var new_length = ToUint32(desc.getValue());
+ if (new_length != ToNumber(desc.getValue())) {
+ throw new $RangeError('defineProperty() array length out of range');
+ }
+ var length_desc = GetOwnProperty(obj, "length");
+ if (new_length != length && !length_desc.isWritable()) {
+ if (should_throw) {
+ throw MakeTypeError("redefine_disallowed", [p]);
+ } else {
+ return false;
+ }
+ }
+ var threw = false;
+ while (new_length < length--) {
+ if (!Delete(obj, ToString(length), false)) {
+ new_length = length + 1;
+ threw = true;
+ break;
+ }
+ }
+ // Make sure the below call to DefineObjectProperty() doesn't overwrite
+ // any magic "length" property by removing the value.
+ obj.length = new_length;
+ desc.value_ = void 0;
+ desc.hasValue_ = false;
+ if (!DefineObjectProperty(obj, "length", desc, should_throw) || threw) {
+ if (should_throw) {
+ throw MakeTypeError("redefine_disallowed", [p]);
+ } else {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // Step 4 - Special handling for array index.
+ var index = ToUint32(p);
+ if (index == ToNumber(p) && index != 4294967295) {
+ var length = obj.length;
+ var length_desc = GetOwnProperty(obj, "length");
+ if ((index >= length && !length_desc.isWritable()) ||
+ !DefineObjectProperty(obj, p, desc, true)) {
+ if (should_throw) {
+ throw MakeTypeError("define_disallowed", [p]);
+ } else {
+ return false;
+ }
+ }
+ if (index >= length) {
+ obj.length = index + 1;
+ }
+ return true;
+ }
+
+ // Step 5 - Fallback to default implementation.
+ return DefineObjectProperty(obj, p, desc, should_throw);
+}
+
+
+// ES5 section 8.12.9, ES5 section 15.4.5.1 and Harmony proxies.
+function DefineOwnProperty(obj, p, desc, should_throw) {
+ if (%IsJSProxy(obj)) {
+ var attributes = FromGenericPropertyDescriptor(desc);
+ return DefineProxyProperty(obj, p, attributes, should_throw);
+ } else if (IS_ARRAY(obj)) {
+ return DefineArrayProperty(obj, p, desc, should_throw);
+ } else {
+ return DefineObjectProperty(obj, p, desc, should_throw);
+ }
+}
+
+
// ES5 section 15.2.3.2.
function ObjectGetPrototypeOf(obj) {
if (!IS_SPEC_OBJECT(obj)) {
diff --git a/src/v8threads.cc b/src/v8threads.cc
index 3881d66..fd8d536 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -154,7 +154,7 @@
bool ThreadManager::RestoreThread() {
ASSERT(IsLockedByCurrentThread());
- // First check whether the current thread has been 'lazily archived', ie
+ // First check whether the current thread has been 'lazily archived', i.e.
// not archived at all. If that is the case we put the state storage we
// had prepared back in the free list, since we didn't need it after all.
if (lazily_archived_thread_.Equals(ThreadId::Current())) {
diff --git a/src/v8utils.cc b/src/v8utils.cc
index bf0e05d..042a60f 100644
--- a/src/v8utils.cc
+++ b/src/v8utils.cc
@@ -316,7 +316,7 @@
for (const char* p = data_; p < end; p++) {
char c = *p;
if ((c & 0x80) != 0) {
- // Non-ascii detected:
+ // Non-ASCII detected:
is_ascii = false;
// Report the error and abort if appropriate:
@@ -329,7 +329,7 @@
c, filename_, line_no, char_no);
// Allow for some context up to kNumberOfLeadingContextChars chars
- // before the offending non-ascii char to help the user see where
+ // before the offending non-ASCII char to help the user see where
// the offending char is.
const int kNumberOfLeadingContextChars = 10;
const char* err_context = p - kNumberOfLeadingContextChars;
@@ -345,7 +345,7 @@
OS::Abort();
}
- break; // Non-ascii detected. No need to continue scanning.
+ break; // Non-ASCII detected. No need to continue scanning.
}
if (c == '\n') {
start_of_line = p;
diff --git a/src/version.cc b/src/version.cc
index 3b85fb4..5336f07 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 7
-#define BUILD_NUMBER 12
-#define PATCH_LEVEL 28
+#define MINOR_VERSION 8
+#define BUILD_NUMBER 9
+#define PATCH_LEVEL 11
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/src/win32-headers.h b/src/win32-headers.h
index 0ee3306..ca1b1d8 100644
--- a/src/win32-headers.h
+++ b/src/win32-headers.h
@@ -75,7 +75,9 @@
// makes it impossible to have them elsewhere.
#include <winsock2.h>
#include <ws2tcpip.h>
+#ifndef __MINGW32__
#include <wspiapi.h>
+#endif // __MINGW32__
#include <process.h> // for _beginthreadex()
#include <stdlib.h>
#endif // V8_WIN32_HEADERS_FULL
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index ab387d6..8e3caa4 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -28,6 +28,8 @@
#ifndef V8_X64_ASSEMBLER_X64_INL_H_
#define V8_X64_ASSEMBLER_X64_INL_H_
+#include "x64/assembler-x64.h"
+
#include "cpu.h"
#include "debug.h"
#include "v8memory.h"
@@ -262,7 +264,7 @@
}
-Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
if (rmode_ == EMBEDDED_OBJECT) {
return Memory::Object_Handle_at(pc_);
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 3290f7e..9ce1af8 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -383,7 +383,7 @@
}
#endif
- // Setup buffer pointers.
+ // Set up buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -412,7 +412,7 @@
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
- // Setup code descriptor.
+ // Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
@@ -426,13 +426,7 @@
void Assembler::Align(int m) {
ASSERT(IsPowerOf2(m));
int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
- while (delta >= 9) {
- nop(9);
- delta -= 9;
- }
- if (delta > 0) {
- nop(delta);
- }
+ Nop(delta);
}
@@ -441,6 +435,15 @@
}
+bool Assembler::IsNop(Address addr) {
+ Address a = addr;
+ while (*a == 0x66) a++;
+ if (*a == 0x90) return true;
+ if (a[0] == 0xf && a[1] == 0x1f) return true;
+ return false;
+}
+
+
void Assembler::bind_to(Label* L, int pos) {
ASSERT(!L->is_bound()); // Label may only be bound once.
ASSERT(0 <= pos && pos <= pc_offset()); // Position must be valid.
@@ -499,7 +502,7 @@
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
- // Setup new buffer.
+ // Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
desc.reloc_size =
@@ -1763,7 +1766,7 @@
}
-void Assembler::nop(int n) {
+void Assembler::Nop(int n) {
// The recommended multi-byte sequences of NOP instructions from the Intel 64
// and IA-32 Architectures Software Developer's Manual.
//
@@ -1778,73 +1781,64 @@
// 9 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 66 0F 1F 84 00 00 00 00
// 00000000H] 00H
- ASSERT(1 <= n);
- ASSERT(n <= 9);
EnsureSpace ensure_space(this);
- switch (n) {
- case 1:
- emit(0x90);
- return;
- case 2:
- emit(0x66);
- emit(0x90);
- return;
- case 3:
- emit(0x0f);
- emit(0x1f);
- emit(0x00);
- return;
- case 4:
- emit(0x0f);
- emit(0x1f);
- emit(0x40);
- emit(0x00);
- return;
- case 5:
- emit(0x0f);
- emit(0x1f);
- emit(0x44);
- emit(0x00);
- emit(0x00);
- return;
- case 6:
- emit(0x66);
- emit(0x0f);
- emit(0x1f);
- emit(0x44);
- emit(0x00);
- emit(0x00);
- return;
- case 7:
- emit(0x0f);
- emit(0x1f);
- emit(0x80);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
- case 8:
- emit(0x0f);
- emit(0x1f);
- emit(0x84);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
- case 9:
- emit(0x66);
- emit(0x0f);
- emit(0x1f);
- emit(0x84);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
+ while (n > 0) {
+ switch (n) {
+ case 2:
+ emit(0x66);
+ case 1:
+ emit(0x90);
+ return;
+ case 3:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x00);
+ return;
+ case 4:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x40);
+ emit(0x00);
+ return;
+ case 6:
+ emit(0x66);
+ case 5:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x44);
+ emit(0x00);
+ emit(0x00);
+ return;
+ case 7:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x80);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ return;
+ default:
+ case 11:
+ emit(0x66);
+ n--;
+ case 10:
+ emit(0x66);
+ n--;
+ case 9:
+ emit(0x66);
+ n--;
+ case 8:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x84);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ n -= 8;
+ }
}
}
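
The rewritten Nop pads an arbitrary byte count by falling through the switch: each case emits one of the Intel-recommended sequences, the 8-byte form is stretched to 9–11 bytes with extra 0x66 operand-size prefixes, and anything longer loops. A standalone C++ sketch of the simpler table-driven equivalent (longest chunk first, capped at the 9-byte sequence; an illustration, not the fall-through trick itself):

#include <stdint.h>
#include <string.h>

// The nine recommended NOP encodings from the Intel manual, indexed by
// length - 1.
static const uint8_t kNops[9][9] = {
  {0x90},
  {0x66, 0x90},
  {0x0f, 0x1f, 0x00},
  {0x0f, 0x1f, 0x40, 0x00},
  {0x0f, 0x1f, 0x44, 0x00, 0x00},
  {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
  {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
  {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
  {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
};

// Fill n bytes at p with NOPs, emitting the longest sequence each round.
// The new IsNop above recognizes exactly these shapes: optional 0x66
// prefixes followed by either 0x90 or 0x0f 0x1f.
static void EmitNops(uint8_t* p, int n) {
  while (n > 0) {
    int chunk = n > 9 ? 9 : n;
    memcpy(p, kNops[chunk - 1], chunk);
    p += chunk;
    n -= chunk;
  }
}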
@@ -2313,6 +2307,27 @@
}
+void Assembler::f2xm1() {
+ EnsureSpace ensure_space(this);
+ emit(0xD9);
+ emit(0xF0);
+}
+
+
+void Assembler::fscale() {
+ EnsureSpace ensure_space(this);
+ emit(0xD9);
+ emit(0xFD);
+}
+
+
+void Assembler::fninit() {
+ EnsureSpace ensure_space(this);
+ emit(0xDB);
+ emit(0xE3);
+}
+
+
void Assembler::fadd(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC0, i);
@@ -2572,7 +2587,8 @@
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
- ASSERT(is_uint2(imm8));
+ ASSERT(CpuFeatures::IsSupported(SSE4_1));
+ ASSERT(is_uint8(imm8));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index e3b73f0..7af33e1 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -638,6 +638,7 @@
// possible to align the pc offset to a multiple
// of m, where m must be a power of 2.
void Align(int m);
+ void Nop(int bytes = 1);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@@ -1156,7 +1157,6 @@
void hlt();
void int3();
void nop();
- void nop(int n);
void rdtsc();
void ret(int imm16);
void setcc(Condition cc, Register reg);
@@ -1279,6 +1279,9 @@
void fcos();
void fptan();
void fyl2x();
+ void f2xm1();
+ void fscale();
+ void fninit();
void frndint();
@@ -1400,7 +1403,7 @@
return static_cast<int>(reloc_info_writer.pos() - pc_);
}
- static bool IsNop(Address addr) { return *addr == 0x90; }
+ static bool IsNop(Address addr);
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index e423ae3..5258f26 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -337,7 +337,7 @@
__ push(rbx);
__ push(rbx);
- // Setup pointer to last argument.
+ // Set up pointer to last argument.
__ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
@@ -1198,8 +1198,9 @@
// Both registers are preserved by this code so no need to differentiate between
// a construct call and a normal call.
static void ArrayNativeCode(MacroAssembler* masm,
- Label *call_generic_code) {
- Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array;
+ Label* call_generic_code) {
+ Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array,
+ has_non_smi_element;
// Check for array construction with zero arguments.
__ testq(rax, rax);
@@ -1305,6 +1306,9 @@
__ jmp(&entry);
__ bind(&loop);
__ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
+ if (FLAG_smi_only_arrays) {
+ __ JumpIfNotSmi(kScratchRegister, &has_non_smi_element);
+ }
__ movq(Operand(rdx, 0), kScratchRegister);
__ addq(rdx, Immediate(kPointerSize));
__ bind(&entry);
@@ -1321,6 +1325,45 @@
__ push(rcx);
__ movq(rax, rbx);
__ ret(0);
+
+ __ bind(&has_non_smi_element);
+ __ UndoAllocationInNewSpace(rbx);
+ __ jmp(call_generic_code);
+}
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -----------------------------------
+ Label generic_array_code;
+
+ // Get the InternalArray function.
+ __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, rdi);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin InternalArray functions should be maps.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will catch both a NULL and a Smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+ __ Check(not_smi, "Unexpected initial map for InternalArray function");
+ __ CmpObjectType(rbx, MAP_TYPE, rcx);
+ __ Check(equal, "Unexpected initial map for InternalArray function");
+ }
+
+ // Run the native code for the InternalArray function called as a normal
+ // function.
+ ArrayNativeCode(masm, &generic_array_code);
+
+ // Jump to the generic array code in case the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+ Handle<Code> array_code =
+ masm->isolate()->builtins()->InternalArrayCodeGeneric();
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
}
@@ -1504,6 +1547,7 @@
__ bind(&invoke);
__ call(rdx);
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
// Leave frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ ret(0);
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 98c5c6f..d306101 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -124,12 +124,12 @@
// Get the function from the stack.
__ movq(rcx, Operand(rsp, 1 * kPointerSize));
- // Setup the object header.
+ // Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
- // Setup the fixed slots.
+ // Set up the fixed slots.
__ Set(rbx, 0); // Set to NULL.
__ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
__ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
@@ -173,7 +173,7 @@
// Get the serialized scope info from the stack.
__ movq(rbx, Operand(rsp, 2 * kPointerSize));
- // Setup the object header.
+ // Set up the object header.
__ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
@@ -194,7 +194,7 @@
__ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
- // Setup the fixed slots.
+ // Set up the fixed slots.
__ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
__ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
__ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
@@ -1991,152 +1991,259 @@
void MathPowStub::Generate(MacroAssembler* masm) {
- // Registers are used as follows:
- // rdx = base
- // rax = exponent
- // rcx = temporary, result
+ // Choose register conforming to calling convention (when bailing out).
+#ifdef _WIN64
+ const Register exponent = rdx;
+#else
+ const Register exponent = rdi;
+#endif
+ const Register base = rax;
+ const Register scratch = rcx;
+ const XMMRegister double_result = xmm3;
+ const XMMRegister double_base = xmm2;
+ const XMMRegister double_exponent = xmm1;
+ const XMMRegister double_scratch = xmm4;
- Label allocate_return, call_runtime;
+ Label call_runtime, done, exponent_not_smi, int_exponent;
- // Load input parameters.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ // Save 1 in double_result - we need this several times later on.
+ __ movq(scratch, Immediate(1));
+ __ cvtlsi2sd(double_result, scratch);
- // Save 1 in xmm3 - we need this several times later on.
- __ Set(rcx, 1);
- __ cvtlsi2sd(xmm3, rcx);
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi, unpack_exponent;
+ // The exponent and base are supplied as arguments on the stack.
+ // This can only happen if the stub is called from non-optimized code.
+ // Load input parameters from stack.
+ __ movq(base, Operand(rsp, 2 * kPointerSize));
+ __ movq(exponent, Operand(rsp, 1 * kPointerSize));
+ __ JumpIfSmi(base, &base_is_smi, Label::kNear);
+ __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ JumpIfNotSmi(rax, &exponent_nonsmi);
- __ JumpIfNotSmi(rdx, &base_nonsmi);
+ __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+ __ jmp(&unpack_exponent, Label::kNear);
- // Optimized version when both exponent and base are smis.
- Label powi;
- __ SmiToInteger32(rdx, rdx);
- __ cvtlsi2sd(xmm0, rdx);
- __ jmp(&powi);
- // Exponent is a smi and base is a heapnumber.
- __ bind(&base_nonsmi);
- __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
+ __ bind(&base_is_smi);
+ __ SmiToInteger32(base, base);
+ __ cvtlsi2sd(double_base, base);
+ __ bind(&unpack_exponent);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+ __ SmiToInteger32(exponent, exponent);
+ __ jmp(&int_exponent);
- // Optimized version of pow if exponent is a smi.
- // xmm0 contains the base.
- __ bind(&powi);
- __ SmiToInteger32(rax, rax);
+ __ bind(&exponent_not_smi);
+ __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
+ __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+ __ SmiToInteger32(exponent, exponent);
+ __ jmp(&int_exponent);
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ movq(rdx, rax);
+ __ bind(&exponent_not_smi);
+ __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
+ }
+
+ if (exponent_type_ != INTEGER) {
+ Label fast_power;
+ // Detect integer exponents stored as double.
+ __ cvttsd2si(exponent, double_exponent);
+ // Skip to runtime if possibly NaN (indicated by the indefinite integer).
+ __ cmpl(exponent, Immediate(0x80000000u));
+ __ j(equal, &call_runtime);
+ __ cvtlsi2sd(double_scratch, exponent);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_exponent, double_scratch);
+ __ j(equal, &int_exponent);
+
+ if (exponent_type_ == ON_STACK) {
+ // Detect square root case. Crankshaft detects constant +/-0.5 at
+ // compile time and uses DoMathPowHalf instead. We then skip this check
+ // for non-constant cases of +/-0.5 as these hardly occur.
+ Label continue_sqrt, continue_rsqrt, not_plus_half;
+ // Test for 0.5.
+ // Load double_scratch with 0.5.
+ __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE);
+ __ movq(double_scratch, scratch);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_scratch, double_exponent);
+ __ j(not_equal, &not_plus_half, Label::kNear);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+ // According to IEEE-754, double-precision -Infinity has the highest
+ // 12 bits set and the lowest 52 bits cleared.
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+ __ movq(double_scratch, scratch);
+ __ ucomisd(double_scratch, double_base);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &continue_sqrt, Label::kNear);
+ __ j(carry, &continue_sqrt, Label::kNear);
+
+ // Set result to Infinity in the special case.
+ __ xorps(double_result, double_result);
+ __ subsd(double_result, double_scratch);
+ __ jmp(&done);
+
+ __ bind(&continue_sqrt);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorps(double_scratch, double_scratch);
+ __ addsd(double_scratch, double_base); // Convert -0 to 0.
+ __ sqrtsd(double_result, double_scratch);
+ __ jmp(&done);
+
+ // Test for -0.5.
+ __ bind(&not_plus_half);
+ // Load double_scratch with -0.5 by subtracting 1.
+ __ subsd(double_scratch, double_result);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_scratch, double_exponent);
+ __ j(not_equal, &fast_power, Label::kNear);
+
+ // Calculates reciprocal of square root of base. Check for the special
+ // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+ // According to IEEE-754, double-precision -Infinity has the highest
+ // 12 bits set and the lowest 52 bits cleared.
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+ __ movq(double_scratch, scratch);
+ __ ucomisd(double_scratch, double_base);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &continue_rsqrt, Label::kNear);
+ __ j(carry, &continue_rsqrt, Label::kNear);
+
+ // Set result to 0 in the special case.
+ __ xorps(double_result, double_result);
+ __ jmp(&done);
+
+ __ bind(&continue_rsqrt);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorps(double_exponent, double_exponent);
+ __ addsd(double_exponent, double_base); // Convert -0 to +0.
+ __ sqrtsd(double_exponent, double_exponent);
+ __ divsd(double_result, double_exponent);
+ __ jmp(&done);
+ }
+
+ // Using FPU instructions to calculate power.
+ Label fast_power_failed;
+ __ bind(&fast_power);
+ __ fnclex(); // Clear flags to catch exceptions later.
+ // Transfer (B)ase and (E)xponent onto the FPU register stack.
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(Operand(rsp, 0), double_exponent);
+ __ fld_d(Operand(rsp, 0)); // E
+ __ movsd(Operand(rsp, 0), double_base);
+ __ fld_d(Operand(rsp, 0)); // B, E
+
+ // Exponent is in st(1) and base is in st(0)
+ // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
+ // FYL2X calculates st(1) * log2(st(0))
+ __ fyl2x(); // X
+ __ fld(0); // X, X
+ __ frndint(); // rnd(X), X
+ __ fsub(1); // rnd(X), X-rnd(X)
+ __ fxch(1); // X - rnd(X), rnd(X)
+ // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
+ __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
+ __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
+ __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
+ // FSCALE calculates st(0) * 2^st(1)
+ __ fscale(); // 2^X, rnd(X)
+ __ fstp(1);
+ // Bail out to runtime in case of exceptions in the status word.
+ __ fnstsw_ax();
+ __ testb(rax, Immediate(0x5F)); // Check for all but precision exception.
+ __ j(not_zero, &fast_power_failed, Label::kNear);
+ __ fstp_d(Operand(rsp, 0));
+ __ movsd(double_result, Operand(rsp, 0));
+ __ addq(rsp, Immediate(kDoubleSize));
+ __ jmp(&done);
+
+ __ bind(&fast_power_failed);
+ __ fninit();
+ __ addq(rsp, Immediate(kDoubleSize));
+ __ jmp(&call_runtime);
+ }
+
+ // Calculate power with integer exponent.
+ __ bind(&int_exponent);
+ const XMMRegister double_scratch2 = double_exponent;
+ // Back up exponent as we need to check if exponent is negative later.
+ __ movq(scratch, exponent); // Back up exponent.
+ __ movsd(double_scratch, double_base); // Back up base.
+ __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
- Label no_neg;
- __ cmpl(rax, Immediate(0));
- __ j(greater_equal, &no_neg, Label::kNear);
- __ negl(rax);
+ Label no_neg, while_true, no_multiply;
+ __ testl(scratch, scratch);
+ __ j(positive, &no_neg, Label::kNear);
+ __ negl(scratch);
__ bind(&no_neg);
- // Load xmm1 with 1.
- __ movaps(xmm1, xmm3);
- Label while_true;
- Label no_multiply;
-
__ bind(&while_true);
- __ shrl(rax, Immediate(1));
+ __ shrl(scratch, Immediate(1));
__ j(not_carry, &no_multiply, Label::kNear);
- __ mulsd(xmm1, xmm0);
+ __ mulsd(double_result, double_scratch);
__ bind(&no_multiply);
- __ mulsd(xmm0, xmm0);
+
+ __ mulsd(double_scratch, double_scratch);
__ j(not_zero, &while_true);
- // Base has the original value of the exponent - if the exponent is
- // negative return 1/result.
- __ testl(rdx, rdx);
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ divsd(xmm3, xmm1);
- __ movaps(xmm1, xmm3);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- __ j(equal, &call_runtime);
+ // If the exponent is negative, return 1/result.
+ __ testl(exponent, exponent);
+ __ j(greater, &done);
+ __ divsd(double_scratch2, double_result);
+ __ movsd(double_result, double_scratch2);
+ // Test whether result is zero. Bail out to check for subnormal result.
+ // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+ __ xorps(double_scratch2, double_scratch2);
+ __ ucomisd(double_scratch2, double_result);
+ // double_exponent aliased as double_scratch2 has already been overwritten
+ // and may not have contained the exponent value in the first place when the
+ // input was a smi. We reset it with exponent value before bailing out.
+ __ j(not_equal, &done);
+ __ cvtlsi2sd(double_exponent, exponent);
- __ jmp(&allocate_return);
+ // Returning or bailing out.
+ Counters* counters = masm->isolate()->counters();
+ if (exponent_type_ == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
- // Exponent (or both) is a heapnumber - no matter what we should now work
- // on doubles.
- __ bind(&exponent_nonsmi);
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- // Test if exponent is nan.
- __ ucomisd(xmm1, xmm1);
- __ j(parity_even, &call_runtime);
+ // The stub is called from non-optimized code, which expects the result
+ // as a heap number in rax.
+ __ bind(&done);
+ __ AllocateHeapNumber(rax, rcx, &call_runtime);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
+ __ IncrementCounter(counters->math_pow(), 1);
+ __ ret(2 * kPointerSize);
+ } else {
+ __ bind(&call_runtime);
+ // Move base to the correct argument register. Exponent is already in xmm1.
+ __ movsd(xmm0, double_base);
+ ASSERT(double_exponent.is(xmm1));
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(2);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()), 2);
+ }
+ // Return value is in xmm0.
+ __ movsd(double_result, xmm0);
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- Label base_not_smi, handle_special_cases;
- __ JumpIfNotSmi(rdx, &base_not_smi, Label::kNear);
- __ SmiToInteger32(rdx, rdx);
- __ cvtlsi2sd(xmm0, rdx);
- __ jmp(&handle_special_cases, Label::kNear);
-
- __ bind(&base_not_smi);
- __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
- __ andl(rcx, Immediate(HeapNumber::kExponentMask));
- __ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- __ j(greater_equal, &call_runtime);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
-
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- Label not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
- __ movq(xmm2, rcx);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half, Label::kNear);
-
- // Calculates reciprocal of square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
- __ divsd(xmm3, xmm1);
- __ movaps(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // Test for 0.5.
- __ bind(&not_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &call_runtime);
- // Calculates square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(xmm1, xmm1);
- __ addsd(xmm1, xmm0); // Convert -0 to 0.
- __ sqrtsd(xmm1, xmm1);
-
- __ bind(&allocate_return);
- __ AllocateHeapNumber(rcx, rax, &call_runtime);
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm1);
- __ movq(rax, rcx);
- __ ret(2 * kPointerSize);
-
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ bind(&done);
+ __ IncrementCounter(counters->math_pow(), 1);
+ __ ret(0);
+ }
}
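
The rewritten MathPowStub splits Math.pow into two fast paths: integer exponents use binary exponentiation (the shrl/mulsd loop above), while fractional exponents go through the x87 identity B^E = 2^(E * log2(B)) via FYL2X, F2XM1 and FSCALE, bailing to the runtime on any FPU exception. A scalar C++ model of the integer-exponent path only — an illustration of the algorithm, not the stub:

#include <cstdlib>

// Exponentiation by squaring, mirroring the int_exponent path: shift the
// absolute exponent right each round, multiply the result in whenever the
// shifted-out bit was set, and square the base every round.
static double PowIntExponent(double base, int exponent) {
  double result = 1.0;         // double_result starts at 1.
  double b = base;             // double_scratch backs up the base.
  int e = std::abs(exponent);  // (ignores INT_MIN overflow)
  while (e != 0) {
    if (e & 1) result *= b;    // mulsd double_result, double_scratch
    b *= b;                    // mulsd double_scratch, double_scratch
    e >>= 1;                   // shrl scratch, 1
  }
  // Negative exponents return 1/result; the stub additionally bails to the
  // runtime when that reciprocal underflows to zero, since x^-y == (1/x)^y
  // does not hold for subnormal results.
  return exponent < 0 ? 1.0 / result : result;
}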
@@ -2250,6 +2357,7 @@
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
Label no_parameter_map;
+ __ xor_(r8, r8);
__ testq(rbx, rbx);
__ j(zero, &no_parameter_map, Label::kNear);
__ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
@@ -2292,7 +2400,7 @@
__ movq(FieldOperand(rax, i), rdx);
}
- // Setup the callee in-object property.
+ // Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
__ movq(rdx, Operand(rsp, 3 * kPointerSize));
__ movq(FieldOperand(rax, JSObject::kHeaderSize +
@@ -2307,7 +2415,7 @@
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
- // Setup the elements pointer in the allocated arguments object.
+ // Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, rdi will point there, otherwise to the
// backing store.
__ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
@@ -2343,16 +2451,13 @@
Label parameters_loop, parameters_test;
// Load tagged parameter count into r9.
- __ movq(r9, Operand(rsp, 1 * kPointerSize));
+ __ Integer32ToSmi(r9, rbx);
__ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addq(r8, Operand(rsp, 3 * kPointerSize));
+ __ addq(r8, Operand(rsp, 1 * kPointerSize));
__ subq(r8, r9);
__ Move(r11, factory->the_hole_value());
__ movq(rdx, rdi);
- __ SmiToInteger64(kScratchRegister, r9);
- __ lea(rdi, Operand(rdi, kScratchRegister,
- times_pointer_size,
- kParameterMapHeaderSize));
+ __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
// r9 = loop variable (tagged)
// r8 = mapping index (tagged)
// r11 = the hole value
@@ -2388,9 +2493,8 @@
Label arguments_loop, arguments_test;
__ movq(r8, rbx);
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
- // Untag rcx and r8 for the loop below.
+ // Untag rcx for the loop below.
__ SmiToInteger64(rcx, rcx);
- __ SmiToInteger64(r8, r8);
__ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
__ subq(rdx, kScratchRegister);
__ jmp(&arguments_test, Label::kNear);
@@ -2514,7 +2618,7 @@
// Get the parameters pointer from the stack.
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
- // Setup the elements pointer in the allocated arguments object and
+ // Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
@@ -2664,7 +2768,7 @@
kShortExternalStringMask));
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be a flat ascii string. None of the following
+ // Any other flat string must be a flat ASCII string. None of the following
// string type tests will succeed if subject is not a string or a short
// external string.
__ andb(rbx, Immediate(kIsNotStringMask |
@@ -2715,16 +2819,16 @@
Immediate(kStringRepresentationMask | kStringEncodingMask));
STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be sequential ascii or external.
+ // Any other flat string must be sequential ASCII or external.
__ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
Immediate(kStringRepresentationMask));
__ j(not_zero, &external_string);
__ bind(&seq_ascii_string);
- // rdi: subject string (sequential ascii)
+ // rdi: subject string (sequential ASCII)
// rax: RegExp data (FixedArray)
__ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
- __ Set(rcx, 1); // Type is ascii.
+ __ Set(rcx, 1); // Type is ASCII.
__ jmp(&check_code, Label::kNear);
__ bind(&seq_two_byte_string);
@@ -2740,7 +2844,7 @@
__ JumpIfSmi(r11, &runtime);
// rdi: subject string
- // rcx: encoding of subject string (1 if ascii, 0 if two_byte);
+ // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
// r11: code
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
@@ -2748,7 +2852,7 @@
// rdi: subject string
// rbx: previous index
- // rcx: encoding of subject string (1 if ascii 0 if two_byte);
+ // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
// r11: code
// All checks done. Now push arguments for native regexp code.
Counters* counters = masm->isolate()->counters();
@@ -2805,7 +2909,7 @@
// Keep track on aliasing between argX defined above and the registers used.
// rdi: subject string
// rbx: previous index
- // rcx: encoding of subject string (1 if ascii 0 if two_byte);
+ // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
// r11: code
// r14: slice offset
// r15: original subject string
@@ -3376,7 +3480,7 @@
__ JumpIfNotBothSequentialAsciiStrings(
rdx, rax, rcx, rbx, &check_unequal_objects);
- // Inline comparison of ascii strings.
+ // Inline comparison of ASCII strings.
if (cc_ == equal) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
rdx,
@@ -3835,7 +3939,7 @@
Label not_outermost_js, not_outermost_js_2;
{ // NOLINT. Scope block confuses linter.
MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
- // Setup frame.
+ // Set up frame.
__ push(rbp);
__ movq(rbp, rsp);
@@ -4329,7 +4433,7 @@
void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime, call_builtin;
+ Label call_runtime, call_builtin;
Builtins::JavaScript builtin_id = Builtins::ADD;
// Load the two arguments.
@@ -4338,14 +4442,14 @@
// Make sure that both arguments are strings if not known in advance.
if (flags_ == NO_STRING_ADD_FLAGS) {
- __ JumpIfSmi(rax, &string_add_runtime);
+ __ JumpIfSmi(rax, &call_runtime);
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
- __ j(above_equal, &string_add_runtime);
+ __ j(above_equal, &call_runtime);
// First argument is a string, test second.
- __ JumpIfSmi(rdx, &string_add_runtime);
+ __ JumpIfSmi(rdx, &call_runtime);
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &string_add_runtime);
+ __ j(above_equal, &call_runtime);
} else {
// Here at least one of the arguments is definitely a string.
// We convert the one that is not known to be a string.
@@ -4411,9 +4515,9 @@
__ SmiCompare(rbx, Smi::FromInt(2));
__ j(not_equal, &longer_than_two);
- // Check that both strings are non-external ascii strings.
+ // Check that both strings are non-external ASCII strings.
__ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
- &string_add_runtime);
+ &call_runtime);
// Get the two characters forming the sub string.
__ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
@@ -4428,20 +4532,30 @@
__ ret(2 * kPointerSize);
__ bind(&make_two_character_string);
- __ Set(rbx, 2);
- __ jmp(&make_flat_ascii_string);
+ __ Set(rdi, 2);
+ __ AllocateAsciiString(rax, rdi, r8, r9, r11, &call_runtime);
+ // rbx - first byte: first character
+ // rbx - second byte: *maybe* second character
+ // Make sure that the second byte of rbx contains the second character.
+ __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ __ shll(rcx, Immediate(kBitsPerByte));
+ __ orl(rbx, rcx);
+ // Write both characters to the new string.
+ __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
+ __ IncrementCounter(counters->string_add_native(), 1);
+ __ ret(2 * kPointerSize);
__ bind(&longer_than_two);
// Check if resulting string will be flat.
- __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
+ __ SmiCompare(rbx, Smi::FromInt(ConsString::kMinLength));
__ j(below, &string_add_flat_result);
// Handle exceptionally long strings in the runtime system.
STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
__ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
- __ j(above, &string_add_runtime);
+ __ j(above, &call_runtime);
// If result is not supposed to be flat, allocate a cons string object. If
- // both strings are ascii the result is an ascii cons string.
+ // both strings are ASCII the result is an ASCII cons string.
// rax: first string
// rbx: length of resulting flat string
// rdx: second string
@@ -4455,8 +4569,8 @@
__ testl(rcx, Immediate(kStringEncodingMask));
__ j(zero, &non_ascii);
__ bind(&ascii_data);
- // Allocate an acsii cons string.
- __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
+ // Allocate an ASCII cons string.
+ __ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
__ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
@@ -4469,7 +4583,7 @@
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
// At least one of the strings is two-byte. Check whether it happens
- // to contain only ascii characters.
+ // to contain only ASCII characters.
// rcx: first instance type AND second instance type.
// r8: first instance type.
// r9: second instance type.
@@ -4481,111 +4595,103 @@
__ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
__ j(equal, &ascii_data);
// Allocate a two byte cons string.
- __ AllocateTwoByteConsString(rcx, rdi, no_reg, &string_add_runtime);
+ __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
__ jmp(&allocated);
- // Handle creating a flat result. First check that both strings are not
- // external strings.
+ // We cannot encounter sliced strings or cons strings here since:
+ STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
+ // Handle creating a flat result from either external or sequential strings.
+ // Locate the first characters' locations.
// rax: first string
// rbx: length of resulting flat string as smi
// rdx: second string
// r8: instance type of first string
// r9: instance type of second string
+ Label first_prepared, second_prepared;
+ Label first_is_sequential, second_is_sequential;
__ bind(&string_add_flat_result);
- __ SmiToInteger32(rbx, rbx);
- __ movl(rcx, r8);
- __ and_(rcx, Immediate(kStringRepresentationMask));
- __ cmpl(rcx, Immediate(kExternalStringTag));
- __ j(equal, &string_add_runtime);
- __ movl(rcx, r9);
- __ and_(rcx, Immediate(kStringRepresentationMask));
- __ cmpl(rcx, Immediate(kExternalStringTag));
- __ j(equal, &string_add_runtime);
- // We cannot encounter sliced strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
- // Now check if both strings are ascii strings.
- // rax: first string
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of second string
+
+ __ SmiToInteger32(r14, FieldOperand(rax, SeqString::kLengthOffset));
+ // r14: length of first string
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(r8, Immediate(kStringRepresentationMask));
+ __ j(zero, &first_is_sequential, Label::kNear);
+ // Rule out short external string and load string resource.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ testb(r8, Immediate(kShortExternalStringMask));
+ __ j(not_zero, &call_runtime);
+ __ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
+ __ jmp(&first_prepared, Label::kNear);
+ __ bind(&first_is_sequential);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ lea(rcx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ __ bind(&first_prepared);
+
+ // Check whether both strings have same encoding.
+ __ xorl(r8, r9);
+ __ testb(r8, Immediate(kStringEncodingMask));
+ __ j(not_zero, &call_runtime);
+
+ __ SmiToInteger32(r15, FieldOperand(rdx, SeqString::kLengthOffset));
+ // r15: length of second string
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(r9, Immediate(kStringRepresentationMask));
+ __ j(zero, &second_is_sequential, Label::kNear);
+ // Rule out short external string and load string resource.
+ STATIC_ASSERT(kShortExternalStringTag != 0);
+ __ testb(r9, Immediate(kShortExternalStringMask));
+ __ j(not_zero, &call_runtime);
+ __ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
+ __ jmp(&second_prepared, Label::kNear);
+ __ bind(&second_is_sequential);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ lea(rdx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ __ bind(&second_prepared);
+
Label non_ascii_string_add_flat_result;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ testl(r8, Immediate(kStringEncodingMask));
+ // r9: instance type of second string
+ // First string and second string have the same encoding.
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ SmiToInteger32(rbx, rbx);
+ __ testb(r9, Immediate(kStringEncodingMask));
__ j(zero, &non_ascii_string_add_flat_result);
- __ testl(r9, Immediate(kStringEncodingMask));
- __ j(zero, &string_add_runtime);
__ bind(&make_flat_ascii_string);
- // Both strings are ascii strings. As they are short they are both flat.
- __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
- // rcx: result string
- __ movq(rbx, rcx);
+ // Both strings are ASCII strings. As they are short they are both flat.
+ __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
+ // rax: result string
// Locate first character of result.
- __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // rax: first char of first argument
- // rbx: result string
- // rcx: first character of result
- // rdx: second string
- // rdi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true);
- // Locate first character of second argument.
- __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- // rbx: result string
- // rcx: next character of result
- // rdx: first char of second argument
- // rdi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
- __ movq(rax, rbx);
+ __ lea(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ // rcx: first char of first string
+ // rbx: first character of result
+ // r14: length of first string
+ StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, true);
+ // rbx: next character of result
+ // rdx: first char of second string
+ // r15: length of second string
+ StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, true);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
- // Handle creating a flat two byte result.
- // rax: first string - known to be two byte
- // rbx: length of resulting flat string
- // rdx: second string
- // r8: instance type of first string
- // r9: instance type of first string
__ bind(&non_ascii_string_add_flat_result);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ and_(r9, Immediate(kStringEncodingMask));
- __ j(not_zero, &string_add_runtime);
- // Both strings are two byte strings. As they are short they are both
- // flat.
- __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
- // rcx: result string
- __ movq(rbx, rcx);
+ // Both strings are two-byte strings. As they are short they are both flat.
+ __ AllocateTwoByteString(rax, rbx, rdi, r8, r9, &call_runtime);
+ // rax: result string
// Locate first character of result.
- __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // Locate first character of first argument.
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // rax: first char of first argument
- // rbx: result string
- // rcx: first character of result
- // rdx: second argument
- // rdi: length of first argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false);
- // Locate first character of second argument.
- __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- // rbx: result string
- // rcx: next character of result
- // rdx: first char of second argument
- // rdi: length of second argument
- StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
- __ movq(rax, rbx);
+ __ lea(rbx, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
+ // rcx: first char of first string
+ // rbx: first character of result
+ // r14: length of first string
+ StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, false);
+ // rbx: next character of result
+ // rdx: first char of second string
+ // r15: length of second string
+ StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, false);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
// Just jump to runtime to add the two strings.
- __ bind(&string_add_runtime);
+ __ bind(&call_runtime);
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
@@ -4822,7 +4928,7 @@
// JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly
Register temp = kScratchRegister;
- // Check that the candidate is a non-external ascii string.
+ // Check that the candidate is a non-external ASCII string.
__ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(
temp, temp, &next_probe[i]);
@@ -4851,10 +4957,13 @@
Register hash,
Register character,
Register scratch) {
- // hash = character + (character << 10);
- __ movl(hash, character);
- __ shll(hash, Immediate(10));
- __ addl(hash, character);
+ // hash = (seed + character) + ((seed + character) << 10);
+ __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+ __ SmiToInteger32(scratch, scratch);
+ __ addl(scratch, character);
+ __ movl(hash, scratch);
+ __ shll(scratch, Immediate(10));
+ __ addl(hash, scratch);
// hash ^= hash >> 6;
__ movl(scratch, hash);
__ shrl(scratch, Immediate(6));
@@ -4893,13 +5002,12 @@
__ shll(scratch, Immediate(15));
__ addl(hash, scratch);
- uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
- __ andl(hash, Immediate(kHashShiftCutOffMask));
+ __ andl(hash, Immediate(String::kHashBitMask));
// if (hash == 0) hash = 27;
Label hash_not_zero;
__ j(not_zero, &hash_not_zero);
- __ Set(hash, 27);
+ __ Set(hash, StringHasher::kZeroHash);
__ bind(&hash_not_zero);
}
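
String hashing is now seeded too: the running hash starts from the heap's hash seed rather than zero (GenerateHashInit computes (seed + c) + ((seed + c) << 10)), finalization masks with String::kHashBitMask instead of the old shift-derived cutoff, and the zero-hash fallback is the named StringHasher::kZeroHash constant rather than a literal 27. A scalar C++ model of the whole pipeline — the constant widths here are assumptions standing in for the real String/StringHasher constants:

#include <stdint.h>
#include <stddef.h>

static const uint32_t kHashBitMask = (1u << 30) - 1;  // assumed mask width
static const uint32_t kZeroHash = 27;                 // StringHasher::kZeroHash

// Seeded Jenkins-style one-at-a-time hash as generated above: per-character
// add/shift/xor steps, the three finalization steps, the mask, and the
// zero-hash substitution.
static uint32_t SeededStringHash(const uint8_t* chars, size_t length,
                                 uint32_t seed) {
  uint32_t hash = seed;
  for (size_t i = 0; i < length; i++) {
    hash += chars[i];      // addl scratch, character (GenerateHashInit for
    hash += hash << 10;    //  the first char, AddCharacter afterwards)
    hash ^= hash >> 6;
  }
  hash += hash << 3;       // GenerateHashGetHash
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= kHashBitMask;    // replaces the old kHashShiftCutOffMask
  return hash == 0 ? kZeroHash : hash;
}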
@@ -4935,8 +5043,12 @@
__ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
__ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
- Label return_rax;
- __ j(equal, &return_rax);
+ Label not_original_string;
+ __ j(not_equal, &not_original_string, Label::kNear);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(kArgumentsSize);
+ __ bind(&not_original_string);
// Special handling of sub-strings of length 1 and 2. One character strings
// are handled in the runtime system (looked up in the single character
// cache). Two character strings are looked for in the symbol cache.
@@ -4955,68 +5067,77 @@
// Get the two characters forming the sub string.
__ SmiToInteger32(rdx, rdx); // From index is no longer smi.
__ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
- __ movzxbq(rcx,
+ __ movzxbq(rdi,
FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
// Try to lookup two character string in symbol table.
Label make_two_character_string;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
- masm, rbx, rcx, rax, rdx, rdi, r14, &make_two_character_string);
+ masm, rbx, rdi, r9, r11, r14, r15, &make_two_character_string);
+ __ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
__ bind(&make_two_character_string);
- // Setup registers for allocating the two character string.
- __ movq(rax, Operand(rsp, kStringOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ // Set up registers for allocating the two character string.
+ __ movzxwq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
+ __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
+ __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&result_longer_than_two);
+ // rax: string
+ // rbx: instance type
+ // rcx: sub string length
+ // rdx: from index (smi)
+ // Deal with different string types: update the index if necessary
+ // and put the underlying string into rdi.
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+ __ testb(rbx, Immediate(kIsIndirectStringMask));
+ __ j(zero, &seq_or_external_string, Label::kNear);
+
+ __ testb(rbx, Immediate(kSlicedNotConsMask));
+ __ j(not_zero, &sliced_string, Label::kNear);
+ // Cons string. Check whether it is flat, then fetch first part.
+ // Flat cons strings have an empty second part.
+ __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
+ Heap::kEmptyStringRootIndex);
+ __ j(not_equal, &runtime);
+ __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
+ // Update instance type.
+ __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ Set(rcx, 2);
+ __ jmp(&underlying_unpacked, Label::kNear);
+
+ __ bind(&sliced_string);
+ // Sliced string. Fetch parent and correct start index by offset.
+ __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
+ __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
+ // Update instance type.
+ __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked, Label::kNear);
+
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the correct register.
+ __ movq(rdi, rax);
+
+ __ bind(&underlying_unpacked);
if (FLAG_string_slices) {
Label copy_routine;
+ // rdi: underlying subject string
+ // rbx: instance type of underlying subject string
+ // rdx: adjusted start index (smi)
+ // rcx: length
// If coming from the make_two_character_string path, the string
// is too short to be sliced anyway.
- STATIC_ASSERT(2 < SlicedString::kMinLength);
- __ jmp(&copy_routine);
- __ bind(&result_longer_than_two);
-
- // rax: string
- // rbx: instance type
- // rcx: sub string length
- // rdx: from index (smi)
- Label allocate_slice, sliced_string, seq_or_external_string;
__ cmpq(rcx, Immediate(SlicedString::kMinLength));
// Short slice. Copy instead of slicing.
__ j(less, &copy_routine);
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ testb(rbx, Immediate(kIsIndirectStringMask));
- __ j(zero, &seq_or_external_string, Label::kNear);
-
- __ testb(rbx, Immediate(kSlicedNotConsMask));
- __ j(not_zero, &sliced_string, Label::kNear);
- // Cons string. Check whether it is flat, then fetch first part.
- __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
- __ j(not_equal, &runtime);
- __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
- __ jmp(&allocate_slice, Label::kNear);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
- __ jmp(&allocate_slice, Label::kNear);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the correct register.
- __ movq(rdi, rax);
-
- __ bind(&allocate_slice);
- // edi: underlying subject string
- // ebx: instance type of original subject string
- // edx: offset
- // ecx: length
// Allocate new sliced string. At this point we do not reload the instance
// type including the string encoding because we simply rely on the info
// provided by the original string. It does not matter if the original
@@ -5027,10 +5148,10 @@
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(rbx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_slice, Label::kNear);
- __ AllocateAsciiSlicedString(rax, rbx, no_reg, &runtime);
+ __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
__ jmp(&set_slice_header, Label::kNear);
__ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(rax, rbx, no_reg, &runtime);
+ __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
__ bind(&set_slice_header);
__ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
__ Integer32ToSmi(rcx, rcx);
@@ -5038,82 +5159,85 @@
__ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
__ movq(FieldOperand(rax, SlicedString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
- __ jmp(&return_rax);
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(kArgumentsSize);
__ bind(&copy_routine);
- } else {
- __ bind(&result_longer_than_two);
}
- // rax: string
- // rbx: instance type
- // rcx: result string length
- // Check for flat ascii string
- Label non_ascii_flat;
- __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat);
+ // rdi: underlying subject string
+ // rbx: instance type of underlying subject string
+ // rdx: adjusted start index (smi)
+ // rcx: length
+ // The subject string can only be an external or a sequential string of
+ // either encoding at this point.
+ Label two_byte_sequential, sequential_string;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(rbx, Immediate(kExternalStringTag));
+ __ j(zero, &sequential_string);
+
+ // Handle external string.
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ testb(rbx, Immediate(kShortExternalStringMask));
+ __ j(not_zero, &runtime);
+ __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&sequential_string);
+ STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ __ testb(rbx, Immediate(kStringEncodingMask));
+ __ j(zero, &two_byte_sequential);
// Allocate the result.
- __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
+ __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
// rax: result string
// rcx: result string length
- __ movq(rdx, rsi); // esi used by following code.
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
- // Load string argument and locate character of sub string start.
- __ movq(rsi, Operand(rsp, kStringOffset));
- __ movq(rbx, Operand(rsp, kFromOffset));
- {
- SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
- __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+ __ movq(r14, rsi); // rsi used by following code.
+ { // Locate character of sub string start.
+ SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
+ __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
SeqAsciiString::kHeaderSize - kHeapObjectTag));
}
+ // Locate first character of result.
+ __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
// rax: result string
// rcx: result length
- // rdx: original value of rsi
// rdi: first character of result
// rsi: character of sub string start
+ // r14: original value of rsi
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
- __ movq(rsi, rdx); // Restore rsi.
- Counters* counters = masm->isolate()->counters();
+ __ movq(rsi, r14); // Restore rsi.
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(kArgumentsSize);
- __ bind(&non_ascii_flat);
- // rax: string
- // rbx: instance type & kStringRepresentationMask | kStringEncodingMask
- // rcx: result string length
- // Check for sequential two byte string
- __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
- __ j(not_equal, &runtime);
-
+ __ bind(&two_byte_sequential);
// Allocate the result.
- __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
+ __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
// rax: result string
// rcx: result string length
- __ movq(rdx, rsi); // esi used by following code.
- // Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
- // Load string argument and locate character of sub string start.
- __ movq(rsi, Operand(rsp, kStringOffset));
- __ movq(rbx, Operand(rsp, kFromOffset));
- {
- SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
- __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+ __ movq(r14, rsi); // rsi used by following code.
+ { // Locate character of sub string start.
+ SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
+ __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
SeqAsciiString::kHeaderSize - kHeapObjectTag));
}
+ // Locate first character of result.
+ __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
// rax: result string
// rcx: result length
- // rdx: original value of rsi
// rdi: first character of result
// rsi: character of sub string start
+ // r14: original value of rsi
StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
- __ movq(rsi, rdx); // Restore esi.
-
- __ bind(&return_rax);
+ __ movq(rsi, r14); // Restore rsi.
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(kArgumentsSize);
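Taken together, the rewritten SubString fast path above has three early exits: return the original string unchanged when the requested range covers it, look two-character results up in the symbol table, and allocate a SlicedString once the underlying string has been unpacked. A hedged illustration of the slice representation (simplified stand-in types, not the real V8 heap layout):

    // A slice stores (parent, offset, length) instead of copying characters.
    // Cons and sliced inputs are unpacked first, so parent is always a
    // sequential or external string and slices never nest.
    struct Slice {
      const char* parent;  // underlying character storage
      int offset;          // adjusted start index into parent
      int length;          // sub string length
    };

    Slice MakeSlice(const char* parent, int from, int to) {
      Slice s = {parent, from, to - from};
      return s;
    }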
@@ -5284,7 +5408,7 @@
// Check that both are sequential ASCII strings.
__ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
- // Inline comparison of ascii strings.
+ // Inline comparison of ASCII strings.
__ IncrementCounter(counters->string_compare_native(), 1);
// Drop arguments from the stack
__ pop(rcx);
@@ -5503,33 +5627,46 @@
}
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- // Save the registers.
- __ pop(rcx);
- __ push(rdx);
- __ push(rax);
- __ push(rcx);
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ Label miss;
+ Condition either_smi = masm->CheckEitherSmi(rdx, rax);
+ __ j(either_smi, &miss, Label::kNear);
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+ __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ Cmp(rcx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+ __ Cmp(rbx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+
+ __ subq(rax, rdx);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(rdx);
__ push(rax);
+ __ push(rdx);
+ __ push(rax);
__ Push(Smi::FromInt(op_));
__ CallExternalReference(miss, 3);
+
+ // Compute the entry point of the rewritten stub.
+ __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
+ __ pop(rax);
+ __ pop(rdx);
}
- // Compute the entry point of the rewritten stub.
- __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
-
- // Restore registers.
- __ pop(rcx);
- __ pop(rax);
- __ pop(rdx);
- __ push(rcx);
-
// Do a tail call to the rewritten stub.
__ jmp(rdi);
}
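For reference, the KNOWN_OBJECTS path added above works because once both operands are non-smis carrying the expected map, equality reduces to pointer identity; the stub returns rax - rdx, which is zero exactly when both sides are the same object. A minimal C++ sketch of that comparison (illustrative, not the stub interface):

    #include <cstdint>

    // Zero iff lhs and rhs are the same object, mirroring subq(rax, rdx).
    intptr_t CompareKnownObjects(void* lhs, void* rhs) {
      return reinterpret_cast<intptr_t>(lhs) - reinterpret_cast<intptr_t>(rhs);
    }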
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index ae5045f..69e77ee 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -41,7 +41,7 @@
namespace v8 {
namespace internal {
-void CPU::Setup() {
+void CPU::SetUp() {
CpuFeatures::Probe();
}
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 339b961..f84772e 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -264,9 +264,7 @@
Label check_codesize;
__ bind(&check_codesize);
__ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
- __ nop();
- }
+ __ Nop(Assembler::kDebugBreakSlotLength);
ASSERT_EQ(Assembler::kDebugBreakSlotLength,
masm->SizeOfCodeGeneratedSince(&check_codesize));
}
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 1fd78fc..efa9888 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -138,8 +138,8 @@
ASSERT(*(call_target_address - 3) == 0x73 && // jae
*(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x90; // nop
- *(call_target_address - 2) = 0x90; // nop
+ *(call_target_address - 3) = 0x66; // 2 byte nop part 1
+ *(call_target_address - 2) = 0x90; // 2 byte nop part 2
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
@@ -157,8 +157,8 @@
Assembler::target_address_at(call_target_address));
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
- ASSERT(*(call_target_address - 3) == 0x90 && // nop
- *(call_target_address - 2) == 0x90 && // nop
+ ASSERT(*(call_target_address - 3) == 0x66 && // 2 byte nop part 1
+ *(call_target_address - 2) == 0x90 && // 2 byte nop part 2
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x73; // jae
*(call_target_address - 2) = 0x07; // offset
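The patching above swaps the first two bytes of the five-byte sequence: 73 07 (jae over the call) becomes 66 90 (a single two-byte nop), which makes the stack-guard call unconditional; the reverse edit restores the branch. A sketch of the forward edit on a plain byte buffer (assumes directly writable memory; not the real Assembler API):

    #include <cassert>
    #include <cstdint>

    void PatchStackCheck(uint8_t* call_target_address) {
      assert(call_target_address[-3] == 0x73 &&  // jae
             call_target_address[-2] == 0x07 &&  // offset
             call_target_address[-1] == 0xe8);   // call
      call_target_address[-3] = 0x66;  // 2 byte nop part 1
      call_target_address[-2] = 0x90;  // 2 byte nop part 2
    }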
@@ -206,12 +206,13 @@
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
+ iterator.Skip(1); // Drop JS frame count.
ASSERT(count == 1);
USE(count);
opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
- ASSERT(Translation::FRAME == opcode);
+ ASSERT(Translation::JS_FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
@@ -247,9 +248,7 @@
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
-#ifdef DEBUG
- output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
-#endif
+ output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -314,7 +313,7 @@
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
} else {
- // Setup the frame pointer and the context pointer.
+ // Set up the frame pointer and the context pointer.
output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
@@ -338,13 +337,117 @@
}
-void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
- int frame_index) {
- // Read the ast node id, function, and frame height for this output frame.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- USE(opcode);
- ASSERT(Translation::FRAME == opcode);
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
+ int frame_index) {
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
+ }
+
+ unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
+
+ // Arguments adaptor frames cannot be topmost or bottommost.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous
+ // frame's top and this frame's size.
+ intptr_t top_address;
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = height;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ input_offset -= (parameter_count * kPointerSize);
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // A marker value is used in place of the context.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t context = reinterpret_cast<intptr_t>(
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ output_frame->SetFrameSlot(output_offset, context);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; context (adaptor sentinel)\n",
+ top_address + output_offset, output_offset, context);
+ }
+
+ // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Number of incoming arguments.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; argc (%d)\n",
+ top_address + output_offset, output_offset, value, height - 1);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Builtins* builtins = isolate_->builtins();
+ Code* adaptor_trampoline =
+ builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
+ intptr_t pc_value = reinterpret_cast<intptr_t>(
+ adaptor_trampoline->instruction_start() +
+ isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
+ output_frame->SetPc(pc_value);
+}
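A quick sanity check on the slot accounting above: the adaptor frame holds the height translated parameters plus five fixed slots (caller's pc, caller's fp, the adaptor sentinel, the function, and argc), which is exactly why output_offset lands on zero at the ASSERT. Sketched under the assumption kPointerSize == 8:

    // height parameters + pc + fp + sentinel context + function + argc.
    int AdaptorFrameSlots(int height) { return height + 5; }
    // output_frame_size == AdaptorFrameSlots(height) * 8
    //                   == height * 8 + kFrameSize (40 bytes on x64)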
+
+
+void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
+ int frame_index) {
int node_id = iterator->Next();
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
@@ -364,9 +467,7 @@
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
-#ifdef DEBUG
- output_frame->SetKind(Code::FUNCTION);
-#endif
+ output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 1b8871f..5cbdad7 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -109,6 +109,7 @@
{ 0xC3, UNSET_OP_ORDER, "ret" },
{ 0xC9, UNSET_OP_ORDER, "leave" },
{ 0xF4, UNSET_OP_ORDER, "hlt" },
+ { 0xFC, UNSET_OP_ORDER, "cld" },
{ 0xCC, UNSET_OP_ORDER, "int3" },
{ 0x60, UNSET_OP_ORDER, "pushad" },
{ 0x61, UNSET_OP_ORDER, "popad" },
@@ -910,15 +911,19 @@
switch (modrm_byte) {
case 0xE0: mnem = "fchs"; break;
case 0xE1: mnem = "fabs"; break;
+ case 0xE3: mnem = "fninit"; break;
case 0xE4: mnem = "ftst"; break;
case 0xE8: mnem = "fld1"; break;
case 0xEB: mnem = "fldpi"; break;
case 0xED: mnem = "fldln2"; break;
case 0xEE: mnem = "fldz"; break;
+ case 0xF0: mnem = "f2xm1"; break;
case 0xF1: mnem = "fyl2x"; break;
+ case 0xF2: mnem = "fptan"; break;
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
case 0xF8: mnem = "fprem"; break;
+ case 0xFD: mnem = "fscale"; break;
case 0xFE: mnem = "fsin"; break;
case 0xFF: mnem = "fcos"; break;
default: UnimplementedInstruction();
@@ -1034,7 +1039,18 @@
}
} else {
get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x28) {
+ if (opcode == 0x1f) {
+ current++;
+ if (rm == 4) { // SIB byte present.
+ current++;
+ }
+ if (mod == 1) { // Byte displacement.
+ current += 1;
+ } else if (mod == 2) { // 32-bit displacement.
+ current += 4;
+ } // else no immediate displacement.
+ AppendToBuffer("nop");
+ } else if (opcode == 0x28) {
AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x29) {
@@ -1178,7 +1194,7 @@
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
current++;
- if (regop == 4) { // SIB byte present.
+ if (rm == 4) { // SIB byte present.
current++;
}
if (mod == 1) { // Byte displacement.
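Both fixes above concern skipping the ModR/M-encoded operand of multi-byte 0f 1f nops; note the SIB byte is signalled by rm == 4, not regop == 4. The skipping logic restated as a standalone sketch (illustrative only; the real decoder lives in DisassemblerX64):

    #include <cstdint>

    const uint8_t* SkipNopOperand(const uint8_t* current) {
      uint8_t modrm = *current++;
      int mod = modrm >> 6;
      int rm = modrm & 7;
      if (rm == 4) current++;           // SIB byte present.
      if (mod == 1) current += 1;       // Byte displacement.
      else if (mod == 2) current += 4;  // 32-bit displacement.
      return current;                   // else no displacement to skip
    }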
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 2626954..3e3d63d 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -87,6 +87,9 @@
class StandardFrameConstants : public AllStatic {
public:
+ // Fixed part of the frame consists of return address, caller fp,
+ // context and function.
+ static const int kFixedFrameSize = 4 * kPointerSize;
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
@@ -112,6 +115,8 @@
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 963912f..4387a32 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -106,7 +106,7 @@
// formal parameter count expected by the function.
//
// The live registers are:
-// o rdi: the JS function object being called (ie, ourselves)
+// o rdi: the JS function object being called (i.e. ourselves)
// o rsi: our context
// o rbp: our caller's frame pointer
// o rsp: stack pointer (pointing to return address)
@@ -226,9 +226,15 @@
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(
- is_classic_mode() ? ArgumentsAccessStub::NEW_NON_STRICT_SLOW
- : ArgumentsAccessStub::NEW_STRICT);
+ ArgumentsAccessStub::Type type;
+ if (!is_classic_mode()) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ }
+ ArgumentsAccessStub stub(type);
__ CallStub(&stub);
SetVar(arguments, rax, rbx, rdx);
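Restating the three-way selection above: strict-mode functions always get an unmapped arguments object, and sloppy-mode functions with duplicate parameter names must take the slow path, because a mapped arguments object cannot sensibly alias two parameters sharing one name. A sketch of the decision (standalone, using the stub's own enum names):

    enum ArgumentsType { NEW_STRICT, NEW_NON_STRICT_SLOW, NEW_NON_STRICT_FAST };

    ArgumentsType ChooseArgumentsType(bool classic_mode, bool has_duplicates) {
      if (!classic_mode) return NEW_STRICT;            // unmapped object
      if (has_duplicates) return NEW_NON_STRICT_SLOW;  // cannot alias safely
      return NEW_NON_STRICT_FAST;                      // mapped arguments
    }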
@@ -967,7 +973,7 @@
__ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
__ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- // Setup the four remaining stack slots.
+ // Set up the four remaining stack slots.
__ push(rax); // Map.
__ push(rdx); // Enumeration cache.
__ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
@@ -2820,7 +2826,7 @@
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- MathPowStub stub;
+ MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3530,7 +3536,7 @@
// One-character separator case
__ bind(&one_char_separator);
- // Get the separator ascii character value.
+ // Get the separator ASCII character value.
// Register "string" holds the separator.
__ movzxbl(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
__ Set(index, 0);
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 3a57753..1fdffa2 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -462,30 +462,58 @@
__ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
__ shr(rdi, Immediate(String::kHashShift));
__ xor_(rcx, rdi);
- __ and_(rcx, Immediate(KeyedLookupCache::kCapacityMask));
+ int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
+ __ and_(rcx, Immediate(mask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
+ Label load_in_object_property;
+ static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+ Label hit_on_nth_entry[kEntriesPerBucket];
ExternalReference cache_keys
= ExternalReference::keyed_lookup_cache_keys(masm->isolate());
- __ movq(rdi, rcx);
- __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
- __ LoadAddress(kScratchRegister, cache_keys);
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
+
+ for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+ Label try_next_entry;
+ __ movq(rdi, rcx);
+ __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
+ __ LoadAddress(kScratchRegister, cache_keys);
+ int off = kPointerSize * i * 2;
+ __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ j(not_equal, &try_next_entry);
+ __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ j(equal, &hit_on_nth_entry[i]);
+ __ bind(&try_next_entry);
+ }
+
+ int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
+ __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &slow);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize));
+ __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(not_equal, &slow);
// Get field offset, which is a 32-bit integer.
ExternalReference cache_field_offsets
= ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
- __ LoadAddress(kScratchRegister, cache_field_offsets);
- __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
- __ subq(rdi, rcx);
- __ j(above_equal, &property_array_property);
+
+ // Hit on nth entry.
+ for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+ __ bind(&hit_on_nth_entry[i]);
+ if (i != 0) {
+ __ addl(rcx, Immediate(i));
+ }
+ __ LoadAddress(kScratchRegister, cache_field_offsets);
+ __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
+ __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
+ __ subq(rdi, rcx);
+ __ j(above_equal, &property_array_property);
+ if (i != 0) {
+ __ jmp(&load_in_object_property);
+ }
+ }
// Load in-object property.
+ __ bind(&load_in_object_property);
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ addq(rcx, rdi);
__ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
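The unrolled loop above probes every (map, symbol) pair in the hash bucket, jumping to the slow path only after the last entry fails to match. The same lookup in plain C++, with stand-in types rather than the real KeyedLookupCache interface:

    struct Entry { void* map; void* symbol; };

    // keys holds two pointers per entry; field_offsets is indexed in parallel.
    int Probe(const Entry* keys, const int* field_offsets,
              unsigned index, int entries_per_bucket,
              void* map, void* symbol) {
      for (int i = 0; i < entries_per_bucket; i++) {
        const Entry& e = keys[index + i];
        if (e.map == map && e.symbol == symbol) {
          return field_offsets[index + i];  // hit on i-th entry
        }
      }
      return -1;  // miss: take the slow path
    }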
@@ -1397,11 +1425,10 @@
// -- rsp[0] : return address
// -----------------------------------
//
- // This accepts as a receiver anything JSObject::SetElementsLength accepts
- // (currently anything except for external and pixel arrays which means
- // anything with elements of FixedArray type.), but currently is restricted
- // to JSArray.
- // Value must be a number, but only smis are accepted as the most common case.
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
Label miss;
@@ -1423,6 +1450,13 @@
__ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
__ j(not_equal, &miss);
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
+ __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(equal, &miss);
+
// Check that value is a smi.
__ JumpIfNotSmi(value, &miss);
@@ -1641,6 +1675,9 @@
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
rewritten = stub.GetCode();
}
set_target(*rewritten);
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 82eabac..c0723ff 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -368,7 +368,11 @@
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->BeginFrame(environment->ast_id(), closure_id, height);
+ if (environment->is_arguments_adaptor()) {
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ } else {
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ }
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
@@ -504,10 +508,14 @@
// |>------------ translation_size ------------<|
int frame_count = 0;
+ int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
+ if (!e->is_arguments_adaptor()) {
+ ++jsframe_count;
+ }
}
- Translation translation(&translations_, frame_count);
+ Translation translation(&translations_, frame_count, jsframe_count);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
@@ -1154,8 +1162,13 @@
void LCodeGen::DoConstantT(LConstantT* instr) {
- ASSERT(instr->result()->IsRegister());
- __ Move(ToRegister(instr->result()), instr->value());
+ Handle<Object> value = instr->value();
+ if (value->IsSmi()) {
+ __ Move(ToRegister(instr->result()), value);
+ } else {
+ __ LoadHeapObject(ToRegister(instr->result()),
+ Handle<HeapObject>::cast(value));
+ }
}
@@ -1750,13 +1763,17 @@
// Branches to a label or falls through with the answer in the z flag.
-// Trashes the temp register and possibly input (if it and temp are aliased).
+// Trashes the temp register.
void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
Handle<String> class_name,
Register input,
Register temp,
- Register scratch) {
+ Register temp2) {
+ ASSERT(!input.is(temp));
+ ASSERT(!input.is(temp2));
+ ASSERT(!temp.is(temp2));
+
__ JumpIfSmi(input, is_false);
if (class_name->IsEqualTo(CStrVector("Function"))) {
@@ -1777,10 +1794,10 @@
// Faster code path to avoid two compares: subtract lower bound from the
// actual type and do a signed compare with the width of the type range.
__ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ movzxbl(scratch, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ subq(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmpq(scratch, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
+ __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(above, is_false);
}
@@ -1932,7 +1949,7 @@
InstanceofStub stub(flags);
__ push(ToRegister(instr->InputAt(0)));
- __ Push(instr->function());
+ __ PushHeapObject(instr->function());
static const int kAdditionalDelta = 10;
int delta =
@@ -2002,13 +2019,7 @@
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- if (result.is(rax)) {
- __ load_rax(instr->hydrogen()->cell().location(),
- RelocInfo::GLOBAL_PROPERTY_CELL);
- } else {
- __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
- __ movq(result, Operand(result, 0));
- }
+ __ LoadGlobalCell(result, instr->hydrogen()->cell());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
@@ -2029,25 +2040,27 @@
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register object = ToRegister(instr->TempAt(0));
- Register address = ToRegister(instr->TempAt(1));
- Register value = ToRegister(instr->InputAt(0));
- ASSERT(!value.is(object));
- Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell());
-
- __ movq(address, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
+ Register value = ToRegister(instr->value());
+ Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ CompareRoot(Operand(address, 0), Heap::kTheHoleValueRootIndex);
+ // We have a temp because CompareRoot might clobber kScratchRegister.
+ Register cell = ToRegister(instr->TempAt(0));
+ ASSERT(!value.is(cell));
+ __ movq(cell, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
+ // Store the value.
+ __ movq(Operand(cell, 0), value);
+ } else {
+ // Store the value.
+ __ movq(kScratchRegister, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ movq(Operand(kScratchRegister, 0), value);
}
-
- // Store the value.
- __ movq(Operand(address, 0), value);
// Cells are always rescanned, so no write barrier here.
}
@@ -2068,13 +2081,37 @@
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ movq(result, ContextOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ Label is_not_hole;
+ __ j(not_equal, &is_not_hole, Label::kNear);
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ bind(&is_not_hole);
+ }
+ }
}
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
- __ movq(ContextOperand(context, instr->slot_index()), value);
+
+ Operand target = ContextOperand(context, instr->slot_index());
+
+ Label skip_assignment;
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
+ if (instr->hydrogen()->DeoptimizesOnHole()) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &skip_assignment);
+ }
+ }
+ __ movq(target, value);
+
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
@@ -2089,6 +2126,8 @@
EMIT_REMEMBERED_SET,
check_needed);
}
+
+ __ bind(&skip_assignment);
}
@@ -2110,7 +2149,7 @@
Handle<String> name) {
LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsProperty() &&
+ ASSERT(lookup.IsFound() &&
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
if (lookup.type() == FIELD) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
@@ -2126,7 +2165,7 @@
}
} else {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ __ LoadHeapObject(result, function);
}
}
@@ -2533,7 +2572,7 @@
RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
- v8::internal::ParameterCount actual(rax);
+ ParameterCount actual(rax);
__ InvokeFunction(function, actual, CALL_FUNCTION,
safepoint_generator, CALL_AS_METHOD);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2548,7 +2587,7 @@
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- LoadHeapObject(result, instr->hydrogen()->closure());
+ __ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -2583,34 +2622,47 @@
int arity,
LInstruction* instr,
CallKind call_kind) {
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- }
-
- // Set rax to arguments count if adaption is not needed. Assumes that rax
- // is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ Set(rax, arity);
- }
+ bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
+ function->shared()->formal_parameter_count() == arity;
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- // Invoke function.
- __ SetCallKind(rcx, call_kind);
- if (*function == *info()->closure()) {
- __ CallSelf();
- } else {
- __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- }
+ if (can_invoke_directly) {
+ __ LoadHeapObject(rdi, function);
- // Setup deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+ // Change context if needed.
+ bool change_context =
+ (info()->closure()->context() != function->context()) ||
+ scope()->contains_with() ||
+ (scope()->num_heap_slots() > 0);
+ if (change_context) {
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ }
+
+ // Set rax to arguments count if adaption is not needed. Assumes that rax
+ // is available to write to at this point.
+ if (!function->NeedsArgumentsAdaption()) {
+ __ Set(rax, arity);
+ }
+
+ // Invoke function.
+ __ SetCallKind(rcx, call_kind);
+ if (*function == *info()->closure()) {
+ __ CallSelf();
+ } else {
+ __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ }
+
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+ } else {
+ // We need to adapt arguments.
+ SafepointGenerator generator(
+ this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(arity);
+ __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+ }
// Restore context.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2619,7 +2671,6 @@
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
- __ Move(rdi, instr->function());
CallKnownFunction(instr->function(),
instr->arity(),
instr,
@@ -2834,65 +2885,101 @@
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+
+ // Note that according to ECMA-262 15.8.2.13:
+ // Math.pow(-Infinity, 0.5) == Infinity
+ // Math.sqrt(-Infinity) == NaN
+ Label done, sqrt;
+ // Check base for -Infinity. According to IEEE-754, double-precision
+ // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
+ __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE);
+ __ movq(xmm_scratch, kScratchRegister);
+ __ ucomisd(xmm_scratch, input_reg);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &sqrt, Label::kNear);
+ __ j(carry, &sqrt, Label::kNear);
+ // If input is -Infinity, return Infinity.
+ __ xorps(input_reg, input_reg);
+ __ subsd(input_reg, xmm_scratch);
+ __ jmp(&done, Label::kNear);
+
+ // Square root.
+ __ bind(&sqrt);
__ xorps(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
__ sqrtsd(input_reg, input_reg);
+ __ bind(&done);
}
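The -Infinity special case above exists because ECMA-262 15.8.2.13 defines Math.pow(-Infinity, 0.5) as +Infinity, whereas IEEE-754 sqrt(-Infinity) is NaN, so sqrtsd alone would give the wrong answer. A small self-contained check:

    #include <cmath>
    #include <cstdio>

    int main() {
      std::printf("%f\n", std::sqrt(-INFINITY));      // nan
      std::printf("%f\n", std::pow(-INFINITY, 0.5));  // inf
      return 0;
    }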
void LCodeGen::DoPower(LPower* instr) {
- LOperand* left = instr->InputAt(0);
- XMMRegister left_reg = ToDoubleRegister(left);
- ASSERT(!left_reg.is(xmm1));
- LOperand* right = instr->InputAt(1);
- XMMRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
- if (exponent_type.IsDouble()) {
- __ PrepareCallCFunction(2);
- // Move arguments to correct registers
- __ movaps(xmm0, left_reg);
- ASSERT(ToDoubleRegister(right).is(xmm1));
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 2);
- } else if (exponent_type.IsInteger32()) {
- __ PrepareCallCFunction(2);
- // Move arguments to correct registers: xmm0 and edi (not rdi).
- // On Windows, the registers are xmm0 and edx.
- __ movaps(xmm0, left_reg);
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+
+ // Choose register conforming to calling convention (when bailing out).
#ifdef _WIN64
- ASSERT(ToRegister(right).is(rdx));
+ Register exponent = rdx;
#else
- ASSERT(ToRegister(right).is(rdi));
+ Register exponent = rdi;
#endif
- __ CallCFunction(
- ExternalReference::power_double_int_function(isolate()), 2);
- } else {
- ASSERT(exponent_type.IsTagged());
- Register right_reg = ToRegister(right);
+ ASSERT(!instr->InputAt(1)->IsRegister() ||
+ ToRegister(instr->InputAt(1)).is(exponent));
+ ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+ ToDoubleRegister(instr->InputAt(1)).is(xmm1));
+ ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
- Label non_smi, call;
- __ JumpIfNotSmi(right_reg, &non_smi);
- __ SmiToInteger32(right_reg, right_reg);
- __ cvtlsi2sd(xmm1, right_reg);
- __ jmp(&call);
-
- __ bind(&non_smi);
- __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , kScratchRegister);
+ if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(exponent, &no_deopt);
+ __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
DeoptimizeIf(not_equal, instr->environment());
- __ movsd(xmm1, FieldOperand(right_reg, HeapNumber::kValueOffset));
-
- __ bind(&call);
- __ PrepareCallCFunction(2);
- // Move arguments to correct registers xmm0 and xmm1.
- __ movaps(xmm0, left_reg);
- // Right argument is already in xmm1.
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 2);
+ __ bind(&no_deopt);
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ MathPowStub stub(MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(MathPowStub::DOUBLE);
+ __ CallStub(&stub);
}
- // Return value is in xmm0.
- __ movaps(result_reg, xmm0);
- // Restore context register.
+}
+
+
+void LCodeGen::DoRandom(LRandom* instr) {
+ // Having marked this instruction as a call we can use any
+ // registers.
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+
+ // Choose the right register for the first argument depending on
+ // calling convention.
+#ifdef _WIN64
+ ASSERT(ToRegister(instr->InputAt(0)).is(rcx));
+ Register global_object = rcx;
+#else
+ ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
+ Register global_object = rdi;
+#endif
+
+ __ PrepareCallCFunction(1);
+ __ movq(global_object,
+ FieldOperand(global_object, GlobalObject::kGlobalContextOffset));
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+ // Convert 32 random bits in rax to 0.(32 random bits) in a double
+ // by computing:
+ // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
+ __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
+ __ movd(xmm2, rcx);
+ __ movd(xmm1, rax);
+ __ cvtss2sd(xmm2, xmm2);
+ __ xorps(xmm1, xmm2);
+ __ subsd(xmm1, xmm2);
}
@@ -3027,7 +3114,6 @@
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
- __ Move(rdi, instr->target());
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -3170,13 +3256,6 @@
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
- // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
- // conversion, so it deopts in that case.
- if (instr->hydrogen()->ValueNeedsSmiCheck()) {
- Condition cc = masm()->CheckSmi(value);
- DeoptimizeIf(NegateCondition(cc), instr->environment());
- }
-
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3487,6 +3566,7 @@
void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
LEnvironment* env) {
Label load_smi, done;
@@ -3514,6 +3594,15 @@
}
// Heap number to XMM conversion.
__ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ XMMRegister xmm_scratch = xmm0;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, result_reg);
+ __ j(not_equal, &done, Label::kNear);
+ __ movmskpd(kScratchRegister, result_reg);
+ __ testq(kScratchRegister, Immediate(1));
+ DeoptimizeIf(not_zero, env);
+ }
__ jmp(&done, Label::kNear);
// Smi to XMM conversion
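The minus-zero path above needs movmskpd because ucomisd reports -0.0 equal to +0.0; only the sign bit distinguishes the two. Equivalent portable logic, as a sketch:

    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return d == 0.0 && (bits >> 63) != 0;  // equals zero, sign bit set
    }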
@@ -3605,6 +3694,7 @@
EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
+ instr->hydrogen()->deoptimize_on_minus_zero(),
instr->environment());
}
@@ -3710,20 +3800,37 @@
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- ASSERT(instr->InputAt(0)->IsRegister());
- Register reg = ToRegister(instr->InputAt(0));
- __ Cmp(reg, instr->hydrogen()->target());
+ Register reg = ToRegister(instr->value());
+ Handle<JSFunction> target = instr->hydrogen()->target();
+ if (isolate()->heap()->InNewSpace(*target)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(target);
+ __ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ cmpq(reg, Operand(kScratchRegister, 0));
+ } else {
+ __ Cmp(reg, target);
+ }
DeoptimizeIf(not_equal, instr->environment());
}
+void LCodeGen::DoCheckMapCommon(Register reg,
+ Handle<Map> map,
+ CompareMapMode mode,
+ LEnvironment* env) {
+ Label success;
+ __ CompareMap(reg, map, &success, mode);
+ DeoptimizeIf(not_equal, env);
+ __ bind(&success);
+}
+
+
void LCodeGen::DoCheckMap(LCheckMap* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- instr->hydrogen()->map());
- DeoptimizeIf(not_equal, instr->environment());
+ Handle<Map> map = instr->hydrogen()->map();
+ DoCheckMapCommon(reg, map, instr->hydrogen()->mode(), instr->environment());
}
@@ -3778,18 +3885,6 @@
}
-void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
- if (heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(object);
- __ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- __ movq(result, Operand(result, 0));
- } else {
- __ Move(result, object);
- }
-}
-
-
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register reg = ToRegister(instr->TempAt(0));
@@ -3797,37 +3892,51 @@
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
- LoadHeapObject(reg, current_prototype);
+ __ LoadHeapObject(reg, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current_prototype->map()));
- DeoptimizeIf(not_equal, instr->environment());
+ DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
- LoadHeapObject(reg, current_prototype);
+ __ LoadHeapObject(reg, current_prototype);
}
// Check the holder map.
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
- Handle<Map>(current_prototype->map()));
- DeoptimizeIf(not_equal, instr->environment());
+ DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ Heap* heap = isolate()->heap();
+ ElementsKind boilerplate_elements_kind =
+ instr->hydrogen()->boilerplate_elements_kind();
- // Setup the parameters to the stub/runtime call.
+ // Deopt if the array literal boilerplate ElementsKind is of a type different
+ // than the expected one. The check isn't necessary if the boilerplate has
+ // already been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ // Load the map's "bit field 2".
+ __ movb(rbx, FieldOperand(rbx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(rbx, Immediate(Map::kElementsKindMask));
+ __ cmpb(rbx, Immediate(boilerplate_elements_kind <<
+ Map::kElementsKindShift));
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+
+ // Set up the parameters to the stub/runtime call.
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(instr->hydrogen()->constant_elements());
+ // Boilerplate already exists, constant elements are never accessed.
+ // Pass an empty fixed array.
+ __ Push(Handle<FixedArray>(heap->empty_fixed_array()));
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@@ -3843,9 +3952,9 @@
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
- constant_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -3884,10 +3993,10 @@
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ lea(rcx, Operand(result, *offset));
__ movq(FieldOperand(result, total_offset), rcx);
- LoadHeapObject(source, value_object);
+ __ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
- LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
+ __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
__ movq(FieldOperand(result, total_offset), rcx);
} else {
__ movq(rcx, value, RelocInfo::NONE);
@@ -3912,7 +4021,7 @@
__ bind(&allocated);
int offset = 0;
- LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
+ __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset);
ASSERT_EQ(size, offset);
}
@@ -3922,7 +4031,7 @@
Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties();
- // Setup the parameters to the stub/runtime call.
+ // Set up the parameters to the stub/runtime call.
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
@@ -4038,7 +4147,12 @@
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
- __ Push(ToHandle(LConstantOperand::cast(operand)));
+ Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+ if (object->IsSmi()) {
+ __ Push(Handle<Smi>::cast(object));
+ } else {
+ __ PushHeapObject(Handle<HeapObject>::cast(object));
+ }
} else if (operand->IsRegister()) {
__ push(ToRegister(operand));
} else {
@@ -4167,11 +4281,7 @@
int current_pc = masm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- while (padding_size > 0) {
- int nop_size = padding_size > 9 ? 9 : padding_size;
- __ nop(nop_size);
- padding_size -= nop_size;
- }
+ __ Nop(padding_size);
}
}
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 7bd7fe6..2890c53 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -102,7 +102,10 @@
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
- // Parallel move support.
+ void DoCheckMapCommon(Register reg, Handle<Map> map,
+ CompareMapMode mode, LEnvironment* env);
+
+// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
@@ -198,7 +201,6 @@
LInstruction* instr,
CallKind call_kind);
- void LoadHeapObject(Register result, Handle<HeapObject> object);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode,
@@ -253,6 +255,7 @@
void EmitNumberUntagD(Register input,
XMMRegister result,
bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
LEnvironment* env);
// Emits optimized code for typeof x == "y". Modifies input register.
@@ -373,7 +376,7 @@
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
- void SetExit(Label *exit) { external_exit_ = exit; }
+ void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
index c3c617c..bf5d31d 100644
--- a/src/x64/lithium-gap-resolver-x64.cc
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -198,7 +198,7 @@
if (cgen_->IsInteger32Constant(constant_source)) {
__ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
- __ Move(dst, cgen_->ToHandle(constant_source));
+ __ LoadObject(dst, cgen_->ToHandle(constant_source));
}
} else {
ASSERT(destination->IsStackSlot());
@@ -207,7 +207,8 @@
// Allow top 32 bits of an untagged Integer32 to be arbitrary.
__ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
- __ Move(dst, cgen_->ToHandle(constant_source));
+ __ LoadObject(kScratchRegister, cgen_->ToHandle(constant_source));
+ __ movq(dst, kScratchRegister);
}
}
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index b486fae..ac98a4c 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -1000,14 +1000,16 @@
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber);
+ ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor());
int value_count = hydrogen_env->length();
LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+ hydrogen_env->is_arguments_adaptor(),
ast_id,
hydrogen_env->parameter_count(),
argument_count_,
value_count,
outer);
+ int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1016,13 +1018,17 @@
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument((*argument_index_accumulator)++);
+ op = new LArgument(argument_index++);
} else {
op = UseAny(value);
}
result->AddValue(op, value->representation());
}
+ if (!hydrogen_env->is_arguments_adaptor()) {
+ *argument_index_accumulator = argument_index;
+ }
+
return result;
}
@@ -1033,16 +1039,25 @@
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- HValue* v = instr->value();
- if (v->EmitAtUses()) {
- ASSERT(v->IsConstant());
- ASSERT(!v->representation().IsDouble());
- HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
+ HValue* value = instr->value();
+ if (value->EmitAtUses()) {
+ ASSERT(value->IsConstant());
+ ASSERT(!value->representation().IsDouble());
+ HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
}
- return AssignEnvironment(new LBranch(UseRegister(v)));
+
+ LBranch* result = new LBranch(UseRegister(value));
+ // Tagged values that are not known smis or booleans require a
+ // deoptimization environment.
+ Representation rep = value->representation();
+ HType type = value->type();
+ if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
+ return AssignEnvironment(result);
+ }
+ return result;
}
@@ -1329,7 +1344,11 @@
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstant(instr->MostConstantOperand());
LMulI* mul = new LMulI(left, right);
- return AssignEnvironment(DefineSameAsFirst(mul));
+ if (instr->CheckFlag(HValue::kCanOverflow) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ AssignEnvironment(mul);
+ }
+ return DefineSameAsFirst(mul);
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
@@ -1397,11 +1416,24 @@
UseFixed(instr->right(), rdi);
#endif
LPower* result = new LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr,
+ return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
CAN_DEOPTIMIZE_EAGERLY);
}
+LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->global_object()->representation().IsTagged());
+#ifdef _WIN64
+ LOperand* global_object = UseFixed(instr->global_object(), rcx);
+#else
+ LOperand* global_object = UseFixed(instr->global_object(), rdi);
+#endif
+ LRandom* result = new LRandom(global_object);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
@@ -1525,7 +1557,7 @@
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
- return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ return new LClassOfTestAndBranch(UseRegister(instr->value()),
TempRegister(),
TempRegister());
}
@@ -1553,7 +1585,7 @@
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new LValueOf(object);
- return AssignEnvironment(DefineSameAsFirst(result));
+ return DefineSameAsFirst(result);
}
@@ -1768,11 +1800,12 @@
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LStoreGlobalCell* result =
- new LStoreGlobalCell(UseTempRegister(instr->value()),
- TempRegister(),
- TempRegister());
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ LOperand* value = UseRegister(instr->value());
+ // Use a temp to avoid reloading the cell value address in the case where
+ // we perform a hole check.
+ return instr->RequiresHoleCheck()
+ ? AssignEnvironment(new LStoreGlobalCell(value, TempRegister()))
+ : new LStoreGlobalCell(value, NULL);
}
@@ -1786,7 +1819,8 @@
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadContextSlot(context));
+ LInstruction* result = DefineAsRegister(new LLoadContextSlot(context));
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1803,7 +1837,8 @@
value = UseRegister(instr->value());
temp = NULL;
}
- return new LStoreContextSlot(context, value, temp);
+ LInstruction* result = new LStoreContextSlot(context, value, temp);
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1863,7 +1898,8 @@
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
- return AssignEnvironment(DefineAsRegister(result));
+ if (instr->RequiresHoleCheck()) AssignEnvironment(result);
+ return DefineAsRegister(result);
}
@@ -1882,12 +1918,11 @@
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
- Representation representation(instr->representation());
ASSERT(
- (representation.IsInteger32() &&
+ (instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (representation.IsDouble() &&
+ (instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
@@ -1926,8 +1961,7 @@
LOperand* key = needs_write_barrier
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
-
- return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+ return new LStoreKeyedFastElement(obj, key, val);
}
@@ -1947,13 +1981,12 @@
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
- Representation representation(instr->value()->representation());
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
- (representation.IsInteger32() &&
+ (instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (representation.IsDouble() &&
+ (instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
@@ -2216,6 +2249,7 @@
HEnvironment* outer = current_block_->last_environment();
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->arguments_count(),
instr->function(),
undefined,
instr->call_kind());
@@ -2226,7 +2260,8 @@
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- HEnvironment* outer = current_block_->last_environment()->outer();
+ HEnvironment* outer = current_block_->last_environment()->
+ DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
return NULL;
}
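The DoBranch rewrite above only attaches a deoptimization environment when the generic ToBoolean conversion can actually bail out. A standalone model of the predicate (illustrative names, not V8 code):

    #include <iostream>

    enum class Rep { kTagged, kInteger32, kDouble };
    struct TypeBits { bool is_smi; bool is_boolean; };

    // Mirrors the condition added in LChunkBuilder::DoBranch: only a tagged
    // value not already known to be a smi or boolean can deoptimize.
    bool NeedsDeoptEnvironment(Rep rep, TypeBits type) {
      return rep == Rep::kTagged && !type.is_smi && !type.is_boolean;
    }

    int main() {
      std::cout << NeedsDeoptEnvironment(Rep::kTagged, {false, false})    // 1
                << NeedsDeoptEnvironment(Rep::kTagged, {true, false})     // 0
                << NeedsDeoptEnvironment(Rep::kInteger32, {false, false}) // 0
                << "\n";
      return 0;
    }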
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index c21223b..193f038 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -141,6 +141,7 @@
V(Parameter) \
V(Power) \
V(PushArgument) \
+ V(Random) \
V(RegExpLiteral) \
V(Return) \
V(ShiftI) \
@@ -1024,6 +1025,17 @@
};
+class LRandom: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LRandom(LOperand* global_object) {
+ inputs_[0] = global_object;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Random, "random")
+ DECLARE_HYDROGEN_ACCESSOR(Random)
+};
+
+
class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1231,16 +1243,17 @@
};
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> {
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
public:
- explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
+ temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+
+ LOperand* value() { return inputs_[0]; }
};
@@ -1781,6 +1794,8 @@
inputs_[0] = value;
}
+ LOperand* value() { return InputAt(0); }
+
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
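A reading aid for the header changes above (an assumed simplification of lithium.h, not the real class): the three LTemplateInstruction arguments are <results, inputs, temps>. The new LRandom is <1, 1, 0>, one double result computed from the global-object input, and LStoreGlobalCell shrinks from <0, 1, 2> to <0, 1, 1>, keeping a single temp for the cell address on the hole-check path.

    // Sketch only; the real LTemplateInstruction derives from LInstruction
    // and stores its operands in EmbeddedContainers.
    template <int R, int I, int T>  // <results, inputs, temps>
    struct LShape {
      static const int kResults = R, kInputs = I, kTemps = T;
    };

    typedef LShape<1, 1, 0> LRandomShape;           // defines one value
    typedef LShape<0, 1, 1> LStoreGlobalCellShape;  // one temp, down from two

    int main() { return LStoreGlobalCellShape::kTemps; }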
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index caca628..2d6bd08 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -495,7 +495,7 @@
// from the real pointer as a smi.
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
+ // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
if (msg != NULL) {
@@ -2114,7 +2114,7 @@
movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
- // Check that both are flat ascii strings.
+ // Check that both are flat ASCII strings.
ASSERT(kNotStringTag != 0);
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
@@ -2160,7 +2160,7 @@
movq(scratch1, first_object_instance_type);
movq(scratch2, second_object_instance_type);
- // Check that both are flat ascii strings.
+ // Check that both are flat ASCII strings.
ASSERT(kNotStringTag != 0);
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
@@ -2238,6 +2238,43 @@
}
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ movq(result, Operand(result, 0));
+ } else {
+ Move(result, object);
+ }
+}
+
+
+void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ movq(kScratchRegister, Operand(kScratchRegister, 0));
+ push(kScratchRegister);
+ } else {
+ Push(object);
+ }
+}
+
+
+void MacroAssembler::LoadGlobalCell(Register dst,
+ Handle<JSGlobalPropertyCell> cell) {
+ if (dst.is(rax)) {
+ load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
+ } else {
+ movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ movq(dst, Operand(dst, 0));
+ }
+}
+
+
void MacroAssembler::Push(Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
@@ -2702,15 +2739,48 @@
}
+void MacroAssembler::CompareMap(Register obj,
+ Handle<Map> map,
+ Label* early_success,
+ CompareMapMode mode) {
+ Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+ if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
+ Map* transitioned_fast_element_map(
+ map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+ ASSERT(transitioned_fast_element_map == NULL ||
+ map->elements_kind() != FAST_ELEMENTS);
+ if (transitioned_fast_element_map != NULL) {
+ j(equal, early_success, Label::kNear);
+ Cmp(FieldOperand(obj, HeapObject::kMapOffset),
+ Handle<Map>(transitioned_fast_element_map));
+ }
+
+ Map* transitioned_double_map(
+ map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+ ASSERT(transitioned_double_map == NULL ||
+ map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+ if (transitioned_double_map != NULL) {
+ j(equal, early_success, Label::kNear);
+ Cmp(FieldOperand(obj, HeapObject::kMapOffset),
+ Handle<Map>(transitioned_double_map));
+ }
+ }
+}
+
+
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type) {
+ SmiCheckType smi_check_type,
+ CompareMapMode mode) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
- Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+
+ Label success;
+ CompareMap(obj, map, &success, mode);
j(not_equal, fail);
+ bind(&success);
}
@@ -2960,26 +3030,30 @@
ASSERT(flag == JUMP_FUNCTION || has_frame());
Label done;
+ bool definitely_mismatches = false;
InvokePrologue(expected,
actual,
Handle<Code>::null(),
code,
&done,
+ &definitely_mismatches,
flag,
Label::kNear,
call_wrapper,
call_kind);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
- call(code);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
- jmp(code);
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ SetCallKind(rcx, call_kind);
+ call(code);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(rcx, call_kind);
+ jmp(code);
+ }
+ bind(&done);
}
- bind(&done);
}
@@ -2994,27 +3068,31 @@
ASSERT(flag == JUMP_FUNCTION || has_frame());
Label done;
+ bool definitely_mismatches = false;
Register dummy = rax;
InvokePrologue(expected,
actual,
code,
dummy,
&done,
+ &definitely_mismatches,
flag,
Label::kNear,
call_wrapper,
call_kind);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(rcx, call_kind);
- Call(code, rmode);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(rcx, call_kind);
- Jump(code, rmode);
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ SetCallKind(rcx, call_kind);
+ Call(code, rmode);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(rcx, call_kind);
+ Jump(code, rmode);
+ }
+ bind(&done);
}
- bind(&done);
}
@@ -3049,7 +3127,7 @@
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and set up the context.
- Move(rdi, function);
+ LoadHeapObject(rdi, function);
movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// We call indirectly through the code field in the function to
@@ -3066,11 +3144,13 @@
Handle<Code> code_constant,
Register code_register,
Label* done,
+ bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance near_jump,
const CallWrapper& call_wrapper,
CallKind call_kind) {
bool definitely_matches = false;
+ *definitely_mismatches = false;
Label invoke;
if (expected.is_immediate()) {
ASSERT(actual.is_immediate());
@@ -3086,6 +3166,7 @@
// arguments.
definitely_matches = true;
} else {
+ *definitely_mismatches = true;
Set(rbx, expected.immediate());
}
}
@@ -3122,7 +3203,9 @@
SetCallKind(rcx, call_kind);
Call(adaptor, RelocInfo::CODE_TARGET);
call_wrapper.AfterCall();
- jmp(done, near_jump);
+ if (!*definitely_mismatches) {
+ jmp(done, near_jump);
+ }
} else {
SetCallKind(rcx, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
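The definitely_mismatches flag threaded through InvokePrologue records the case where both argument counts are compile-time immediates and differ: such a call always runs through the arguments adaptor, so the jump back to done (and, in the callers, the whole direct-call sequence) would be dead code. A standalone model of the immediate/immediate arm (kDontAdaptSentinel is an assumed stand-in for SharedFunctionInfo::kDontAdaptArgumentsSentinel):

    #include <cassert>

    const int kDontAdaptSentinel = -1;  // assumed stand-in value

    enum Outcome { kDirectCall, kAdaptorCall };

    // A known match, or a callee that does not adapt its arguments, takes
    // the direct call; a known mismatch always takes the adaptor.
    Outcome Classify(int expected, int actual) {
      if (expected == actual || expected == kDontAdaptSentinel) {
        return kDirectCall;   // definitely_matches
      }
      return kAdaptorCall;    // *definitely_mismatches = true
    }

    int main() {
      assert(Classify(2, 2) == kDirectCall);
      assert(Classify(2, 3) == kAdaptorCall);
      return 0;
    }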
@@ -3161,7 +3244,7 @@
void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
- // Setup the frame structure on the stack.
+ // Set up the frame structure on the stack.
// All constants are relative to the frame pointer of the exit frame.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
@@ -3221,7 +3304,7 @@
void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
EnterExitFramePrologue(true);
- // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
+ // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
lea(r15, Operand(rbp, r14, times_pointer_size, offset));
@@ -3349,6 +3432,42 @@
}
+void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
+ // Load the hash seed into scratch and untag it.
+ LoadRoot(scratch, Heap::kHashSeedRootIndex);
+ SmiToInteger32(scratch, scratch);
+
+ // Xor original key with a seed.
+ xorl(r0, scratch);
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ movl(scratch, r0);
+ notl(r0);
+ shll(scratch, Immediate(15));
+ addl(r0, scratch);
+ // hash = hash ^ (hash >> 12);
+ movl(scratch, r0);
+ shrl(scratch, Immediate(12));
+ xorl(r0, scratch);
+ // hash = hash + (hash << 2);
+ leal(r0, Operand(r0, r0, times_4, 0));
+ // hash = hash ^ (hash >> 4);
+ movl(scratch, r0);
+ shrl(scratch, Immediate(4));
+ xorl(r0, scratch);
+ // hash = hash * 2057;
+ imull(r0, r0, Immediate(2057));
+ // hash = hash ^ (hash >> 16);
+ movl(scratch, r0);
+ shrl(scratch, Immediate(16));
+ xorl(r0, scratch);
+}
+
+
+
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
@@ -3379,34 +3498,11 @@
Label done;
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- movl(r1, r0);
- notl(r0);
- shll(r1, Immediate(15));
- addl(r0, r1);
- // hash = hash ^ (hash >> 12);
- movl(r1, r0);
- shrl(r1, Immediate(12));
- xorl(r0, r1);
- // hash = hash + (hash << 2);
- leal(r0, Operand(r0, r0, times_4, 0));
- // hash = hash ^ (hash >> 4);
- movl(r1, r0);
- shrl(r1, Immediate(4));
- xorl(r0, r1);
- // hash = hash * 2057;
- imull(r0, r0, Immediate(2057));
- // hash = hash ^ (hash >> 16);
- movl(r1, r0);
- shrl(r1, Immediate(16));
- xorl(r0, r1);
+ GetNumberHash(r0, r1);
// Compute capacity mask.
- SmiToInteger32(r1,
- FieldOperand(elements, NumberDictionary::kCapacityOffset));
+ SmiToInteger32(r1, FieldOperand(elements,
+ SeededNumberDictionary::kCapacityOffset));
decl(r1);
// Generate an unrolled loop that performs a few probes before giving up.
@@ -3416,19 +3512,19 @@
movq(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
- addl(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
+ addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
}
and_(r2, r1);
// Scale the index by multiplying by the entry size.
- ASSERT(NumberDictionary::kEntrySize == 3);
+ ASSERT(SeededNumberDictionary::kEntrySize == 3);
lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
cmpq(key, FieldOperand(elements,
r2,
times_pointer_size,
- NumberDictionary::kElementsStartOffset));
+ SeededNumberDictionary::kElementsStartOffset));
if (i != (kProbes - 1)) {
j(equal, &done);
} else {
@@ -3439,7 +3535,7 @@
bind(&done);
// Check that the value is a normal property.
const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ASSERT_EQ(NORMAL, 0);
Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Smi::FromInt(PropertyDetails::TypeField::kMask));
@@ -3447,7 +3543,7 @@
// Get the value at the masked, scaled index.
const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
+ SeededNumberDictionary::kElementsStartOffset + kPointerSize;
movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
@@ -3742,7 +3838,7 @@
subq(scratch1, Immediate(kHeaderAlignment));
}
- // Allocate ascii string in new space.
+ // Allocate ASCII string in new space.
AllocateInNewSpace(SeqAsciiString::kHeaderSize,
times_1,
scratch1,
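For reference, the GetNumberHash sequence factored out above mirrors ComputeIntegerHash in utils.h, which it must stay in sync with. A C mirror of the assembly (the seeded signature is an assumption based on the new Heap::kHashSeedRootIndex load):

    #include <stdint.h>

    // Keep in sync with MacroAssembler::GetNumberHash above.
    static inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;    // xorl(r0, scratch)
      hash = ~hash + (hash << 15);   // movl/notl/shll(15)/addl
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);     // leal(r0, Operand(r0, r0, times_4, 0))
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;            // imull(r0, r0, Immediate(2057))
      hash = hash ^ (hash >> 16);
      return hash;
    }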
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index cf03e59..aad76bc 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -203,7 +203,7 @@
Label* on_black,
Label::Distance on_black_distance = Label::kFar);
- // Detects conservatively whether an object is data-only, ie it does need to
+ // Detects conservatively whether an object is data-only, i.e. it does not need to
// be scanned by the garbage collector.
void JumpIfDataObject(Register value,
Register scratch,
@@ -328,7 +328,7 @@
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Setup call kind marking in rcx. The method takes rcx as an
+ // Set up call kind marking in rcx. The method takes rcx as an
// explicit first parameter to make the code more readable at the
// call sites.
void SetCallKind(Register dst, CallKind kind);
@@ -745,7 +745,7 @@
Label* on_not_both_flat_ascii,
Label::Distance near_jump = Label::kFar);
- // Check whether the instance type represents a flat ascii string. Jump to the
+ // Check whether the instance type represents a flat ASCII string. Jump to the
// label if not. If the instance type can be scratched, specify the same
// register for both instance type and scratch.
void JumpIfInstanceTypeIsNotSequentialAscii(
@@ -784,6 +784,22 @@
void Cmp(const Operand& dst, Smi* src);
void Push(Handle<Object> source);
+ // Load a heap object and handle the case of new-space objects by
+ // indirecting via a global cell.
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
+ void PushHeapObject(Handle<HeapObject> object);
+
+ void LoadObject(Register result, Handle<Object> object) {
+ if (object->IsHeapObject()) {
+ LoadHeapObject(result, Handle<HeapObject>::cast(object));
+ } else {
+ Move(result, object);
+ }
+ }
+
+ // Load a global cell into a register.
+ void LoadGlobalCell(Register dst, Handle<JSGlobalPropertyCell> cell);
+
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the rsp register.
void Drop(int stack_elements);
@@ -873,13 +889,24 @@
XMMRegister xmm_scratch,
Label* fail);
- // Check if the map of an object is equal to a specified map and
- // branch to label if not. Skip the smi check if not required
- // (object is known to be a heap object)
+ // Compare an object's map with the specified map and its transitioned
+ // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set
+ // with the result of the map compare. If multiple map compares are
+ // required, the compare sequence branches to early_success.
+ void CompareMap(Register obj,
+ Handle<Map> map,
+ Label* early_success,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
+
+ // Check if the map of an object is equal to a specified map and branch to
+ // label if not. Skip the smi check if not required (object is known to be a
+ // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+ // against maps that are ElementsKind transition maps of the specified map.
void CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type);
+ SmiCheckType smi_check_type,
+ CompareMapMode mode = REQUIRE_EXACT_MAP);
// Check if the map of an object is equal to a specified map and branch to a
// specified target if equal. Skip the smi check if not required (object is
@@ -959,6 +986,7 @@
Register scratch,
Label* miss);
+ void GetNumberHash(Register r0, Register scratch);
void LoadFromNumberDictionary(Label* miss,
Register elements,
@@ -1281,6 +1309,7 @@
Handle<Code> code_constant,
Register code_register,
Label* done,
+ bool* definitely_mismatches,
InvokeFlag flag,
Label::Distance near_jump = Label::kFar,
const CallWrapper& call_wrapper = NullCallWrapper(),
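ALLOW_ELEMENT_TRANSITION_MAPS makes CompareMap and CheckMap accept, besides the exact map, the maps an object could have reached through legal elements-kind transitions. Judging from the asserts in the CompareMap body earlier in this patch, the tolerated transitions are smi-only to double, smi-only to fast, and double to fast; a standalone sketch under that assumption:

    #include <cassert>

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS,  // packed smis
      FAST_DOUBLE_ELEMENTS,    // unboxed doubles
      FAST_ELEMENTS            // arbitrary tagged values
    };

    // Assumed reading of the CompareMap asserts, not V8 code.
    bool MayHaveTransitioned(ElementsKind from, ElementsKind to) {
      if (from == FAST_SMI_ONLY_ELEMENTS) {
        return to == FAST_DOUBLE_ELEMENTS || to == FAST_ELEMENTS;
      }
      if (from == FAST_DOUBLE_ELEMENTS) return to == FAST_ELEMENTS;
      return false;
    }

    int main() {
      assert(MayHaveTransitioned(FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS));
      assert(!MayHaveTransitioned(FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS));
      return 0;
    }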
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 1e0cd6a..16730d2 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -226,7 +226,7 @@
bool check_end_of_string) {
#ifdef DEBUG
// If input is ASCII, don't even bother calling here if the string to
- // match contains a non-ascii character.
+ // match contains a non-ASCII character.
if (mode_ == ASCII) {
ASSERT(String::IsAscii(str.start(), str.length()));
}
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 58994f2..a6e1b83 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -421,7 +421,7 @@
// -----------------------------------
// Get the function and set up the context.
Handle<JSFunction> function = optimization.constant_function();
- __ Move(rdi, function);
+ __ LoadHeapObject(rdi, function);
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Pass the additional arguments.
@@ -691,13 +691,9 @@
Register name_reg,
Register scratch,
Label* miss_label) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver_reg, miss_label);
-
// Check that the map of the object hasn't changed.
- __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- __ j(not_equal, miss_label);
+ __ CheckMap(receiver_reg, Handle<Map>(object->map()),
+ miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -864,12 +860,10 @@
if (in_new_space) {
// Save the map in scratch1 for later.
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- __ Cmp(scratch1, current_map);
- } else {
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), current_map);
}
- // Branch on the result of the map check.
- __ j(not_equal, miss);
+ __ CheckMap(reg, Handle<Map>(current_map),
+ miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
@@ -901,8 +895,8 @@
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
// Check the holder map.
- __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
- __ j(not_equal, miss);
+ __ CheckMap(reg, Handle<Map>(holder->map()),
+ miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform security check for access to the global object.
ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
@@ -988,7 +982,7 @@
__ movq(name_arg, rsp);
__ push(scratch2); // Restore return address.
- // 3 elements array for v8::Agruments::values_ and handler for name.
+ // 3-element array for v8::Arguments::values_ and handler for name.
const int kStackSpace = 4;
// Allocate v8::AccessorInfo in non-GCed stack space.
@@ -1015,7 +1009,7 @@
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1026,7 +1020,7 @@
object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ Move(rax, value);
+ __ LoadHeapObject(rax, value);
__ ret(0);
}
@@ -1051,7 +1045,7 @@
// and CALLBACKS, so inline only them, other cases may be added
// later.
bool compile_followup_inline = false;
- if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->IsFound() && lookup->IsCacheable()) {
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
@@ -2187,7 +2181,7 @@
__ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
}
- // Setup the context (function already in rdi).
+ // Set up the context (function already in rdi).
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
@@ -2251,13 +2245,9 @@
// -----------------------------------
Label miss;
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
// Check that the map of the object hasn't changed.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- __ j(not_equal, &miss);
+ __ CheckMap(rdx, Handle<Map>(object->map()), &miss,
+ DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -2301,13 +2291,9 @@
// -----------------------------------
Label miss;
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &miss);
-
// Check that the map of the object hasn't changed.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Handle<Map>(receiver->map()));
- __ j(not_equal, &miss);
+ __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss,
+ DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {
@@ -2564,7 +2550,7 @@
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : receiver
@@ -2718,7 +2704,7 @@
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value) {
+ Handle<JSFunction> value) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver