Version 3.22.1
Sped up creating typed arrays from array-like objects. (Chromium issue 270507)
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@16931 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/api.cc b/src/api.cc
index 6d53d91..6eb3789 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3055,6 +3055,19 @@
}
+bool Value::SameValue(Handle<Value> that) const {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (EmptyCheck("v8::Value::SameValue()", this) ||
+ EmptyCheck("v8::Value::SameValue()", that)) {
+ return false;
+ }
+ LOG_API(isolate, "SameValue");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> other = Utils::OpenHandle(*that);
+ return obj->SameValue(*other);
+}
+
+
uint32_t Value::Uint32Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
@@ -7427,9 +7440,11 @@
v->VisitPointers(blocks()->last(), handle_scope_data_.next);
}
- if (!saved_contexts_.is_empty()) {
- Object** start = reinterpret_cast<Object**>(&saved_contexts_.first());
- v->VisitPointers(start, start + saved_contexts_.length());
+ List<Context*>* context_lists[2] = { &saved_contexts_, &entered_contexts_};
+ for (unsigned i = 0; i < ARRAY_SIZE(context_lists); i++) {
+ if (context_lists[i]->is_empty()) continue;
+ Object** start = reinterpret_cast<Object**>(&context_lists[i]->first());
+ v->VisitPointers(start, start + context_lists[i]->length());
}
}
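The new Value::SameValue entry point exposes the ES5 SameValue algorithm that i::Object::SameValue already implements internally: NaN compares equal to NaN, and +0 is distinct from -0. A minimal embedder-side sketch against this 3.22-era API (assumes an initialized isolate, an entered context, and <limits>; not part of the patch itself):

    v8::HandleScope scope(isolate);
    v8::Handle<v8::Value> nan =
        v8::Number::New(std::numeric_limits<double>::quiet_NaN());
    v8::Handle<v8::Value> zero = v8::Number::New(0.0);
    v8::Handle<v8::Value> neg_zero = v8::Number::New(-0.0);
    bool nan_same = nan->SameValue(nan);          // true, unlike ==
    bool zeros_same = zero->SameValue(neg_zero);  // false, unlike ==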
diff --git a/src/api.h b/src/api.h
index 7dfa36d..8fb17a0 100644
--- a/src/api.h
+++ b/src/api.h
@@ -542,12 +542,12 @@
inline void DecrementCallDepth() {call_depth_--;}
inline bool CallDepthIsZero() { return call_depth_ == 0; }
- inline void EnterContext(Handle<Object> context);
- inline bool LeaveContext(Handle<Object> context);
+ inline void EnterContext(Handle<Context> context);
+ inline bool LeaveContext(Handle<Context> context);
// Returns the last entered context or an empty handle if no
// contexts have been entered.
- inline Handle<Object> LastEnteredContext();
+ inline Handle<Context> LastEnteredContext();
inline void SaveContext(Context* context);
inline Context* RestoreContext();
@@ -592,7 +592,7 @@
Isolate* isolate_;
List<internal::Object**> blocks_;
// Used as a stack to keep track of entered contexts.
- List<Handle<Object> > entered_contexts_;
+ List<Context*> entered_contexts_;
// Used as a stack to keep track of saved contexts.
List<Context*> saved_contexts_;
Object** spare_;
@@ -630,23 +630,23 @@
}
-void HandleScopeImplementer::EnterContext(Handle<Object> context) {
- entered_contexts_.Add(context);
+void HandleScopeImplementer::EnterContext(Handle<Context> context) {
+ entered_contexts_.Add(*context);
}
-bool HandleScopeImplementer::LeaveContext(Handle<Object> context) {
+bool HandleScopeImplementer::LeaveContext(Handle<Context> context) {
if (entered_contexts_.is_empty()) return false;
// TODO(dcarney): figure out what's wrong here
- // if (*entered_contexts_.last() != *context) return false;
+ // if (entered_contexts_.last() != *context) return false;
entered_contexts_.RemoveLast();
return true;
}
-Handle<Object> HandleScopeImplementer::LastEnteredContext() {
- if (entered_contexts_.is_empty()) return Handle<Object>::null();
- return entered_contexts_.last();
+Handle<Context> HandleScopeImplementer::LastEnteredContext() {
+ if (entered_contexts_.is_empty()) return Handle<Context>::null();
+ return Handle<Context>(entered_contexts_.last());
}
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 1399021..d7b8f02 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -118,7 +118,8 @@
// Core register
struct Register {
static const int kNumRegisters = 16;
- static const int kMaxNumAllocatableRegisters = 8;
+ static const int kMaxNumAllocatableRegisters =
+ FLAG_enable_ool_constant_pool ? 7 : 8;
static const int kSizeInBytes = 4;
inline static int NumAllocatableRegisters();
@@ -201,6 +202,7 @@
const Register r4 = { kRegister_r4_Code };
const Register r5 = { kRegister_r5_Code };
const Register r6 = { kRegister_r6_Code };
+// Used as constant pool pointer register if FLAG_enable_ool_constant_pool.
const Register r7 = { kRegister_r7_Code };
// Used as context register.
const Register r8 = { kRegister_r8_Code };
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index ec1b227..530c1d2 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -445,9 +445,8 @@
// r3: object size (in words)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
- __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
if (count_constructions) {
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
@@ -455,14 +454,16 @@
__ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
// r0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ cmp(r0, r6);
+ __ add(ip, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ cmp(r0, ip);
__ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
}
- __ InitializeFieldsWithFiller(r5, r0, r7);
+ __ InitializeFieldsWithFiller(r5, r0, r6);
// To allow for truncation.
- __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
+ __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
}
- __ InitializeFieldsWithFiller(r5, r6, r7);
+ __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ InitializeFieldsWithFiller(r5, r0, r6);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -527,16 +528,10 @@
__ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
{ Label loop, entry;
- if (count_constructions) {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- } else if (FLAG_debug_code) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ cmp(r7, r8);
- __ Assert(eq, kUndefinedValueNotLoaded);
- }
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(&entry);
__ bind(&loop);
- __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
+ __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
__ bind(&entry);
__ cmp(r2, r6);
__ b(lt, &loop);
@@ -700,7 +695,7 @@
// r2: receiver
// r3: argc
// r4: argv
- // r5-r7, cp may be clobbered
+ // r5-r6, r7 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
@@ -740,7 +735,9 @@
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
- __ mov(r7, Operand(r4));
+ if (!FLAG_enable_ool_constant_pool) {
+ __ mov(r7, Operand(r4));
+ }
if (kR9Available == 1) {
__ mov(r9, Operand(r4));
}
@@ -894,21 +891,6 @@
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
- __ Ret();
-}
-
-
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 0335607..2c7fb78 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -59,6 +59,16 @@
}
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
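NumberToStringStub is converted from a hand-written platform stub (its Generate method and class declaration are deleted further down) into a stub driven by this interface descriptor: the input number arrives in the single register parameter r0, and the NULL deoptimization handler leaves cache misses to the runtime, as the old code did. Call sites shrink to a register move plus a stub call; a sketch mirroring the full-codegen-arm.cc hunk later in this diff:

    // The argument now travels in r0 (the accumulator), not on the stack.
    VisitForAccumulatorValue(args->at(0));
    NumberToStringStub stub;
    __ CallStub(&stub);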
@@ -825,8 +835,7 @@
// Convert lhs to a double in d7.
__ SmiToDouble(d7, lhs);
// Load the double from rhs, tagged HeapNumber r0, to d6.
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
+ __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
// We now have both loaded as doubles but we can skip the lhs nan check
// since it's a smi.
@@ -851,8 +860,7 @@
// Rhs is a smi, lhs is a heap number.
// Load the double from lhs, tagged HeapNumber r1, to d7.
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
+ __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
// Convert rhs to a double in d6 .
__ SmiToDouble(d6, rhs);
// Fall through to both_loaded_as_doubles.
@@ -920,10 +928,8 @@
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
+ __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
+ __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(both_loaded_as_doubles);
}
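These hunks are all the same peephole change: instead of materializing an untagged address in r7 with a sub, the -kHeapObjectTag correction is folded into the immediate offset of the VFP load, saving an instruction and freeing r7. Schematically (scratch stands for the r7 that is no longer needed):

    // Before: an extra instruction and a scratch register.
    __ sub(scratch, rhs, Operand(kHeapObjectTag));
    __ vldr(d6, scratch, HeapNumber::kValueOffset);
    // After: the tag correction rides in vldr's immediate offset.
    __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);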
@@ -972,22 +978,6 @@
}
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ ldr(r1, MemOperand(sp, 0));
-
- // Generate code to lookup number in the number string cache.
- __ LookupNumberStringCache(r1, r0, r2, r3, r4, &runtime);
- __ add(sp, sp, Operand(1 * kPointerSize));
- __ Ret();
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
static void ICCompareStub_CheckInputType(MacroAssembler* masm,
Register input,
Register scratch,
@@ -1267,13 +1257,14 @@
void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
- Token::Value op) {
+ Token::Value op,
+ Register scratch1,
+ Register scratch2) {
Register left = r1;
Register right = r0;
- Register scratch1 = r7;
- Register scratch2 = r9;
ASSERT(right.is(r0));
+ ASSERT(!AreAliased(left, right, scratch1, scratch2, ip));
STATIC_ASSERT(kSmiTag == 0);
Label not_smi_result;
@@ -1488,11 +1479,15 @@
Label* gc_required,
Label* miss,
Token::Value op,
- OverwriteMode mode) {
+ OverwriteMode mode,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
Register left = r1;
Register right = r0;
- Register scratch1 = r6;
- Register scratch2 = r7;
+ Register result = scratch3;
+ ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
ASSERT(smi_operands || (not_numbers != NULL));
if (smi_operands) {
@@ -1506,7 +1501,7 @@
__ JumpIfNotSmi(right, miss);
}
- Register heap_number_map = r9;
+ Register heap_number_map = scratch4;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
switch (op) {
@@ -1516,7 +1511,6 @@
case Token::DIV:
case Token::MOD: {
// Allocate new heap number for result.
- Register result = r5;
BinaryOpStub_GenerateHeapResultAllocation(
masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
@@ -1635,7 +1629,6 @@
// Allocate new heap number for result.
__ bind(&result_not_a_smi);
- Register result = r5;
if (smi_operands) {
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
@@ -1646,11 +1639,11 @@
}
// r2: Answer as signed int32.
- // r5: Heap number to write answer into.
+ // result: Heap number to write answer into.
// Nothing can go wrong now, so move the heap number to r0, which is the
// result.
- __ mov(r0, Operand(r5));
+ __ mov(r0, Operand(result));
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
// mentioned above SHR needs to always produce a positive result.
@@ -1681,26 +1674,31 @@
Label* gc_required,
Token::Value op,
BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- OverwriteMode mode) {
+ OverwriteMode mode,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
Label not_smis;
Register left = r1;
Register right = r0;
- Register scratch1 = r7;
+ ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
// Perform combined smi check on both operands.
__ orr(scratch1, left, Operand(right));
__ JumpIfNotSmi(scratch1, &not_smis);
// If the smi-smi operation results in a smi return is generated.
- BinaryOpStub_GenerateSmiSmiOperation(masm, op);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op, scratch1, scratch2);
// If heap number results are possible generate the result in an allocated
// heap number.
if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
BinaryOpStub_GenerateFPOperation(
masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
- use_runtime, gc_required, &not_smis, op, mode);
+ use_runtime, gc_required, &not_smis, op, mode, scratch2, scratch3,
+ scratch1, scratch4);
}
__ bind(&not_smis);
}
@@ -1719,14 +1717,13 @@
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
+ BinaryOpStub_GenerateSmiCode(masm, &call_runtime, NULL, op_,
+ NO_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
- mode_);
+ BinaryOpStub_GenerateSmiCode(masm, &call_runtime, &call_runtime, op_,
+ ALLOW_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9);
}
// Code falls through if the result is not returned as either a smi or heap
@@ -1780,8 +1777,9 @@
Register left = r1;
Register right = r0;
- Register scratch1 = r7;
+ Register scratch1 = r4;
Register scratch2 = r9;
+ Register scratch3 = r5;
LowDwVfpRegister double_scratch = d0;
Register heap_number_result = no_reg;
@@ -1798,7 +1796,7 @@
Label skip;
__ orr(scratch1, left, right);
__ JumpIfNotSmi(scratch1, &skip);
- BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op_, scratch2, scratch3);
// Fall through if the result is not a smi.
__ bind(&skip);
@@ -1892,12 +1890,6 @@
__ b(ne, &transition);
}
- // We preserved r0 and r1 to be able to call runtime.
- // Save the left value on the stack.
- __ Push(r5, r4);
-
- Label pop_and_call_runtime;
-
// Allocate a heap number to store the result.
heap_number_result = r5;
BinaryOpStub_GenerateHeapResultAllocation(masm,
@@ -1905,20 +1897,15 @@
heap_number_map,
scratch1,
scratch2,
- &pop_and_call_runtime,
+ &call_runtime,
mode_);
- // Load the left value from the value saved on the stack.
- __ Pop(r1, r0);
-
// Call the C function to handle the double operation.
CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
if (FLAG_debug_code) {
__ stop("Unreachable code.");
}
- __ bind(&pop_and_call_runtime);
- __ Drop(2);
__ b(&call_runtime);
}
@@ -2069,7 +2056,7 @@
Label call_runtime, transition;
BinaryOpStub_GenerateFPOperation(
masm, left_type_, right_type_, false,
- &transition, &call_runtime, &transition, op_, mode_);
+ &transition, &call_runtime, &transition, op_, mode_, r6, r4, r5, r9);
__ bind(&transition);
GenerateTypeTransition(masm);
@@ -2088,11 +2075,13 @@
Label call_runtime, call_string_add_or_runtime, transition;
BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_,
+ r5, r6, r4, r9);
BinaryOpStub_GenerateFPOperation(
masm, left_type_, right_type_, false,
- &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
+ &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_, r6,
+ r4, r5, r9);
__ bind(&transition);
GenerateTypeTransition(masm);
@@ -2194,7 +2183,7 @@
Label calculate;
Label invalid_cache;
const Register scratch0 = r9;
- const Register scratch1 = r7;
+ Register scratch1 = no_reg; // will be r4
const Register cache_entry = r0;
const bool tagged = (argument_type_ == TAGGED);
@@ -2274,6 +2263,9 @@
__ cmp(r2, r4);
__ cmp(r3, r5, eq);
__ b(ne, &calculate);
+
+ scratch1 = r4; // Start of scratch1 range.
+
// Cache hit. Load result, cleanup and return.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(
@@ -2416,7 +2408,7 @@
const DwVfpRegister double_scratch = d0;
const SwVfpRegister single_scratch = s0;
const Register scratch = r9;
- const Register scratch2 = r7;
+ const Register scratch2 = r4;
Label call_runtime, done, int_exponent;
if (exponent_type_ == ON_STACK) {
@@ -2926,14 +2918,14 @@
// r3: argc
// r4: argv
Isolate* isolate = masm->isolate();
- __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ mov(r7, Operand(Smi::FromInt(marker)));
+ __ mov(r8, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
__ mov(r5,
Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
__ ldr(r5, MemOperand(r5));
- __ Push(r8, r7, r6, r5);
+ __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ __ Push(ip, r8, r6, r5);
// Set up frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -2979,7 +2971,7 @@
// Invoke: Link this frame into the handler chain. There's only one
// handler block in this code object, so its index is 0.
__ bind(&invoke);
- // Must preserve r0-r4, r5-r7 are available.
+ // Must preserve r0-r4, r5-r6 are available.
__ PushTryHandler(StackHandler::JS_ENTRY, 0);
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the bl(&invoke) above, which
@@ -3586,31 +3578,36 @@
__ ldr(r9, MemOperand(sp, 0 * kPointerSize));
__ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
__ sub(r9, r9, Operand(r1));
- __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
__ add(r3, r4, Operand(r6, LSL, 1));
__ add(r3, r3, Operand(kParameterMapHeaderSize));
// r6 = loop variable (tagged)
// r1 = mapping index (tagged)
// r3 = address of backing store (tagged)
- // r4 = address of parameter map (tagged)
- // r5 = temporary scratch (a.o., for address calculation)
- // r7 = the hole value
+ // r4 = address of parameter map (tagged), which is also the address of new
+ // object + Heap::kArgumentsObjectSize (tagged)
+ // r0 = temporary scratch (a.o., for address calculation)
+ // r5 = the hole value
__ jmp(&parameters_test);
__ bind(&parameters_loop);
__ sub(r6, r6, Operand(Smi::FromInt(1)));
- __ mov(r5, Operand(r6, LSL, 1));
- __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ str(r9, MemOperand(r4, r5));
- __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ str(r7, MemOperand(r3, r5));
+ __ mov(r0, Operand(r6, LSL, 1));
+ __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ str(r9, MemOperand(r4, r0));
+ __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ str(r5, MemOperand(r3, r0));
__ add(r9, r9, Operand(Smi::FromInt(1)));
__ bind(&parameters_test);
__ cmp(r6, Operand(Smi::FromInt(0)));
__ b(ne, &parameters_loop);
+ // Restore r0 = new object (tagged)
+ __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize));
+
__ bind(&skip_parameter_map);
+ // r0 = address of new object (tagged)
// r2 = argument count (tagged)
// r3 = address of backing store (tagged)
// r5 = scratch
@@ -3641,6 +3638,7 @@
__ Ret();
// Do the runtime call to allocate the arguments object.
+ // r0 = address of new object (tagged)
// r2 = argument count (tagged)
__ bind(&runtime);
__ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
@@ -3769,7 +3767,7 @@
// therefore the content of these registers are safe to use after the call.
Register subject = r4;
Register regexp_data = r5;
- Register last_match_info_elements = r6;
+ Register last_match_info_elements = no_reg; // will be r6.
// Ensure that a RegExp stack is allocated.
Isolate* isolate = masm->isolate();
@@ -3902,19 +3900,19 @@
STATIC_ASSERT(kTwoByteStringTag == 0);
__ and_(r0, r0, Operand(kStringEncodingMask));
__ mov(r3, Operand(r0, ASR, 2), SetCC);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
- __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
+ __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
+ __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
// (E) Carry on. String handling is done.
- // r7: irregexp code
+ // r6: irregexp code
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object otherwise it contains
// a smi (code flushing support).
- __ JumpIfSmi(r7, &runtime);
+ __ JumpIfSmi(r6, &runtime);
// r1: previous index
// r3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // r7: code
+ // r6: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
@@ -3981,12 +3979,14 @@
__ mov(r0, subject);
// Locate the code entry and call it.
- __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
DirectCEntryStub stub;
- stub.GenerateCall(masm, r7);
+ stub.GenerateCall(masm, r6);
__ LeaveExitFrame(false, no_reg, true);
+ last_match_info_elements = r6;
+
// r0: result
// subject: subject string (callee saved)
// regexp_data: RegExp data (callee saved)
@@ -4075,7 +4075,7 @@
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastSubjectOffset,
subject,
- r7,
+ r3,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
__ mov(subject, r2);
@@ -4085,7 +4085,7 @@
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastInputOffset,
subject,
- r7,
+ r3,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
@@ -4652,7 +4652,6 @@
Register scratch2,
Register scratch3,
Register scratch4,
- Register scratch5,
int flags) {
bool ascii = (flags & COPY_ASCII) != 0;
bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
@@ -4727,30 +4726,29 @@
__ bind(&loop);
__ ldr(scratch3, MemOperand(src, 4, PostIndex));
- __ sub(scratch5, limit, Operand(dest));
__ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
__ str(scratch1, MemOperand(dest, 4, PostIndex));
__ mov(scratch1, Operand(scratch3, LSR, right_shift));
// Loop if four or more bytes left to copy.
- // Compare to eight, because we did the subtract before increasing dst.
- __ sub(scratch5, scratch5, Operand(8), SetCC);
+ __ sub(scratch3, limit, Operand(dest));
+ __ sub(scratch3, scratch3, Operand(4), SetCC);
__ b(ge, &loop);
}
// There is now between zero and three bytes left to copy (negative that
- // number is in scratch5), and between one and three bytes already read into
+ // number is in scratch3), and between one and three bytes already read into
// scratch1 (eight times that number in scratch4). We may have read past
// the end of the string, but because objects are aligned, we have not read
// past the end of the object.
// Find the minimum of remaining characters to move and preloaded characters
// and write those as bytes.
- __ add(scratch5, scratch5, Operand(4), SetCC);
+ __ add(scratch3, scratch3, Operand(4), SetCC);
__ b(eq, &done);
- __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
+ __ cmp(scratch4, Operand(scratch3, LSL, 3), ne);
// Move minimum of bytes read and bytes left to copy to scratch4.
- __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
- // Between one and three (value in scratch5) characters already read into
+ __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt);
+ // Between one and three (value in scratch3) characters already read into
// scratch ready to write.
- __ cmp(scratch5, Operand(2));
+ __ cmp(scratch3, Operand(2));
__ strb(scratch1, MemOperand(dest, 1, PostIndex));
__ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
__ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
@@ -5090,10 +5088,10 @@
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_slice);
- __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
+ __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime);
__ jmp(&set_slice_header);
__ bind(&two_byte_slice);
- __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
+ __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
__ bind(&set_slice_header);
__ mov(r3, Operand(r3, LSL, 1));
__ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
@@ -5134,7 +5132,7 @@
__ b(eq, &two_byte_sequential);
// Allocate and copy the resulting ASCII string.
- __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
+ __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime);
// Locate first character of substring to copy.
__ add(r5, r5, r3);
@@ -5146,13 +5144,13 @@
// r2: result string length
// r5: first character of substring to copy
STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
+ StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9,
COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_r0);
// Allocate and copy the resulting two-byte string.
__ bind(&two_byte_sequential);
- __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);
+ __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
// Locate first character of substring to copy.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
@@ -5166,7 +5164,7 @@
// r5: first character of substring to copy.
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
- masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
+ masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED);
__ bind(&return_r0);
Counters* counters = masm->isolate()->counters();
@@ -5432,7 +5430,7 @@
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
}
- __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
+ __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r3,
&call_runtime);
// Get the two characters forming the sub string.
@@ -5443,7 +5441,7 @@
// just allocate a new one.
Label make_two_character_string;
StringHelper::GenerateTwoCharacterStringTableProbe(
- masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
+ masm, r2, r3, r6, r0, r4, r5, r9, &make_two_character_string);
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -5488,7 +5486,7 @@
// Allocate an ASCII cons string.
__ bind(&ascii_data);
- __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
+ __ AllocateAsciiConsString(r3, r6, r4, r5, &call_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
Label skip_write_barrier, after_writing;
@@ -5499,15 +5497,15 @@
__ cmp(r4, Operand::Zero());
__ b(eq, &skip_write_barrier);
- __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
- __ RecordWriteField(r7,
+ __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
+ __ RecordWriteField(r3,
ConsString::kFirstOffset,
r0,
r4,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
- __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
- __ RecordWriteField(r7,
+ __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
+ __ RecordWriteField(r3,
ConsString::kSecondOffset,
r1,
r4,
@@ -5516,12 +5514,12 @@
__ jmp(&after_writing);
__ bind(&skip_write_barrier);
- __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
- __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
+ __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
+ __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
__ bind(&after_writing);
- __ mov(r0, Operand(r7));
+ __ mov(r0, Operand(r3));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -5541,7 +5539,7 @@
__ b(eq, &ascii_data);
// Allocate a two byte cons string.
- __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
+ __ AllocateTwoByteConsString(r3, r6, r4, r5, &call_runtime);
__ jmp(&allocated);
// We cannot encounter sliced strings or cons strings here since:
@@ -5565,14 +5563,15 @@
}
// Check whether both strings have same encoding
- __ eor(r7, r4, Operand(r5));
- __ tst(r7, Operand(kStringEncodingMask));
+ __ eor(ip, r4, Operand(r5));
+ ASSERT(__ ImmediateFitsAddrMode1Instruction(kStringEncodingMask));
+ __ tst(ip, Operand(kStringEncodingMask));
__ b(ne, &call_runtime);
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r4, Operand(kStringRepresentationMask));
STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ add(r7,
+ __ add(r6,
r0,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
LeaveCC,
@@ -5582,7 +5581,7 @@
STATIC_ASSERT(kShortExternalStringTag != 0);
__ tst(r4, Operand(kShortExternalStringMask));
__ b(ne, &call_runtime);
- __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
+ __ ldr(r6, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
__ bind(&first_prepared);
STATIC_ASSERT(kSeqStringTag == 0);
@@ -5602,43 +5601,46 @@
__ bind(&second_prepared);
Label non_ascii_string_add_flat_result;
- // r7: first character of first string
+ // r6: first character of first string
// r1: first character of second string
// r2: length of first string.
// r3: length of second string.
- // r6: sum of lengths.
// Both strings have the same encoding.
STATIC_ASSERT(kTwoByteStringTag == 0);
__ tst(r5, Operand(kStringEncodingMask));
__ b(eq, &non_ascii_string_add_flat_result);
- __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ add(r2, r2, Operand(r3));
+ __ AllocateAsciiString(r0, r2, r4, r5, r9, &call_runtime);
+ __ sub(r2, r2, Operand(r3));
+ __ add(r5, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// r0: result string.
- // r7: first character of first string.
+ // r6: first character of first string.
// r1: first character of second string.
// r2: length of first string.
// r3: length of second string.
- // r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true);
- // r6: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
+ // r5: first character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, true);
+ // r5: next character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, true);
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
__ bind(&non_ascii_string_add_flat_result);
- __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ add(r2, r2, Operand(r3));
+ __ AllocateTwoByteString(r0, r2, r4, r5, r9, &call_runtime);
+ __ sub(r2, r2, Operand(r3));
+ __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// r0: result string.
- // r7: first character of first string.
+ // r6: first character of first string.
// r1: first character of second string.
// r2: length of first string.
// r3: length of second string.
- // r6: first character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false);
- // r6: next character of result.
- StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
+ // r5: first character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, false);
+ // r5: next character of result.
+ StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, false);
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -6308,7 +6310,7 @@
static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
- { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
+ { REG(r6), REG(r4), REG(r3), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
@@ -6335,8 +6337,8 @@
// FastNewClosureStub::Generate
{ REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
// StringAddStub::Generate
- { REG(r7), REG(r1), REG(r4), EMIT_REMEMBERED_SET },
- { REG(r7), REG(r0), REG(r4), EMIT_REMEMBERED_SET },
+ { REG(r3), REG(r1), REG(r4), EMIT_REMEMBERED_SET },
+ { REG(r3), REG(r0), REG(r4), EMIT_REMEMBERED_SET },
// Null termination.
{ REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index a404f01..c03d8f2 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -106,7 +106,6 @@
Register scratch2,
Register scratch3,
Register scratch4,
- Register scratch5,
int flags);
@@ -257,18 +256,6 @@
};
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 5b80d6f..e19c5ab 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -444,15 +444,16 @@
__ push(lr);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
- // r4: source FixedArray
// r5: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
// Use lr as a temporary register.
__ mov(lr, Operand(r5, LSL, 2));
__ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
- __ Allocate(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT);
+ __ Allocate(lr, r6, r4, r9, &gc_required, DOUBLE_ALIGNMENT);
// r6: destination FixedDoubleArray, not tagged as heap object.
+ __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+ // r4: source FixedArray.
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
@@ -483,15 +484,15 @@
// Prepare for conversion loop.
__ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
- __ add(r6, r7, Operand(r5, LSL, 2));
+ __ add(r9, r6, Operand(FixedDoubleArray::kHeaderSize));
+ __ add(r6, r9, Operand(r5, LSL, 2));
__ mov(r4, Operand(kHoleNanLower32));
__ mov(r5, Operand(kHoleNanUpper32));
// r3: begin of source FixedArray element fields, not tagged
// r4: kHoleNanLower32
// r5: kHoleNanUpper32
// r6: end of destination FixedDoubleArray, not tagged
- // r7: begin of FixedDoubleArray element fields, not tagged
+ // r9: begin of FixedDoubleArray element fields, not tagged
__ b(&entry);
@@ -514,30 +515,30 @@
// Convert and copy elements.
__ bind(&loop);
- __ ldr(r9, MemOperand(r3, 4, PostIndex));
- // r9: current element
- __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
+ __ ldr(lr, MemOperand(r3, 4, PostIndex));
+ // lr: current element
+ __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
// Normal smi, convert to double and store.
- __ vmov(s0, r9);
+ __ vmov(s0, lr);
__ vcvt_f64_s32(d0, s0);
- __ vstr(d0, r7, 0);
- __ add(r7, r7, Operand(8));
+ __ vstr(d0, r9, 0);
+ __ add(r9, r9, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
// Restore a "smi-untagged" heap object.
- __ SmiTag(r9);
- __ orr(r9, r9, Operand(1));
- __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
+ __ SmiTag(lr);
+ __ orr(lr, lr, Operand(1));
+ __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray);
}
- __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
+ __ Strd(r4, r5, MemOperand(r9, 8, PostIndex));
__ bind(&entry);
- __ cmp(r7, r6);
+ __ cmp(r9, r6);
__ b(lt, &loop);
__ pop(lr);
@@ -577,7 +578,7 @@
// Allocate new FixedArray.
__ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
__ add(r0, r0, Operand(r5, LSL, 1));
- __ Allocate(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+ __ Allocate(r0, r6, r3, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
@@ -589,14 +590,12 @@
__ add(r3, r6, Operand(FixedArray::kHeaderSize));
__ add(r6, r6, Operand(kHeapObjectTag));
__ add(r5, r3, Operand(r5, LSL, 1));
- __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
__ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
// Using offsetted addresses in r4 to fully take advantage of post-indexing.
// r3: begin of destination FixedArray element fields, not tagged
// r4: begin of source FixedDoubleArray element fields, not tagged, +4
// r5: end of destination FixedArray, not tagged
// r6: destination FixedArray
- // r7: the-hole pointer
// r9: heap number map
__ b(&entry);
@@ -608,7 +607,7 @@
__ bind(&loop);
__ ldr(r1, MemOperand(r4, 8, PostIndex));
- // lr: current element's upper 32 bit
+ // r1: current element's upper 32 bits
// r4: address of next element's upper 32 bit
__ cmp(r1, Operand(kHoleNanUpper32));
__ b(eq, &convert_hole);
@@ -631,7 +630,8 @@
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
- __ str(r7, MemOperand(r3, 4, PostIndex));
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ str(r0, MemOperand(r3, 4, PostIndex));
__ bind(&entry);
__ cmp(r3, r5);
@@ -775,50 +775,65 @@
ASSERT(!temp2.is(temp3));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
- Label done;
+ Label zero, infinity, done;
__ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
__ vldr(double_scratch1, ExpConstant(0, temp3));
- __ vmov(result, kDoubleRegZero);
__ VFPCompareAndSetFlags(double_scratch1, input);
- __ b(ge, &done);
+ __ b(ge, &zero);
+
__ vldr(double_scratch2, ExpConstant(1, temp3));
__ VFPCompareAndSetFlags(input, double_scratch2);
- __ vldr(result, ExpConstant(2, temp3));
- __ b(ge, &done);
+ __ b(ge, &infinity);
+
__ vldr(double_scratch1, ExpConstant(3, temp3));
__ vldr(result, ExpConstant(4, temp3));
__ vmul(double_scratch1, double_scratch1, input);
__ vadd(double_scratch1, double_scratch1, result);
- __ vmov(temp2, temp1, double_scratch1);
+ __ VmovLow(temp2, double_scratch1);
__ vsub(double_scratch1, double_scratch1, result);
__ vldr(result, ExpConstant(6, temp3));
__ vldr(double_scratch2, ExpConstant(5, temp3));
__ vmul(double_scratch1, double_scratch1, double_scratch2);
__ vsub(double_scratch1, double_scratch1, input);
__ vsub(result, result, double_scratch1);
- __ vmul(input, double_scratch1, double_scratch1);
- __ vmul(result, result, input);
- __ mov(temp1, Operand(temp2, LSR, 11));
+ __ vmul(double_scratch2, double_scratch1, double_scratch1);
+ __ vmul(result, result, double_scratch2);
__ vldr(double_scratch2, ExpConstant(7, temp3));
__ vmul(result, result, double_scratch2);
__ vsub(result, result, double_scratch1);
- __ vldr(double_scratch2, ExpConstant(8, temp3));
+ // Move 1 into double_scratch2, since math_exp_constants_array[8] == 1.
+ ASSERT(*reinterpret_cast<double*>
+ (ExternalReference::math_exp_constants(8).address()) == 1);
+ __ vmov(double_scratch2, 1);
__ vadd(result, result, double_scratch2);
- __ movw(ip, 0x7ff);
- __ and_(temp2, temp2, Operand(ip));
+ __ mov(temp1, Operand(temp2, LSR, 11));
+ __ Ubfx(temp2, temp2, 0, 11);
__ add(temp1, temp1, Operand(0x3ff));
- __ mov(temp1, Operand(temp1, LSL, 20));
// Must not call ExpConstant() after overwriting temp3!
__ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ ldr(ip, MemOperand(temp3, temp2, LSL, 3));
- __ add(temp3, temp3, Operand(kPointerSize));
- __ ldr(temp2, MemOperand(temp3, temp2, LSL, 3));
- __ orr(temp1, temp1, temp2);
- __ vmov(input, ip, temp1);
- __ vmul(result, result, input);
+ __ add(temp3, temp3, Operand(temp2, LSL, 3));
+ __ ldm(ia, temp3, temp2.bit() | temp3.bit());
+ // The first word loaded goes into the lower-numbered register.
+ if (temp2.code() < temp3.code()) {
+ __ orr(temp1, temp3, Operand(temp1, LSL, 20));
+ __ vmov(double_scratch1, temp2, temp1);
+ } else {
+ __ orr(temp1, temp2, Operand(temp1, LSL, 20));
+ __ vmov(double_scratch1, temp3, temp1);
+ }
+ __ vmul(result, result, double_scratch1);
+ __ b(&done);
+
+ __ bind(&zero);
+ __ vmov(result, kDoubleRegZero);
+ __ b(&done);
+
+ __ bind(&infinity);
+ __ vldr(result, ExpConstant(2, temp3));
+
__ bind(&done);
}
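The restructured sequence replaces predicated writes into result with explicit early-exit branches and fetches both 32-bit halves of the log-table entry with one ldm. In C-like shorthand, the generated control flow is now as follows (the threshold and Approximate names are illustrative, standing in for ExpConstant(0), (1), (2) and the polynomial-plus-table body):

    double generated_exp(double input) {
      if (input <= kUnderflowThreshold) return 0.0;       // the &zero path
      if (input >= kOverflowThreshold) return kInfinity;  // the &infinity path
      double result = Approximate(input);  // polynomial + 2^k table scaling
      return result;
    }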
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 54530d8..ecbe64c 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -97,6 +97,7 @@
class MathExpGenerator : public AllStatic {
public:
+ // Register input isn't modified. All other registers are clobbered.
static void EmitMathExp(MacroAssembler* masm,
DwVfpRegister input,
DwVfpRegister result,
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 9f8da50..0ed84ff 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -268,8 +268,8 @@
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
__ add(r6, r2, Operand(r3));
- __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
- __ push(r7);
+ __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
+ __ push(r6);
__ bind(&inner_loop_header);
__ cmp(r3, Operand::Zero());
__ b(ne, &inner_push_loop); // test for gt?
@@ -315,9 +315,9 @@
__ InitializeRootRegister();
__ pop(ip); // remove pc
- __ pop(r7); // get continuation, leave pc on stack
+ __ pop(ip); // get continuation, leave pc on stack
__ pop(lr);
- __ Jump(r7);
+ __ Jump(ip);
__ stop("Unreachable.");
}
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index d022b41..64a718e 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -64,7 +64,7 @@
1 << 4 | // r4 v1
1 << 5 | // r5 v2
1 << 6 | // r6 v3
- 1 << 7 | // r7 v4
+ 1 << 7 | // r7 v4 (pp in JavaScript code)
1 << 8 | // r8 v5 (cp in JavaScript code)
kR9Available << 9 | // r9 v6
1 << 10 | // r10 v7
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 195fc8c..d49f842 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -3590,8 +3590,8 @@
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into r0 and call the stub.
+ VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
@@ -3962,9 +3962,8 @@
void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- empty_separator_loop, one_char_separator_loop,
+ Label bailout, done, one_char_separator, long_separator, non_trivial_array,
+ not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
one_char_separator_loop_entry, long_separator_loop;
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
@@ -3982,19 +3981,18 @@
Register string = r4;
Register element = r5;
Register elements_end = r6;
- Register scratch1 = r7;
- Register scratch2 = r9;
+ Register scratch = r9;
// Separator operand is on the stack.
__ pop(separator);
// Check that the array is a JSArray.
__ JumpIfSmi(array, &bailout);
- __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
+ __ CompareObjectType(array, scratch, array_length, JS_ARRAY_TYPE);
__ b(ne, &bailout);
// Check that the array has fast elements.
- __ CheckFastElements(scratch1, scratch2, &bailout);
+ __ CheckFastElements(scratch, array_length, &bailout);
// If the array has length zero, return the empty string.
__ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
@@ -4031,11 +4029,11 @@
__ bind(&loop);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ JumpIfSmi(string, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ add(string_length, string_length, Operand(scratch1), SetCC);
+ __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
+ __ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
+ __ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout);
__ cmp(element, elements_end);
__ b(lt, &loop);
@@ -4056,23 +4054,23 @@
// Check that the separator is a flat ASCII string.
__ JumpIfSmi(separator, &bailout);
- __ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+ __ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// smi but the other values are, so the result is a smi
- __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ sub(string_length, string_length, Operand(scratch1));
- __ smull(scratch2, ip, array_length, scratch1);
+ __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ sub(string_length, string_length, Operand(scratch));
+ __ smull(scratch, ip, array_length, scratch);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
// zero.
__ cmp(ip, Operand::Zero());
__ b(ne, &bailout);
- __ tst(scratch2, Operand(0x80000000));
+ __ tst(scratch, Operand(0x80000000));
__ b(ne, &bailout);
- __ add(string_length, string_length, Operand(scratch2), SetCC);
+ __ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout);
__ SmiUntag(string_length);
@@ -4089,9 +4087,9 @@
// array_length: Length of the array.
__ AllocateAsciiString(result,
string_length,
- scratch1,
- scratch2,
- elements_end,
+ scratch,
+ string, // used as scratch
+ elements_end, // used as scratch
&bailout);
// Prepare for looping. Set up elements_end to end of the array. Set
// result_pos to the position of the result where to write the first
@@ -4104,8 +4102,8 @@
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
- __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(1)));
+ __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ cmp(scratch, Operand(Smi::FromInt(1)));
__ b(eq, &one_char_separator);
__ b(gt, &long_separator);
@@ -4123,7 +4121,7 @@
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &empty_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -4155,7 +4153,7 @@
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &one_char_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
@@ -4176,7 +4174,7 @@
__ add(string,
separator,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ bind(&long_separator);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
@@ -4185,7 +4183,7 @@
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ CopyBytes(string, result_pos, string_length, scratch);
__ cmp(element, elements_end);
__ b(lt, &long_separator_loop); // End while (element < elements_end).
ASSERT(result.is(r0));
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index f15d4b1..b39a0ed 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1394,7 +1394,7 @@
Register receiver = r2;
Register receiver_map = r3;
Register elements_map = r6;
- Register elements = r7; // Elements array of the receiver.
+ Register elements = r9; // Elements array of the receiver.
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index f216a8e..6fdbbb5 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1240,7 +1240,7 @@
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseTempRegister(instr->value());
+ LOperand* input = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
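This relaxation pairs with the MathExpGenerator contract documented in the codegen-arm.h hunk above: UseTempRegister hands the instruction a register it may clobber, forcing the allocator to keep a separate copy if the value stays live, while UseRegister only guarantees the value sits in some register. Since EmitMathExp no longer writes to input, the weaker constraint suffices:

    // input is read-only inside EmitMathExp, so no defensive copy is needed.
    LOperand* input = UseRegister(instr->value());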
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 3902b4c..76867c8 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -1360,8 +1360,8 @@
LOperand* temp() { return temps_[0]; }
Smi* index() const { return index_; }
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
private:
Smi* index_;
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 2680c34..ca38743 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1090,11 +1090,6 @@
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3879,9 +3874,9 @@
} else if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(r2, &no_deopt);
- __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r7, Operand(ip));
+ __ cmp(r6, Operand(ip));
DeoptimizeIf(ne, instr->environment());
__ bind(&no_deopt);
MathPowStub stub(MathPowStub::TAGGED);
@@ -5386,24 +5381,24 @@
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
Label materialized;
// Registers will be used as follows:
- // r7 = literals array.
+ // r6 = literals array.
// r1 = regexp literal.
// r0 = regexp literal clone.
- // r2 and r4-r6 are used as temporaries.
+ // r2-r5 are used as temporaries.
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(r7, instr->hydrogen()->literals());
- __ ldr(r1, FieldMemOperand(r7, literal_offset));
+ __ LoadHeapObject(r6, instr->hydrogen()->literals());
+ __ ldr(r1, FieldMemOperand(r6, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r1, ip);
__ b(ne, &materialized);
// Create regexp literal using runtime function
// Result will be in r0.
- __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r5, Operand(instr->hydrogen()->pattern()));
- __ mov(r4, Operand(instr->hydrogen()->flags()));
- __ Push(r7, r6, r5, r4);
+ __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ mov(r4, Operand(instr->hydrogen()->pattern()));
+ __ mov(r3, Operand(instr->hydrogen()->flags()));
+ __ Push(r6, r5, r4, r3);
CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
__ mov(r1, r0);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index eeabc05..fd51521 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1337,7 +1337,7 @@
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
- // For the JSEntry handler, we must preserve r0-r4, r5-r7 are available.
+ // For the JSEntry handler, we must preserve r0-r4, r5-r6 are available.
// We will build up the handler from the bottom by pushing on the stack.
// Set up the code object (r5) and the state (r6) for pushing.
unsigned state =
@@ -1348,9 +1348,9 @@
// Push the frame pointer, context, state, and code object.
if (kind == StackHandler::JS_ENTRY) {
- mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
+ mov(cp, Operand(Smi::FromInt(0))); // Indicates no context.
mov(ip, Operand::Zero()); // NULL frame pointer.
- stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
+ stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
} else {
stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
}
@@ -2305,13 +2305,15 @@
ExternalReference::handle_scope_level_address(isolate()),
next_address);
+ ASSERT(!thunk_last_arg.is(r3));
+
// Allocate HandleScope in callee-save registers.
- mov(r7, Operand(next_address));
- ldr(r4, MemOperand(r7, kNextOffset));
- ldr(r5, MemOperand(r7, kLimitOffset));
- ldr(r6, MemOperand(r7, kLevelOffset));
+ mov(r9, Operand(next_address));
+ ldr(r4, MemOperand(r9, kNextOffset));
+ ldr(r5, MemOperand(r9, kLimitOffset));
+ ldr(r6, MemOperand(r9, kLevelOffset));
add(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
+ str(r6, MemOperand(r9, kLevelOffset));
if (FLAG_log_timer_events) {
FrameScope frame(this, StackFrame::MANUAL);
@@ -2322,7 +2324,6 @@
PopSafepointRegisters();
}
- ASSERT(!thunk_last_arg.is(r3));
Label profiler_disabled;
Label end_profiler_check;
bool* is_profiling_flag =
@@ -2368,15 +2369,15 @@
bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
- str(r4, MemOperand(r7, kNextOffset));
+ str(r4, MemOperand(r9, kNextOffset));
if (emit_debug_code()) {
- ldr(r1, MemOperand(r7, kLevelOffset));
+ ldr(r1, MemOperand(r9, kLevelOffset));
cmp(r1, r6);
Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
}
sub(r6, r6, Operand(1));
- str(r6, MemOperand(r7, kLevelOffset));
- ldr(ip, MemOperand(r7, kLimitOffset));
+ str(r6, MemOperand(r9, kLevelOffset));
+ ldr(ip, MemOperand(r9, kLimitOffset));
cmp(r5, ip);
b(ne, &delete_allocated_handles);
@@ -2409,7 +2410,7 @@
// HandleScope limit has changed. Delete allocated extensions.
bind(&delete_allocated_handles);
- str(r5, MemOperand(r7, kLimitOffset));
+ str(r5, MemOperand(r9, kLimitOffset));
mov(r4, r0);
PrepareCallCFunction(1, r5);
mov(r0, Operand(ExternalReference::isolate_address(isolate())));
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 6d08ab9..6d5ba51 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -45,8 +45,9 @@
// Give alias names to registers
-const Register cp = { 8 }; // JavaScript context pointer
-const Register kRootRegister = { 10 }; // Roots array pointer.
+const Register pp = { kRegister_r7_Code }; // Constant pool pointer.
+const Register cp = { kRegister_r8_Code }; // JavaScript context pointer.
+const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
// Flags used for AllocateHeapNumber
enum TaggingMode {
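r8 and r10 keep their existing roles; the new alias dedicates r7 to pp, the constant pool pointer, for builds with FLAG_enable_ool_constant_pool (matching the reservation in assembler-arm.h near the top of this diff). Code generated under that flag would reach constants through pp-relative loads rather than pc-relative ones, along the lines of the following (offset being whatever pool slot the assembler assigns; illustrative, not an API added by this patch):

    __ ldr(r0, MemOperand(pp, offset));  // fetch a constant from the ool pool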
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 567eb63..6308b34 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -874,8 +874,8 @@
// Store call data.
__ str(r6, MemOperand(sp, 3 * kPointerSize));
// Store isolate.
- __ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ str(r7, MemOperand(sp, 4 * kPointerSize));
+ __ mov(r5, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ str(r5, MemOperand(sp, 4 * kPointerSize));
// Store ReturnValue default and ReturnValue.
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ str(r5, MemOperand(sp, 5 * kPointerSize));
@@ -1855,15 +1855,15 @@
if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
Label fast_object, not_fast_object;
- __ CheckFastObjectElements(r3, r7, &not_fast_object);
+ __ CheckFastObjectElements(r3, r9, &not_fast_object);
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
- __ CheckFastSmiElements(r3, r7, &call_builtin);
+ __ CheckFastSmiElements(r3, r9, &call_builtin);
- __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r7, ip);
+ __ cmp(r9, ip);
__ b(eq, &call_builtin);
// edx: receiver
// r3: map
@@ -1871,7 +1871,7 @@
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r3,
- r7,
+ r9,
&try_holey_map);
__ mov(r2, receiver);
ElementsTransitionGenerator::
@@ -1884,7 +1884,7 @@
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
r3,
- r7,
+ r9,
&call_builtin);
__ mov(r2, receiver);
ElementsTransitionGenerator::
@@ -1917,7 +1917,6 @@
__ bind(&attempt_to_grow_elements);
// r0: array's length + 1.
- // r4: elements' length.
if (!FLAG_inline_new) {
__ b(&call_builtin);
@@ -1928,8 +1927,8 @@
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(r2, &no_fast_elements_check);
- __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(r7, r7, &call_builtin);
+ __ ldr(r9, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ CheckFastObjectElements(r9, r9, &call_builtin);
__ bind(&no_fast_elements_check);
ExternalReference new_space_allocation_top =
@@ -1941,8 +1940,8 @@
// Load top and check if it is the end of elements.
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
__ add(end_elements, end_elements, Operand(kEndElementsOffset));
- __ mov(r7, Operand(new_space_allocation_top));
- __ ldr(r3, MemOperand(r7));
+ __ mov(r4, Operand(new_space_allocation_top));
+ __ ldr(r3, MemOperand(r4));
__ cmp(end_elements, r3);
__ b(ne, &call_builtin);
@@ -1954,7 +1953,7 @@
// We fit and could grow elements.
// Update new_space_allocation_top.
- __ str(r3, MemOperand(r7));
+ __ str(r3, MemOperand(r4));
// Push the argument.
__ str(r2, MemOperand(end_elements));
// Fill the rest with holes.
@@ -1965,6 +1964,7 @@
// Update elements' and array's sizes.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
__ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
diff --git a/src/builtins.h b/src/builtins.h
index 01061b6..54787f7 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -108,8 +108,6 @@
Code::kNoExtraICState) \
V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(NotifyOSR, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -388,7 +386,6 @@
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
- static void Generate_NotifyOSR(MacroAssembler* masm);
static void Generate_NotifyStubFailure(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
diff --git a/src/checks.cc b/src/checks.cc
index 7108d18..d2a107c 100644
--- a/src/checks.cc
+++ b/src/checks.cc
@@ -25,11 +25,48 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <stdarg.h>
+#include "checks.h"
-#include "v8.h"
+#if V8_LIBC_GLIBC || V8_OS_BSD
+# include <cxxabi.h>
+# include <execinfo.h>
+#endif // V8_LIBC_GLIBC || V8_OS_BSD
+#include <stdio.h>
#include "platform.h"
+#include "v8.h"
+
+
+// Attempts to dump a backtrace (if supported).
+static V8_INLINE void DumpBacktrace() {
+#if V8_LIBC_GLIBC || V8_OS_BSD
+ void* trace[100];
+ int size = backtrace(trace, ARRAY_SIZE(trace));
+ char** symbols = backtrace_symbols(trace, size);
+ i::OS::PrintError("\n==== C stack trace ===============================\n\n");
+ if (size == 0) {
+ i::OS::PrintError("(empty)\n");
+ } else if (symbols == NULL) {
+ i::OS::PrintError("(no symbols)\n");
+ } else {
+ for (int i = 1; i < size; ++i) {
+ i::OS::PrintError("%2d: ", i);
+ char mangled[201];
+ if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
+ int status;
+ size_t length;
+ char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
+ i::OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
+ free(demangled);
+ } else {
+ i::OS::PrintError("??\n");
+ }
+ }
+ }
+ free(symbols);
+#endif // V8_LIBC_GLIBC || V8_OS_BSD
+}
+
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
@@ -43,7 +80,8 @@
i::OS::VPrintError(format, arguments);
va_end(arguments);
i::OS::PrintError("\n#\n");
- i::OS::DumpBacktrace();
+ DumpBacktrace();
+ fflush(stderr);
i::OS::Abort();
}
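
For reference, the new DumpBacktrace() combines glibc's backtrace()/backtrace_symbols() with abi::__cxa_demangle(). A minimal standalone sketch of the same technique, assuming a glibc or BSD toolchain (build with -rdynamic so backtrace_symbols() can resolve function names):

    // Minimal sketch of the backtrace-and-demangle technique above.
    #include <cxxabi.h>
    #include <execinfo.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void PrintBacktrace() {
      void* trace[100];
      int size = backtrace(trace, 100);
      char** symbols = backtrace_symbols(trace, size);
      if (symbols == NULL) return;
      for (int i = 1; i < size; ++i) {  // Skip frame 0 (this function).
        char mangled[201];
        // Pull the mangled name out of "binary(_Zmangled+0x...) [addr]".
        if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {
          int status;
          char* demangled = abi::__cxa_demangle(mangled, NULL, NULL, &status);
          printf("%2d: %s\n", i, demangled != NULL ? demangled : mangled);
          free(demangled);  // Demangler output is malloc'd; free is NULL-safe.
        } else {
          printf("%2d: ??\n", i);
        }
      }
      free(symbols);
    }

    int main() {
      PrintBacktrace();
      return 0;
    }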
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 0d06209..2700dbb 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -339,6 +339,19 @@
template <>
+HValue* CodeStubGraphBuilder<NumberToStringStub>::BuildCodeStub() {
+ info()->MarkAsSavesCallerDoubles();
+ HValue* number = GetParameter(NumberToStringStub::kNumber);
+ return BuildNumberToString(number);
+}
+
+
+Handle<Code> NumberToStringStub::GenerateCode(Isolate* isolate) {
+ return DoGenerateCode(isolate, this);
+}
+
+
+template <>
HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
Factory* factory = isolate()->factory();
HValue* undefined = graph()->GetConstantUndefined();
@@ -473,6 +486,11 @@
HObjectAccess::ForAllocationSiteTransitionInfo(),
initial_elements_kind);
+ // Unlike literals, constructed arrays don't have nested sites
+ Add<HStoreNamedField>(object,
+ HObjectAccess::ForAllocationSiteNestedSite(),
+ graph()->GetConstant0());
+
// Store an empty fixed array for the code dependency.
HConstant* empty_fixed_array =
Add<HConstant>(isolate()->factory()->empty_fixed_array());
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 30ec1c7..a5c2f88 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -464,6 +464,25 @@
};
+class NumberToStringStub V8_FINAL : public HydrogenCodeStub {
+ public:
+ NumberToStringStub() {}
+
+ virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE;
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+ // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+ static const int kNumber = 0;
+
+ private:
+ virtual Major MajorKey() V8_OVERRIDE { return NumberToString; }
+ virtual int NotMissMinorKey() V8_OVERRIDE { return 0; }
+};
+
+
class FastNewClosureStub : public HydrogenCodeStub {
public:
explicit FastNewClosureStub(LanguageMode language_mode, bool is_generator)
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 2c01251..a8229ea 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -176,11 +176,6 @@
"enable harmony collections (sets, maps, and weak maps)")
DEFINE_bool(harmony_observation, false,
"enable harmony object observation (implies harmony collections")
-DEFINE_bool(harmony_typed_arrays, true,
- "enable harmony typed arrays")
-DEFINE_bool(harmony_array_buffer, true,
- "enable harmony array buffer")
-DEFINE_implication(harmony_typed_arrays, harmony_array_buffer)
DEFINE_bool(harmony_generators, false, "enable harmony generators")
DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)")
DEFINE_bool(harmony_numeric_literals, false,
@@ -201,7 +196,6 @@
DEFINE_implication(harmony, harmony_arrays)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
-// TODO[dslomov] add harmony => harmony_typed_arrays
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
@@ -826,6 +820,16 @@
#endif
#endif
+//
+// Read-only flags
+//
+#undef FLAG
+#define FLAG FLAG_READONLY
+
+// assembler-arm.h
+DEFINE_bool(enable_ool_constant_pool, false,
+ "enable use of out-of-line constant pools (ARM only)")
+
// Cleanup...
#undef FLAG_FULL
#undef FLAG_READONLY
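
The read-only section above works by redefining the FLAG macro mid-file, so the same definition list can expand to either a mutable global or a compile-time constant. A minimal sketch of that X-macro pattern (the macro bodies here are illustrative, not V8's actual definitions):

    #include <stdio.h>

    // Two possible expansions for a flag definition.
    #define FLAG_FULL(ctype, name, def) ctype FLAG_##name = def;
    #define FLAG_READONLY(ctype, name, def) static const ctype FLAG_##name = def;

    // Normal flags: mutable globals.
    #define FLAG FLAG_FULL
    FLAG(bool, packed_arrays, true)
    #undef FLAG

    // Read-only flags: constants the rest of the code can still test.
    #define FLAG FLAG_READONLY
    FLAG(bool, enable_ool_constant_pool, false)
    #undef FLAG

    int main() {
      printf("%d %d\n", FLAG_packed_arrays, FLAG_enable_ool_constant_pool);
      return 0;
    }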
diff --git a/src/heap-inl.h b/src/heap-inl.h
index e11dec8..a0762a3 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -140,12 +140,11 @@
// Compute map and object size.
Map* map = ascii_internalized_string_map();
int size = SeqOneByteString::SizeFor(str.length());
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -174,12 +173,11 @@
// Compute map and object size.
Map* map = internalized_string_map();
int size = SeqTwoByteString::SizeFor(str.length());
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -211,7 +209,9 @@
MaybeObject* Heap::AllocateRaw(int size_in_bytes,
AllocationSpace space,
AllocationSpace retry_space) {
- ASSERT(AllowHandleAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
+ ASSERT(AllowHandleAllocation::IsAllowed());
+ ASSERT(AllowHeapAllocation::IsAllowed());
+ ASSERT(gc_state_ == NOT_IN_GC);
ASSERT(space != NEW_SPACE ||
retry_space == OLD_POINTER_SPACE ||
retry_space == OLD_DATA_SPACE ||
diff --git a/src/heap-snapshot-generator.cc b/src/heap-snapshot-generator.cc
index 5799132..5570362 100644
--- a/src/heap-snapshot-generator.cc
+++ b/src/heap-snapshot-generator.cc
@@ -1301,6 +1301,8 @@
AllocationSite* site) {
SetInternalReference(site, entry, "transition_info", site->transition_info(),
AllocationSite::kTransitionInfoOffset);
+ SetInternalReference(site, entry, "nested_site", site->nested_site(),
+ AllocationSite::kNestedSiteOffset);
SetInternalReference(site, entry, "dependent_code", site->dependent_code(),
AllocationSite::kDependentCodeOffset);
}
diff --git a/src/heap.cc b/src/heap.cc
index e81a0e3..7b9f0d5 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -608,6 +608,11 @@
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callback invocations.
// Therefore stop recollecting after several attempts.
+ if (FLAG_concurrent_recompilation) {
+ // The optimizing compiler may be unnecessarily holding on to memory.
+ DisallowHeapAllocation no_recursive_gc;
+ isolate()->optimizing_compiler_thread()->Flush();
+ }
mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
@@ -2896,12 +2901,12 @@
MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
+ int size = HeapNumber::kSize;
STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -2911,23 +2916,6 @@
}
-MaybeObject* Heap::AllocateHeapNumber(double value) {
- // Use general version, if we're forced to always allocate.
- if (always_allocate()) return AllocateHeapNumber(value, TENURED);
-
- // This version of AllocateHeapNumber is optimized for
- // allocation in new space.
- STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
- Object* result;
- { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
- HeapNumber::cast(result)->set_value(value);
- return result;
-}
-
-
MaybeObject* Heap::AllocateCell(Object* value) {
Object* result;
{ MaybeObject* maybe_result = AllocateRawCell();
@@ -4065,31 +4053,8 @@
if (length < 0 || length > ByteArray::kMaxLength) {
return Failure::OutOfMemoryException(0x7);
}
- if (pretenure == NOT_TENURED) {
- return AllocateByteArray(length);
- }
int size = ByteArray::SizeFor(length);
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_DATA_SPACE;
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
- byte_array_map());
- reinterpret_cast<ByteArray*>(result)->set_length(length);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateByteArray(int length) {
- if (length < 0 || length > ByteArray::kMaxLength) {
- return Failure::OutOfMemoryException(0x8);
- }
- int size = ByteArray::SizeFor(length);
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
{ MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4120,11 +4085,10 @@
ExternalArrayType array_type,
void* external_pointer,
PretenureFlag pretenure) {
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ int size = ExternalArray::kAlignedSize;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
- space,
- OLD_DATA_SPACE);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -4423,10 +4387,6 @@
arguments_object_size = kArgumentsObjectSize;
}
- // This calls Copy directly rather than using Heap::AllocateRaw so we
- // duplicate the check here.
- ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
-
// Check that the size of the boilerplate matches our
// expectations. The ArgumentsAccessStub::GenerateNewObject relies
// on the size being a known constant.
@@ -4562,9 +4522,8 @@
}
// Allocate the JSObject.
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
+ int size = map->instance_size();
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
Object* obj;
MaybeObject* maybe_obj = Allocate(map, space);
if (!maybe_obj->To(&obj)) return maybe_obj;
@@ -4597,8 +4556,8 @@
}
// Allocate the JSObject.
- AllocationSpace space = NEW_SPACE;
- if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
+ int size = map->instance_size();
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, NOT_TENURED);
Object* obj;
MaybeObject* maybe_obj =
AllocateWithAllocationSite(map, space, allocation_site);
@@ -5266,12 +5225,11 @@
map = internalized_string_map();
size = SeqTwoByteString::SizeFor(chars);
}
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -5310,16 +5268,10 @@
}
int size = SeqOneByteString::SizeFor(length);
ASSERT(size <= SeqOneByteString::kMaxSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- }
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -5340,16 +5292,10 @@
}
int size = SeqTwoByteString::SizeFor(length);
ASSERT(size <= SeqTwoByteString::kMaxSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- }
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -5452,39 +5398,14 @@
}
-MaybeObject* Heap::AllocateFixedArray(int length) {
- ASSERT(length >= 0);
- if (length == 0) return empty_fixed_array();
- Object* result;
- { MaybeObject* maybe_result = AllocateRawFixedArray(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- // Initialize header.
- FixedArray* array = reinterpret_cast<FixedArray*>(result);
- array->set_map_no_write_barrier(fixed_array_map());
- array->set_length(length);
- // Initialize body.
- ASSERT(!InNewSpace(undefined_value()));
- MemsetPointer(array->data_start(), undefined_value(), length);
- return result;
-}
-
-
MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > FixedArray::kMaxLength) {
return Failure::OutOfMemoryException(0xe);
}
int size = FixedArray::SizeFor(length);
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_POINTER_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- }
-
- return AllocateRaw(size, space, retry_space);
+ return AllocateRaw(size, space, OLD_POINTER_SPACE);
}
@@ -5602,20 +5523,13 @@
return Failure::OutOfMemoryException(0xf);
}
int size = FixedDoubleArray::SizeFor(length);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
#ifndef V8_HOST_ARCH_64_BIT
size += kPointerSize;
#endif
-
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- }
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
HeapObject* object;
- { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
+ { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
}
@@ -5819,8 +5733,7 @@
return Failure::InternalError();
}
int size = map->instance_size();
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
Object* result;
{ MaybeObject* maybe_result = Allocate(map, space);
if (!maybe_result->ToObject(&result)) return maybe_result;
diff --git a/src/heap.h b/src/heap.h
index 573c512..cb979a6 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -864,14 +864,9 @@
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateByteArray(int length,
- PretenureFlag pretenure);
-
- // Allocate a non-tenured byte array of the specified length
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
- // Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateByteArray(int length);
+ MUST_USE_RESULT MaybeObject* AllocateByteArray(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocates an external array of the specified length and type.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -912,10 +907,9 @@
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length,
- PretenureFlag pretenure);
- // Allocates a fixed array initialized with undefined values
- MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length);
+ MUST_USE_RESULT MaybeObject* AllocateFixedArray(
+ int length,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocates an uninitialized fixed array. It must be filled by the caller.
//
@@ -1044,10 +1038,7 @@
// Allocates a HeapNumber from value.
MUST_USE_RESULT MaybeObject* AllocateHeapNumber(
- double value,
- PretenureFlag pretenure);
- // pretenure = NOT_TENURED
- MUST_USE_RESULT MaybeObject* AllocateHeapNumber(double value);
+ double value, PretenureFlag pretenure = NOT_TENURED);
// Converts an int into either a Smi or a HeapNumber object.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -2066,6 +2057,17 @@
inline void UpdateOldSpaceLimits();
+ // Selects the proper allocation space depending on the given object
+ // size, pretenuring decision, and preferred old-space.
+ static AllocationSpace SelectSpace(int object_size,
+ AllocationSpace preferred_old_space,
+ PretenureFlag pretenure) {
+ ASSERT(preferred_old_space == OLD_POINTER_SPACE ||
+ preferred_old_space == OLD_DATA_SPACE);
+ if (object_size > Page::kMaxNonCodeHeapObjectSize) return LO_SPACE;
+ return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
+ }
+
// Allocate an uninitialized object in map space. The behavior is identical
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size
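
SelectSpace() folds the object-size check and the pretenuring decision, previously repeated at each allocation site, into one helper. A standalone sketch of the same decision table (the size limit is an illustrative stand-in for Page::kMaxNonCodeHeapObjectSize):

    #include <assert.h>

    enum AllocationSpace {
      NEW_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE, LO_SPACE
    };
    enum PretenureFlag { NOT_TENURED, TENURED };

    static const int kMaxRegularObjectSize = 64 * 1024;  // Illustrative.

    AllocationSpace SelectSpace(int object_size,
                                AllocationSpace preferred_old_space,
                                PretenureFlag pretenure) {
      // Oversized objects always go to large-object space.
      if (object_size > kMaxRegularObjectSize) return LO_SPACE;
      // Otherwise pretenured objects go straight to old space.
      return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
    }

    int main() {
      assert(SelectSpace(128, OLD_DATA_SPACE, NOT_TENURED) == NEW_SPACE);
      assert(SelectSpace(128, OLD_DATA_SPACE, TENURED) == OLD_DATA_SPACE);
      assert(SelectSpace(1 << 20, OLD_POINTER_SPACE, TENURED) == LO_SPACE);
      return 0;
    }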
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index a685198..d301036 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -2363,8 +2363,7 @@
HConstant::HConstant(Handle<Object> handle, Representation r)
: HTemplateInstruction<0>(HType::TypeFromValue(handle)),
- handle_(handle),
- unique_id_(),
+ object_(Unique<Object>::CreateUninitialized(handle)),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
@@ -2373,29 +2372,28 @@
is_not_in_new_space_(true),
is_cell_(false),
boolean_value_(handle->BooleanValue()) {
- if (handle_->IsHeapObject()) {
+ if (handle->IsHeapObject()) {
Heap* heap = Handle<HeapObject>::cast(handle)->GetHeap();
is_not_in_new_space_ = !heap->InNewSpace(*handle);
}
- if (handle_->IsNumber()) {
- double n = handle_->Number();
+ if (handle->IsNumber()) {
+ double n = handle->Number();
has_int32_value_ = IsInteger32(n);
int32_value_ = DoubleToInt32(n);
has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
double_value_ = n;
has_double_value_ = true;
} else {
- is_internalized_string_ = handle_->IsInternalizedString();
+ is_internalized_string_ = handle->IsInternalizedString();
}
- is_cell_ = !handle_.is_null() &&
- (handle_->IsCell() || handle_->IsPropertyCell());
+ is_cell_ = !handle.is_null() &&
+ (handle->IsCell() || handle->IsPropertyCell());
Initialize(r);
}
-HConstant::HConstant(Handle<Object> handle,
- UniqueValueId unique_id,
+HConstant::HConstant(Unique<Object> unique,
Representation r,
HType type,
bool is_internalize_string,
@@ -2403,8 +2401,7 @@
bool is_cell,
bool boolean_value)
: HTemplateInstruction<0>(type),
- handle_(handle),
- unique_id_(unique_id),
+ object_(unique),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
@@ -2413,36 +2410,17 @@
is_not_in_new_space_(is_not_in_new_space),
is_cell_(is_cell),
boolean_value_(boolean_value) {
- ASSERT(!handle.is_null());
+ ASSERT(!unique.handle().is_null());
ASSERT(!type.IsTaggedNumber());
Initialize(r);
}
-HConstant::HConstant(Handle<Map> handle,
- UniqueValueId unique_id)
- : HTemplateInstruction<0>(HType::Tagged()),
- handle_(handle),
- unique_id_(unique_id),
- has_smi_value_(false),
- has_int32_value_(false),
- has_double_value_(false),
- has_external_reference_value_(false),
- is_internalized_string_(false),
- is_not_in_new_space_(true),
- is_cell_(false),
- boolean_value_(false) {
- ASSERT(!handle.is_null());
- Initialize(Representation::Tagged());
-}
-
-
HConstant::HConstant(int32_t integer_value,
Representation r,
bool is_not_in_new_space,
- Handle<Object> optional_handle)
- : handle_(optional_handle),
- unique_id_(),
+ Unique<Object> object)
+ : object_(object),
has_smi_value_(Smi::IsValid(integer_value)),
has_int32_value_(true),
has_double_value_(true),
@@ -2461,9 +2439,8 @@
HConstant::HConstant(double double_value,
Representation r,
bool is_not_in_new_space,
- Handle<Object> optional_handle)
- : handle_(optional_handle),
- unique_id_(),
+ Unique<Object> object)
+ : object_(object),
has_int32_value_(IsInteger32(double_value)),
has_double_value_(true),
has_external_reference_value_(false),
@@ -2481,6 +2458,7 @@
HConstant::HConstant(ExternalReference reference)
: HTemplateInstruction<0>(HType::None()),
+ object_(Unique<Object>(Handle<Object>::null())),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
@@ -2494,14 +2472,6 @@
}
-static void PrepareConstant(Handle<Object> object) {
- if (!object->IsJSObject()) return;
- Handle<JSObject> js_object = Handle<JSObject>::cast(object);
- if (!js_object->map()->is_deprecated()) return;
- JSObject::TryMigrateInstance(js_object);
-}
-
-
void HConstant::Initialize(Representation r) {
if (r.IsNone()) {
if (has_smi_value_ && SmiValuesAre31Bits()) {
@@ -2513,7 +2483,14 @@
} else if (has_external_reference_value_) {
r = Representation::External();
} else {
- PrepareConstant(handle_);
+ Handle<Object> object = object_.handle();
+ if (object->IsJSObject()) {
+ // Try to eagerly migrate JSObjects that have deprecated maps.
+ Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+ if (js_object->map()->is_deprecated()) {
+ JSObject::TryMigrateInstance(js_object);
+ }
+ }
r = Representation::Tagged();
}
}
@@ -2541,17 +2518,16 @@
if (r.IsDouble() && !has_double_value_) return NULL;
if (r.IsExternal() && !has_external_reference_value_) return NULL;
if (has_int32_value_) {
- return new(zone) HConstant(int32_value_, r, is_not_in_new_space_, handle_);
+ return new(zone) HConstant(int32_value_, r, is_not_in_new_space_, object_);
}
if (has_double_value_) {
- return new(zone) HConstant(double_value_, r, is_not_in_new_space_, handle_);
+ return new(zone) HConstant(double_value_, r, is_not_in_new_space_, object_);
}
if (has_external_reference_value_) {
return new(zone) HConstant(external_reference_value_);
}
- ASSERT(!handle_.is_null());
- return new(zone) HConstant(handle_,
- unique_id_,
+ ASSERT(!object_.handle().is_null());
+ return new(zone) HConstant(object_,
r,
type_,
is_internalized_string_,
@@ -2567,12 +2543,12 @@
res = new(zone) HConstant(int32_value_,
Representation::Integer32(),
is_not_in_new_space_,
- handle_);
+ object_);
} else if (has_double_value_) {
res = new(zone) HConstant(DoubleToInt32(double_value_),
Representation::Integer32(),
is_not_in_new_space_,
- handle_);
+ object_);
}
return Maybe<HConstant*>(res != NULL, res);
}
@@ -3439,8 +3415,8 @@
HConstant* filler_map = HConstant::New(
zone,
context(),
- isolate()->factory()->free_space_map(),
- UniqueValueId::free_space_map(isolate()->heap()));
+ isolate()->factory()->free_space_map());
+ filler_map->FinalizeUniqueness(); // TODO(titzer): should be init'd a'ready
filler_map->InsertAfter(free_space_instr);
HInstruction* store_map = HStoreNamedField::New(zone, context(),
free_space_instr, HObjectAccess::ForMap(), filler_map);
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 8cb2f59..71bdb33 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -306,65 +306,6 @@
};
-class UniqueValueId V8_FINAL {
- public:
- UniqueValueId() : raw_address_(NULL) { }
-
- explicit UniqueValueId(Handle<Object> handle) {
- ASSERT(!AllowHeapAllocation::IsAllowed());
- static const Address kEmptyHandleSentinel = reinterpret_cast<Address>(1);
- if (handle.is_null()) {
- raw_address_ = kEmptyHandleSentinel;
- } else {
- raw_address_ = reinterpret_cast<Address>(*handle);
- ASSERT_NE(kEmptyHandleSentinel, raw_address_);
- }
- ASSERT(IsInitialized());
- }
-
- bool IsInitialized() const { return raw_address_ != NULL; }
-
- bool operator==(const UniqueValueId& other) const {
- ASSERT(IsInitialized() && other.IsInitialized());
- return raw_address_ == other.raw_address_;
- }
-
- bool operator!=(const UniqueValueId& other) const {
- ASSERT(IsInitialized() && other.IsInitialized());
- return raw_address_ != other.raw_address_;
- }
-
- intptr_t Hashcode() const {
- ASSERT(IsInitialized());
- return reinterpret_cast<intptr_t>(raw_address_);
- }
-
-#define IMMOVABLE_UNIQUE_VALUE_ID(name) \
- static UniqueValueId name(Heap* heap) { return UniqueValueId(heap->name()); }
-
- IMMOVABLE_UNIQUE_VALUE_ID(free_space_map)
- IMMOVABLE_UNIQUE_VALUE_ID(minus_zero_value)
- IMMOVABLE_UNIQUE_VALUE_ID(nan_value)
- IMMOVABLE_UNIQUE_VALUE_ID(undefined_value)
- IMMOVABLE_UNIQUE_VALUE_ID(null_value)
- IMMOVABLE_UNIQUE_VALUE_ID(true_value)
- IMMOVABLE_UNIQUE_VALUE_ID(false_value)
- IMMOVABLE_UNIQUE_VALUE_ID(the_hole_value)
- IMMOVABLE_UNIQUE_VALUE_ID(empty_string)
- IMMOVABLE_UNIQUE_VALUE_ID(empty_fixed_array)
-
-#undef IMMOVABLE_UNIQUE_VALUE_ID
-
- private:
- Address raw_address_;
-
- explicit UniqueValueId(Object* object) {
- raw_address_ = reinterpret_cast<Address>(object);
- ASSERT(IsInitialized());
- }
-};
-
-
class HType V8_FINAL {
public:
static HType None() { return HType(kNone); }
@@ -904,7 +845,7 @@
virtual intptr_t Hashcode();
// Compute unique ids upfront that are safe wrt GC and concurrent compilation.
- virtual void FinalizeUniqueValueId() { }
+ virtual void FinalizeUniqueness() { }
// Printing support.
virtual void PrintTo(StringStream* stream) = 0;
@@ -2691,7 +2632,7 @@
return new(zone) HCheckValue(value, target, object_in_new_space);
}
- virtual void FinalizeUniqueValueId() V8_OVERRIDE {
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
object_ = Unique<HeapObject>(object_.handle());
}
@@ -3297,7 +3238,6 @@
DECLARE_INSTRUCTION_FACTORY_P2(HConstant, int32_t, Representation);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, double);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, Handle<Object>);
- DECLARE_INSTRUCTION_FACTORY_P2(HConstant, Handle<Map>, UniqueValueId);
DECLARE_INSTRUCTION_FACTORY_P1(HConstant, ExternalReference);
static HConstant* CreateAndInsertAfter(Zone* zone,
@@ -3323,15 +3263,15 @@
}
Handle<Object> handle(Isolate* isolate) {
- if (handle_.is_null()) {
- Factory* factory = isolate->factory();
+ if (object_.handle().is_null()) {
// Default arguments to is_not_in_new_space depend on this heap number
- // to be tenured so that it's guaranteed not be be located in new space.
- handle_ = factory->NewNumber(double_value_, TENURED);
+ // to be tenured so that it's guaranteed not to be located in new space.
+ object_ = Unique<Object>::CreateUninitialized(
+ isolate->factory()->NewNumber(double_value_, TENURED));
}
AllowDeferredHandleDereference smi_check;
- ASSERT(has_int32_value_ || !handle_->IsSmi());
- return handle_;
+ ASSERT(has_int32_value_ || !object_.handle()->IsSmi());
+ return object_.handle();
}
bool HasMap(Handle<Map> map) {
@@ -3365,17 +3305,18 @@
return false;
}
- ASSERT(!handle_.is_null());
+ ASSERT(!object_.handle().is_null());
Heap* heap = isolate()->heap();
- ASSERT(unique_id_ != UniqueValueId::minus_zero_value(heap));
- ASSERT(unique_id_ != UniqueValueId::nan_value(heap));
- return unique_id_ == UniqueValueId::undefined_value(heap) ||
- unique_id_ == UniqueValueId::null_value(heap) ||
- unique_id_ == UniqueValueId::true_value(heap) ||
- unique_id_ == UniqueValueId::false_value(heap) ||
- unique_id_ == UniqueValueId::the_hole_value(heap) ||
- unique_id_ == UniqueValueId::empty_string(heap) ||
- unique_id_ == UniqueValueId::empty_fixed_array(heap);
+ ASSERT(!object_.IsKnownGlobal(heap->minus_zero_value()));
+ ASSERT(!object_.IsKnownGlobal(heap->nan_value()));
+ return
+ object_.IsKnownGlobal(heap->undefined_value()) ||
+ object_.IsKnownGlobal(heap->null_value()) ||
+ object_.IsKnownGlobal(heap->true_value()) ||
+ object_.IsKnownGlobal(heap->false_value()) ||
+ object_.IsKnownGlobal(heap->the_hole_value()) ||
+ object_.IsKnownGlobal(heap->empty_string()) ||
+ object_.IsKnownGlobal(heap->empty_fixed_array());
}
bool IsCell() const {
@@ -3414,11 +3355,7 @@
if (HasDoubleValue() && FixedDoubleArray::is_the_hole_nan(double_value_)) {
return true;
}
- Heap* heap = isolate()->heap();
- if (!handle_.is_null() && *handle_ == heap->the_hole_value()) {
- return true;
- }
- return false;
+ return object_.IsKnownGlobal(isolate()->heap()->the_hole_value());
}
bool HasNumberValue() const { return has_double_value_; }
int32_t NumberValueAsInteger32() const {
@@ -3430,12 +3367,12 @@
}
bool HasStringValue() const {
if (has_double_value_ || has_int32_value_) return false;
- ASSERT(!handle_.is_null());
+ ASSERT(!object_.handle().is_null());
return type_.IsString();
}
Handle<String> StringValue() const {
ASSERT(HasStringValue());
- return Handle<String>::cast(handle_);
+ return Handle<String>::cast(object_.handle());
}
bool HasInternalizedStringValue() const {
return HasStringValue() && is_internalized_string_;
@@ -3459,27 +3396,20 @@
} else if (has_external_reference_value_) {
return reinterpret_cast<intptr_t>(external_reference_value_.address());
} else {
- ASSERT(!handle_.is_null());
- return unique_id_.Hashcode();
+ ASSERT(!object_.handle().is_null());
+ return object_.Hashcode();
}
}
- virtual void FinalizeUniqueValueId() V8_OVERRIDE {
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
if (!has_double_value_ && !has_external_reference_value_) {
- ASSERT(!handle_.is_null());
- unique_id_ = UniqueValueId(handle_);
+ ASSERT(!object_.handle().is_null());
+ object_ = Unique<Object>(object_.handle());
}
}
- bool UniqueValueIdsMatch(UniqueValueId other) {
- return !has_double_value_ && !has_external_reference_value_ &&
- unique_id_ == other;
- }
-
Unique<Object> GetUnique() const {
- // TODO(titzer): store a Unique<HeapObject> inside the HConstant.
- Address raw_address = reinterpret_cast<Address>(unique_id_.Hashcode());
- return Unique<Object>(raw_address, handle_);
+ return object_;
}
#ifdef DEBUG
@@ -3505,9 +3435,8 @@
external_reference_value_ ==
other_constant->external_reference_value_;
} else {
- ASSERT(!handle_.is_null());
- return !other_constant->handle_.is_null() &&
- unique_id_ == other_constant->unique_id_;
+ ASSERT(!object_.handle().is_null());
+ return other_constant->object_ == object_;
}
}
@@ -3517,33 +3446,30 @@
HConstant(int32_t value,
Representation r = Representation::None(),
bool is_not_in_new_space = true,
- Handle<Object> optional_handle = Handle<Object>::null());
+ Unique<Object> optional = Unique<Object>(Handle<Object>::null()));
HConstant(double value,
Representation r = Representation::None(),
bool is_not_in_new_space = true,
- Handle<Object> optional_handle = Handle<Object>::null());
- HConstant(Handle<Object> handle,
- UniqueValueId unique_id,
+ Unique<Object> optional = Unique<Object>(Handle<Object>::null()));
+ HConstant(Unique<Object> unique,
Representation r,
HType type,
bool is_internalized_string,
bool is_not_in_new_space,
bool is_cell,
bool boolean_value);
- HConstant(Handle<Map> handle,
- UniqueValueId unique_id);
+
explicit HConstant(ExternalReference reference);
void Initialize(Representation r);
virtual bool IsDeletable() const V8_OVERRIDE { return true; }
- // If this is a numerical constant, handle_ either points to to the
+ // If this is a numerical constant, object_ either points to the
// HeapObject the constant originated from or is null. If the
- // constant is non-numeric, handle_ always points to a valid
+ // constant is non-numeric, object_ always points to a valid
// constant HeapObject.
- Handle<Object> handle_;
- UniqueValueId unique_id_;
+ Unique<Object> object_;
// We store the HConstant in the most specific form safely possible.
// The two flags, has_int32_value_ and has_double_value_ tell us if
@@ -5137,7 +5063,7 @@
return cell_.Hashcode();
}
- virtual void FinalizeUniqueValueId() V8_OVERRIDE {
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
cell_ = Unique<Cell>(cell_.handle());
}
@@ -5435,7 +5361,7 @@
return StoringValueNeedsWriteBarrier(value());
}
- virtual void FinalizeUniqueValueId() V8_OVERRIDE {
+ virtual void FinalizeUniqueness() V8_OVERRIDE {
cell_ = Unique<PropertyCell>(cell_.handle());
}
@@ -5667,6 +5593,18 @@
kDouble, HeapNumber::kValueOffset, Representation::Double());
}
+ static HObjectAccess ForHeapNumberValueLowestBits() {
+ return HObjectAccess(kDouble,
+ HeapNumber::kValueOffset,
+ Representation::Integer32());
+ }
+
+ static HObjectAccess ForHeapNumberValueHighestBits() {
+ return HObjectAccess(kDouble,
+ HeapNumber::kValueOffset + kIntSize,
+ Representation::Integer32());
+ }
+
static HObjectAccess ForElementsPointer() {
return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
}
@@ -5699,6 +5637,10 @@
return HObjectAccess(kInobject, AllocationSite::kTransitionInfoOffset);
}
+ static HObjectAccess ForAllocationSiteNestedSite() {
+ return HObjectAccess(kInobject, AllocationSite::kNestedSiteOffset);
+ }
+
static HObjectAccess ForAllocationSiteDependentCode() {
return HObjectAccess(kInobject, AllocationSite::kDependentCodeOffset);
}
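
The Unique<Object> that replaces UniqueValueId captures the raw address behind a handle once, while GC is disallowed, so later equality checks are plain pointer comparisons that never dereference the handle. A toy analogue of the idea (not V8's actual Unique<T> implementation):

    #include <assert.h>
    #include <stddef.h>

    // Toy stand-in: a "handle" is a pointer to a slot holding the object
    // pointer; Unique snapshots the object pointer for cheap comparison.
    template <typename T>
    class Unique {
     public:
      explicit Unique(T** handle)
          : raw_address_(handle != NULL ? *handle : NULL), handle_(handle) {}
      bool operator==(const Unique& other) const {
        return raw_address_ == other.raw_address_;  // No dereference needed.
      }
      T** handle() const { return handle_; }
     private:
      T* raw_address_;
      T** handle_;
    };

    int main() {
      int object = 42;
      int* slot_a = &object;  // Two distinct handles to the same object.
      int* slot_b = &object;
      assert(Unique<int>(&slot_a) == Unique<int>(&slot_b));
      return 0;
    }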
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 50882a8..0a7e9f0 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -649,8 +649,7 @@
HConstant* HGraph::GetConstant##Name() { \
if (!constant_##name##_.is_set()) { \
HConstant* constant = new(zone()) HConstant( \
- isolate()->factory()->name##_value(), \
- UniqueValueId::name##_value(isolate()->heap()), \
+ Unique<Object>::CreateImmovable(isolate()->factory()->name##_value()), \
Representation::Tagged(), \
htype, \
false, \
@@ -1039,7 +1038,7 @@
CompilationPhase phase("H_Block building", info_);
set_current_block(graph()->entry_block());
if (!BuildGraph()) return NULL;
- graph()->FinalizeUniqueValueIds();
+ graph()->FinalizeUniqueness();
return graph_;
}
@@ -1273,6 +1272,142 @@
}
+HValue* HGraphBuilder::BuildLookupNumberStringCache(
+ HValue* object,
+ HIfContinuation* continuation) {
+ // Create a joinable continuation.
+ HIfContinuation found(graph()->CreateBasicBlock(),
+ graph()->CreateBasicBlock());
+
+ // Load the number string cache.
+ HValue* number_string_cache =
+ Add<HLoadRoot>(Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ HValue* mask = AddLoadFixedArrayLength(number_string_cache);
+ mask->set_type(HType::Smi());
+ mask = Add<HSar>(mask, graph()->GetConstant1());
+ mask = Add<HSub>(mask, graph()->GetConstant1());
+
+ // Check whether object is a smi.
+ IfBuilder if_objectissmi(this);
+ if_objectissmi.If<HIsSmiAndBranch>(object);
+ if_objectissmi.Then();
+ {
+ // Compute hash for smi similar to smi_get_hash().
+ HValue* hash = Add<HBitwise>(Token::BIT_AND, object, mask);
+
+ // Load the key.
+ HValue* key_index = Add<HShl>(hash, graph()->GetConstant1());
+ HValue* key = AddFastElementAccess(number_string_cache, key_index,
+ NULL, NULL, FAST_ELEMENTS, false,
+ ALLOW_RETURN_HOLE, STANDARD_STORE);
+
+ // Check if object == key.
+ IfBuilder if_objectiskey(this);
+ if_objectiskey.If<HCompareObjectEqAndBranch>(key, object);
+ if_objectiskey.Then();
+ {
+ // Make the key_index available.
+ Push(key_index);
+ }
+ if_objectiskey.JoinContinuation(&found);
+ }
+ if_objectissmi.Else();
+ {
+ // Check if object is a heap number.
+ IfBuilder if_objectisnumber(this);
+ if_objectisnumber.If<HCompareMap>(
+ object, isolate()->factory()->heap_number_map());
+ if_objectisnumber.Then();
+ {
+ // Compute hash for heap number similar to double_get_hash().
+ HValue* low = Add<HLoadNamedField>(
+ object, HObjectAccess::ForHeapNumberValueLowestBits());
+ HValue* high = Add<HLoadNamedField>(
+ object, HObjectAccess::ForHeapNumberValueHighestBits());
+ HValue* hash = Add<HBitwise>(Token::BIT_XOR, low, high);
+ hash = Add<HBitwise>(Token::BIT_AND, hash, mask);
+
+ // Load the key.
+ HValue* key_index = Add<HShl>(hash, graph()->GetConstant1());
+ HValue* key = AddFastElementAccess(number_string_cache, key_index,
+ NULL, NULL, FAST_ELEMENTS, false,
+ ALLOW_RETURN_HOLE, STANDARD_STORE);
+
+ // Check if key is a heap number.
+ IfBuilder if_keyisnumber(this);
+ if_keyisnumber.IfNot<HIsSmiAndBranch>(key);
+ if_keyisnumber.AndIf<HCompareMap>(
+ key, isolate()->factory()->heap_number_map());
+ if_keyisnumber.Then();
+ {
+ // Check if values of key and object match.
+ IfBuilder if_keyeqobject(this);
+ if_keyeqobject.If<HCompareNumericAndBranch>(
+ Add<HLoadNamedField>(key, HObjectAccess::ForHeapNumberValue()),
+ Add<HLoadNamedField>(object, HObjectAccess::ForHeapNumberValue()),
+ Token::EQ);
+ if_keyeqobject.Then();
+ {
+ // Make the key_index available.
+ Push(key_index);
+ }
+ if_keyeqobject.JoinContinuation(&found);
+ }
+ if_keyisnumber.JoinContinuation(&found);
+ }
+ if_objectisnumber.JoinContinuation(&found);
+ }
+ if_objectissmi.End();
+
+ // Check for cache hit.
+ IfBuilder if_found(this, &found);
+ if_found.Then();
+
+ // Load the value in case of cache hit.
+ HValue* key_index = Pop();
+ HValue* value_index = Add<HAdd>(key_index, graph()->GetConstant1());
+ HValue* value = AddFastElementAccess(number_string_cache, value_index,
+ NULL, NULL, FAST_ELEMENTS, false,
+ ALLOW_RETURN_HOLE, STANDARD_STORE);
+ AddIncrementCounter(isolate()->counters()->number_to_string_native());
+
+ if_found.CaptureContinuation(continuation);
+
+ // The value is only available in true branch of continuation.
+ return value;
+}
+
+
+HValue* HGraphBuilder::BuildNumberToString(HValue* number) {
+ NoObservableSideEffectsScope scope(this);
+
+ // Lookup the number in the number string cache.
+ HIfContinuation continuation;
+ HValue* value = BuildLookupNumberStringCache(number, &continuation);
+ IfBuilder if_found(this, &continuation);
+ if_found.Then();
+
+ // Cache hit.
+ Push(value);
+
+ if_found.Else();
+
+ // Cache miss, fallback to runtime.
+ Add<HPushArgument>(number);
+ Push(Add<HCallRuntime>(
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kNumberToStringSkipCache),
+ 1));
+
+ if_found.End();
+
+ return Pop();
+}
+
+
HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HValue* checked_object,
HValue* key,
@@ -2157,12 +2292,12 @@
}
-void HGraph::FinalizeUniqueValueIds() {
+void HGraph::FinalizeUniqueness() {
DisallowHeapAllocation no_gc;
ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
for (int i = 0; i < blocks()->length(); ++i) {
for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
- it.Current()->FinalizeUniqueValueId();
+ it.Current()->FinalizeUniqueness();
}
}
}
@@ -4106,7 +4241,7 @@
int* max_properties) {
if (boilerplate->map()->is_deprecated()) {
Handle<Object> result = JSObject::TryMigrateInstance(boilerplate);
- if (result->IsSmi()) return false;
+ if (result.is_null()) return false;
}
ASSERT(max_depth >= 0 && *max_properties >= 0);
@@ -9009,12 +9144,10 @@
// Fast support for number to string.
void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitArgumentList(call->arguments()));
- HValue* context = environment()->context();
- HCallStub* result =
- new(zone()) HCallStub(context, CodeStub::NumberToString, 1);
- Drop(1);
- return ast_context()->ReturnInstruction(result, call->id());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* number = Pop();
+ HValue* result = BuildNumberToString(number);
+ return ast_context()->ReturnValue(result);
}
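
The builder above reproduces the runtime's number-string cache probe: hash the number, mask by the cache size, and compare the keyed entry before using the paired string. A plain C++ sketch of the heap-number path, with a simplified entry array standing in for the FixedArray cache (hash per the comments in the builder; the Entry type is an assumption of this sketch):

    #include <stdint.h>
    #include <string.h>
    #include <string>
    #include <vector>

    struct Entry { double number; std::string string; bool used; };

    // double_get_hash(): XOR the two 32-bit halves of the IEEE-754 bits.
    static uint32_t DoubleHash(double value) {
      uint64_t bits;
      memcpy(&bits, &value, sizeof(bits));
      return static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
    }

    // Returns true and fills *out on a cache hit, false on a miss.
    bool LookupNumberStringCache(const std::vector<Entry>& cache,
                                 double number, std::string* out) {
      uint32_t mask = static_cast<uint32_t>(cache.size()) - 1;  // Power of 2.
      const Entry& entry = cache[DoubleHash(number) & mask];
      if (!entry.used || entry.number != number) return false;  // Miss.
      *out = entry.string;  // Hit.
      return true;
    }

    int main() {
      std::vector<Entry> cache(64);  // Cache size must be a power of two.
      Entry e = { 2.5, "2.5", true };
      cache[DoubleHash(2.5) & 63] = e;
      std::string out;
      return LookupNumberStringCache(cache, 2.5, &out) ? 0 : 1;
    }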
diff --git a/src/hydrogen.h b/src/hydrogen.h
index a371fa5..6aa8217 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -316,7 +316,7 @@
HBasicBlock* entry_block() const { return entry_block_; }
HEnvironment* start_environment() const { return start_environment_; }
- void FinalizeUniqueValueIds();
+ void FinalizeUniqueness();
bool ProcessArgumentsObject();
void OrderBlocks();
void AssignDominators();
@@ -1237,6 +1237,15 @@
ElementsKind to_kind,
bool is_jsarray);
+ // Do lookup in the number string cache. If the object is not found
+ // in the cache, the false branch of the continuation is taken;
+ // otherwise the true branch is taken and the returned value contains
+ // the cache value for the object. The returned value must NOT be used
+ // on the false branch.
+ HValue* BuildLookupNumberStringCache(HValue* object,
+ HIfContinuation* continuation);
+ HValue* BuildNumberToString(HValue* number);
+
HInstruction* BuildUncheckedMonomorphicElementAccess(
HValue* checked_object,
HValue* key,
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index e5456da..c2e8431 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -1131,30 +1131,21 @@
void Assembler::test(Register reg, const Immediate& imm) {
- EnsureSpace ensure_space(this);
- // Only use test against byte for registers that have a byte
- // variant: eax, ebx, ecx, and edx.
- if (RelocInfo::IsNone(imm.rmode_) &&
- is_uint8(imm.x_) &&
- reg.is_byte_register()) {
- uint8_t imm8 = imm.x_;
- if (reg.is(eax)) {
- EMIT(0xA8);
- EMIT(imm8);
- } else {
- emit_arith_b(0xF6, 0xC0, reg, imm8);
- }
- } else {
- // This is not using emit_arith because test doesn't support
- // sign-extension of 8-bit operands.
- if (reg.is(eax)) {
- EMIT(0xA9);
- } else {
- EMIT(0xF7);
- EMIT(0xC0 | reg.code());
- }
- emit(imm);
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ test_b(reg, imm.x_);
+ return;
}
+
+ EnsureSpace ensure_space(this);
+ // This is not using emit_arith because test doesn't support
+ // sign-extension of 8-bit operands.
+ if (reg.is(eax)) {
+ EMIT(0xA9);
+ } else {
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
+ }
+ emit(imm);
}
@@ -1178,6 +1169,9 @@
test(op.reg(), imm);
return;
}
+ if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) {
+ return test_b(op, imm.x_);
+ }
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(eax, op);
@@ -1185,9 +1179,26 @@
}
+void Assembler::test_b(Register reg, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ // Only use test against byte for registers that have a byte
+ // variant: eax, ebx, ecx, and edx.
+ if (reg.is(eax)) {
+ EMIT(0xA8);
+ EMIT(imm8);
+ } else if (reg.is_byte_register()) {
+ emit_arith_b(0xF6, 0xC0, reg, imm8);
+ } else {
+ EMIT(0xF7);
+ EMIT(0xC0 | reg.code());
+ emit(imm8);
+ }
+}
+
+
void Assembler::test_b(const Operand& op, uint8_t imm8) {
- if (op.is_reg_only() && !op.reg().is_byte_register()) {
- test(op, Immediate(imm8));
+ if (op.is_reg_only()) {
+ test_b(op.reg(), imm8);
return;
}
EnsureSpace ensure_space(this);
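
The new test_b() overload selects among three IA-32 encodings: the one-byte TEST AL, imm8 form for eax, the TEST r/m8, imm8 form for the other registers with a low-byte variant (ecx, edx, ebx), and the full TEST r/m32, imm32 form otherwise. A sketch that emits the same byte sequences (opcodes per the Intel manual; the standalone emitter is illustrative):

    #include <stdint.h>
    #include <vector>

    enum Reg { EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI };

    // Emit "test reg, imm8" with the same case split as Assembler::test_b().
    std::vector<uint8_t> TestByte(Reg reg, uint8_t imm8) {
      std::vector<uint8_t> code;
      if (reg == EAX) {
        code.push_back(0xA8);        // TEST AL, imm8 (shortest form).
        code.push_back(imm8);
      } else if (reg <= EBX) {       // ecx/edx/ebx have byte registers.
        code.push_back(0xF6);        // TEST r/m8, imm8.
        code.push_back(0xC0 | reg);  // ModRM: register-direct, opcode /0.
        code.push_back(imm8);
      } else {                       // esp..edi: no low-byte register.
        code.push_back(0xF7);        // TEST r/m32, imm32.
        code.push_back(0xC0 | reg);
        code.push_back(imm8);        // Zero-extended imm32, little-endian.
        code.push_back(0x00);
        code.push_back(0x00);
        code.push_back(0x00);
      }
      return code;
    }

    int main() {
      std::vector<uint8_t> a = TestByte(EAX, 0x0F);  // A8 0F
      std::vector<uint8_t> b = TestByte(ESI, 0x0F);  // F7 C6 0F 00 00 00
      return (a[0] == 0xA8 && b[0] == 0xF7) ? 0 : 1;
    }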
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 736dd3b..7c3999b 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -853,7 +853,7 @@
void test(Register reg, const Operand& op);
void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
- void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
+ void test_b(Register reg, uint8_t imm8);
void test_b(const Operand& op, uint8_t imm8);
void xor_(Register dst, int32_t imm32);
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 5169627..91ff72c 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -630,25 +630,6 @@
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // TODO(kasperl): Do we need to save/restore the XMM registers too?
- // TODO(mvstanton): We should save these regs, do this in a future
- // checkin.
-
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ popad();
- __ ret(0);
-}
-
-
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
Factory* factory = masm->isolate()->factory();
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 6128633..83613cc 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -64,6 +64,16 @@
}
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -3767,21 +3777,6 @@
}
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ mov(ebx, Operand(esp, kPointerSize));
-
- // Generate code to lookup number in the number string cache.
- __ LookupNumberStringCache(ebx, eax, ecx, edx, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index f36cd61..dc536e7 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -217,18 +217,6 @@
};
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 9a2c3ce..5250f4b 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -3549,8 +3549,8 @@
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into eax and call the stub.
+ VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 98a049b..86ac466 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -416,6 +416,13 @@
x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
} else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
!instr->IsGap() && !instr->IsReturn()) {
+ if (instr->ClobbersDoubleRegisters()) {
+ if (instr->HasDoubleRegisterResult()) {
+ ASSERT_EQ(1, x87_stack_.depth());
+ } else {
+ ASSERT_EQ(0, x87_stack_.depth());
+ }
+ }
__ VerifyX87StackDepth(x87_stack_.depth());
}
}
@@ -561,6 +568,16 @@
}
+void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
+ ASSERT(x87_stack_.Contains(reg1));
+ ASSERT(x87_stack_.Contains(reg2));
+ x87_stack_.Fxch(reg1, 1);
+ x87_stack_.Fxch(reg2);
+ x87_stack_.pop();
+ x87_stack_.pop();
+}
+
+
void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
ASSERT(is_mutable_);
ASSERT(Contains(reg) && stack_depth_ > other_slot);
@@ -1365,11 +1382,6 @@
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -2572,10 +2584,7 @@
CpuFeatureScope scope(masm(), SSE2);
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
} else {
- X87Fxch(ToX87Register(right));
- X87Fxch(ToX87Register(left), 1);
- __ fld(0);
- __ fld(2);
+ X87LoadForUsage(ToX87Register(right), ToX87Register(left));
__ FCmp();
}
// Don't base result on EFLAGS when a NaN is involved. Instead
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index a2280f8..a813b3c 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -129,6 +129,7 @@
X87Register left, X87Register right, X87Register result);
void X87LoadForUsage(X87Register reg);
+ void X87LoadForUsage(X87Register reg1, X87Register reg2);
void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); }
void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); }
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index e528dd7..1b18140 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -924,23 +924,6 @@
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- RegList saved_regs =
- (kJSCallerSaved | kCalleeSaved | ra.bit() | fp.bit()) & ~sp.bit();
- __ MultiPush(saved_regs);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ MultiPop(saved_regs);
- __ Ret();
-}
-
-
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index ea104fb..5cdfecc 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -60,6 +60,16 @@
}
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -994,21 +1004,6 @@
}
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ lw(a1, MemOperand(sp, 0));
-
- // Generate code to lookup number in the number string cache.
- __ LookupNumberStringCache(a1, v0, a2, a3, t0, &runtime);
- __ DropAndRet(1);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
-}
-
-
static void ICCompareStub_CheckInputType(MacroAssembler* masm,
Register input,
Register scratch,
@@ -6131,9 +6126,16 @@
void DirectCEntryStub::Generate(MacroAssembler* masm) {
- // No need to pop or drop anything, LeaveExitFrame will restore the old
- // stack, thus dropping the allocated space for the return value.
- // The saved ra is after the reserved stack space for the 4 args.
+ // Make room for arguments to fit the C calling convention. Most callers
+ // of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame,
+ // so they handle stack restoring and we don't have to do that here.
+ // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
+ // kCArgsSlotsSize stack space after the call.
+ __ Subu(sp, sp, Operand(kCArgsSlotsSize));
+ // Place the return address on the stack, making the call
+ // GC safe. The RegExp backend also relies on this.
+ __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
+ __ Call(t9); // Call the C++ function.
__ lw(t9, MemOperand(sp, kCArgsSlotsSize));
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
@@ -6150,33 +6152,11 @@
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
- __ Move(t9, target);
- __ AssertStackIsAligned();
- // Allocate space for arg slots.
- __ Subu(sp, sp, kCArgsSlotsSize);
-
- // Block the trampoline pool through the whole function to make sure the
- // number of generated instructions is constant.
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
-
- // We need to get the current 'pc' value, which is not available on MIPS.
- Label find_ra;
- masm->bal(&find_ra); // ra = pc + 8.
- masm->nop(); // Branch delay slot nop.
- masm->bind(&find_ra);
-
- const int kNumInstructionsToJump = 6;
- masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
- // Push return address (accessible to GC through exit frame pc).
- // This spot for ra was reserved in EnterExitFrame.
- masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
intptr_t loc =
reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
- masm->li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
- // Call the function.
- masm->Jump(t9);
- // Make sure the stored 'ra' points to this position.
- ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
+ __ Move(t9, target);
+ __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
+ __ Call(ra);
}
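Note the new contract above: GenerateCall reserves the C argument slots itself
and does not restore sp, so each call site must drop kCArgsSlotsSize after the
call. A minimal sketch of a conforming MIPS call site (the target external
reference is illustrative, not part of this patch):

    // Call through DirectCEntryStub, then clean up the reserved argument slots.
    __ li(t9, Operand(some_external_function));  // hypothetical target
    DirectCEntryStub stub;
    stub.GenerateCall(masm, t9);
    __ Addu(sp, sp, Operand(kCArgsSlotsSize));   // caller drops the slots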
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index 627244c..10531a8 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -268,18 +268,6 @@
};
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
@@ -467,22 +455,6 @@
};
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM and MIPS.
-class RegExpCEntryStub: public PlatformCodeStub {
- public:
- RegExpCEntryStub() {}
- virtual ~RegExpCEntryStub() {}
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return RegExpCEntry; }
- int MinorKey() { return 0; }
-
- bool NeedsImmovableCode() { return true; }
-};
-
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
// keep the code which called into native pinned in the memory. Currently the
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 853ee08..f889470 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -3611,8 +3611,9 @@
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into a0 and call the stub.
+ VisitForAccumulatorValue(args->at(0));
+ __ mov(a0, result_register());
NumberToStringStub stub;
__ CallStub(&stub);
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 69a3c89..c869184 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -1063,11 +1063,6 @@
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 7c22ae8..a8ca620 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -1342,8 +1342,8 @@
LOperand* temp() { return temps_[0]; }
Smi* index() const { return index_; }
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
private:
Smi* index_;
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 159c924..cbb538f 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -3220,11 +3220,10 @@
Register dst,
Register length,
Register scratch) {
- Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+ Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
// Align src before copying in word size chunks.
- bind(&align_loop);
- Branch(&done, eq, length, Operand(zero_reg));
+ Branch(&byte_loop, le, length, Operand(kPointerSize));
bind(&align_loop_1);
And(scratch, src, kPointerSize - 1);
Branch(&word_loop, eq, scratch, Operand(zero_reg));
@@ -3233,7 +3232,7 @@
sb(scratch, MemOperand(dst));
Addu(dst, dst, 1);
Subu(length, length, Operand(1));
- Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+ Branch(&align_loop_1, ne, length, Operand(zero_reg));
// Copy bytes in word size chunks.
bind(&word_loop);
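The fixed prologue is easier to read as C. A sketch, assuming word_loop itself
falls back to byte_loop for short remainders:

    // Illustrative C equivalent of the alignment prologue above.
    if (length <= kPointerSize) goto byte_loop;  // too short to bother aligning
    while ((src & (kPointerSize - 1)) != 0) {    // align_loop_1
      *dst++ = *src++;                           // copy one byte
      if (--length == 0) break;
    }
    // word_loop: copy word-sized chunks, then byte_loop handles the tail.

The old code branched to byte_loop_1 after copying an alignment byte, so it
never returned to the word-copy path; branching back to align_loop_1 keeps
aligning src until the word loop can take over.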
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index 1a04fd1..49dec3c 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -1063,15 +1063,56 @@
// Private methods:
void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
- static const int num_arguments = 3;
- __ PrepareCallCFunction(num_arguments, scratch);
+ int stack_alignment = OS::ActivationFrameAlignment();
+
+ // Align the stack pointer and save the original sp value on the stack.
+ __ mov(scratch, sp);
+ __ Subu(sp, sp, Operand(kPointerSize));
+ ASSERT(IsPowerOf2(stack_alignment));
+ __ And(sp, sp, Operand(-stack_alignment));
+ __ sw(scratch, MemOperand(sp));
+
__ mov(a2, frame_pointer());
// Code* of self.
__ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
- // a0 becomes return address pointer.
+
+ // We need to make room for the return address on the stack.
+ ASSERT(IsAligned(stack_alignment, kPointerSize));
+ __ Subu(sp, sp, Operand(stack_alignment));
+
+ // The stack pointer now points to the cell where the return address will be
+ // written. Arguments are in registers, meaning we treat the return address
+ // as argument 5. Since DirectCEntryStub handles allocating space for the C
+ // argument slots, we don't need to care about that here. This is how the
+ // stack will look (sp meaning the value of sp at this moment):
+ // [sp + 3] - empty slot if needed for alignment.
+ // [sp + 2] - saved sp.
+ // [sp + 1] - second word reserved for the return address (padding).
+ // [sp + 0] - first word reserved for the return address.
+
+ // a0 will point to the return address, placed by DirectCEntry.
+ __ mov(a0, sp);
+
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state(masm_->isolate());
- CallCFunctionUsingStub(stack_guard_check, num_arguments);
+ __ li(t9, Operand(stack_guard_check));
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm_, t9);
+
+ // DirectCEntryStub allocated space for the C argument slots, so we have to
+ // drop them, together with the return address, by reloading the saved sp.
+ // At this point the stack must look like this:
+ // [sp + 7] - empty slot if needed for alignment.
+ // [sp + 6] - saved sp.
+ // [sp + 5] - second word reserved for the return address (padding).
+ // [sp + 4] - first word reserved for the return address.
+ // [sp + 3] - C argument slot.
+ // [sp + 2] - C argument slot.
+ // [sp + 1] - C argument slot.
+ // [sp + 0] - C argument slot.
+ __ lw(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize));
+
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
}
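For orientation, the C++ handler reached through DirectCEntry here has the
following shape (pre-existing code, body elided); the a0/a1/a2 moves above
line up with its three parameters:

    // a0: pointer to the stored return address, which the handler may rewrite
    //     if a GC moves the regexp code object; a1: Code* of self; a2: frame.
    int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
                                                       Code* re_code,
                                                       Address re_frame);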
@@ -1276,21 +1317,6 @@
}
-void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
- ExternalReference function,
- int num_arguments) {
- // Must pass all arguments in registers. The stub pushes on the stack.
- ASSERT(num_arguments <= 4);
- __ li(code_pointer(), Operand(function));
- RegExpCEntryStub stub;
- __ CallStub(&stub);
- if (OS::ActivationFrameAlignment() != 0) {
- __ lw(sp, MemOperand(sp, 16));
- }
- __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
-}
-
-
void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
@@ -1312,23 +1338,6 @@
}
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
- int stack_alignment = OS::ActivationFrameAlignment();
- if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
- // Stack is already aligned for call, so decrement by alignment
- // to make room for storing the return address.
- __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
- const int return_address_offset = kCArgsSlotsSize;
- __ Addu(a0, sp, return_address_offset);
- __ sw(ra, MemOperand(a0, 0));
- __ mov(t9, t1);
- __ Call(t9);
- __ lw(ra, MemOperand(sp, return_address_offset));
- __ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
- __ Jump(ra);
-}
-
-
#undef __
#endif // V8_INTERPRETED_REGEXP
diff --git a/src/mips/regexp-macro-assembler-mips.h b/src/mips/regexp-macro-assembler-mips.h
index 86ae4d4..063582c 100644
--- a/src/mips/regexp-macro-assembler-mips.h
+++ b/src/mips/regexp-macro-assembler-mips.h
@@ -217,14 +217,6 @@
// and increments it by a word size.
inline void Pop(Register target);
- // Calls a C function and cleans up the frame alignment done by
- // by FrameAlign. The called function *is* allowed to trigger a garbage
- // collection, but may not take more than four arguments (no arguments
- // passed on the stack), and the first argument will be a pointer to the
- // return address.
- inline void CallCFunctionUsingStub(ExternalReference function,
- int num_arguments);
-
Isolate* isolate() const { return masm_->isolate(); }
MacroAssembler* masm_;
diff --git a/src/objects-inl.h b/src/objects-inl.h
index dcce931..8c20c7a 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -285,14 +285,13 @@
MaybeObject* Object::AllocateNewStorageFor(Heap* heap,
- Representation representation,
- PretenureFlag tenure) {
+ Representation representation) {
if (!FLAG_track_double_fields) return this;
if (!representation.IsDouble()) return this;
if (IsUninitialized()) {
- return heap->AllocateHeapNumber(0, tenure);
+ return heap->AllocateHeapNumber(0);
}
- return heap->AllocateHeapNumber(Number(), tenure);
+ return heap->AllocateHeapNumber(Number());
}
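Call sites move from the raw MaybeObject* protocol to the handlified helper;
the resulting pattern (as it appears in the MigrateToMap change below) looks
like this:

    // Allocation may trigger GC, so both input and result live in handles.
    Handle<Object> value(raw_value, isolate);
    value = NewStorageFor(isolate, value, details.representation());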
@@ -1325,6 +1324,7 @@
void AllocationSite::Initialize() {
SetElementsKind(GetInitialFastElementsKind());
+ set_nested_site(Smi::FromInt(0));
set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
SKIP_WRITE_BARRIER);
}
@@ -1542,19 +1542,6 @@
}
-MaybeObject* JSObject::TryMigrateInstance() {
- Map* new_map = map()->CurrentMapForDeprecated();
- if (new_map == NULL) return Smi::FromInt(0);
- Map* original_map = map();
- MaybeObject* maybe_result = MigrateToMap(new_map);
- JSObject* result;
- if (FLAG_trace_migration && maybe_result->To(&result)) {
- PrintInstanceMigration(stdout, original_map, result->map());
- }
- return maybe_result;
-}
-
-
Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) {
DisallowHeapAllocation no_gc;
if (!map->HasTransitionArray()) return Handle<String>::null();
@@ -4456,6 +4443,7 @@
ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
ACCESSORS(AllocationSite, transition_info, Object, kTransitionInfoOffset)
+ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
ACCESSORS(AllocationSite, dependent_code, DependentCode,
kDependentCodeOffset)
ACCESSORS(AllocationSite, weak_next, Object, kWeakNextOffset)
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 26a7098..f4b4a4f 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -1102,7 +1102,8 @@
weak_next()->ShortPrint(out);
PrintF(out, "\n - dependent code: ");
dependent_code()->ShortPrint(out);
-
+ PrintF(out, "\n - nested site: ");
+ nested_site()->ShortPrint(out);
PrintF(out, "\n - transition_info: ");
if (transition_info()->IsCell()) {
Cell* cell = Cell::cast(transition_info());
diff --git a/src/objects.cc b/src/objects.cc
index 67f26f0..169307f 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1005,8 +1005,11 @@
if (IsNumber() && other->IsNumber()) {
double this_value = Number();
double other_value = other->Number();
- return (this_value == other_value) ||
- (std::isnan(this_value) && std::isnan(other_value));
+ bool equal = this_value == other_value;
+ // SameValue(NaN, NaN) is true.
+ if (!equal) return std::isnan(this_value) && std::isnan(other_value);
+ // SameValue(0.0, -0.0) is false.
+ return (this_value != 0) || ((1 / this_value) == (1 / other_value));
}
if (IsString() && other->IsString()) {
return String::cast(this)->Equals(String::cast(other));
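The rewritten number case distinguishes signed zeros via division: 1/0.0 is
+Infinity while 1/-0.0 is -Infinity. The same logic, extracted into a
standalone sketch:

    #include <cmath>

    // ES5 9.12 SameValue on doubles: NaN is the same value as NaN,
    // but +0 is not the same value as -0.
    static bool NumberSameValue(double a, double b) {
      if (a != b) return std::isnan(a) && std::isnan(b);
      return (a != 0) || ((1 / a) == (1 / b));  // separates +0 from -0
    }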
@@ -2270,11 +2273,6 @@
}
-void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
- CALL_HEAP_FUNCTION_VOID(object->GetIsolate(), object->MigrateToMap(*new_map));
-}
-
-
// To migrate an instance to a map:
// - First check whether the instance needs to be rewritten. If not, simply
// change the map.
@@ -2290,28 +2288,27 @@
// to temporarily store the inobject properties.
// * If there are properties left in the backing store, install the backing
// store.
-MaybeObject* JSObject::MigrateToMap(Map* new_map) {
- Heap* heap = GetHeap();
- Map* old_map = map();
+void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
+ Isolate* isolate = object->GetIsolate();
+ Handle<Map> old_map(object->map());
int number_of_fields = new_map->NumberOfFields();
int inobject = new_map->inobject_properties();
int unused = new_map->unused_property_fields();
- // Nothing to do if no functions were converted to fields.
+ // Nothing to do if no functions were converted to fields and no smis were
+ // converted to doubles.
if (!old_map->InstancesNeedRewriting(
- new_map, number_of_fields, inobject, unused)) {
- set_map(new_map);
- return this;
+ *new_map, number_of_fields, inobject, unused)) {
+ object->set_map(*new_map);
+ return;
}
int total_size = number_of_fields + unused;
int external = total_size - inobject;
- FixedArray* array;
- MaybeObject* maybe_array = heap->AllocateFixedArray(total_size);
- if (!maybe_array->To(&array)) return maybe_array;
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(total_size);
- DescriptorArray* old_descriptors = old_map->instance_descriptors();
- DescriptorArray* new_descriptors = new_map->instance_descriptors();
+ Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
+ Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors());
int descriptors = new_map->NumberOfOwnDescriptors();
for (int i = 0; i < descriptors; i++) {
@@ -2324,55 +2321,51 @@
}
ASSERT(old_details.type() == CONSTANT ||
old_details.type() == FIELD);
- Object* value = old_details.type() == CONSTANT
+ Object* raw_value = old_details.type() == CONSTANT
? old_descriptors->GetValue(i)
- : RawFastPropertyAt(old_descriptors->GetFieldIndex(i));
+ : object->RawFastPropertyAt(old_descriptors->GetFieldIndex(i));
+ Handle<Object> value(raw_value, isolate);
if (FLAG_track_double_fields &&
!old_details.representation().IsDouble() &&
details.representation().IsDouble()) {
- if (old_details.representation().IsNone()) value = Smi::FromInt(0);
- // Objects must be allocated in the old object space, since the
- // overall number of HeapNumbers needed for the conversion might
- // exceed the capacity of new space, and we would fail repeatedly
- // trying to migrate the instance.
- MaybeObject* maybe_storage =
- value->AllocateNewStorageFor(heap, details.representation(), TENURED);
- if (!maybe_storage->To(&value)) return maybe_storage;
+ if (old_details.representation().IsNone()) {
+ value = handle(Smi::FromInt(0), isolate);
+ }
+ value = NewStorageFor(isolate, value, details.representation());
}
ASSERT(!(FLAG_track_double_fields &&
details.representation().IsDouble() &&
value->IsSmi()));
int target_index = new_descriptors->GetFieldIndex(i) - inobject;
if (target_index < 0) target_index += total_size;
- array->set(target_index, value);
+ array->set(target_index, *value);
}
- // From here on we cannot fail anymore.
+ // From here on we cannot fail and we shouldn't GC anymore.
+ DisallowHeapAllocation no_allocation;
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
// avoid overwriting |one_pointer_filler_map|.
int limit = Min(inobject, number_of_fields);
for (int i = 0; i < limit; i++) {
- FastPropertyAtPut(i, array->get(external + i));
+ object->FastPropertyAtPut(i, array->get(external + i));
}
// Create filler object past the new instance size.
int new_instance_size = new_map->instance_size();
int instance_size_delta = old_map->instance_size() - new_instance_size;
ASSERT(instance_size_delta >= 0);
- Address address = this->address() + new_instance_size;
- heap->CreateFillerObjectAt(address, instance_size_delta);
+ Address address = object->address() + new_instance_size;
+ isolate->heap()->CreateFillerObjectAt(address, instance_size_delta);
// If there are properties in the new backing store, trim it to the correct
// size and install the backing store into the object.
if (external > 0) {
- RightTrimFixedArray<FROM_MUTATOR>(heap, array, inobject);
- set_properties(array);
+ RightTrimFixedArray<FROM_MUTATOR>(isolate->heap(), *array, inobject);
+ object->set_properties(*array);
}
- set_map(new_map);
-
- return this;
+ object->set_map(*new_map);
}
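The handlified MigrateToMap follows the allocate-first, mutate-later idiom;
schematically:

    // Perform every allocation while the object is still in a consistent
    // state, then forbid GC for the in-place mutation phase.
    Handle<FixedArray> array = isolate->factory()->NewFixedArray(total_size);
    // ... fill |array|, possibly allocating HeapNumbers ...
    DisallowHeapAllocation no_allocation;
    // ... write fields, trim the backing store, swap the map ...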
@@ -3758,7 +3751,13 @@
Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) {
- MigrateInstance(object);
+ Map* new_map = object->map()->CurrentMapForDeprecated();
+ if (new_map == NULL) return Handle<Object>();
+ Handle<Map> original_map(object->map());
+ JSObject::MigrateToMap(object, handle(new_map));
+ if (FLAG_trace_migration) {
+ object->PrintInstanceMigration(stdout, *original_map, object->map());
+ }
return object;
}
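Callers now detect the "no migration target yet" case through an empty handle
instead of the old Smi sentinel; a hypothetical call site:

    // An empty result means CurrentMapForDeprecated() found no target map.
    Handle<Object> result = JSObject::TryMigrateInstance(object);
    if (result.is_null()) {
      // No up-to-date map exists yet; leave the instance untouched.
    }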
diff --git a/src/objects.h b/src/objects.h
index c894915..437e61a 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1440,8 +1440,7 @@
}
inline MaybeObject* AllocateNewStorageFor(Heap* heap,
- Representation representation,
- PretenureFlag tenure = NOT_TENURED);
+ Representation representation);
// Returns true if the object is of the correct type to be used as a
// implementation of a JSObject's elements.
@@ -2173,10 +2172,13 @@
// passed map. This also extends the property backing store if necessary.
static void AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map);
+ // Migrates the given object to a map whose field representations are the
+ // lowest upper bound of all known representations for that field.
static void MigrateInstance(Handle<JSObject> instance);
+ // Migrates the given object only if the target map is already available,
+ // or returns an empty handle if such a map is not yet available.
static Handle<Object> TryMigrateInstance(Handle<JSObject> instance);
- inline MUST_USE_RESULT MaybeObject* TryMigrateInstance();
// Can cause GC.
MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributesTrampoline(
@@ -2470,8 +2472,8 @@
MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
MUST_USE_RESULT MaybeObject* UpdateAllocationSite(ElementsKind to_kind);
+ // TODO(mstarzinger): Both public because of ConvertAndSetLocalProperty().
static void MigrateToMap(Handle<JSObject> object, Handle<Map> new_map);
- MUST_USE_RESULT MaybeObject* MigrateToMap(Map* new_map);
static void GeneralizeFieldRepresentation(Handle<JSObject> object,
int modify_index,
Representation new_representation,
@@ -7825,6 +7827,10 @@
static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
DECL_ACCESSORS(transition_info, Object)
+ // nested_site threads a list of sites that represent nested literals
+ // walked in a particular order. So [[1, 2], 1, 2] will have one
+ // nested_site, but [[1, 2], 3, [4]] will have a list of two.
+ DECL_ACCESSORS(nested_site, Object)
DECL_ACCESSORS(dependent_code, DependentCode)
DECL_ACCESSORS(weak_next, Object)
@@ -7856,7 +7862,8 @@
static inline bool CanTrack(InstanceType type);
static const int kTransitionInfoOffset = HeapObject::kHeaderSize;
- static const int kDependentCodeOffset = kTransitionInfoOffset + kPointerSize;
+ static const int kNestedSiteOffset = kTransitionInfoOffset + kPointerSize;
+ static const int kDependentCodeOffset = kNestedSiteOffset + kPointerSize;
static const int kWeakNextOffset = kDependentCodeOffset + kPointerSize;
static const int kSize = kWeakNextOffset + kPointerSize;
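With nested_site spliced in ahead of dependent_code, the offsets work out as
follows on a hypothetical 32-bit build (kPointerSize == 4, the header is the
map word):

    // kTransitionInfoOffset = 4   (HeapObject::kHeaderSize)
    // kNestedSiteOffset     = 8
    // kDependentCodeOffset  = 12
    // kWeakNextOffset       = 16
    // kSize                 = 20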
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 5903438..0076d56 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -41,7 +41,6 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "simulator.h"
#include "v8threads.h"
@@ -88,11 +87,6 @@
}
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 518ad31..103fd6c 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -43,7 +43,6 @@
#include <sys/fcntl.h> // open
#include <unistd.h> // getpagesize
// If you don't have execinfo.h then you need devel/libexecinfo from ports.
-#include <execinfo.h> // backtrace, backtrace_symbols
#include <strings.h> // index
#include <errno.h>
#include <stdarg.h>
@@ -54,7 +53,6 @@
#include "v8.h"
#include "v8threads.h"
-#include "platform-posix.h"
#include "platform.h"
#include "vm-state-inl.h"
@@ -97,11 +95,6 @@
}
-void OS::DumpBacktrace() {
- POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 74d473f..eb2d10b 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -38,11 +38,6 @@
#include <sys/types.h>
#include <stdlib.h>
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
-#include <execinfo.h>
-#include <cxxabi.h>
-#endif
-
// Ubuntu Dapper requires memory pages to be marked as
// executable. Otherwise, OS raises an exception when executing code
// in that page.
@@ -66,7 +61,6 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -154,14 +148,6 @@
}
-void OS::DumpBacktrace() {
- // backtrace is a glibc extension.
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
- POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
-#endif
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index a58bc1a..5ffc3fc 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -53,27 +53,15 @@
#include <stdlib.h>
#include <string.h>
#include <errno.h>
-#include <cxxabi.h>
#undef MAP_TYPE
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "simulator.h"
#include "vm-state-inl.h"
-// Manually define these here as weak imports, rather than including execinfo.h.
-// This lets us launch on 10.4 which does not have these calls.
-extern "C" {
- extern int backtrace(void**, int) __attribute__((weak_import));
- extern char** backtrace_symbols(void* const*, int)
- __attribute__((weak_import));
- extern void backtrace_symbols_fd(void* const*, int, int)
- __attribute__((weak_import));
-}
-
namespace v8 {
namespace internal {
@@ -107,14 +95,6 @@
}
-void OS::DumpBacktrace() {
- // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
- if (backtrace == NULL) return;
-
- POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace();
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 4f5420e..710c390 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -42,7 +42,6 @@
#include <sys/stat.h> // open
#include <fcntl.h> // open
#include <unistd.h> // sysconf
-#include <execinfo.h> // backtrace, backtrace_symbols
#include <strings.h> // index
#include <errno.h>
#include <stdarg.h>
@@ -51,7 +50,6 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -96,11 +94,6 @@
}
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index fe27eaf..df15ee2 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -29,8 +29,6 @@
// own but contains the parts which are the same across POSIX platforms Linux,
// Mac OS, FreeBSD and OpenBSD.
-#include "platform-posix.h"
-
#include <dlfcn.h>
#include <pthread.h>
#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
diff --git a/src/platform-posix.h b/src/platform-posix.h
deleted file mode 100644
index e0fbc0c..0000000
--- a/src/platform-posix.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PLATFORM_POSIX_H_
-#define V8_PLATFORM_POSIX_H_
-
-#if !defined(ANDROID)
-#include <cxxabi.h>
-#endif
-#include <stdio.h>
-
-#include "platform.h"
-
-namespace v8 {
-namespace internal {
-
-// Used by platform implementation files during OS::DumpBacktrace()
-template<int (*backtrace)(void**, int),
- char** (*backtrace_symbols)(void* const*, int)>
-struct POSIXBacktraceHelper {
- static void DumpBacktrace() {
- void* trace[100];
- int size = backtrace(trace, ARRAY_SIZE(trace));
- char** symbols = backtrace_symbols(trace, size);
- fprintf(stderr, "\n==== C stack trace ===============================\n\n");
- if (size == 0) {
- fprintf(stderr, "(empty)\n");
- } else if (symbols == NULL) {
- fprintf(stderr, "(no symbols)\n");
- } else {
- for (int i = 1; i < size; ++i) {
- fprintf(stderr, "%2d: ", i);
- char mangled[201];
- if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {// NOLINT
- char* demangled = NULL;
-#if !defined(ANDROID)
- int status;
- size_t length;
- demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
-#endif
- fprintf(stderr, "%s\n", demangled != NULL ? demangled : mangled);
- free(demangled);
- } else {
- fprintf(stderr, "??\n");
- }
- }
- }
- fflush(stderr);
- free(symbols);
- }
-};
-
-} } // namespace v8::internal
-
-#endif // V8_PLATFORM_POSIX_H_
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index df81c3a..a0590cb 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -51,7 +51,6 @@
#include "v8.h"
-#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -112,11 +111,6 @@
}
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 073b21a..7b26dfb 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -891,11 +891,6 @@
}
-void OS::DumpBacktrace() {
- // Currently unsupported.
-}
-
-
class Win32MemoryMappedFile : public OS::MemoryMappedFile {
public:
Win32MemoryMappedFile(HANDLE file,
diff --git a/src/platform.h b/src/platform.h
index e0e62fa..527de16 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -256,9 +256,6 @@
// Debug break.
static void DebugBreak();
- // Dump C++ current stack trace (only functional on Linux).
- static void DumpBacktrace();
-
// Walk the stack.
static const int kStackWalkError = -1;
static const int kStackWalkMaxNameLen = 256;
diff --git a/src/runtime.cc b/src/runtime.cc
index dbc8f11..30763c1 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -933,17 +933,24 @@
HandleVector<Object>(NULL, 0)));
}
+ // NOTE: not initializing backing store.
// We assume that the caller of this function will initialize holder
// with the loop
// for(i = 0; i < length; i++) { holder[i] = source[i]; }
+ // We assume that the caller of this function is always a typed array
+ // constructor.
// If source is a typed array, this loop will always run to completion,
// so we are sure that the backing store will be initialized.
- // Otherwise, we do not know (the indexing operation might throw).
- // Hence we require zero initialization unless our source is a typed array.
- bool should_zero_initialize = !source->IsJSTypedArray();
+ // Otherwise, the indexing operation might throw, so the loop will not
+ // run to completion and the typed array might remain partly initialized.
+ // However, since the caller is assumed to be a typed array constructor, the
+ // exception will propagate out of the constructor and the uninitialized
+ // memory will not be accessible to a user program.
+ //
+ // TODO(dslomov): revise this once we support subclassing.
if (!Runtime::SetupArrayBufferAllocatingData(
- isolate, buffer, byte_length, should_zero_initialize)) {
+ isolate, buffer, byte_length, false)) {
return isolate->Throw(*isolate->factory()->
NewRangeError("invalid_array_buffer_length",
HandleVector<Object>(NULL, 0)));
@@ -8436,14 +8443,6 @@
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyOSR) {
- SealHandleScope shs(isolate);
- Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
- delete deoptimizer;
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
diff --git a/src/runtime.h b/src/runtime.h
index 64fade4..1b68863 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -91,7 +91,6 @@
F(TryInstallRecompiledCode, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
F(NotifyStubFailure, 0, 1) \
- F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
F(RunningInSimulator, 0, 1) \
diff --git a/src/typedarray.js b/src/typedarray.js
index 7bd16f6..da12ccf 100644
--- a/src/typedarray.js
+++ b/src/typedarray.js
@@ -76,9 +76,11 @@
function ConstructByArrayLike(obj, arrayLike) {
var length = arrayLike.length;
- var l = ToPositiveInteger(length, "invalid_typed_array_length");
+ var l = ToPositiveInteger(length, "invalid_typed_array_length");
if(!%TypedArrayInitializeFromArrayLike(obj, arrayId, arrayLike, l)) {
for (var i = 0; i < l; i++) {
+ // It is crucial that we let any exceptions from arrayLike[i]
+ // propagate outside the function.
obj[i] = arrayLike[i];
}
}
diff --git a/src/unique.h b/src/unique.h
index 6816654..fdb7016 100644
--- a/src/unique.h
+++ b/src/unique.h
@@ -54,7 +54,7 @@
template <typename T>
class Unique V8_FINAL {
public:
- // TODO(titzer): make private and introduce a factory.
+ // TODO(titzer): make private and introduce a uniqueness scope.
explicit Unique(Handle<T> handle) {
if (handle.is_null()) {
raw_address_ = NULL;
@@ -111,8 +111,11 @@
return raw_address_ == NULL;
}
- // Extract the handle from this Unique in order to dereference it.
- // WARNING: Only do this if you have access to the heap.
+ inline bool IsKnownGlobal(void* global) const {
+ ASSERT(IsInitialized());
+ return raw_address_ == reinterpret_cast<Address>(global);
+ }
+
inline Handle<T> handle() const {
return handle_;
}
@@ -123,7 +126,11 @@
// TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
static Unique<T> CreateUninitialized(Handle<T> handle) {
- return Unique<T>(static_cast<Address>(NULL), handle);
+ return Unique<T>(reinterpret_cast<Address>(NULL), handle);
+ }
+
+ static Unique<T> CreateImmovable(Handle<T> handle) {
+ return Unique<T>(reinterpret_cast<Address>(*handle), handle);
}
friend class UniqueSet<T>; // Uses internal details for speed.
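CreateImmovable bakes the object's current raw address into the Unique without
any uniqueness scope, which is only sound for objects the GC never moves. A
hedged usage sketch (the choice of root object is illustrative):

    // Immovable roots have stable addresses, so they work as identity keys.
    Unique<Object> u = Unique<Object>::CreateImmovable(root_handle);
    if (u.IsKnownGlobal(isolate->heap()->the_hole_value())) {
      // Recognized the sentinel without dereferencing any handle.
    }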
@@ -171,9 +178,10 @@
return true;
}
+ // Check whether this set contains the given element. O(|this|)
+ // TODO(titzer): use binary search for large sets to make this O(log|this|)
template <typename U>
bool Contains(Unique<U> elem) const {
- // TODO(titzer): use binary search for larger sets.
for (int i = 0; i < size_; i++) {
if (this->array_[i] == elem) return true;
}
diff --git a/src/utils/random-number-generator.cc b/src/utils/random-number-generator.cc
index 1e03ee2..3f657fb 100644
--- a/src/utils/random-number-generator.cc
+++ b/src/utils/random-number-generator.cc
@@ -28,6 +28,7 @@
#include "utils/random-number-generator.h"
#include <cstdio>
+#include <cstdlib>
#include "flags.h"
#include "platform/mutex.h"
@@ -67,6 +68,16 @@
}
}
+#if V8_OS_CYGWIN || V8_OS_WIN
+ // Use rand_s() to gather entropy on Windows. See:
+ // https://code.google.com/p/v8/issues/detail?id=2905
+ unsigned first_half, second_half;
+ errno_t result = rand_s(&first_half);
+ ASSERT_EQ(0, result);
+ result = rand_s(&second_half);
+ ASSERT_EQ(0, result);
+ SetSeed((static_cast<int64_t>(first_half) << 32) + second_half);
+#else
// Gather entropy from /dev/urandom if available.
FILE* fp = fopen("/dev/urandom", "rb");
if (fp != NULL) {
@@ -82,10 +93,16 @@
// We cannot assume that random() or rand() were seeded
// properly, so instead of relying on random() or rand(),
// we just seed our PRNG using timing data as fallback.
+ // This is weak entropy, but it's sufficient, because
+ // it is the responsibility of the embedder to install
+ // an entropy source using v8::V8::SetEntropySource(),
+ // which provides reasonable entropy, see:
+ // https://code.google.com/p/v8/issues/detail?id=2905
int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24;
seed ^= TimeTicks::HighResNow().ToInternalValue() << 16;
seed ^= TimeTicks::Now().ToInternalValue() << 8;
SetSeed(seed);
+#endif // V8_OS_CYGWIN || V8_OS_WIN
}
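Since the timing-based fallback seed is intentionally weak, embedders that need
real randomness are expected to plug in their own source. A minimal sketch of
the public hook referenced in the comment (the body is an assumption; supply a
platform CSPRNG):

    // Fills |buffer| with |length| random bytes; returns false on failure.
    static bool MyEntropySource(unsigned char* buffer, size_t length) {
      // ... read from the platform CSPRNG here ...
      return true;
    }
    // Install before initializing V8:
    //   v8::V8::SetEntropySource(MyEntropySource);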
diff --git a/src/utils/random-number-generator.h b/src/utils/random-number-generator.h
index bd7dca7..cc7d739 100644
--- a/src/utils/random-number-generator.h
+++ b/src/utils/random-number-generator.h
@@ -42,6 +42,10 @@
// If two instances of RandomNumberGenerator are created with the same seed, and
// the same sequence of method calls is made for each, they will generate and
// return identical sequences of numbers.
+// This class uses (probably) weak entropy by default, but it's sufficient,
+// because it is the responsibility of the embedder to install an entropy source
+// using v8::V8::SetEntropySource(), which provides reasonable entropy, see:
+// https://code.google.com/p/v8/issues/detail?id=2905
// This class is neither reentrant nor threadsafe.
class RandomNumberGenerator V8_FINAL {
diff --git a/src/version.cc b/src/version.cc
index 02f61e4..02ffa65 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 22
-#define BUILD_NUMBER 0
-#define PATCH_LEVEL 1
+#define BUILD_NUMBER 1
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 41bf297..b6a28fc 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -2058,6 +2058,10 @@
void Assembler::testq(Register dst, Immediate mask) {
+ if (is_uint8(mask.value_)) {
+ testb(dst, mask);
+ return;
+ }
EnsureSpace ensure_space(this);
if (dst.is(rax)) {
emit_rex_64();
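The byte-immediate fast path is sound as long as consumers only branch on the
zero flag (testb derives SF from bit 7 of the result, testq from bit 63, so
sign-flag users would differ). The invariant the peephole relies on:

    #include <stdint.h>

    // For a mask that fits in eight bits, (reg & mask) == 0 depends only on
    // the low byte of reg, so testb yields the same ZF as testq.
    static bool SameZeroFlag(uint64_t reg, uint8_t mask) {
      return ((reg & mask) == 0) == (((reg & 0xFF) & mask) == 0);  // always true
    }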
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index a6dc003..947f687 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -694,21 +694,6 @@
}
-void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- // For now, we are relying on the fact that Runtime::NotifyOSR
- // doesn't do any garbage collection which allows us to save/restore
- // the registers without worrying about which of them contain
- // pointers. This seems a bit fragile.
- __ Pushad();
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kNotifyOSR, 0);
- }
- __ Popad();
- __ ret(0);
-}
-
-
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Stack Layout:
// rsp[0] : Return address
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 8f27374..1896c00 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -60,6 +60,16 @@
}
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -2909,22 +2919,6 @@
}
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movq(rbx, args.GetArgumentOperand(0));
-
- // Generate code to lookup number in the number string cache.
- __ LookupNumberStringCache(rbx, rax, r8, r9, &runtime);
- __ ret(1 * kPointerSize);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index c3eac81..c76abcf 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -208,18 +208,6 @@
};
-class NumberToStringStub: public PlatformCodeStub {
- public:
- NumberToStringStub() { }
-
- private:
- Major MajorKey() { return NumberToString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index f9d1ffa..55feb4c 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -3507,8 +3507,8 @@
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 1);
- // Load the argument on the stack and call the stub.
- VisitForStackValue(args->at(0));
+ // Load the argument into rax and call the stub.
+ VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
__ CallStub(&stub);
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 83a8cb2..27d064d 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -973,11 +973,6 @@
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::NumberToString: {
- NumberToStringStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- break;
- }
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 9f45f97..0ccbeb8 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -1297,8 +1297,8 @@
LOperand* date() { return inputs_[0]; }
Smi* index() const { return index_; }
- DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+ DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+ DECLARE_HYDROGEN_ACCESSOR(DateField)
private:
Smi* index_;